From 80099d9a2ff1685ecc08b6f71bcba777ca51ca1d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 23 Aug 2024 10:57:03 +0300
Subject: [PATCH 001/655] [#1328] pilorama: Add tricky test for
SortedByFilename
Signed-off-by: Evgenii Stratonikov
---
.../pilorama/forest_test.go | 62 +++++++++++++++++++
1 file changed, 62 insertions(+)
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 001d095c8..c6c6e8c8b 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1,11 +1,13 @@
package pilorama
import (
+ "bytes"
"context"
"crypto/rand"
"fmt"
mrand "math/rand"
"path/filepath"
+ "slices"
"strconv"
"strings"
"sync"
@@ -232,6 +234,66 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
}
+// The issue which we call "BugWithSkip" is easiest to understand when filenames are
+// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved.
+// The bug happens when we switch between length during listing.
+// Thus this test contains numbers from 1 to 1000 and batch size of size 100.
+func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
+ t.Skip()
+ for i := range providers {
+ t.Run(providers[i].name, func(t *testing.T) {
+ testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
+ })
+ }
+}
+
+func testForestTreeSortedIterationBugWithSkip(t *testing.T, s ForestStorage) {
+ defer func() { require.NoError(t, s.Close()) }()
+
+ cid := cidtest.ID()
+ d := CIDDescriptor{cid, 0, 1}
+ treeID := "version"
+ treeAdd := func(t *testing.T, ts int, filename string) {
+ _, err := s.TreeMove(context.Background(), d, treeID, &Move{
+ Child: RootID + uint64(ts),
+ Parent: RootID,
+ Meta: Meta{
+ Time: Timestamp(ts),
+ Items: []KeyValue{
+ {Key: AttributeFilename, Value: []byte(filename)},
+ },
+ },
+ })
+ require.NoError(t, err)
+ }
+
+ const count = 2000
+ treeAdd(t, 1, "")
+ for i := 1; i < count; i++ {
+ treeAdd(t, i+1, strconv.Itoa(i+1))
+ }
+
+ var result []MultiNodeInfo
+ treeAppend := func(t *testing.T, last *string, count int) *string {
+ res, cursor, err := s.TreeSortedByFilename(context.Background(), d.CID, treeID, MultiNode{RootID}, last, count)
+ require.NoError(t, err)
+ result = append(result, res...)
+ return cursor
+ }
+
+ const batchSize = 10
+ last := treeAppend(t, nil, batchSize)
+ for i := 1; i < count/batchSize; i++ {
+ last = treeAppend(t, last, batchSize)
+ }
+ require.Len(t, result, count)
+ require.True(t, slices.IsSortedFunc(result, func(a, b MultiNodeInfo) int {
+ filenameA := findAttr(a.Meta, AttributeFilename)
+ filenameB := findAttr(b.Meta, AttributeFilename)
+ return bytes.Compare(filenameA, filenameB)
+ }))
+}
+
func TestForest_TreeSortedIteration(t *testing.T) {
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
From 806ea37101fc35cedca3e10681e43d7a91968d67 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 23 Aug 2024 10:58:33 +0300
Subject: [PATCH 002/655] [#1328] pilorama: Do not skip items in
SortedByFilename
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Benchmark results:
```
goos: linux
goarch: amd64
pkg: git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama
cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
│ old │ new │
│ sec/op │ sec/op vs base │
ForestSortedIteration/bbolt,root-8 207.2µ ± 6% 173.6µ ± 6% -16.23% (p=0.000 n=10)
ForestSortedIteration/bbolt,leaf-8 3.910µ ± 5% 3.928µ ± 7% ~ (p=0.529 n=10)
geomean 28.46µ 26.11µ -8.27%
```
They are not representative, as the worst case occurs when we have multiple
items of different lengths. However, `FileName` is usually shorter than 100
characters in practice, so the asymptotic behavior is the same.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/pilorama/boltdb.go | 1 +
.../pilorama/forest_test.go | 3 +-
pkg/local_object_storage/pilorama/heap.go | 38 +++++++++++++++----
3 files changed, 32 insertions(+), 10 deletions(-)
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 48363ceac..29941be83 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -1161,6 +1161,7 @@ func (t *boltForest) fillSortedChildren(b *bbolt.Bucket, nodeIDs MultiNode, h *f
lastFilename = nil
nodes = nil
length = actualLength + 1
+ count = 0
c.Seek(append(prefix, byte(length), byte(length>>8)))
c.Prev() // c.Next() will be performed by for loop
}
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index c6c6e8c8b..ecca9842f 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -237,9 +237,8 @@ func BenchmarkForestSortedIteration(b *testing.B) {
// The issue which we call "BugWithSkip" is easiest to understand when filenames are
// monotonically increasing numbers. We want the list of sorted filenames to have different length interleaved.
// The bug happens when we switch between length during listing.
-// Thus this test contains numbers from 1 to 1000 and batch size of size 100.
+// Thus this test contains numbers from 1 to 2000 and batch size of size 10.
func TestForest_TreeSortedIterationBugWithSkip(t *testing.T) {
- t.Skip()
for i := range providers {
t.Run(providers[i].name, func(t *testing.T) {
testForestTreeSortedIterationBugWithSkip(t, providers[i].construct(t))
diff --git a/pkg/local_object_storage/pilorama/heap.go b/pkg/local_object_storage/pilorama/heap.go
index ec57b9e1f..5a00bcf7a 100644
--- a/pkg/local_object_storage/pilorama/heap.go
+++ b/pkg/local_object_storage/pilorama/heap.go
@@ -2,6 +2,8 @@ package pilorama
import (
"container/heap"
+ "slices"
+ "strings"
)
type heapInfo struct {
@@ -28,9 +30,10 @@ func (h *filenameHeap) Pop() any {
// fixedHeap maintains a fixed number of smallest elements started at some point.
type fixedHeap struct {
- start *string
- count int
- h *filenameHeap
+ start *string
+ sorted bool
+ count int
+ h *filenameHeap
}
func newHeap(start *string, count int) *fixedHeap {
@@ -44,20 +47,39 @@ func newHeap(start *string, count int) *fixedHeap {
}
}
+const amortizationMultiplier = 5
+
func (h *fixedHeap) push(id MultiNode, filename string) bool {
if h.start != nil && filename <= *h.start {
return false
}
- heap.Push(h.h, heapInfo{id: id, filename: filename})
- if h.h.Len() > h.count {
- heap.Remove(h.h, h.h.Len()-1)
+
+ *h.h = append(*h.h, heapInfo{id: id, filename: filename})
+ h.sorted = false
+
+ if h.h.Len() > h.count*amortizationMultiplier {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ *h.h = (*h.h)[:h.count]
}
return true
}
func (h *fixedHeap) pop() (heapInfo, bool) {
- if h.h.Len() != 0 {
- return heap.Pop(h.h).(heapInfo), true
+ if !h.sorted {
+ slices.SortFunc(*h.h, func(a, b heapInfo) int {
+ return strings.Compare(a.filename, b.filename)
+ })
+ if len(*h.h) > h.count {
+ *h.h = (*h.h)[:h.count]
+ }
+ h.sorted = true
+ }
+ if len(*h.h) != 0 {
+ info := (*h.h)[0]
+ *h.h = (*h.h)[1:]
+ return info, true
}
return heapInfo{}, false
}
From fa7f9fbce240dde6e0a3c6ded13cb769bbe3b2bf Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 26 Aug 2024 15:36:39 +0300
Subject: [PATCH 003/655] [#1333] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index b665709cc..b0df97511 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 54e0d0301..f3a17363d 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88 h1:ckD87Z4pvPtu2hjpRcqPHlAtgOHPZfSW3x+zzwZztiY=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 5e9a97fd3eabc58347260b06fc6e1da4f5cf2185 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 27 Aug 2024 14:46:56 +0300
Subject: [PATCH 004/655] [#1336] go.mod: Update api-go and sdk-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 4 +-
go.sum | 8 +-
pkg/services/control/ir/service_frostfs.pb.go | 32 +--
pkg/services/control/service_frostfs.pb.go | 196 +++++++++---------
pkg/services/control/types_frostfs.pb.go | 6 +-
pkg/services/tree/service_frostfs.pb.go | 98 ++++-----
6 files changed, 172 insertions(+), 172 deletions(-)
diff --git a/go.mod b/go.mod
index b0df97511..b7f59c823 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index f3a17363d..d2f926151 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88 h1:ckD87Z4pvPtu2hjpRcqPHlAtgOHPZfSW3x+zzwZztiY=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240826113619-9e82a5a31a88/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac h1:Gu3oiPnsSZPgwsUYs2f3xTQwndM/OWM/zo3zbN4rOb8=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b h1:ZCJBVmQDcdv0twpX9xJU/AQwX+dXyvVfqr0Pq3x+3yk=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240820072028-6dd7be11d13b/go.mod h1:aaC2OR34tVrBwd0Z2gqoN5WLtV/idKqpqPDhb4XqmCo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7 h1:9eZidZMT4tHOdc6GZRPlZR12IToKqHhUd5wzevdDUqo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7/go.mod h1:VzVYcwo/eXjkdo5ktPdZeAE4fsnZX6zEun3g+5E2weo=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
diff --git a/pkg/services/control/ir/service_frostfs.pb.go b/pkg/services/control/ir/service_frostfs.pb.go
index 786095802..66d196617 100644
--- a/pkg/services/control/ir/service_frostfs.pb.go
+++ b/pkg/services/control/ir/service_frostfs.pb.go
@@ -164,10 +164,10 @@ func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -495,10 +495,10 @@ func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -810,10 +810,10 @@ func (x *TickEpochRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1125,10 +1125,10 @@ func (x *TickEpochResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1471,10 +1471,10 @@ func (x *RemoveNodeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1786,10 +1786,10 @@ func (x *RemoveNodeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2163,10 +2163,10 @@ func (x *RemoveContainerRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2478,10 +2478,10 @@ func (x *RemoveContainerResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index afd1c3c41..a446c5e59 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -164,10 +164,10 @@ func (x *HealthCheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -542,10 +542,10 @@ func (x *HealthCheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -904,10 +904,10 @@ func (x *SetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1185,10 +1185,10 @@ func (x *SetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1466,10 +1466,10 @@ func (x *GetNetmapStatusRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1828,10 +1828,10 @@ func (x *GetNetmapStatusResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2157,10 +2157,10 @@ func (x *DropObjectsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2438,10 +2438,10 @@ func (x *DropObjectsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2719,10 +2719,10 @@ func (x *ListShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2888,7 +2888,7 @@ func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
return
}
for i := range x.Shards {
- if x.Shards[i] != nil && x.Shards[i].StableSize() != 0 {
+ if x.Shards[i] != nil {
x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -3057,10 +3057,10 @@ func (x *ListShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3464,10 +3464,10 @@ func (x *SetShardModeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3745,10 +3745,10 @@ func (x *SetShardModeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4122,10 +4122,10 @@ func (x *SynchronizeTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4403,10 +4403,10 @@ func (x *SynchronizeTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4763,10 +4763,10 @@ func (x *EvacuateShardRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5078,10 +5078,10 @@ func (x *EvacuateShardResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5438,10 +5438,10 @@ func (x *FlushCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5719,10 +5719,10 @@ func (x *FlushCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6065,10 +6065,10 @@ func (x *DoctorRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6346,10 +6346,10 @@ func (x *DoctorResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6772,10 +6772,10 @@ func (x *StartShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshale
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7053,10 +7053,10 @@ func (x *StartShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7334,10 +7334,10 @@ func (x *GetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7830,10 +7830,10 @@ func (x *GetShardEvacuationStatusResponse_Body) EmitProtobuf(mm *easyproto.Messa
if int32(x.Status) != 0 {
mm.AppendInt32(5, int32(x.Status))
}
- if x.Duration != nil && x.Duration.StableSize() != 0 {
+ if x.Duration != nil {
x.Duration.EmitProtobuf(mm.AppendMessage(6))
}
- if x.StartedAt != nil && x.StartedAt.StableSize() != 0 {
+ if x.StartedAt != nil {
x.StartedAt.EmitProtobuf(mm.AppendMessage(7))
}
if len(x.ErrorMessage) != 0 {
@@ -8321,10 +8321,10 @@ func (x *GetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -8602,10 +8602,10 @@ func (x *ResetShardEvacuationStatusRequest) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -8883,10 +8883,10 @@ func (x *ResetShardEvacuationStatusResponse) EmitProtobuf(mm *easyproto.MessageM
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9164,10 +9164,10 @@ func (x *StopShardEvacuationRequest) EmitProtobuf(mm *easyproto.MessageMarshaler
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9445,10 +9445,10 @@ func (x *StopShardEvacuationResponse) EmitProtobuf(mm *easyproto.MessageMarshale
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -9613,7 +9613,7 @@ func (x *AddChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.Chain) != 0 {
@@ -9795,10 +9795,10 @@ func (x *AddChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10110,10 +10110,10 @@ func (x *AddChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarsha
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10278,7 +10278,7 @@ func (x *GetChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.ChainId) != 0 {
@@ -10460,10 +10460,10 @@ func (x *GetChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMarshal
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10775,10 +10775,10 @@ func (x *GetChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMarsha
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -10941,7 +10941,7 @@ func (x *ListChainLocalOverridesRequest_Body) EmitProtobuf(mm *easyproto.Message
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -11094,10 +11094,10 @@ func (x *ListChainLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMarsh
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11423,10 +11423,10 @@ func (x *ListChainLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11738,10 +11738,10 @@ func (x *ListTargetsLocalOverridesRequest) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -11907,7 +11907,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.Mess
return
}
for i := range x.Targets {
- if x.Targets[i] != nil && x.Targets[i].StableSize() != 0 {
+ if x.Targets[i] != nil {
x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -12076,10 +12076,10 @@ func (x *ListTargetsLocalOverridesResponse) EmitProtobuf(mm *easyproto.MessageMa
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12244,7 +12244,7 @@ func (x *RemoveChainLocalOverrideRequest_Body) EmitProtobuf(mm *easyproto.Messag
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
if len(x.ChainId) != 0 {
@@ -12426,10 +12426,10 @@ func (x *RemoveChainLocalOverrideRequest) EmitProtobuf(mm *easyproto.MessageMars
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12707,10 +12707,10 @@ func (x *RemoveChainLocalOverrideResponse) EmitProtobuf(mm *easyproto.MessageMar
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -12873,7 +12873,7 @@ func (x *RemoveChainLocalOverridesByTargetRequest_Body) EmitProtobuf(mm *easypro
if x == nil {
return
}
- if x.Target != nil && x.Target.StableSize() != 0 {
+ if x.Target != nil {
x.Target.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -13026,10 +13026,10 @@ func (x *RemoveChainLocalOverridesByTargetRequest) EmitProtobuf(mm *easyproto.Me
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -13307,10 +13307,10 @@ func (x *RemoveChainLocalOverridesByTargetResponse) EmitProtobuf(mm *easyproto.M
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -13760,10 +13760,10 @@ func (x *SealWriteCacheRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14120,7 +14120,7 @@ func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshale
return
}
for i := range x.Results {
- if x.Results[i] != nil && x.Results[i].StableSize() != 0 {
+ if x.Results[i] != nil {
x.Results[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -14289,10 +14289,10 @@ func (x *SealWriteCacheResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14618,10 +14618,10 @@ func (x *DetachShardsRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -14899,10 +14899,10 @@ func (x *DetachShardsResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 9aff26a98..3cc37245f 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -549,7 +549,7 @@ func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.Addresses[j])
}
for i := range x.Attributes {
- if x.Attributes[i] != nil && x.Attributes[i].StableSize() != 0 {
+ if x.Attributes[i] != nil {
x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -817,7 +817,7 @@ func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(1, x.Epoch)
}
for i := range x.Nodes {
- if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ if x.Nodes[i] != nil {
x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1013,7 +1013,7 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.MetabasePath)
}
for i := range x.Blobstor {
- if x.Blobstor[i] != nil && x.Blobstor[i].StableSize() != 0 {
+ if x.Blobstor[i] != nil {
x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
}
}
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index c4d44253d..3c6ba21b7 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -70,7 +70,7 @@ func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.ParentId)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -346,10 +346,10 @@ func (x *AddRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -661,10 +661,10 @@ func (x *AddResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -852,7 +852,7 @@ func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(4, x.Path[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
}
@@ -1168,10 +1168,10 @@ func (x *AddByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1530,10 +1530,10 @@ func (x *AddByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -1938,10 +1938,10 @@ func (x *RemoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2219,10 +2219,10 @@ func (x *RemoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2410,7 +2410,7 @@ func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(4, x.NodeId)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
}
@@ -2712,10 +2712,10 @@ func (x *MoveRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -2993,10 +2993,10 @@ func (x *MoveResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3553,10 +3553,10 @@ func (x *GetNodeByPathRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -3734,7 +3734,7 @@ func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler
mm.AppendUint64(2, x.Timestamp)
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -3967,7 +3967,7 @@ func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler
return
}
for i := range x.Nodes {
- if x.Nodes[i] != nil && x.Nodes[i].StableSize() != 0 {
+ if x.Nodes[i] != nil {
x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -4136,10 +4136,10 @@ func (x *GetNodeByPathResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4506,7 +4506,7 @@ func (x *GetSubTreeRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.BearerToken) != 0 {
mm.AppendBytes(5, x.BearerToken)
}
- if x.OrderBy != nil && x.OrderBy.StableSize() != 0 {
+ if x.OrderBy != nil {
x.OrderBy.EmitProtobuf(mm.AppendMessage(6))
}
}
@@ -4803,10 +4803,10 @@ func (x *GetSubTreeRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -4993,7 +4993,7 @@ func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.Timestamp[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil && x.Meta[i].StableSize() != 0 {
+ if x.Meta[i] != nil {
x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -5282,10 +5282,10 @@ func (x *GetSubTreeResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5597,10 +5597,10 @@ func (x *TreeListRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -5926,10 +5926,10 @@ func (x *TreeListResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6102,7 +6102,7 @@ func (x *ApplyRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.TreeId) != 0 {
mm.AppendString(2, x.TreeId)
}
- if x.Operation != nil && x.Operation.StableSize() != 0 {
+ if x.Operation != nil {
x.Operation.EmitProtobuf(mm.AppendMessage(3))
}
}
@@ -6307,10 +6307,10 @@ func (x *ApplyRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6588,10 +6588,10 @@ func (x *ApplyResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -6996,10 +6996,10 @@ func (x *GetOpLogRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7162,7 +7162,7 @@ func (x *GetOpLogResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Operation != nil && x.Operation.StableSize() != 0 {
+ if x.Operation != nil {
x.Operation.EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -7315,10 +7315,10 @@ func (x *GetOpLogResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7596,10 +7596,10 @@ func (x *HealthcheckResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -7877,10 +7877,10 @@ func (x *HealthcheckRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if x == nil {
return
}
- if x.Body != nil && x.Body.StableSize() != 0 {
+ if x.Body != nil {
x.Body.EmitProtobuf(mm.AppendMessage(1))
}
- if x.Signature != nil && x.Signature.StableSize() != 0 {
+ if x.Signature != nil {
x.Signature.EmitProtobuf(mm.AppendMessage(2))
}
}
From d6b42972a81f18d4351d81b154f75815f39546ea Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 27 Aug 2024 16:09:13 +0300
Subject: [PATCH 005/655] [#1338] object: Fix audit patch stream
Signed-off-by: Airat Arifullin
---
pkg/services/object/audit.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/audit.go b/pkg/services/object/audit.go
index b924386d1..39e1f9f2d 100644
--- a/pkg/services/object/audit.go
+++ b/pkg/services/object/audit.go
@@ -204,7 +204,7 @@ func (a *auditPatchStream) CloseAndRecv(ctx context.Context) (*object.PatchRespo
if err != nil {
a.failed = true
}
- a.objectID = resp.GetBody().ObjectID
+ a.objectID = resp.GetBody().GetObjectID()
audit.LogRequestWithKey(a.log, objectGRPC.ObjectService_Patch_FullMethodName, a.key,
audit.TargetFromContainerIDObjectID(a.containerID, a.objectID),
!a.failed)
From 6488ddee882ddb2facd8554aa274986076de0bbc Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 27 Aug 2024 16:09:59 +0300
Subject: [PATCH 006/655] [#1338] object: Fix range provider in `Patch` handler
Signed-off-by: Airat Arifullin
---
pkg/services/object/patch/range_provider.go | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/pkg/services/object/patch/range_provider.go b/pkg/services/object/patch/range_provider.go
index 755c5bf60..cb3f7c342 100644
--- a/pkg/services/object/patch/range_provider.go
+++ b/pkg/services/object/patch/range_provider.go
@@ -30,6 +30,12 @@ type rangeProvider struct {
var _ patcherSDK.RangeProvider = (*rangeProvider)(nil)
func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.Reader {
+ // Remote GetRange request to a container node uses an SDK-client that fails range validation
+ // with zero-length. However, from the patcher's point of view, such request is still valid.
+ if rng.GetLength() == 0 {
+ return &nopReader{}
+ }
+
pipeReader, pipeWriter := io.Pipe()
var rngPrm getsvc.RangePrm
@@ -61,3 +67,9 @@ func (r *rangeProvider) GetRange(ctx context.Context, rng *objectSDK.Range) io.R
return pipeReader
}
+
+type nopReader struct{}
+
+func (nopReader) Read(_ []byte) (int, error) {
+ return 0, io.EOF
+}
From 7abbdca0641f1526c347da60012f437be57254e8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 28 Aug 2024 13:56:45 +0300
Subject: [PATCH 007/655] [#1340] getSvc: Fix access denied error handling
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/get.go | 8 +++++++
pkg/services/object/get/remote.go | 7 +++++-
pkg/services/object/get/v2/get_forwarder.go | 26 ++++++++++++---------
3 files changed, 29 insertions(+), 12 deletions(-)
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index 07a2f3a72..03b7f8bf2 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -2,9 +2,11 @@ package getsvc
import (
"context"
+ "errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"go.uber.org/zap"
)
@@ -120,6 +122,12 @@ func (exec *request) analyzeStatus(ctx context.Context, execCnr bool) {
exec.log.Debug(logs.OperationFinishedWithError,
zap.Error(exec.err),
)
+ var errAccessDenied *apistatus.ObjectAccessDenied
+ if execCnr && errors.As(exec.err, &errAccessDenied) {
+ // Local get can't return access denied error, so this error was returned by
+ // write to the output stream. So there is no need to try to find object on other nodes.
+ return
+ }
if execCnr {
exec.executeOnContainer(ctx)
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index ce9abfe1c..163767c43 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -31,6 +31,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
var errECInfo *objectSDK.ECInfoError
var errRemoved *apistatus.ObjectAlreadyRemoved
var errOutOfRange *apistatus.ObjectOutOfRange
+ var errAccessDenied *apistatus.ObjectAccessDenied
switch {
default:
@@ -38,7 +39,11 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
if r.status != statusEC {
// for raw requests, continue to collect other parts
r.status = statusUndefined
- r.err = new(apistatus.ObjectNotFound)
+ if errors.As(err, &errAccessDenied) {
+ r.err = err
+ } else {
+ r.err = new(apistatus.ObjectNotFound)
+ }
}
return false
case err == nil:
diff --git a/pkg/services/object/get/v2/get_forwarder.go b/pkg/services/object/get/v2/get_forwarder.go
index 774f98643..18194c740 100644
--- a/pkg/services/object/get/v2/get_forwarder.go
+++ b/pkg/services/object/get/v2/get_forwarder.go
@@ -23,12 +23,14 @@ import (
)
type getRequestForwarder struct {
- OnceResign sync.Once
- OnceHeaderSending sync.Once
- GlobalProgress int
- Key *ecdsa.PrivateKey
- Request *objectV2.GetRequest
- Stream *streamObjectWriter
+ OnceResign sync.Once
+ GlobalProgress int
+ Key *ecdsa.PrivateKey
+ Request *objectV2.GetRequest
+ Stream *streamObjectWriter
+
+ headerSent bool
+ headerSentGuard sync.Mutex
}
func (f *getRequestForwarder) forwardRequestToNode(ctx context.Context, addr network.Address, c client.MultiAddressClient, pubkey []byte) (*objectSDK.Object, error) {
@@ -83,13 +85,15 @@ func (f *getRequestForwarder) writeHeader(ctx context.Context, v *objectV2.GetOb
obj.SetSignature(v.GetSignature())
obj.SetHeader(v.GetHeader())
- var err error
- f.OnceHeaderSending.Do(func() {
- err = f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj))
- })
- if err != nil {
+ f.headerSentGuard.Lock()
+ defer f.headerSentGuard.Unlock()
+ if f.headerSent {
+ return nil
+ }
+ if err := f.Stream.WriteHeader(ctx, objectSDK.NewFromV2(obj)); err != nil {
return errCouldNotWriteObjHeader(err)
}
+ f.headerSent = true
return nil
}
From 01b6f1733cbc9d0ce133b5970a28817423686e2c Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:29:07 +0300
Subject: [PATCH 008/655] [#1341] Makefile: Build linter with -trimpath
Fix error with go1.23:
```
Error: build linters: unable to load custom analyzer "truecloudlab-linters": ../linters/bin/external_linters.so, plugin.Open("/repo/frostfs/linters/bin/external_linters"): plugin was built with a different version of package cmp
Failed executing command with error: build linters: unable to load custom analyzer "truecloudlab-linters": ../linters/bin/external_linters.so, plugin.Open("/repo/frostfs/linters/bin/external_linters"): plugin was built with a different version of package cmp
```
Signed-off-by: Evgenii Stratonikov
---
Makefile | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile b/Makefile
index 94a8a14c3..3a6dea7c3 100755
--- a/Makefile
+++ b/Makefile
@@ -9,7 +9,7 @@ HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
PROTOC_OS_VERSION=osx-x86_64
@@ -197,7 +197,7 @@ lint-install:
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
- @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
+ @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
From 7e97df4878b9bf95f88712394bb21201eb072012 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:31:35 +0300
Subject: [PATCH 009/655] [#1341] Makefile: Update golangci-lint
Signed-off-by: Evgenii Stratonikov
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 3a6dea7c3..71492ef17 100755
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ HUB_IMAGE ?= truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.1
+LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.7
PROTOC_VERSION ?= 25.0
PROTOGEN_FROSTFS_VERSION ?= $(shell go list -f '{{.Version}}' -m git.frostfs.info/TrueCloudLab/frostfs-api-go/v2)
From 03976c6ed5df58b83f788a18416e27f9144342b1 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 28 Aug 2024 14:45:57 +0300
Subject: [PATCH 010/655] [#1341] .golangci.yml: Replace exportloopref with
copyloopvar
exportloopref is deprecated.
gopatch:
```
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-value := value
...
}
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
...
-index := index
...
}
@@
var value identifier
var channel expression
@@
for value := range channel {
...
-value := value
...
}
```
Signed-off-by: Evgenii Stratonikov
---
.golangci.yml | 2 +-
cmd/frostfs-adm/internal/modules/morph/generate/generate.go | 2 --
cmd/frostfs-cli/modules/container/list.go | 1 -
cmd/frostfs-cli/modules/object/nodes.go | 3 ---
.../blobstor/blobovniczatree/rebuild.go | 3 ---
pkg/local_object_storage/blobstor/info.go | 1 -
pkg/local_object_storage/engine/control.go | 2 --
pkg/local_object_storage/engine/shards.go | 1 -
pkg/local_object_storage/engine/writecache.go | 1 -
pkg/local_object_storage/pilorama/forest_test.go | 1 -
pkg/local_object_storage/writecache/flush_test.go | 1 -
pkg/services/object/get/assemblerec.go | 1 -
pkg/services/object/put/common.go | 1 -
pkg/services/object/put/ec.go | 1 -
pkg/services/policer/ec.go | 2 --
pkg/services/tree/sync.go | 6 +-----
pkg/services/tree/sync_test.go | 2 --
17 files changed, 2 insertions(+), 29 deletions(-)
diff --git a/.golangci.yml b/.golangci.yml
index 2e9e78fc3..971f0d0e7 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -67,7 +67,7 @@ linters:
- bidichk
- durationcheck
- exhaustive
- - exportloopref
+ - copyloopvar
- gofmt
- goimports
- misspell
diff --git a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
index c7de599e5..7af776797 100644
--- a/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
+++ b/cmd/frostfs-adm/internal/modules/morph/generate/generate.go
@@ -73,7 +73,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
return nil, fmt.Errorf("can't fetch password: %w", err)
}
- i := i
errG.Go(func() error {
p := filepath.Join(walletDir, innerring.GlagoliticLetter(i).String()+".json")
f, err := os.OpenFile(p, os.O_CREATE, 0o644)
@@ -107,7 +106,6 @@ func initializeWallets(v *viper.Viper, walletDir string, size int) ([]string, er
// Create consensus account with 2*N/3+1 multi-signature.
bftCount := smartcontract.GetDefaultHonestNodeCount(size)
for i := range wallets {
- i := i
ps := pubs.Copy()
errG.Go(func() error {
if err := addMultisigAccount(wallets[i], majCount, constants.CommitteeAccountName, passwords[i], ps); err != nil {
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index a1410d7a0..6d0019ec4 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -70,7 +70,6 @@ var listContainersCmd = &cobra.Command{
continue
}
- cnrID := cnrID
prmGet.ClientParams.ContainerID = &cnrID
res, err := internalclient.GetContainer(cmd.Context(), prmGet)
if err != nil {
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 4efe04d16..896f6f17f 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -393,8 +393,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
eg, egCtx := errgroup.WithContext(cmd.Context())
for _, cand := range candidates {
- cand := cand
-
eg.Go(func() error {
cli, err := createClient(egCtx, cmd, cand, pk)
if err != nil {
@@ -405,7 +403,6 @@ func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, pk *ecdsa.
}
for _, object := range objects {
- object := object
eg.Go(func() error {
stored, err := isObjectStoredOnNode(egCtx, cmd, object.containerID, object.objectID, cli, pk)
resultMtx.Lock()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 93ef8ba2e..cfc17cfae 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -160,9 +160,6 @@ func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovn
eg, egCtx := errgroup.WithContext(ctx)
for addr, data := range batch {
- addr := addr
- data := data
-
if err := limiter.AcquireWorkSlot(egCtx); err != nil {
_ = eg.Wait()
return result.Load(), err
diff --git a/pkg/local_object_storage/blobstor/info.go b/pkg/local_object_storage/blobstor/info.go
index 8a5bb870a..c1c47f3bb 100644
--- a/pkg/local_object_storage/blobstor/info.go
+++ b/pkg/local_object_storage/blobstor/info.go
@@ -43,7 +43,6 @@ func (b *BlobStor) ObjectsCount(ctx context.Context) (uint64, error) {
eg, egCtx := errgroup.WithContext(ctx)
for i := range b.storage {
- i := i
eg.Go(func() error {
v, e := b.storage[i].Storage.ObjectsCount(egCtx)
if e != nil {
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 03196400a..4778cf539 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -82,8 +82,6 @@ func (e *StorageEngine) Init(ctx context.Context) error {
}
for id, sh := range e.shards {
- id := id
- sh := sh
eg.Go(func() error {
if err := sh.Init(ctx); err != nil {
errCh <- shardInitError{
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 56d4fcd4a..980b38a63 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -367,7 +367,6 @@ func (e *StorageEngine) closeShards(deletedShards []hashedShard) error {
var multiErrGuard sync.Mutex
var eg errgroup.Group
for _, sh := range deletedShards {
- sh := sh
eg.Go(func() error {
err := sh.SetMode(mode.Disabled)
if err != nil {
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 3e8f387ef..7710bc7f4 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -102,7 +102,6 @@ func (e *StorageEngine) SealWriteCache(ctx context.Context, prm SealWriteCachePr
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
- shardID := shardID
eg.Go(func() error {
e.mtx.RLock()
sh, ok := e.shards[shardID.String()]
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index ecca9842f..41d7a567c 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -1459,7 +1459,6 @@ func testTreeLastSyncHeight(t *testing.T, f ForestStorage) {
func TestForest_ListTrees(t *testing.T) {
for i := range providers {
- i := i
t.Run(providers[i].name, func(t *testing.T) {
testTreeListTrees(t, providers[i].construct)
})
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 3c951bebe..a637da45d 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -170,7 +170,6 @@ func runFlushTest[Option any](
t.Run("ignore errors", func(t *testing.T) {
for _, f := range failures {
- f := f
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index 6a02673c3..dde0d7dad 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -170,7 +170,6 @@ func (a *assemblerec) processECNodesRequests(ctx context.Context, nodes []placem
eg.SetLimit(dataCount)
for _, ch := range a.ecInfo.localChunks {
- ch := ch
eg.Go(func() error {
select {
case <-ctx.Done():
diff --git a/pkg/services/object/put/common.go b/pkg/services/object/put/common.go
index 6696a192b..cbb7f5f33 100644
--- a/pkg/services/object/put/common.go
+++ b/pkg/services/object/put/common.go
@@ -71,7 +71,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
var wg sync.WaitGroup
for _, addr := range addrs {
- addr := addr
if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index fbb51912c..1fadf65fe 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -216,7 +216,6 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- idx := idx
eg.Go(func() error {
return e.writePart(egCtx, parts[idx], idx, nodes, visited)
})
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 0a118797d..61a65fc21 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -357,8 +357,6 @@ func (p *Policer) collectExistedChunks(ctx context.Context, objInfo objectcore.I
parts := make([]*objectSDK.Object, objInfo.ECInfo.Total)
errGroup, egCtx := errgroup.WithContext(ctx)
for idx, nodes := range existedChunks {
- idx := idx
- nodes := nodes
errGroup.Go(func() error {
var objID oid.Address
objID.SetContainer(parentAddress.Container())
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index 0f85f50b1..be22074a5 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -190,8 +190,6 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
var prev *pilorama.Move
for m := range operationStream {
- m := m
-
// skip already applied op
if prev != nil && prev.Time == m.Time {
continue
@@ -287,8 +285,6 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
allNodesSynced.Store(true)
for i, n := range nodes {
- i := i
- n := n
errGroup.Go(func() error {
var nodeSynced bool
n.IterateNetworkEndpoints(func(addr string) bool {
@@ -421,7 +417,7 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {
var wg sync.WaitGroup
for _, cnr := range cnrs {
wg.Add(1)
- cnr := cnr
+
err := s.syncPool.Submit(func() {
defer wg.Done()
s.log.Debug(logs.TreeSyncingContainerTrees, zap.Stringer("cid", cnr))
diff --git a/pkg/services/tree/sync_test.go b/pkg/services/tree/sync_test.go
index 190b4ccbb..497d90554 100644
--- a/pkg/services/tree/sync_test.go
+++ b/pkg/services/tree/sync_test.go
@@ -51,8 +51,6 @@ func Test_mergeOperationStreams(t *testing.T) {
// generate and put values to all chans
for i, ch := range nodeOpChans {
- i := i
- ch := ch
go func() {
for _, tm := range tt.opTimes[i] {
op := &pilorama.Move{}
From 6c2146bbc13e118d1518010d144ebc4dfadd5d6d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 28 Aug 2024 18:32:30 +0300
Subject: [PATCH 011/655] [#1334] metabase: Add upgrade from v2 to v3
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/root.go | 15 +
.../internal/modules/metabase/upgrade.go | 86 +++++
cmd/frostfs-adm/internal/modules/root.go | 2 +
pkg/local_object_storage/metabase/upgrade.go | 364 ++++++++++++++++++
.../metabase/upgrade_test.go | 215 +++++++++++
pkg/local_object_storage/metabase/util.go | 9 +-
pkg/local_object_storage/metabase/version.go | 15 +
7 files changed, 703 insertions(+), 3 deletions(-)
create mode 100644 cmd/frostfs-adm/internal/modules/metabase/root.go
create mode 100644 cmd/frostfs-adm/internal/modules/metabase/upgrade.go
create mode 100644 pkg/local_object_storage/metabase/upgrade.go
create mode 100644 pkg/local_object_storage/metabase/upgrade_test.go
diff --git a/cmd/frostfs-adm/internal/modules/metabase/root.go b/cmd/frostfs-adm/internal/modules/metabase/root.go
new file mode 100644
index 000000000..5b21ed273
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/metabase/root.go
@@ -0,0 +1,15 @@
+package metabase
+
+import "github.com/spf13/cobra"
+
+// RootCmd is the root command of the metabase section.
+var RootCmd = &cobra.Command{
+ Use: "metabase",
+ Short: "Section for metabase commands",
+}
+
+func init() {
+ RootCmd.AddCommand(UpgradeCmd)
+
+ initUpgradeCommand()
+}
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
new file mode 100644
index 000000000..83e085df4
--- /dev/null
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -0,0 +1,86 @@
+package metabase
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
+ shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "github.com/spf13/cobra"
+)
+
+const (
+ pathFlag = "path"
+ noCompactFlag = "no-compact"
+)
+
+var errNoPathsFound = errors.New("no metabase paths found")
+
+var path string
+
+var UpgradeCmd = &cobra.Command{
+ Use: "upgrade",
+ Short: "Upgrade metabase to latest version",
+ RunE: upgrade,
+}
+
+func upgrade(cmd *cobra.Command, _ []string) error {
+ configFile, err := cmd.Flags().GetString(commonflags.ConfigFlag)
+ if err != nil {
+ return err
+ }
+ configDir, err := cmd.Flags().GetString(commonflags.ConfigDirFlag)
+ if err != nil {
+ return err
+ }
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
+ var paths []string
+ if path != "" {
+ paths = append(paths, path)
+ }
+ appCfg := config.New(configFile, configDir, config.EnvPrefix)
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return fmt.Errorf("failed to get metabase paths: %w", err)
+ }
+ if len(paths) == 0 {
+ return errNoPathsFound
+ }
+ cmd.Println("found", len(paths), "metabases:")
+ for i, path := range paths {
+ cmd.Println(i+1, ":", path)
+ }
+ result := make(map[string]bool)
+ for _, path := range paths {
+ cmd.Println("upgrading metabase", path, "...")
+ if err := meta.Upgrade(cmd.Context(), path, !noCompact, func(a ...any) {
+ cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
+ }); err != nil {
+ result[path] = false
+ cmd.Println("error: failed to upgrade metabase", path, ":", err)
+ } else {
+ result[path] = true
+ cmd.Println("metabase", path, "upgraded successfully")
+ }
+ }
+ for mb, ok := range result {
+ if ok {
+ cmd.Println(mb, ": success")
+ } else {
+ cmd.Println(mb, ": failed")
+ }
+ }
+ return nil
+}
+
+func initUpgradeCommand() {
+ flags := UpgradeCmd.Flags()
+ flags.StringVar(&path, pathFlag, "", "Path to metabase file")
+ flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
+}
diff --git a/cmd/frostfs-adm/internal/modules/root.go b/cmd/frostfs-adm/internal/modules/root.go
index 8595483ab..defd898c8 100644
--- a/cmd/frostfs-adm/internal/modules/root.go
+++ b/cmd/frostfs-adm/internal/modules/root.go
@@ -5,6 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/config"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/storagecfg"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
@@ -41,6 +42,7 @@ func init() {
rootCmd.AddCommand(config.RootCmd)
rootCmd.AddCommand(morph.RootCmd)
rootCmd.AddCommand(storagecfg.RootCmd)
+ rootCmd.AddCommand(metabase.RootCmd)
rootCmd.AddCommand(autocomplete.Command("frostfs-adm"))
rootCmd.AddCommand(gendoc.Command(rootCmd, gendoc.Options{}))
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
new file mode 100644
index 000000000..014e50286
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -0,0 +1,364 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "sync/atomic"
+ "time"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ upgradeLogFrequency = 50_000
+ upgradeWorkersCount = 1_000
+ compactMaxTxSize = 256 << 20
+ upgradeTimeout = 1 * time.Second
+)
+
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
+ 2: upgradeFromV2ToV3,
+}
+
+func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
+ if _, err := os.Stat(path); err != nil {
+ return fmt.Errorf("check metabase existence: %w", err)
+ }
+ opts := bbolt.DefaultOptions
+ opts.Timeout = upgradeTimeout
+ db, err := bbolt.Open(path, os.ModePerm, opts)
+ if err != nil {
+ return fmt.Errorf("open metabase: %w", err)
+ }
+ var version uint64
+ if err := db.View(func(tx *bbolt.Tx) error {
+ var e error
+ version, e = currentVersion(tx)
+ return e
+ }); err != nil {
+ return err
+ }
+ updater, found := updates[version]
+ if !found {
+ return fmt.Errorf("unsupported version %d: no update available", version)
+ }
+ if err := updater(ctx, db, log); err != nil {
+ return fmt.Errorf("update metabase schema: %w", err)
+ }
+ if compact {
+ log("compacting metabase...")
+ err := compactDB(db)
+ if err != nil {
+ return fmt.Errorf("compact metabase: %w", err)
+ }
+ log("metabase compacted")
+ }
+ return db.Close()
+}
+
+func compactDB(db *bbolt.DB) error {
+ sourcePath := db.Path()
+ tmpFileName := sourcePath + "." + time.Now().Format(time.RFC3339)
+ f, err := os.Stat(sourcePath)
+ if err != nil {
+ return err
+ }
+ dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
+ Timeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return fmt.Errorf("can't open new metabase to compact: %w", err)
+ }
+ if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
+ return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
+ }
+ if err := dst.Close(); err != nil {
+ return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close source metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ if err := os.Rename(tmpFileName, sourcePath); err != nil {
+ return fmt.Errorf("replace source metabase with compacted: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
+ return nil
+}
+
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropUserAttributes(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropOwnerIDIndex(ctx, db, log); err != nil {
+ return err
+ }
+ if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
+ return err
+ }
+ return db.Update(func(tx *bbolt.Tx) error {
+ return updateVersion(tx, version)
+ })
+}
+
+type objectIDToExpEpoch struct {
+ containerID cid.ID
+ objectID oid.ID
+ expirationEpoch uint64
+}
+
+func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ log("filling expiration epoch buckets...")
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ _, err := tx.CreateBucketIfNotExists(expEpochToObjectBucketName)
+ return err
+ }); err != nil {
+ return err
+ }
+ objects := make(chan objectIDToExpEpoch)
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return selectObjectsWithExpirationEpoch(ctx, db, objects)
+ })
+ var count atomic.Uint64
+ for i := 0; i < upgradeWorkersCount; i++ {
+ eg.Go(func() error {
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case obj, ok := <-objects:
+ if !ok {
+ return nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ if err := putUniqueIndexItem(tx, namedBucketItem{
+ name: expEpochToObjectBucketName,
+ key: expirationEpochKey(obj.expirationEpoch, obj.containerID, obj.objectID),
+ val: zeroValue,
+ }); err != nil {
+ return err
+ }
+ val := make([]byte, epochSize)
+ binary.LittleEndian.PutUint64(val, obj.expirationEpoch)
+ return putUniqueIndexItem(tx, namedBucketItem{
+ name: objectToExpirationEpochBucketName(obj.containerID, make([]byte, bucketKeySize)),
+ key: objectKey(obj.objectID, make([]byte, objectKeySize)),
+ val: val,
+ })
+ }); err != nil {
+ return err
+ }
+ }
+ if c := count.Add(1); c%upgradeLogFrequency == 0 {
+ log("expiration epoch filled for", c, "objects...")
+ }
+ }
+ })
+ }
+ err := eg.Wait()
+ if err != nil {
+ log("expiration epoch buckets completed completed with error:", err)
+ return err
+ }
+ log("filling expiration epoch buckets completed successfully, total", count.Load(), "objects")
+ return nil
+}
+
+func selectObjectsWithExpirationEpoch(ctx context.Context, db *bbolt.DB, objects chan objectIDToExpEpoch) error {
+ defer close(objects)
+
+ const batchSize = 1000
+ it := &objectsWithExpirationEpochBatchIterator{
+ lastAttributeKey: usrAttrPrefix,
+ }
+ for {
+ if err := getNextObjectsWithExpirationEpochBatch(ctx, db, it, batchSize); err != nil {
+ return err
+ }
+ for _, item := range it.items {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case objects <- item:
+ }
+ }
+
+ if len(it.items) < batchSize {
+ return nil
+ }
+ it.items = nil
+ }
+}
+
+var (
+ usrAttrPrefix = []byte{userAttributePrefix}
+ errBatchSizeLimit = errors.New("batch size limit")
+)
+
+type objectsWithExpirationEpochBatchIterator struct {
+ lastAttributeKey []byte
+ lastAttributeValue []byte
+ lastAttrKeyValueItem []byte
+ items []objectIDToExpEpoch
+}
+
+// - {prefix}{containerID}{attributeKey} <- bucket
+// -- {attributeValue} <- bucket, expirationEpoch
+// --- {objectID}: zeroValue <- record
+
+func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, it *objectsWithExpirationEpochBatchIterator, batchSize int) error {
+ seekAttrValue := it.lastAttributeValue
+ seekAttrKVItem := it.lastAttrKeyValueItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ attrKeyC := tx.Cursor()
+ for attrKey, _ := attrKeyC.Seek(it.lastAttributeKey); attrKey != nil && bytes.HasPrefix(attrKey, usrAttrPrefix); attrKey, _ = attrKeyC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if len(attrKey) <= 1+cidSize {
+ continue
+ }
+ attributeKey := string(attrKey[1+cidSize:])
+ if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
+ continue
+ }
+ var containerID cid.ID
+ if err := containerID.Decode(attrKey[1 : 1+cidSize]); err != nil {
+ return fmt.Errorf("decode container id from user attribute bucket: %w", err)
+ }
+ if err := iterateExpirationAttributeKeyBucket(ctx, tx.Bucket(attrKey), it, batchSize, containerID, attrKey, seekAttrValue, seekAttrKVItem); err != nil {
+ return err
+ }
+ seekAttrValue = nil
+ seekAttrKVItem = nil
+ }
+ return nil
+ })
+ if err != nil && !errors.Is(err, errBatchSizeLimit) {
+ return err
+ }
+ return nil
+}
+
+func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, it *objectsWithExpirationEpochBatchIterator, batchSize int, containerID cid.ID, attrKey, seekAttrValue, seekAttrKVItem []byte) error {
+ attrValueC := b.Cursor()
+ for attrValue, v := attrValueC.Seek(seekAttrValue); attrValue != nil; attrValue, v = attrValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v != nil {
+ continue // need to iterate over buckets, not records
+ }
+ expirationEpoch, err := strconv.ParseUint(string(attrValue), 10, 64)
+ if err != nil {
+ return fmt.Errorf("could not parse expiration epoch: %w", err)
+ }
+ expirationEpochBucket := b.Bucket(attrValue)
+ attrKeyValueC := expirationEpochBucket.Cursor()
+ for attrKeyValueItem, v := attrKeyValueC.Seek(seekAttrKVItem); attrKeyValueItem != nil; attrKeyValueItem, v = attrKeyValueC.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ if v == nil {
+ continue // need to iterate over records, not buckets
+ }
+ if bytes.Equal(it.lastAttributeKey, attrKey) && bytes.Equal(it.lastAttributeValue, attrValue) && bytes.Equal(it.lastAttrKeyValueItem, attrKeyValueItem) {
+ continue
+ }
+ var objectID oid.ID
+ if err := objectID.Decode(attrKeyValueItem); err != nil {
+ return fmt.Errorf("decode object id from container '%s' expiration epoch %d: %w", containerID, expirationEpoch, err)
+ }
+ it.lastAttributeKey = bytes.Clone(attrKey)
+ it.lastAttributeValue = bytes.Clone(attrValue)
+ it.lastAttrKeyValueItem = bytes.Clone(attrKeyValueItem)
+ it.items = append(it.items, objectIDToExpEpoch{
+ containerID: containerID,
+ objectID: objectID,
+ expirationEpoch: expirationEpoch,
+ })
+ if len(it.items) == batchSize {
+ return errBatchSizeLimit
+ }
+ }
+ seekAttrKVItem = nil
+ }
+ return nil
+}
+
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
+ log(append([]any{"user attributes:"}, a...)...)
+ })
+}
+
+func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
+ log(append([]any{"owner ID index:"}, a...)...)
+ })
+}
+
+func dropPayloadChecksumIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+ return dropBucketsByPrefix(ctx, db, []byte{payloadHashPrefix}, func(a ...any) {
+ log(append([]any{"payload checksum:"}, a...)...)
+ })
+}
+
+func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log func(a ...any)) error {
+ log("deleting buckets...")
+ const batch = 1000
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ for _, k := range keys {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ if count += uint64(len(keys)); count%upgradeLogFrequency == 0 {
+ log("deleted", count, "buckets")
+ }
+ }
+}
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
new file mode 100644
index 000000000..dc3d7d07d
--- /dev/null
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -0,0 +1,215 @@
+//go:build integration
+
+package meta
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "testing"
+ "time"
+
+ objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+const upgradeFilePath = "/path/to/metabase.v2"
+
+func TestUpgradeV2ToV3(t *testing.T) {
+ path := createTempCopy(t, upgradeFilePath)
+ defer func() {
+ require.NoError(t, os.Remove(path))
+ }()
+ db := New(WithPath(path), WithEpochState(epochState{e: 1000}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
+ require.NoError(t, db.Close())
+ require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+ fmt.Println()
+}
+
+func createTempCopy(t *testing.T, path string) string {
+ src, err := os.Open(path)
+ require.NoError(t, err)
+
+ tmpPath := upgradeFilePath + time.Now().Format(time.RFC3339)
+ dest, err := os.Create(tmpPath)
+ require.NoError(t, err)
+
+ _, err = io.Copy(dest, src)
+ require.NoError(t, err)
+
+ require.NoError(t, src.Close())
+ require.NoError(t, dest.Close())
+
+ return tmpPath
+}
+
+func TestGenerateMetabaseFile(t *testing.T) {
+ t.Skip("for generating db")
+ const (
+ containersCount = 10_000
+ simpleObjectsCount = 500_000
+ complexObjectsCount = 500_000 // x2
+ deletedByGCMarksCount = 100_000
+ deletedByTombstoneCount = 100_000 // x2
+ lockedCount = 100_000 // x2
+
+ allocSize = 128 << 20
+ generateWorkersCount = 1_000
+ minEpoch = 1_000
+ maxFilename = 1_000
+ maxStorageID = 10_000
+ )
+
+ db := New(WithPath(upgradeFilePath), WithEpochState(epochState{e: minEpoch}), WithLogger(test.NewLogger(t)))
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ db.boltDB.AllocSize = allocSize
+ db.boltDB.NoSync = true
+ require.NoError(t, db.Init())
+ containers := make([]cid.ID, containersCount)
+ for i := range containers {
+ containers[i] = cidtest.ID()
+ }
+ oc, err := db.ObjectCounters()
+ require.NoError(t, err)
+ require.True(t, oc.IsZero())
+ eg, ctx := errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects
+ for i := 0; i < simpleObjectsCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // complex objects
+ for i := 0; i < complexObjectsCount; i++ {
+ i := i
+ eg.Go(func() error {
+ parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ child.SetParent(parent)
+ idParent, _ := parent.ID()
+ child.SetParentID(idParent)
+ testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: child,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("complex objects generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects deleted by gc marks
+ for i := 0; i < deletedByGCMarksCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects deleted by gc marks generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(10000)
+ // simple objects deleted by tombstones
+ for i := 0; i < deletedByTombstoneCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ tomb := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ tomb.SetType(objectSDK.TypeTombstone)
+ _, err = db.Put(ctx, PutPrm{
+ obj: tomb,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ tombAddr := object.AddressOf(tomb)
+ _, err = db.Inhume(ctx, InhumePrm{
+ target: []oid.Address{object.AddressOf(obj)},
+ tomb: &tombAddr,
+ })
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects deleted by tombstones generated")
+ eg, ctx = errgroup.WithContext(context.Background())
+ eg.SetLimit(generateWorkersCount)
+ // simple objects locked by locks
+ for i := 0; i < lockedCount; i++ {
+ i := i
+ eg.Go(func() error {
+ obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ _, err := db.Put(ctx, PutPrm{
+ obj: obj,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ lock := testutil.GenerateObjectWithCID(containers[i%len(containers)])
+ lock.SetType(objectSDK.TypeLock)
+ testutil.AddAttribute(lock, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
+ _, err = db.Put(ctx, PutPrm{
+ obj: lock,
+ id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
+ })
+ require.NoError(t, err)
+ err = db.Lock(ctx, containers[i%len(containers)], object.AddressOf(lock).Object(), []oid.ID{object.AddressOf(obj).Object()})
+ require.NoError(t, err)
+ return nil
+ })
+ }
+ require.NoError(t, eg.Wait())
+ db.log.Info("simple objects locked by locks generated")
+ require.NoError(t, db.boltDB.Sync())
+ require.NoError(t, db.Close())
+}
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 9134616fe..eef7210dc 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -94,11 +94,13 @@ const (
// ownerPrefix was used for prefixing FKBT index buckets mapping owner to object IDs.
// Key: owner ID
// Value: bucket containing object IDs as keys
- _
+ // removed in version 3
+ ownerPrefix
// userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- _
+ // removed in version 3
+ userAttributePrefix
// ====================
// List index buckets.
@@ -107,7 +109,8 @@ const (
// payloadHashPrefix was used for prefixing List index buckets mapping payload hash to a list of object IDs.
// Key: payload hash
// Value: list of object IDs
- _
+ // removed in version 3
+ payloadHashPrefix
// parentPrefix is used for prefixing List index buckets mapping parent ID to a list of children IDs.
// Key: parent ID
// Value: list of object IDs
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index bb2b66d9b..9e15babbc 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -2,6 +2,7 @@ package meta
import (
"encoding/binary"
+ "errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -18,6 +19,8 @@ var versionKey = []byte("version")
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
+var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
+
func checkVersion(tx *bbolt.Tx, initialized bool) error {
var knownVersion bool
@@ -59,3 +62,15 @@ func updateVersion(tx *bbolt.Tx, version uint64) error {
}
return b.Put(versionKey, data)
}
+
+func currentVersion(tx *bbolt.Tx) (uint64, error) {
+ b := tx.Bucket(shardInfoBucket)
+ if b == nil {
+ return 0, errVersionUndefinedNoInfoBucket
+ }
+ data := b.Get(versionKey)
+ if len(data) != 8 {
+ return 0, fmt.Errorf("version undefined: invalid version data length %d", len(data))
+ }
+ return binary.LittleEndian.Uint64(data), nil
+}
From 882c068410628e4e8a7fe929189e97f4e796a5d1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 16:33:30 +0300
Subject: [PATCH 012/655] [#1334] metabase: Store upgrade flag
This allows checking whether a metabase upgrade was left incomplete.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 12 +++++++++++
pkg/local_object_storage/metabase/version.go | 11 +++++++++-
.../metabase/version_test.go | 20 +++++++++++++++++++
pkg/local_object_storage/shard/control.go | 2 +-
4 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index 014e50286..a4c7707b4 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -51,9 +51,21 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any))
if !found {
return fmt.Errorf("unsupported version %d: no update available", version)
}
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Put(upgradeKey, zeroValue)
+ }); err != nil {
+ return fmt.Errorf("set upgrade key %w", err)
+ }
if err := updater(ctx, db, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(shardInfoBucket)
+ return b.Delete(upgradeKey)
+ }); err != nil {
+ return fmt.Errorf("delete upgrade key %w", err)
+ }
if compact {
log("compacting metabase...")
err := compactDB(db)
diff --git a/pkg/local_object_storage/metabase/version.go b/pkg/local_object_storage/metabase/version.go
index 9e15babbc..048bb9af6 100644
--- a/pkg/local_object_storage/metabase/version.go
+++ b/pkg/local_object_storage/metabase/version.go
@@ -12,13 +12,18 @@ import (
// version contains current metabase version.
const version = 3
-var versionKey = []byte("version")
+var (
+ versionKey = []byte("version")
+ upgradeKey = []byte("upgrade")
+)
// ErrOutdatedVersion is returned on initializing
// an existing metabase that is not compatible with
// the current code version.
var ErrOutdatedVersion = logicerr.New("invalid version, resynchronization is required")
+var ErrIncompletedUpgrade = logicerr.New("metabase upgrade is not completed")
+
var errVersionUndefinedNoInfoBucket = errors.New("version undefined: no info bucket")
func checkVersion(tx *bbolt.Tx, initialized bool) error {
@@ -35,6 +40,10 @@ func checkVersion(tx *bbolt.Tx, initialized bool) error {
return fmt.Errorf("%w: expected=%d, stored=%d", ErrOutdatedVersion, version, stored)
}
}
+ data = b.Get(upgradeKey)
+ if len(data) > 0 {
+ return ErrIncompletedUpgrade
+ }
}
if !initialized {
diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go
index b2af428ff..75229a1b4 100644
--- a/pkg/local_object_storage/metabase/version_test.go
+++ b/pkg/local_object_storage/metabase/version_test.go
@@ -84,4 +84,24 @@ func TestVersion(t *testing.T) {
require.NoError(t, db.Close())
})
})
+ t.Run("incompleted upgrade", func(t *testing.T) {
+ db := newDB(t)
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Put(upgradeKey, zeroValue)
+ }))
+ require.ErrorIs(t, db.Init(), ErrIncompletedUpgrade)
+ require.NoError(t, db.Close())
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return tx.Bucket(shardInfoBucket).Delete(upgradeKey)
+ }))
+ require.NoError(t, db.Init())
+ require.NoError(t, db.Close())
+ })
}
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 936a506c0..1626d5804 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -171,7 +171,7 @@ func (s *Shard) initializeComponents(m mode.Mode) error {
for _, component := range components {
if err := component.Init(); err != nil {
if component == s.metaBase {
- if errors.Is(err, meta.ErrOutdatedVersion) {
+ if errors.Is(err, meta.ErrOutdatedVersion) || errors.Is(err, meta.ErrIncompletedUpgrade) {
return fmt.Errorf("metabase initialization: %w", err)
}
From 98fe24cdb7347b9b4ddfb1c8fe5e2025465d7692 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 29 Aug 2024 15:45:33 +0300
Subject: [PATCH 013/655] [#1343] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
.../modules/control/shards_list.go | 6 +-
cmd/frostfs-cli/modules/tree/add.go | 6 +-
cmd/frostfs-cli/modules/tree/move.go | 2 +-
go.mod | 2 +-
go.sum | 4 +-
pkg/services/control/server/list_shards.go | 10 +-
pkg/services/control/server/policy_engine.go | 4 +-
.../control/server/seal_writecache.go | 4 +-
pkg/services/control/service_frostfs.pb.go | 66 +++----
pkg/services/control/types_frostfs.pb.go | 86 ++++-----
pkg/services/tree/service.go | 22 +--
pkg/services/tree/service_frostfs.pb.go | 172 ++++++++----------
pkg/services/tree/signature_test.go | 2 +-
13 files changed, 180 insertions(+), 206 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index 07c5bcd9a..e9e49bb29 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -61,7 +61,7 @@ func listShards(cmd *cobra.Command, _ []string) {
}
}
-func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
+func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
@@ -83,7 +83,7 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []*control.ShardInfo) {
cmd.Print(buf.String()) // pretty printer emits newline, so no need for Println
}
-func prettyPrintShards(cmd *cobra.Command, ii []*control.ShardInfo) {
+func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
for _, i := range ii {
pathPrinter := func(name, path string) string {
if path == "" {
@@ -121,7 +121,7 @@ func shardModeToString(m control.ShardMode) string {
return "unknown"
}
-func sortShardsByID(ii []*control.ShardInfo) {
+func sortShardsByID(ii []control.ShardInfo) {
sort.Slice(ii, func(i, j int) bool {
return bytes.Compare(ii[i].GetShard_ID(), ii[j].GetShard_ID()) < 0
})
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index 068b1d185..0b8dc292f 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -77,13 +77,13 @@ func add(cmd *cobra.Command, _ []string) {
cmd.Println("Node ID: ", resp.GetBody().GetNodeId())
}
-func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
+func parseMeta(cmd *cobra.Command) ([]tree.KeyValue, error) {
raws, _ := cmd.Flags().GetStringSlice(metaFlagKey)
if len(raws) == 0 {
return nil, nil
}
- pairs := make([]*tree.KeyValue, 0, len(raws))
+ pairs := make([]tree.KeyValue, 0, len(raws))
for i := range raws {
k, v, found := strings.Cut(raws[i], "=")
if !found {
@@ -94,7 +94,7 @@ func parseMeta(cmd *cobra.Command) ([]*tree.KeyValue, error) {
pair.Key = k
pair.Value = []byte(v)
- pairs = append(pairs, &pair)
+ pairs = append(pairs, pair)
}
return pairs, nil
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 95516940c..24abbd650 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -75,7 +75,7 @@ func move(cmd *cobra.Command, _ []string) {
resp, err := cli.GetSubTree(ctx, subTreeReq)
commonCmd.ExitOnErr(cmd, "rpc call: %w", err)
- var meta []*tree.KeyValue
+ var meta []tree.KeyValue
subtreeResp, err := resp.Recv()
for ; err == nil; subtreeResp, err = resp.Recv() {
meta = subtreeResp.GetBody().GetMeta()
diff --git a/go.mod b/go.mod
index b7f59c823..358370201 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index d2f926151..be82bff70 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac h1:Gu3oiPnsSZPgwsUYs2f3xTQwndM/OWM/zo3zbN4rOb8=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240827104600-eba18f6e67ac/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61 h1:bw9EVGWnfY9awFb5XYR52AGbzgg3o04gZF66yHob48c=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index b639245c1..56bd9fc1f 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -25,7 +25,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
info := s.s.DumpInfo()
- shardInfos := make([]*control.ShardInfo, 0, len(info.Shards))
+ shardInfos := make([]control.ShardInfo, 0, len(info.Shards))
for _, sh := range info.Shards {
si := new(control.ShardInfo)
@@ -54,7 +54,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
- shardInfos = append(shardInfos, si)
+ shardInfos = append(shardInfos, *si)
}
body.SetShards(shardInfos)
@@ -67,10 +67,10 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
return resp, nil
}
-func blobstorInfoToProto(info blobstor.Info) []*control.BlobstorInfo {
- res := make([]*control.BlobstorInfo, len(info.SubStorages))
+func blobstorInfoToProto(info blobstor.Info) []control.BlobstorInfo {
+ res := make([]control.BlobstorInfo, len(info.SubStorages))
for i := range info.SubStorages {
- res[i] = &control.BlobstorInfo{
+ res[i] = control.BlobstorInfo{
Path: info.SubStorages[i].Path,
Type: info.SubStorages[i].Type,
}
diff --git a/pkg/services/control/server/policy_engine.go b/pkg/services/control/server/policy_engine.go
index 98daac8a6..ab8258e27 100644
--- a/pkg/services/control/server/policy_engine.go
+++ b/pkg/services/control/server/policy_engine.go
@@ -220,13 +220,13 @@ func (s *Server) ListTargetsLocalOverrides(_ context.Context, req *control.ListT
if err != nil {
return nil, status.Error(getCodeByLocalStorageErr(err), err.Error())
}
- targets := make([]*control.ChainTarget, 0, len(apeTargets))
+ targets := make([]control.ChainTarget, 0, len(apeTargets))
for i := range apeTargets {
target, err := controlTarget(&apeTargets[i])
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
- targets = append(targets, &target)
+ targets = append(targets, target)
}
resp := &control.ListTargetsLocalOverridesResponse{
diff --git a/pkg/services/control/server/seal_writecache.go b/pkg/services/control/server/seal_writecache.go
index 1737677b7..6799bdcac 100644
--- a/pkg/services/control/server/seal_writecache.go
+++ b/pkg/services/control/server/seal_writecache.go
@@ -32,12 +32,12 @@ func (s *Server) SealWriteCache(ctx context.Context, req *control.SealWriteCache
resp := &control.SealWriteCacheResponse{Body: &control.SealWriteCacheResponse_Body{}}
for _, r := range res.ShardResults {
if r.Success {
- resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
Shard_ID: *r.ShardID,
Success: true,
})
} else {
- resp.Body.Results = append(resp.GetBody().GetResults(), &control.SealWriteCacheResponse_Body_Status{
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.SealWriteCacheResponse_Body_Status{
Shard_ID: *r.ShardID,
Error: r.ErrorMsg,
})
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index a446c5e59..eb0d95c64 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -2851,7 +2851,7 @@ func (x *ListShardsRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ListShardsResponse_Body struct {
- Shards []*ShardInfo `json:"shards"`
+ Shards []ShardInfo `json:"shards"`
}
var (
@@ -2869,7 +2869,7 @@ func (x *ListShardsResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Shards {
- size += proto.NestedStructureSize(1, x.Shards[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Shards[i])
}
return size
}
@@ -2888,9 +2888,7 @@ func (x *ListShardsResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
return
}
for i := range x.Shards {
- if x.Shards[i] != nil {
- x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Shards[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -2908,8 +2906,8 @@ func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Shards")
}
- x.Shards = append(x.Shards, new(ShardInfo))
- ff := x.Shards[len(x.Shards)-1]
+ x.Shards = append(x.Shards, ShardInfo{})
+ ff := &x.Shards[len(x.Shards)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -2917,13 +2915,13 @@ func (x *ListShardsResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
}
return nil
}
-func (x *ListShardsResponse_Body) GetShards() []*ShardInfo {
+func (x *ListShardsResponse_Body) GetShards() []ShardInfo {
if x != nil {
return x.Shards
}
return nil
}
-func (x *ListShardsResponse_Body) SetShards(v []*ShardInfo) {
+func (x *ListShardsResponse_Body) SetShards(v []ShardInfo) {
x.Shards = v
}
@@ -2981,11 +2979,11 @@ func (x *ListShardsResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "shards":
{
- var f *ShardInfo
- var list []*ShardInfo
+ var f ShardInfo
+ var list []ShardInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(ShardInfo)
+ f = ShardInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -11870,7 +11868,7 @@ func (x *ListTargetsLocalOverridesRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ListTargetsLocalOverridesResponse_Body struct {
- Targets []*ChainTarget `json:"targets"`
+ Targets []ChainTarget `json:"targets"`
}
var (
@@ -11888,7 +11886,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Targets {
- size += proto.NestedStructureSize(1, x.Targets[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Targets[i])
}
return size
}
@@ -11907,9 +11905,7 @@ func (x *ListTargetsLocalOverridesResponse_Body) EmitProtobuf(mm *easyproto.Mess
return
}
for i := range x.Targets {
- if x.Targets[i] != nil {
- x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Targets[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -11927,8 +11923,8 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Targets")
}
- x.Targets = append(x.Targets, new(ChainTarget))
- ff := x.Targets[len(x.Targets)-1]
+ x.Targets = append(x.Targets, ChainTarget{})
+ ff := &x.Targets[len(x.Targets)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -11936,13 +11932,13 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalProtobuf(src []byte) (
}
return nil
}
-func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []*ChainTarget {
+func (x *ListTargetsLocalOverridesResponse_Body) GetTargets() []ChainTarget {
if x != nil {
return x.Targets
}
return nil
}
-func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []*ChainTarget) {
+func (x *ListTargetsLocalOverridesResponse_Body) SetTargets(v []ChainTarget) {
x.Targets = v
}
@@ -12000,11 +11996,11 @@ func (x *ListTargetsLocalOverridesResponse_Body) UnmarshalEasyJSON(in *jlexer.Le
switch key {
case "targets":
{
- var f *ChainTarget
- var list []*ChainTarget
+ var f ChainTarget
+ var list []ChainTarget
in.Delim('[')
for !in.IsDelim(']') {
- f = new(ChainTarget)
+ f = ChainTarget{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -14083,7 +14079,7 @@ func (x *SealWriteCacheResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer)
}
type SealWriteCacheResponse_Body struct {
- Results []*SealWriteCacheResponse_Body_Status `json:"results"`
+ Results []SealWriteCacheResponse_Body_Status `json:"results"`
}
var (
@@ -14101,7 +14097,7 @@ func (x *SealWriteCacheResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Results {
- size += proto.NestedStructureSize(1, x.Results[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
}
return size
}
@@ -14120,9 +14116,7 @@ func (x *SealWriteCacheResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshale
return
}
for i := range x.Results {
- if x.Results[i] != nil {
- x.Results[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -14140,8 +14134,8 @@ func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error)
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Results")
}
- x.Results = append(x.Results, new(SealWriteCacheResponse_Body_Status))
- ff := x.Results[len(x.Results)-1]
+ x.Results = append(x.Results, SealWriteCacheResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -14149,13 +14143,13 @@ func (x *SealWriteCacheResponse_Body) UnmarshalProtobuf(src []byte) (err error)
}
return nil
}
-func (x *SealWriteCacheResponse_Body) GetResults() []*SealWriteCacheResponse_Body_Status {
+func (x *SealWriteCacheResponse_Body) GetResults() []SealWriteCacheResponse_Body_Status {
if x != nil {
return x.Results
}
return nil
}
-func (x *SealWriteCacheResponse_Body) SetResults(v []*SealWriteCacheResponse_Body_Status) {
+func (x *SealWriteCacheResponse_Body) SetResults(v []SealWriteCacheResponse_Body_Status) {
x.Results = v
}
@@ -14213,11 +14207,11 @@ func (x *SealWriteCacheResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "results":
{
- var f *SealWriteCacheResponse_Body_Status
- var list []*SealWriteCacheResponse_Body_Status
+ var f SealWriteCacheResponse_Body_Status
+ var list []SealWriteCacheResponse_Body_Status
in.Delim('[')
for !in.IsDelim(']') {
- f = new(SealWriteCacheResponse_Body_Status)
+ f = SealWriteCacheResponse_Body_Status{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 3cc37245f..42c1afa52 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -500,10 +500,10 @@ func (x *NodeInfo_Attribute) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type NodeInfo struct {
- PublicKey []byte `json:"publicKey"`
- Addresses []string `json:"addresses"`
- Attributes []*NodeInfo_Attribute `json:"attributes"`
- State NetmapStatus `json:"state"`
+ PublicKey []byte `json:"publicKey"`
+ Addresses []string `json:"addresses"`
+ Attributes []NodeInfo_Attribute `json:"attributes"`
+ State NetmapStatus `json:"state"`
}
var (
@@ -523,7 +523,7 @@ func (x *NodeInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.PublicKey)
size += proto.RepeatedStringSize(2, x.Addresses)
for i := range x.Attributes {
- size += proto.NestedStructureSize(3, x.Attributes[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Attributes[i])
}
size += proto.EnumSize(4, int32(x.State))
return size
@@ -549,9 +549,7 @@ func (x *NodeInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.Addresses[j])
}
for i := range x.Attributes {
- if x.Attributes[i] != nil {
- x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Attributes[i].EmitProtobuf(mm.AppendMessage(3))
}
if int32(x.State) != 0 {
mm.AppendInt32(4, int32(x.State))
@@ -584,8 +582,8 @@ func (x *NodeInfo) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Attributes")
}
- x.Attributes = append(x.Attributes, new(NodeInfo_Attribute))
- ff := x.Attributes[len(x.Attributes)-1]
+ x.Attributes = append(x.Attributes, NodeInfo_Attribute{})
+ ff := &x.Attributes[len(x.Attributes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -617,13 +615,13 @@ func (x *NodeInfo) GetAddresses() []string {
func (x *NodeInfo) SetAddresses(v []string) {
x.Addresses = v
}
-func (x *NodeInfo) GetAttributes() []*NodeInfo_Attribute {
+func (x *NodeInfo) GetAttributes() []NodeInfo_Attribute {
if x != nil {
return x.Attributes
}
return nil
}
-func (x *NodeInfo) SetAttributes(v []*NodeInfo_Attribute) {
+func (x *NodeInfo) SetAttributes(v []NodeInfo_Attribute) {
x.Attributes = v
}
func (x *NodeInfo) GetState() NetmapStatus {
@@ -731,11 +729,11 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "attributes":
{
- var f *NodeInfo_Attribute
- var list []*NodeInfo_Attribute
+ var f NodeInfo_Attribute
+ var list []NodeInfo_Attribute
in.Delim('[')
for !in.IsDelim(']') {
- f = new(NodeInfo_Attribute)
+ f = NodeInfo_Attribute{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -775,8 +773,8 @@ func (x *NodeInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type Netmap struct {
- Epoch uint64 `json:"epoch"`
- Nodes []*NodeInfo `json:"nodes"`
+ Epoch uint64 `json:"epoch"`
+ Nodes []NodeInfo `json:"nodes"`
}
var (
@@ -795,7 +793,7 @@ func (x *Netmap) StableSize() (size int) {
}
size += proto.UInt64Size(1, x.Epoch)
for i := range x.Nodes {
- size += proto.NestedStructureSize(2, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(2, &x.Nodes[i])
}
return size
}
@@ -817,9 +815,7 @@ func (x *Netmap) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(1, x.Epoch)
}
for i := range x.Nodes {
- if x.Nodes[i] != nil {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
- }
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(2))
}
}
@@ -843,8 +839,8 @@ func (x *Netmap) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Nodes")
}
- x.Nodes = append(x.Nodes, new(NodeInfo))
- ff := x.Nodes[len(x.Nodes)-1]
+ x.Nodes = append(x.Nodes, NodeInfo{})
+ ff := &x.Nodes[len(x.Nodes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -861,13 +857,13 @@ func (x *Netmap) GetEpoch() uint64 {
func (x *Netmap) SetEpoch(v uint64) {
x.Epoch = v
}
-func (x *Netmap) GetNodes() []*NodeInfo {
+func (x *Netmap) GetNodes() []NodeInfo {
if x != nil {
return x.Nodes
}
return nil
}
-func (x *Netmap) SetNodes(v []*NodeInfo) {
+func (x *Netmap) SetNodes(v []NodeInfo) {
x.Nodes = v
}
@@ -936,11 +932,11 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "nodes":
{
- var f *NodeInfo
- var list []*NodeInfo
+ var f NodeInfo
+ var list []NodeInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(NodeInfo)
+ f = NodeInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -958,13 +954,13 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ShardInfo struct {
- Shard_ID []byte `json:"shardID"`
- MetabasePath string `json:"metabasePath"`
- Blobstor []*BlobstorInfo `json:"blobstor"`
- WritecachePath string `json:"writecachePath"`
- Mode ShardMode `json:"mode"`
- ErrorCount uint32 `json:"errorCount"`
- PiloramaPath string `json:"piloramaPath"`
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
}
var (
@@ -984,7 +980,7 @@ func (x *ShardInfo) StableSize() (size int) {
size += proto.BytesSize(1, x.Shard_ID)
size += proto.StringSize(2, x.MetabasePath)
for i := range x.Blobstor {
- size += proto.NestedStructureSize(3, x.Blobstor[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Blobstor[i])
}
size += proto.StringSize(4, x.WritecachePath)
size += proto.EnumSize(5, int32(x.Mode))
@@ -1013,9 +1009,7 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(2, x.MetabasePath)
}
for i := range x.Blobstor {
- if x.Blobstor[i] != nil {
- x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Blobstor[i].EmitProtobuf(mm.AppendMessage(3))
}
if len(x.WritecachePath) != 0 {
mm.AppendString(4, x.WritecachePath)
@@ -1057,8 +1051,8 @@ func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Blobstor")
}
- x.Blobstor = append(x.Blobstor, new(BlobstorInfo))
- ff := x.Blobstor[len(x.Blobstor)-1]
+ x.Blobstor = append(x.Blobstor, BlobstorInfo{})
+ ff := &x.Blobstor[len(x.Blobstor)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -1108,13 +1102,13 @@ func (x *ShardInfo) GetMetabasePath() string {
func (x *ShardInfo) SetMetabasePath(v string) {
x.MetabasePath = v
}
-func (x *ShardInfo) GetBlobstor() []*BlobstorInfo {
+func (x *ShardInfo) GetBlobstor() []BlobstorInfo {
if x != nil {
return x.Blobstor
}
return nil
}
-func (x *ShardInfo) SetBlobstor(v []*BlobstorInfo) {
+func (x *ShardInfo) SetBlobstor(v []BlobstorInfo) {
x.Blobstor = v
}
func (x *ShardInfo) GetWritecachePath() string {
@@ -1250,11 +1244,11 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "blobstor":
{
- var f *BlobstorInfo
- var list []*BlobstorInfo
+ var f BlobstorInfo
+ var list []BlobstorInfo
in.Delim('[')
for !in.IsDelim(']') {
- f = new(BlobstorInfo)
+ f = BlobstorInfo{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 2012f53d2..4da61617f 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -360,7 +360,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
return nil, err
}
- info := make([]*GetNodeByPathResponse_Info, 0, len(nodes))
+ info := make([]GetNodeByPathResponse_Info, 0, len(nodes))
for _, node := range nodes {
m, parent, err := s.forest.TreeGetMeta(ctx, cid, b.GetTreeId(), node)
if err != nil {
@@ -374,11 +374,11 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
if b.GetAllAttributes() {
x.Meta = metaToProto(m.Items)
} else {
- var metaValue []*KeyValue
+ var metaValue []KeyValue
for _, kv := range m.Items {
for _, attr := range b.GetAttributes() {
if kv.Key == attr {
- metaValue = append(metaValue, &KeyValue{
+ metaValue = append(metaValue, KeyValue{
Key: kv.Key,
Value: kv.Value,
})
@@ -388,7 +388,7 @@ func (s *Service) GetNodeByPath(ctx context.Context, req *GetNodeByPathRequest)
}
x.Meta = metaValue
}
- info = append(info, &x)
+ info = append(info, x)
}
return &GetNodeByPathResponse{
@@ -782,21 +782,19 @@ func (s *Service) TreeList(ctx context.Context, req *TreeListRequest) (*TreeList
}, nil
}
-func protoToMeta(arr []*KeyValue) []pilorama.KeyValue {
+func protoToMeta(arr []KeyValue) []pilorama.KeyValue {
meta := make([]pilorama.KeyValue, len(arr))
for i, kv := range arr {
- if kv != nil {
- meta[i].Key = kv.GetKey()
- meta[i].Value = kv.GetValue()
- }
+ meta[i].Key = kv.GetKey()
+ meta[i].Value = kv.GetValue()
}
return meta
}
-func metaToProto(arr []pilorama.KeyValue) []*KeyValue {
- meta := make([]*KeyValue, len(arr))
+func metaToProto(arr []pilorama.KeyValue) []KeyValue {
+ meta := make([]KeyValue, len(arr))
for i, kv := range arr {
- meta[i] = &KeyValue{
+ meta[i] = KeyValue{
Key: kv.Key,
Value: kv.Value,
}
diff --git a/pkg/services/tree/service_frostfs.pb.go b/pkg/services/tree/service_frostfs.pb.go
index 3c6ba21b7..7b6abb1dd 100644
--- a/pkg/services/tree/service_frostfs.pb.go
+++ b/pkg/services/tree/service_frostfs.pb.go
@@ -16,11 +16,11 @@ import (
)
type AddRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -41,7 +41,7 @@ func (x *AddRequest_Body) StableSize() (size int) {
size += proto.StringSize(2, x.TreeId)
size += proto.UInt64Size(3, x.ParentId)
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
size += proto.BytesSize(5, x.BearerToken)
return size
@@ -70,9 +70,7 @@ func (x *AddRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.ParentId)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(5, x.BearerToken)
@@ -111,8 +109,8 @@ func (x *AddRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -153,13 +151,13 @@ func (x *AddRequest_Body) GetParentId() uint64 {
func (x *AddRequest_Body) SetParentId(v uint64) {
x.ParentId = v
}
-func (x *AddRequest_Body) GetMeta() []*KeyValue {
+func (x *AddRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *AddRequest_Body) SetMeta(v []*KeyValue) {
+func (x *AddRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *AddRequest_Body) GetBearerToken() []byte {
@@ -264,11 +262,11 @@ func (x *AddRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -793,12 +791,12 @@ func (x *AddResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type AddByPathRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- PathAttribute string `json:"pathAttribute"`
- Path []string `json:"path"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ PathAttribute string `json:"pathAttribute"`
+ Path []string `json:"path"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -820,7 +818,7 @@ func (x *AddByPathRequest_Body) StableSize() (size int) {
size += proto.StringSize(3, x.PathAttribute)
size += proto.RepeatedStringSize(4, x.Path)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
@@ -852,9 +850,7 @@ func (x *AddByPathRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendString(4, x.Path[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(6, x.BearerToken)
@@ -899,8 +895,8 @@ func (x *AddByPathRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -950,13 +946,13 @@ func (x *AddByPathRequest_Body) GetPath() []string {
func (x *AddByPathRequest_Body) SetPath(v []string) {
x.Path = v
}
-func (x *AddByPathRequest_Body) GetMeta() []*KeyValue {
+func (x *AddByPathRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *AddByPathRequest_Body) SetMeta(v []*KeyValue) {
+func (x *AddByPathRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *AddByPathRequest_Body) GetBearerToken() []byte {
@@ -1086,11 +1082,11 @@ func (x *AddByPathRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -2351,12 +2347,12 @@ func (x *RemoveResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type MoveRequest_Body struct {
- ContainerId []byte `json:"containerId"`
- TreeId string `json:"treeId"`
- ParentId uint64 `json:"parentId"`
- NodeId uint64 `json:"nodeId"`
- Meta []*KeyValue `json:"meta"`
- BearerToken []byte `json:"bearerToken"`
+ ContainerId []byte `json:"containerId"`
+ TreeId string `json:"treeId"`
+ ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Meta []KeyValue `json:"meta"`
+ BearerToken []byte `json:"bearerToken"`
}
var (
@@ -2378,7 +2374,7 @@ func (x *MoveRequest_Body) StableSize() (size int) {
size += proto.UInt64Size(3, x.ParentId)
size += proto.UInt64Size(4, x.NodeId)
for i := range x.Meta {
- size += proto.NestedStructureSize(5, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(5, &x.Meta[i])
}
size += proto.BytesSize(6, x.BearerToken)
return size
@@ -2410,9 +2406,7 @@ func (x *MoveRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(4, x.NodeId)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(5))
}
if len(x.BearerToken) != 0 {
mm.AppendBytes(6, x.BearerToken)
@@ -2457,8 +2451,8 @@ func (x *MoveRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -2508,13 +2502,13 @@ func (x *MoveRequest_Body) GetNodeId() uint64 {
func (x *MoveRequest_Body) SetNodeId(v uint64) {
x.NodeId = v
}
-func (x *MoveRequest_Body) GetMeta() []*KeyValue {
+func (x *MoveRequest_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *MoveRequest_Body) SetMeta(v []*KeyValue) {
+func (x *MoveRequest_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *MoveRequest_Body) GetBearerToken() []byte {
@@ -2630,11 +2624,11 @@ func (x *MoveRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -3685,10 +3679,10 @@ func (x *GetNodeByPathRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetNodeByPathResponse_Info struct {
- NodeId uint64 `json:"nodeId"`
- Timestamp uint64 `json:"timestamp"`
- Meta []*KeyValue `json:"meta"`
- ParentId uint64 `json:"parentId"`
+ NodeId uint64 `json:"nodeId"`
+ Timestamp uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
+ ParentId uint64 `json:"parentId"`
}
var (
@@ -3708,7 +3702,7 @@ func (x *GetNodeByPathResponse_Info) StableSize() (size int) {
size += proto.UInt64Size(1, x.NodeId)
size += proto.UInt64Size(2, x.Timestamp)
for i := range x.Meta {
- size += proto.NestedStructureSize(3, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(3, &x.Meta[i])
}
size += proto.UInt64Size(4, x.ParentId)
return size
@@ -3734,9 +3728,7 @@ func (x *GetNodeByPathResponse_Info) EmitProtobuf(mm *easyproto.MessageMarshaler
mm.AppendUint64(2, x.Timestamp)
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(3))
}
if x.ParentId != 0 {
mm.AppendUint64(4, x.ParentId)
@@ -3769,8 +3761,8 @@ func (x *GetNodeByPathResponse_Info) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -3802,13 +3794,13 @@ func (x *GetNodeByPathResponse_Info) GetTimestamp() uint64 {
func (x *GetNodeByPathResponse_Info) SetTimestamp(v uint64) {
x.Timestamp = v
}
-func (x *GetNodeByPathResponse_Info) GetMeta() []*KeyValue {
+func (x *GetNodeByPathResponse_Info) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *GetNodeByPathResponse_Info) SetMeta(v []*KeyValue) {
+func (x *GetNodeByPathResponse_Info) SetMeta(v []KeyValue) {
x.Meta = v
}
func (x *GetNodeByPathResponse_Info) GetParentId() uint64 {
@@ -3902,11 +3894,11 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -3930,7 +3922,7 @@ func (x *GetNodeByPathResponse_Info) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetNodeByPathResponse_Body struct {
- Nodes []*GetNodeByPathResponse_Info `json:"nodes"`
+ Nodes []GetNodeByPathResponse_Info `json:"nodes"`
}
var (
@@ -3948,7 +3940,7 @@ func (x *GetNodeByPathResponse_Body) StableSize() (size int) {
return 0
}
for i := range x.Nodes {
- size += proto.NestedStructureSize(1, x.Nodes[i])
+ size += proto.NestedStructureSizeUnchecked(1, &x.Nodes[i])
}
return size
}
@@ -3967,9 +3959,7 @@ func (x *GetNodeByPathResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler
return
}
for i := range x.Nodes {
- if x.Nodes[i] != nil {
- x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
- }
+ x.Nodes[i].EmitProtobuf(mm.AppendMessage(1))
}
}
@@ -3987,8 +3977,8 @@ func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Nodes")
}
- x.Nodes = append(x.Nodes, new(GetNodeByPathResponse_Info))
- ff := x.Nodes[len(x.Nodes)-1]
+ x.Nodes = append(x.Nodes, GetNodeByPathResponse_Info{})
+ ff := &x.Nodes[len(x.Nodes)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -3996,13 +3986,13 @@ func (x *GetNodeByPathResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
}
return nil
}
-func (x *GetNodeByPathResponse_Body) GetNodes() []*GetNodeByPathResponse_Info {
+func (x *GetNodeByPathResponse_Body) GetNodes() []GetNodeByPathResponse_Info {
if x != nil {
return x.Nodes
}
return nil
}
-func (x *GetNodeByPathResponse_Body) SetNodes(v []*GetNodeByPathResponse_Info) {
+func (x *GetNodeByPathResponse_Body) SetNodes(v []GetNodeByPathResponse_Info) {
x.Nodes = v
}
@@ -4060,11 +4050,11 @@ func (x *GetNodeByPathResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
switch key {
case "nodes":
{
- var f *GetNodeByPathResponse_Info
- var list []*GetNodeByPathResponse_Info
+ var f GetNodeByPathResponse_Info
+ var list []GetNodeByPathResponse_Info
in.Delim('[')
for !in.IsDelim(']') {
- f = new(GetNodeByPathResponse_Info)
+ f = GetNodeByPathResponse_Info{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
@@ -4935,10 +4925,10 @@ func (x *GetSubTreeRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type GetSubTreeResponse_Body struct {
- NodeId []uint64 `json:"nodeId"`
- ParentId []uint64 `json:"parentId"`
- Timestamp []uint64 `json:"timestamp"`
- Meta []*KeyValue `json:"meta"`
+ NodeId []uint64 `json:"nodeId"`
+ ParentId []uint64 `json:"parentId"`
+ Timestamp []uint64 `json:"timestamp"`
+ Meta []KeyValue `json:"meta"`
}
var (
@@ -4965,7 +4955,7 @@ func (x *GetSubTreeResponse_Body) StableSize() (size int) {
size += protowire.SizeGroup(protowire.Number(3), protowire.SizeVarint(x.Timestamp[i]))
}
for i := range x.Meta {
- size += proto.NestedStructureSize(4, x.Meta[i])
+ size += proto.NestedStructureSizeUnchecked(4, &x.Meta[i])
}
return size
}
@@ -4993,9 +4983,7 @@ func (x *GetSubTreeResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
mm.AppendUint64(3, x.Timestamp[j])
}
for i := range x.Meta {
- if x.Meta[i] != nil {
- x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
- }
+ x.Meta[i].EmitProtobuf(mm.AppendMessage(4))
}
}
@@ -5031,8 +5019,8 @@ func (x *GetSubTreeResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
if !ok {
return fmt.Errorf("cannot unmarshal field %s", "Meta")
}
- x.Meta = append(x.Meta, new(KeyValue))
- ff := x.Meta[len(x.Meta)-1]
+ x.Meta = append(x.Meta, KeyValue{})
+ ff := &x.Meta[len(x.Meta)-1]
if err := ff.UnmarshalProtobuf(data); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
@@ -5067,13 +5055,13 @@ func (x *GetSubTreeResponse_Body) GetTimestamp() []uint64 {
func (x *GetSubTreeResponse_Body) SetTimestamp(v []uint64) {
x.Timestamp = v
}
-func (x *GetSubTreeResponse_Body) GetMeta() []*KeyValue {
+func (x *GetSubTreeResponse_Body) GetMeta() []KeyValue {
if x != nil {
return x.Meta
}
return nil
}
-func (x *GetSubTreeResponse_Body) SetMeta(v []*KeyValue) {
+func (x *GetSubTreeResponse_Body) SetMeta(v []KeyValue) {
x.Meta = v
}
@@ -5206,11 +5194,11 @@ func (x *GetSubTreeResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
case "meta":
{
- var f *KeyValue
- var list []*KeyValue
+ var f KeyValue
+ var list []KeyValue
in.Delim('[')
for !in.IsDelim(']') {
- f = new(KeyValue)
+ f = KeyValue{}
f.UnmarshalEasyJSON(in)
list = append(list, f)
in.WantComma()
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index ce5039f7c..3c3ebfe89 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -123,7 +123,7 @@ func TestMessageSign(t *testing.T) {
ContainerId: rawCID1,
ParentId: 1,
NodeId: 2,
- Meta: []*KeyValue{
+ Meta: []KeyValue{
{Key: "kkk", Value: []byte("vvv")},
},
},
From 2b3fc50681f7e463a74c6075ad16788fc52cfa3f Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 30 Aug 2024 14:02:57 +0300
Subject: [PATCH 014/655] [#1320] shard: Fix
TestGCDropsObjectInhumedFromWritecache flaky test
The `TestGCDropsObjectInhumedFromWritecache` test was flaky because a
running asynchronous rebuild operation prevented GC from deleting the
object. A test-only shard option `WithDisabledRebuild` has been added
to fix this.
Signed-off-by: Aleksey Savchuk
---
pkg/local_object_storage/shard/control.go | 4 ++--
pkg/local_object_storage/shard/gc_test.go | 2 +-
pkg/local_object_storage/shard/shard.go | 10 ++++++++++
3 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 1626d5804..6efe4ec37 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -130,7 +130,7 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
s.rb = newRebuilder(s.rebuildLimiter)
- if !m.NoMetabase() {
+ if !m.NoMetabase() && !s.rebuildDisabled {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
s.writecacheSealCancel.Store(dummyCancel)
@@ -398,7 +398,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
defer unlock()
s.rb.Stop(s.log)
- if !s.info.Mode.NoMetabase() {
+ if !s.info.Mode.NoMetabase() && !s.rebuildDisabled {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 90958cd35..1c0ef1c2e 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -187,7 +187,7 @@ func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) {
sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC()},
+ additionalShardOptions: []Option{WithDisabledGC(), WithDisabledRebuild()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
defer func() { require.NoError(t, sh.Close()) }()
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 93f5354a7..d11bcc36b 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -139,6 +139,8 @@ type cfg struct {
reportErrorFunc func(selfID string, message string, err error)
rebuildLimiter RebuildWorkerLimiter
+
+ rebuildDisabled bool
}
func defaultCfg() *cfg {
@@ -410,6 +412,14 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
+// WithDisabledRebuild returns an option to disable a shard rebuild.
+// For testing purposes only.
+func WithDisabledRebuild() Option {
+ return func(c *cfg) {
+ c.rebuildDisabled = true
+ }
+}
+
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From a685fcdc963b0f58003059bb2dae2d21c925e25a Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 30 Aug 2024 19:20:55 +0300
Subject: [PATCH 015/655] [#1317] go.mod: Use range over int
Since Go 1.22, a "for" statement with a "range" clause can
iterate over integer values from zero up to, but not including, the given limit.
gopatch script:
@@
var i, e expression
@@
-for i := 0; i <= e - 1; i++ {
+for i := range e {
...
}
@@
var i, e expression
@@
-for i := 0; i <= e; i++ {
+for i := range e + 1 {
...
}
@@
var i, e expression
@@
-for i := 0; i < e; i++ {
+for i := range e {
...
}
Signed-off-by: Ekaterina Lebedeva
---
.../modules/morph/contract/dump_hashes.go | 4 ++--
.../modules/morph/helper/local_client.go | 2 +-
.../internal/modules/morph/helper/util.go | 2 +-
.../morph/initialize/initialize_test.go | 4 ++--
cmd/frostfs-cli/internal/client/client.go | 2 +-
cmd/frostfs-cli/modules/container/create.go | 2 +-
cmd/frostfs-cli/modules/container/delete.go | 2 +-
cmd/frostfs-cli/modules/object/nodes.go | 2 +-
cmd/frostfs-cli/modules/util/acl.go | 2 +-
cmd/frostfs-node/config/node/config.go | 2 +-
cmd/frostfs-node/morph.go | 2 +-
pkg/innerring/indexer_test.go | 2 +-
pkg/innerring/notary.go | 2 +-
.../processors/alphabet/handlers_test.go | 6 ++---
.../processors/governance/handlers_test.go | 2 +-
.../processors/governance/list_test.go | 4 ++--
.../blobovnicza/sizes_test.go | 2 +-
.../blobovniczatree/concurrency_test.go | 2 +-
.../blobstor/blobovniczatree/rebuild_test.go | 2 +-
.../blobstor/blobstor_test.go | 12 +++++-----
.../blobstor/compression/bench_test.go | 2 +-
.../blobstor/fstree/fstree_test.go | 6 ++---
.../blobstor/perf_test.go | 4 ++--
.../engine/control_test.go | 4 ++--
.../engine/engine_test.go | 12 +++++-----
.../engine/evacuate_test.go | 4 ++--
pkg/local_object_storage/engine/list_test.go | 2 +-
.../engine/remove_copies.go | 2 +-
.../engine/remove_copies_test.go | 4 ++--
.../engine/shards_test.go | 2 +-
pkg/local_object_storage/engine/tree_test.go | 6 ++---
.../internal/testutil/generators_test.go | 6 ++---
.../metabase/containers_test.go | 8 +++----
.../metabase/counter_test.go | 8 +++----
.../metabase/delete_test.go | 2 +-
pkg/local_object_storage/metabase/get_test.go | 4 ++--
.../metabase/list_test.go | 12 +++++-----
.../metabase/lock_test.go | 4 ++--
pkg/local_object_storage/metabase/put_test.go | 2 +-
.../metabase/reset_test.go | 2 +-
.../metabase/select_test.go | 6 ++---
pkg/local_object_storage/metabase/upgrade.go | 2 +-
.../metabase/upgrade_test.go | 10 ++++----
pkg/local_object_storage/pilorama/boltdb.go | 2 +-
.../pilorama/forest_test.go | 24 +++++++++----------
pkg/local_object_storage/shard/list_test.go | 4 ++--
.../shard/metrics_test.go | 10 ++++----
pkg/local_object_storage/shard/refill_test.go | 4 ++--
.../writecache/benchmark/writecache_test.go | 2 +-
pkg/local_object_storage/writecache/flush.go | 2 +-
pkg/morph/event/notary_preparator_test.go | 2 +-
pkg/morph/timer/block_test.go | 2 +-
pkg/network/tls_test.go | 2 +-
pkg/services/control/server/evacuate.go | 2 +-
pkg/services/object/acl/v2/util_test.go | 2 +-
pkg/services/object/get/get_test.go | 8 +++----
pkg/services/object/get/getrangeec_test.go | 2 +-
pkg/services/object/put/ec.go | 2 +-
pkg/services/object/search/search_test.go | 4 ++--
.../object_manager/placement/cache_test.go | 4 ++--
.../object_manager/placement/traverser.go | 4 ++--
.../placement/traverser_test.go | 6 ++---
.../storage/persistent/executor_test.go | 2 +-
pkg/services/tree/getsubtree_test.go | 2 +-
pkg/services/tree/replicator.go | 2 +-
pkg/util/sync/key_locker_test.go | 2 +-
66 files changed, 135 insertions(+), 135 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
index 5a0d29550..be2134b77 100644
--- a/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
+++ b/cmd/frostfs-adm/internal/modules/morph/contract/dump_hashes.go
@@ -68,7 +68,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
if irSize != 0 {
bw.Reset()
- for i := 0; i < irSize; i++ {
+ for i := range irSize {
emit.AppCall(bw.BinWriter, cs.Hash, "resolve", callflag.ReadOnly,
helper.GetAlphabetNNSDomain(i),
int64(nns.TXT))
@@ -79,7 +79,7 @@ func dumpContractHashes(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("can't fetch info from NNS: %w", err)
}
- for i := 0; i < irSize; i++ {
+ for i := range irSize {
info := contractDumpInfo{name: fmt.Sprintf("alphabet %d", i)}
if h, err := helper.ParseNNSResolveResult(alphaRes.Stack[i]); err == nil {
info.hash = h
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
index 44d1b4ecf..375fa84d7 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/local_client.go
@@ -224,7 +224,7 @@ func (l *LocalClient) CalculateNetworkFee(tx *transaction.Transaction) (int64, e
paramz = []manifest.Parameter{{Type: smartcontract.SignatureType}}
} else if nSigs, _, ok := vm.ParseMultiSigContract(w.VerificationScript); ok {
paramz = make([]manifest.Parameter, nSigs)
- for j := 0; j < nSigs; j++ {
+ for j := range nSigs {
paramz[j] = manifest.Parameter{Type: smartcontract.SignatureType}
}
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/util.go b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
index 2d9281c24..8c6b90539 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/util.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/util.go
@@ -44,7 +44,7 @@ func openAlphabetWallets(v *viper.Viper, walletDir string) ([]*wallet.Wallet, er
var wallets []*wallet.Wallet
var letter string
- for i := 0; i < constants.MaxAlphabetNodes; i++ {
+ for i := range constants.MaxAlphabetNodes {
letter = innerring.GlagoliticLetter(i).String()
p := filepath.Join(walletDir, letter+".json")
var w *wallet.Wallet
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
index 6c52aa2ab..74f5d3e88 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_test.go
@@ -113,7 +113,7 @@ func generateTestData(dir string, size int) error {
}
var pubs []string
- for i := 0; i < size; i++ {
+ for i := range size {
p := filepath.Join(dir, innerring.GlagoliticLetter(i).String()+".json")
w, err := wallet.NewWalletFromFile(p)
if err != nil {
@@ -148,7 +148,7 @@ func generateTestData(dir string, size int) error {
}
func setTestCredentials(v *viper.Viper, size int) {
- for i := 0; i < size; i++ {
+ for i := range size {
v.Set("credentials."+innerring.GlagoliticLetter(i).String(), strconv.FormatUint(uint64(i), 10))
}
v.Set("credentials.contract", constants.TestContractPassword)
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index a0fa22410..57bcf5620 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -692,7 +692,7 @@ func SearchObjects(ctx context.Context, prm SearchObjectsPrm) (*SearchObjectsRes
for {
n, ok = rdr.Read(buf)
- for i := 0; i < n; i++ {
+ for i := range n {
list = append(list, buf[i])
}
if !ok {
diff --git a/cmd/frostfs-cli/modules/container/create.go b/cmd/frostfs-cli/modules/container/create.go
index c6f576908..f37b169ce 100644
--- a/cmd/frostfs-cli/modules/container/create.go
+++ b/cmd/frostfs-cli/modules/container/create.go
@@ -139,7 +139,7 @@ It will be stored in sidechain when inner ring will accepts it.`,
},
}
- for i := 0; i < awaitTimeout; i++ {
+ for range awaitTimeout {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
diff --git a/cmd/frostfs-cli/modules/container/delete.go b/cmd/frostfs-cli/modules/container/delete.go
index e5425bf25..c20188884 100644
--- a/cmd/frostfs-cli/modules/container/delete.go
+++ b/cmd/frostfs-cli/modules/container/delete.go
@@ -110,7 +110,7 @@ Only owner of the container has a permission to remove container.`,
},
}
- for i := 0; i < awaitTimeout; i++ {
+ for range awaitTimeout {
time.Sleep(1 * time.Second)
_, err := internalclient.GetContainer(cmd.Context(), getPrm)
diff --git a/cmd/frostfs-cli/modules/object/nodes.go b/cmd/frostfs-cli/modules/object/nodes.go
index 896f6f17f..0eac4e6d2 100644
--- a/cmd/frostfs-cli/modules/object/nodes.go
+++ b/cmd/frostfs-cli/modules/object/nodes.go
@@ -195,7 +195,7 @@ func flattenComplexMembersIfECContainer(cmd *cobra.Command, cnrID cid.ID, member
prmHead.SetRawFlag(true) // to get an error instead of whole object
eg, egCtx := errgroup.WithContext(cmd.Context())
- for idx := 0; idx < len(members); idx++ {
+ for idx := range len(members) {
partObjID := members[idx]
eg.Go(func() error {
diff --git a/cmd/frostfs-cli/modules/util/acl.go b/cmd/frostfs-cli/modules/util/acl.go
index 4c2e324b3..145dcc756 100644
--- a/cmd/frostfs-cli/modules/util/acl.go
+++ b/cmd/frostfs-cli/modules/util/acl.go
@@ -33,7 +33,7 @@ func PrettyPrintTableBACL(cmd *cobra.Command, bacl *acl.Basic) {
fmt.Fprintln(w, strings.Join(bits, "\t"))
// Footer
footer := []string{"X F"}
- for i := 0; i < 7; i++ {
+ for range 7 {
footer = append(footer, "U S O B")
}
fmt.Fprintln(w, strings.Join(footer, "\t"))
diff --git a/cmd/frostfs-node/config/node/config.go b/cmd/frostfs-node/config/node/config.go
index 97aca274a..4d063245b 100644
--- a/cmd/frostfs-node/config/node/config.go
+++ b/cmd/frostfs-node/config/node/config.go
@@ -121,7 +121,7 @@ func BootstrapAddresses(c *config.Config) (addr network.AddressGroup) {
func Attributes(c *config.Config) (attrs []string) {
const maxAttributes = 100
- for i := 0; i < maxAttributes; i++ {
+ for i := range maxAttributes {
attr := config.StringSafe(c.Sub(subsection), attributePrefix+"_"+strconv.Itoa(i))
if attr == "" {
return
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 1b148095b..7178cd97d 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -157,7 +157,7 @@ var (
)
func waitNotaryDeposit(ctx context.Context, c *cfg, tx util.Uint256) error {
- for i := 0; i < notaryDepositRetriesAmount; i++ {
+ for range notaryDepositRetriesAmount {
c.log.Debug(logs.ClientAttemptToWaitForNotaryDepositTransactionToGetPersisted)
select {
case <-ctx.Done():
diff --git a/pkg/innerring/indexer_test.go b/pkg/innerring/indexer_test.go
index 5bc2cc988..c8a819b5b 100644
--- a/pkg/innerring/indexer_test.go
+++ b/pkg/innerring/indexer_test.go
@@ -237,7 +237,7 @@ func BenchmarkKeyPosition(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
if keyPosition(key, list) != 5 {
b.FailNow()
}
diff --git a/pkg/innerring/notary.go b/pkg/innerring/notary.go
index c601f5587..e6f2b1de4 100644
--- a/pkg/innerring/notary.go
+++ b/pkg/innerring/notary.go
@@ -90,7 +90,7 @@ func (s *Server) initNotary(ctx context.Context, deposit depositor, await awaite
}
func awaitNotaryDepositInClient(ctx context.Context, cli *client.Client, txHash util.Uint256) error {
- for i := 0; i < notaryDepositTimeout; i++ {
+ for range notaryDepositTimeout {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index 346901949..dfda37472 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -21,7 +21,7 @@ func TestProcessorEmitsGasToNetmapAndAlphabet(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{{20}, {25}}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -98,7 +98,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
@@ -170,7 +170,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
var parsedWallets []util.Uint160 = []util.Uint160{}
alphabetContracts := innerring.NewAlphabetContracts()
- for i := 0; i <= index; i++ {
+ for i := range index + 1 {
alphabetContracts[innerring.GlagoliticLetter(i)] = util.Uint160{uint8(i)}
}
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index 2a505f8d1..b73e24318 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -190,7 +190,7 @@ func generateTestKeys(t *testing.T) testKeys {
for {
var result testKeys
- for i := 0; i < 4; i++ {
+ for range 4 {
pk, err := keys.NewPrivateKey()
require.NoError(t, err, "failed to create private key")
result.sidechainKeys = append(result.sidechainKeys, pk.PublicKey())
diff --git a/pkg/innerring/processors/governance/list_test.go b/pkg/innerring/processors/governance/list_test.go
index d099ec837..4ecebf05b 100644
--- a/pkg/innerring/processors/governance/list_test.go
+++ b/pkg/innerring/processors/governance/list_test.go
@@ -49,7 +49,7 @@ func TestNewAlphabetList(t *testing.T) {
}
ln := len(rounds)
- for i := 0; i < ln; i++ {
+ for i := range ln {
list, err = newAlphabetList(list, exp)
require.NoError(t, err)
require.True(t, equalPublicKeyLists(list, rounds[i]))
@@ -131,7 +131,7 @@ func TestUpdateInnerRing(t *testing.T) {
func generateKeys(n int) (keys.PublicKeys, error) {
pubKeys := make(keys.PublicKeys, 0, n)
- for i := 0; i < n; i++ {
+ for range n {
privKey, err := keys.NewPrivateKey()
if err != nil {
return nil, err
diff --git a/pkg/local_object_storage/blobovnicza/sizes_test.go b/pkg/local_object_storage/blobovnicza/sizes_test.go
index 01093b8d7..d582fc5e4 100644
--- a/pkg/local_object_storage/blobovnicza/sizes_test.go
+++ b/pkg/local_object_storage/blobovnicza/sizes_test.go
@@ -42,7 +42,7 @@ func TestSizes(t *testing.T) {
func BenchmarkUpperBound(b *testing.B) {
for _, size := range []uint64{1, 1023, 65 * 1024, 40 * 1024 * 1024} {
b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
_ = upperPowerOfTwo(size)
}
})
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
index 5bed86142..cc8a52d03 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/concurrency_test.go
@@ -34,7 +34,7 @@ func TestBlobovniczaTree_Concurrency(t *testing.T) {
var cnt atomic.Int64
var wg sync.WaitGroup
- for i := 0; i < 1000; i++ {
+ for range 1000 {
wg.Add(1)
go func() {
defer wg.Done()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 7a1de4c13..4a51fd86a 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -127,7 +127,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
eg, egCtx := errgroup.WithContext(context.Background())
storageIDs := make(map[oid.Address][]byte)
storageIDsGuard := &sync.Mutex{}
- for i := 0; i < 100; i++ {
+ for range 100 {
eg.Go(func() error {
obj := blobstortest.NewObject(1024)
data, err := obj.Marshal()
diff --git a/pkg/local_object_storage/blobstor/blobstor_test.go b/pkg/local_object_storage/blobstor/blobstor_test.go
index c7d80dc84..bed5e0eb9 100644
--- a/pkg/local_object_storage/blobstor/blobstor_test.go
+++ b/pkg/local_object_storage/blobstor/blobstor_test.go
@@ -60,7 +60,7 @@ func TestCompression(t *testing.T) {
bigObj := make([]*objectSDK.Object, objCount)
smallObj := make([]*objectSDK.Object, objCount)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
bigObj[i] = testObject(smallSizeLimit * 2)
smallObj[i] = testObject(smallSizeLimit / 2)
}
@@ -219,7 +219,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount; i++ {
+ for range concurrentPutCount {
wg.Add(1)
go func() {
testPut(t, blobStor, bigObj)
@@ -235,7 +235,7 @@ func TestConcurrentPut(t *testing.T) {
bigObj := testObject(smallSizeLimit * 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount+1; i++ {
+ for range concurrentPutCount + 1 {
wg.Add(1)
go func() {
testPutFileExistsError(t, blobStor, bigObj)
@@ -251,7 +251,7 @@ func TestConcurrentPut(t *testing.T) {
smallObj := testObject(smallSizeLimit / 2)
var wg sync.WaitGroup
- for i := 0; i < concurrentPutCount; i++ {
+ for range concurrentPutCount {
wg.Add(1)
go func() {
testPut(t, blobStor, smallObj)
@@ -302,7 +302,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, bigObj)
var wg sync.WaitGroup
- for i := 0; i < 2; i++ {
+ for range 2 {
wg.Add(1)
go func() {
testDelete(t, blobStor, bigObj)
@@ -319,7 +319,7 @@ func TestConcurrentDelete(t *testing.T) {
testPut(t, blobStor, smallObj)
var wg sync.WaitGroup
- for i := 0; i < 2; i++ {
+ for range 2 {
wg.Add(1)
go func() {
testDelete(t, blobStor, smallObj)
diff --git a/pkg/local_object_storage/blobstor/compression/bench_test.go b/pkg/local_object_storage/blobstor/compression/bench_test.go
index 986912985..9f70f8ec2 100644
--- a/pkg/local_object_storage/blobstor/compression/bench_test.go
+++ b/pkg/local_object_storage/blobstor/compression/bench_test.go
@@ -36,7 +36,7 @@ func BenchmarkCompression(b *testing.B) {
func benchWith(b *testing.B, c Config, data []byte) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_ = c.Compress(data)
}
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index d633cbac3..5786dfd3b 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -28,7 +28,7 @@ func Benchmark_addressFromString(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
_, err := addressFromString(s)
if err != nil {
b.Fatalf("benchmark error: %v", err)
@@ -73,7 +73,7 @@ func TestObjectCounter(t *testing.T) {
eg, egCtx := errgroup.WithContext(context.Background())
eg.Go(func() error {
- for j := 0; j < 1_000; j++ {
+ for range 1_000 {
_, err := fst.Put(egCtx, putPrm)
if err != nil {
return err
@@ -84,7 +84,7 @@ func TestObjectCounter(t *testing.T) {
eg.Go(func() error {
var le logicerr.Logical
- for j := 0; j < 1_000; j++ {
+ for range 1_000 {
_, err := fst.Delete(egCtx, delPrm)
if err != nil && !errors.As(err, &le) {
return err
diff --git a/pkg/local_object_storage/blobstor/perf_test.go b/pkg/local_object_storage/blobstor/perf_test.go
index 501c95a1d..1ac769e36 100644
--- a/pkg/local_object_storage/blobstor/perf_test.go
+++ b/pkg/local_object_storage/blobstor/perf_test.go
@@ -110,7 +110,7 @@ func BenchmarkSubstorageReadPerf(b *testing.B) {
// Fill database
var errG errgroup.Group
- for i := 0; i < tt.size; i++ {
+ for range tt.size {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
errG.Go(func() error {
@@ -203,7 +203,7 @@ func BenchmarkSubstorageIteratePerf(b *testing.B) {
defer func() { require.NoError(b, st.Close()) }()
// Fill database
- for i := 0; i < tt.size; i++ {
+ for range tt.size {
obj := objGen.Next()
addr := testutil.AddressFromObject(b, obj)
raw, err := obj.Marshal()
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index f0809883c..2de92ae84 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -208,7 +208,7 @@ func TestPersistentShardID(t *testing.T) {
require.NoError(t, te.ng.Close(context.Background()))
newTe := newEngineWithErrorThreshold(t, dir, 1)
- for i := 0; i < len(newTe.shards); i++ {
+ for i := range len(newTe.shards) {
require.Equal(t, te.shards[i].id, newTe.shards[i].id)
}
require.NoError(t, newTe.ng.Close(context.Background()))
@@ -269,7 +269,7 @@ func TestReload(t *testing.T) {
e, currShards := engineWithShards(t, removePath, shardNum)
var rcfg ReConfiguration
- for i := 0; i < len(currShards)-1; i++ { // without one of the shards
+ for i := range len(currShards) - 1 { // without one of the shards
rcfg.AddShard(currShards[i], nil)
}
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 49976abbb..525e17f34 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -44,7 +44,7 @@ func BenchmarkExists(b *testing.B) {
func benchmarkExists(b *testing.B, shardNum int) {
shards := make([]*shard.Shard, shardNum)
- for i := 0; i < shardNum; i++ {
+ for i := range shardNum {
shards[i] = testNewShard(b)
}
@@ -52,7 +52,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
defer func() { require.NoError(b, e.Close(context.Background())) }()
addr := oidtest.Address()
- for i := 0; i < 100; i++ {
+ for range 100 {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
err := Put(context.Background(), e, obj)
if err != nil {
@@ -62,7 +62,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
b.ReportAllocs()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
+ for range b.N {
var shPrm shard.ExistsPrm
shPrm.Address = addr
shPrm.ParentAddress = oid.Address{}
@@ -109,7 +109,7 @@ func (te *testEngineWrapper) setInitializedShards(t testing.TB, shards ...*shard
func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrapper {
shards := make([]*shard.Shard, 0, num)
- for i := 0; i < num; i++ {
+ for range num {
shards = append(shards, testNewShard(t))
}
@@ -117,7 +117,7 @@ func (te *testEngineWrapper) setShardsNum(t testing.TB, num int) *testEngineWrap
}
func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
- for i := 0; i < num; i++ {
+ for i := range num {
opts := shardOpts(i)
id, err := te.engine.AddShard(context.Background(), opts...)
require.NoError(t, err)
@@ -127,7 +127,7 @@ func (te *testEngineWrapper) setShardsNumOpts(t testing.TB, num int, shardOpts f
}
func (te *testEngineWrapper) setShardsNumAdditionalOpts(t testing.TB, num int, shardOpts func(id int) []shard.Option) *testEngineWrapper {
- for i := 0; i < num; i++ {
+ for i := range num {
defaultOpts := testDefaultShardOptions(t)
opts := append(defaultOpts, shardOpts(i)...)
id, err := te.engine.AddShard(context.Background(), opts...)
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 55268b549..8d25dad4a 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -61,7 +61,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
}
for _, sh := range ids {
- for i := 0; i < objPerShard; i++ {
+ for range objPerShard {
contID := cidtest.ID()
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -554,7 +554,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, "", st.ErrorMessage(), "invalid final error message")
expectedTreeOps := make(map[string][]*pilorama.Move)
- for i := 0; i < len(e.shards); i++ {
+ for i := range len(e.shards) {
sourceTrees, err := pilorama.TreeListAll(context.Background(), e.shards[ids[i].String()])
require.NoError(t, err, "list source trees failed")
require.Len(t, sourceTrees, 3)
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index dd8a2e8a0..11a6c7841 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -79,7 +79,7 @@ func TestListWithCursor(t *testing.T) {
expected := make([]object.Info, 0, tt.objectNum)
got := make([]object.Info, 0, tt.objectNum)
- for i := 0; i < tt.objectNum; i++ {
+ for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
diff --git a/pkg/local_object_storage/engine/remove_copies.go b/pkg/local_object_storage/engine/remove_copies.go
index b99cf4f44..5e1ced56a 100644
--- a/pkg/local_object_storage/engine/remove_copies.go
+++ b/pkg/local_object_storage/engine/remove_copies.go
@@ -87,7 +87,7 @@ func (e *StorageEngine) RemoveDuplicates(ctx context.Context, prm RemoveDuplicat
}
})
- for i := 0; i < prm.Concurrency; i++ {
+ for range prm.Concurrency {
errG.Go(func() error {
return e.removeObjects(ctx, ch)
})
diff --git a/pkg/local_object_storage/engine/remove_copies_test.go b/pkg/local_object_storage/engine/remove_copies_test.go
index 99963576c..6d2291c74 100644
--- a/pkg/local_object_storage/engine/remove_copies_test.go
+++ b/pkg/local_object_storage/engine/remove_copies_test.go
@@ -96,7 +96,7 @@ loop:
require.FailNow(t, "unexpected object was removed", removed[i].addr)
}
- for i := 0; i < copyCount; i++ {
+ for i := range copyCount {
if i%3 == 0 {
require.True(t, removedMask[i], "object %d was expected to be removed", i)
} else {
@@ -207,7 +207,7 @@ func TestRebalanceExitByContext(t *testing.T) {
}()
const removeCount = 3
- for i := 0; i < removeCount-1; i++ {
+ for range removeCount - 1 {
<-deleteCh
signal <- struct{}{}
}
diff --git a/pkg/local_object_storage/engine/shards_test.go b/pkg/local_object_storage/engine/shards_test.go
index f4c7a4309..3347d58f1 100644
--- a/pkg/local_object_storage/engine/shards_test.go
+++ b/pkg/local_object_storage/engine/shards_test.go
@@ -72,7 +72,7 @@ func TestSortShardsByWeight(t *testing.T) {
var shards1 []hashedShard
var weights1 []float64
var shards2 []hashedShard
- for i := 0; i < numOfShards; i++ {
+ for i := range numOfShards {
shards1 = append(shards1, hashedShard{
hash: uint64(i),
})
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 2739058e9..6f694f082 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -34,7 +34,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
d := pilorama.CIDDescriptor{CID: cid, Position: 0, Size: 1}
treeID := "someTree"
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
err := Put(context.Background(), te.ng, obj)
@@ -56,7 +56,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
prm.WithFilters(fs)
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := te.ng.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
@@ -67,7 +67,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
}
})
b.Run("TreeGetByPath", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
nodes, err := te.ng.TreeGetByPath(context.Background(), cid, treeID, pilorama.AttributeFilename, []string{strconv.Itoa(objCount / 2)}, true)
if err != nil {
b.Fatal(err)
diff --git a/pkg/local_object_storage/internal/testutil/generators_test.go b/pkg/local_object_storage/internal/testutil/generators_test.go
index f7be6014d..cc6f726a4 100644
--- a/pkg/local_object_storage/internal/testutil/generators_test.go
+++ b/pkg/local_object_storage/internal/testutil/generators_test.go
@@ -13,7 +13,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
ObjSize: 10,
MaxObjects: 4,
}
- for i := 0; i < 40; i++ {
+ for range 40 {
obj := gen.Next()
id, isSet := obj.ID()
i := binary.LittleEndian.Uint64(id[:])
@@ -26,7 +26,7 @@ func TestOverwriteObjGenerator(t *testing.T) {
func TestRandObjGenerator(t *testing.T) {
gen := &RandObjGenerator{ObjSize: 10}
- for i := 0; i < 10; i++ {
+ for range 10 {
obj := gen.Next()
require.Equal(t, gen.ObjSize, uint64(len(obj.Payload())))
@@ -50,7 +50,7 @@ func TestSeqObjGenerator(t *testing.T) {
func TestRandAddrGenerator(t *testing.T) {
gen := RandAddrGenerator(5)
- for i := 0; i < 50; i++ {
+ for range 50 {
addr := gen.Next()
id := addr.Object()
k := binary.LittleEndian.Uint64(id[:])
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 5d6788d7e..8b1874458 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -24,7 +24,7 @@ func TestDB_Containers(t *testing.T) {
cids := make(map[string]int, N)
- for i := 0; i < N; i++ {
+ for range N {
obj := testutil.GenerateObject()
cnr, _ := obj.ContainerID()
@@ -95,7 +95,7 @@ func TestDB_ContainersCount(t *testing.T) {
expected := make([]cid.ID, 0, R+T+SG+L)
for _, upload := range uploadObjects {
- for i := 0; i < upload.amount; i++ {
+ for range upload.amount {
obj := testutil.GenerateObject()
obj.SetType(upload.typ)
@@ -126,11 +126,11 @@ func TestDB_ContainerSize(t *testing.T) {
cids := make(map[cid.ID]int, C)
objs := make(map[cid.ID][]*objectSDK.Object, C*N)
- for i := 0; i < C; i++ {
+ for range C {
cnr := cidtest.ID()
cids[cnr] = 0
- for j := 0; j < N; j++ {
+ for range N {
size := rand.Intn(1024)
parent := testutil.GenerateObjectWithCID(cnr)
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index 1797fc0aa..d1f808a63 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -39,14 +39,14 @@ func TestCounters(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
oo := make([]*objectSDK.Object, 0, objCount)
- for i := 0; i < objCount; i++ {
+ for range objCount {
oo = append(oo, testutil.GenerateObject())
}
var prm meta.PutPrm
exp := make(map[cid.ID]meta.ObjectCounters)
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
prm.SetObject(oo[i])
cnrID, _ := oo[i].ContainerID()
c := meta.ObjectCounters{}
@@ -187,7 +187,7 @@ func TestCounters(t *testing.T) {
// put objects and check that parent info
// does not affect the counter
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
o := testutil.GenerateObject()
if i < objCount/2 { // half of the objs will have the parent
o.SetParent(parObj)
@@ -535,7 +535,7 @@ func putObjs(t *testing.T, db *meta.DB, count int, withParent bool) []*objectSDK
parent := testutil.GenerateObject()
oo := make([]*objectSDK.Object, 0, count)
- for i := 0; i < count; i++ {
+ for i := range count {
o := testutil.GenerateObject()
if withParent {
o.SetParent(parent)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index 2053874d0..cb85157e7 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -131,7 +131,7 @@ func TestDelete(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
cnr := cidtest.ID()
- for i := 0; i < 10; i++ {
+ for range 10 {
obj := testutil.GenerateObjectWithCID(cnr)
var prm meta.PutPrm
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 247ddf9cd..7654d2cd8 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -223,7 +223,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
defer func() { require.NoError(b, db.Close()) }()
addrs := make([]oid.Address, 0, numOfObj)
- for i := 0; i < numOfObj; i++ {
+ for range numOfObj {
raw := testutil.GenerateObject()
addrs = append(addrs, object.AddressOf(raw))
@@ -261,7 +261,7 @@ func benchmarkGet(b *testing.B, numOfObj int) {
b.Run("serial", func(b *testing.B) {
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
var getPrm meta.GetPrm
getPrm.SetAddress(addrs[i%len(addrs)])
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index a92e2eff4..6207497b1 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -35,7 +35,7 @@ func listWithCursorPrepareDB(b *testing.B) *meta.DB {
defer func() { require.NoError(b, db.Close()) }()
obj := testutil.GenerateObject()
- for i := 0; i < 100_000; i++ { // should be a multiple of all batch sizes
+ for i := range 100_000 { // should be a multiple of all batch sizes
obj.SetID(oidtest.ID())
if i%9 == 0 { // let's have 9 objects per container
obj.SetContainerID(cidtest.ID())
@@ -51,7 +51,7 @@ func benchmarkListWithCursor(b *testing.B, db *meta.DB, batchSize int) {
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := db.ListWithCursor(context.Background(), prm)
if err != nil {
if err != meta.ErrEndOfListing {
@@ -80,7 +80,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expected := make([]object.Info, 0, total)
// fill metabase with objects
- for i := 0; i < containers; i++ {
+ for range containers {
containerID := cidtest.ID()
// add one regular object
@@ -140,7 +140,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
expectedIterations--
}
- for i := 0; i < expectedIterations; i++ {
+ for range expectedIterations {
res, cursor, err = metaListWithCursor(db, uint32(countPerReq), cursor)
require.NoError(t, err, "count:%d", countPerReq)
got = append(got, res...)
@@ -169,7 +169,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
expected := make(map[string]int, total)
// fill metabase with objects
- for i := 0; i < total; i++ {
+ for range total {
obj := testutil.GenerateObject()
err := putBig(db, obj)
require.NoError(t, err)
@@ -186,7 +186,7 @@ func TestAddObjectDuringListingWithCursor(t *testing.T) {
}
// add new objects
- for i := 0; i < total; i++ {
+ for range total {
obj := testutil.GenerateObject()
err = putBig(db, obj)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 2d7bfc1cc..62a109b02 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -155,7 +155,7 @@ func TestDB_Lock(t *testing.T) {
inhumePrm.SetGCMark()
- for i := 0; i < objsNum; i++ {
+ for i := range objsNum {
inhumePrm.SetAddresses(objectcore.AddressOf(objs[i]))
res, err = db.Inhume(context.Background(), inhumePrm)
@@ -255,7 +255,7 @@ func putAndLockObj(t *testing.T, db *meta.DB, numOfLockedObjs int) ([]*objectSDK
lockedObjs := make([]*objectSDK.Object, 0, numOfLockedObjs)
lockedObjIDs := make([]oid.ID, 0, numOfLockedObjs)
- for i := 0; i < numOfLockedObjs; i++ {
+ for range numOfLockedObjs {
obj := testutil.GenerateObjectWithCID(cnr)
err := putBig(db, obj)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/put_test.go b/pkg/local_object_storage/metabase/put_test.go
index 84e4029f2..914f5ef06 100644
--- a/pkg/local_object_storage/metabase/put_test.go
+++ b/pkg/local_object_storage/metabase/put_test.go
@@ -74,7 +74,7 @@ func BenchmarkPut(b *testing.B) {
objs := prepareObjects(b.N)
b.ResetTimer()
b.ReportAllocs()
- for i := 0; i < b.N; i++ {
+ for range b.N {
if err := metaPut(db, objs[index.Add(1)], nil); err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/metabase/reset_test.go b/pkg/local_object_storage/metabase/reset_test.go
index 66f5eefc6..993079dce 100644
--- a/pkg/local_object_storage/metabase/reset_test.go
+++ b/pkg/local_object_storage/metabase/reset_test.go
@@ -34,7 +34,7 @@ func TestResetDropsContainerBuckets(t *testing.T) {
defer func() { require.NoError(t, db.Close()) }()
- for idx := 0; idx < 100; idx++ {
+ for idx := range 100 {
var putPrm PutPrm
putPrm.SetObject(testutil.GenerateObject())
putPrm.SetStorageID([]byte(fmt.Sprintf("0/%d", idx)))
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 4fbc5910e..0fab3a108 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -920,7 +920,7 @@ func TestDB_SelectSplitID_EC(t *testing.T) {
ec, err := erasurecode.NewConstructor(dataCount, parityCount)
require.NoError(t, err)
- for i := 0; i < partCount; i++ {
+ for i := range partCount {
cs, err := ec.Split(tt.objects[i], &pk.PrivateKey)
require.NoError(t, err)
@@ -1070,7 +1070,7 @@ func BenchmarkSelect(b *testing.B) {
cid := cidtest.ID()
- for i := 0; i < objCount; i++ {
+ for i := range objCount {
var attr objectSDK.Attribute
attr.SetKey("myHeader")
attr.SetValue(strconv.Itoa(i))
@@ -1129,7 +1129,7 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
prm.SetContainerID(cid)
prm.SetFilters(fs)
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, err := db.Select(context.Background(), prm)
if err != nil {
b.Fatal(err)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index a4c7707b4..e9abd746c 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -143,7 +143,7 @@ func createExpirationEpochBuckets(ctx context.Context, db *bbolt.DB, log func(a
return selectObjectsWithExpirationEpoch(ctx, db, objects)
})
var count atomic.Uint64
- for i := 0; i < upgradeWorkersCount; i++ {
+ for range upgradeWorkersCount {
eg.Go(func() error {
for {
select {
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index dc3d7d07d..3797de0a4 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -91,7 +91,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx := errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects
- for i := 0; i < simpleObjectsCount; i++ {
+ for i := range simpleObjectsCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -110,7 +110,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// complex objects
- for i := 0; i < complexObjectsCount; i++ {
+ for i := range complexObjectsCount {
i := i
eg.Go(func() error {
parent := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -134,7 +134,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects deleted by gc marks
- for i := 0; i < deletedByGCMarksCount; i++ {
+ for i := range deletedByGCMarksCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -156,7 +156,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(10000)
// simple objects deleted by tombstones
- for i := 0; i < deletedByTombstoneCount; i++ {
+ for i := range deletedByTombstoneCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
@@ -186,7 +186,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
eg, ctx = errgroup.WithContext(context.Background())
eg.SetLimit(generateWorkersCount)
// simple objects locked by locks
- for i := 0; i < lockedCount; i++ {
+ for i := range lockedCount {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
diff --git a/pkg/local_object_storage/pilorama/boltdb.go b/pkg/local_object_storage/pilorama/boltdb.go
index 29941be83..e2d69cafa 100644
--- a/pkg/local_object_storage/pilorama/boltdb.go
+++ b/pkg/local_object_storage/pilorama/boltdb.go
@@ -705,7 +705,7 @@ func (t *boltForest) applyOperation(logBucket, treeBucket *bbolt.Bucket, ms []*M
key, value = c.Prev()
}
- for i := 0; i < len(ms); i++ {
+ for i := range len(ms) {
// Loop invariant: key represents the next stored timestamp after ms[i].Time.
// 2. Insert the operation.
diff --git a/pkg/local_object_storage/pilorama/forest_test.go b/pkg/local_object_storage/pilorama/forest_test.go
index 41d7a567c..854fe0aad 100644
--- a/pkg/local_object_storage/pilorama/forest_test.go
+++ b/pkg/local_object_storage/pilorama/forest_test.go
@@ -194,7 +194,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
const total = 100_000
d := CIDDescriptor{cnr, 0, 1}
- for i := 0; i < total; i++ {
+ for i := range total {
u, err := uuid.NewRandom()
if err != nil {
b.FailNow()
@@ -216,7 +216,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
b.Run(providers[i].name+",root", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{RootID}, nil, 100)
if err != nil || len(res) != 100 {
b.Fatalf("err %v, count %d", err, len(res))
@@ -224,7 +224,7 @@ func BenchmarkForestSortedIteration(b *testing.B) {
}
})
b.Run(providers[i].name+",leaf", func(b *testing.B) {
- for i := 0; i < b.N; i++ {
+ for range b.N {
res, _, err := f.TreeSortedByFilename(context.Background(), cnr, treeID, MultiNode{1}, nil, 100)
if err != nil || len(res) != 0 {
b.FailNow()
@@ -804,7 +804,7 @@ func testForestApplySameOperation(t *testing.T, constructor func(t testing.TB, _
defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.TreeApply(ctx, cid, treeID, &logs[0], false))
- for i := 0; i < batchSize; i++ {
+ for range batchSize {
errG.Go(func() error {
return s.TreeApply(ctx, cid, treeID, &logs[2], false)
})
@@ -1043,7 +1043,7 @@ func TestForest_ParallelApply(t *testing.T) {
// The operations are guaranteed to be applied and returned sorted by `Time`.
func prepareRandomTree(nodeCount, opCount int) []Move {
ops := make([]Move, nodeCount+opCount)
- for i := 0; i < nodeCount; i++ {
+ for i := range nodeCount {
ops[i] = Move{
Parent: 0,
Meta: Meta{
@@ -1121,14 +1121,14 @@ func testForestTreeParallelApply(t *testing.T, constructor func(t testing.TB, _
require.NoError(t, expected.TreeApply(context.Background(), cid, treeID, &ops[i], false))
}
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
actual := constructor(t, WithMaxBatchSize(batchSize), WithNoSync(true))
wg := new(sync.WaitGroup)
ch := make(chan *Move)
- for i := 0; i < batchSize; i++ {
+ for range batchSize {
wg.Add(1)
go func() {
defer wg.Done()
@@ -1170,7 +1170,7 @@ func testForestTreeApplyRandom(t *testing.T, constructor func(t testing.TB, _ ..
}
const iterCount = 200
- for i := 0; i < iterCount; i++ {
+ for range iterCount {
// Shuffle random operations, leave initialization in place.
r.Shuffle(len(ops), func(i, j int) { ops[i], ops[j] = ops[j], ops[i] })
@@ -1247,7 +1247,7 @@ func BenchmarkApplyReorderLast(b *testing.B) {
Child: uint64(r.Intn(benchNodeCount)),
}
if i != 0 && i%blockSize == 0 {
- for j := 0; j < blockSize/2; j++ {
+ for j := range blockSize / 2 {
ops[i-j], ops[i+j-blockSize] = ops[i+j-blockSize], ops[i-j]
}
}
@@ -1265,7 +1265,7 @@ func benchmarkApply(b *testing.B, s Forest, genFunc func(int) []Move) {
cid := cidtest.ID()
treeID := "version"
ch := make(chan int, b.N)
- for i := 0; i < b.N; i++ {
+ for i := range b.N {
ch <- i
}
@@ -1311,7 +1311,7 @@ func testTreeGetByPath(t *testing.T, s ForestStorage) {
if mf, ok := s.(*memoryForest); ok {
single := mf.treeMap[cid.String()+"/"+treeID]
t.Run("test meta", func(t *testing.T) {
- for i := 0; i < 6; i++ {
+ for i := range 6 {
require.Equal(t, uint64(i), single.infoMap[Node(i+1)].Meta.Time)
}
})
@@ -1492,7 +1492,7 @@ func testTreeListTreesCount(t *testing.T, constructor func(t testing.TB, _ ...Op
var expected []ContainerIDTreeID
treeIDs := []string{"version", "system", "s", "avada kedavra"}
- for i := 0; i < count; i++ {
+ for i := range count {
cid := cidtest.ID()
treeID := treeIDs[i%len(treeIDs)]
expected = append(expected, ContainerIDTreeID{
diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go
index 8a49a36fd..3414dc76a 100644
--- a/pkg/local_object_storage/shard/list_test.go
+++ b/pkg/local_object_storage/shard/list_test.go
@@ -39,11 +39,11 @@ func testShardList(t *testing.T, sh *Shard) {
var errG errgroup.Group
errG.SetLimit(C * N)
- for i := 0; i < C; i++ {
+ for range C {
errG.Go(func() error {
cnr := cidtest.ID()
- for j := 0; j < N; j++ {
+ for range N {
errG.Go(func() error {
obj := testutil.GenerateObjectWithCID(cnr)
testutil.AddPayload(obj, 1<<2)
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 38d465f31..1ef849c02 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -206,7 +206,7 @@ func TestCounters(t *testing.T) {
const objNumber = 10
oo := make([]*objectSDK.Object, objNumber)
- for i := 0; i < objNumber; i++ {
+ for i := range objNumber {
oo[i] = testutil.GenerateObject()
}
@@ -248,7 +248,7 @@ func TestCounters(t *testing.T) {
var prm PutPrm
- for i := 0; i < objNumber; i++ {
+ for i := range objNumber {
prm.SetObject(oo[i])
_, err := sh.Put(context.Background(), prm)
@@ -269,7 +269,7 @@ func TestCounters(t *testing.T) {
var prm InhumePrm
inhumedNumber := objNumber / 4
- for i := 0; i < inhumedNumber; i++ {
+ for i := range inhumedNumber {
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
_, err := sh.Inhume(context.Background(), prm)
@@ -317,7 +317,7 @@ func TestCounters(t *testing.T) {
_, err := sh.Inhume(context.Background(), prm)
require.NoError(t, err)
- for i := 0; i < inhumedNumber; i++ {
+ for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
require.True(t, ok)
expectedLogicalSizes[cid.EncodeToString()] -= int64(oo[i].PayloadSize())
@@ -419,7 +419,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
func addrFromObjs(oo []*objectSDK.Object) []oid.Address {
aa := make([]oid.Address, len(oo))
- for i := 0; i < len(oo); i++ {
+ for i := range len(oo) {
aa[i] = objectcore.AddressOf(oo[i])
}
diff --git a/pkg/local_object_storage/shard/refill_test.go b/pkg/local_object_storage/shard/refill_test.go
index 509ccaaa6..0025bb45a 100644
--- a/pkg/local_object_storage/shard/refill_test.go
+++ b/pkg/local_object_storage/shard/refill_test.go
@@ -38,7 +38,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
var putPrm PutPrm
- for i := 0; i < objectsCount/2; i++ {
+ for range objectsCount / 2 {
obj := testutil.GenerateObject()
testutil.AddAttribute(obj, "foo", "bar")
testutil.AddPayload(obj, 1<<5) // blobvnicza tree obj
@@ -49,7 +49,7 @@ func benchRefillMetabase(b *testing.B, objectsCount int) {
require.NoError(b, err)
}
- for i := 0; i < objectsCount/2; i++ {
+ for range objectsCount / 2 {
obj := testutil.GenerateObject()
testutil.AddAttribute(obj, "foo", "bar")
obj.SetID(oidtest.ID())
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 4f4398452..4da9a26d7 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -54,7 +54,7 @@ func benchmarkPutSeq(b *testing.B, cache writecache.Cache, size uint64) {
objGen := testutil.RandObjGenerator{ObjSize: size}
b.ResetTimer()
- for n := 0; n < b.N; n++ {
+ for range b.N {
obj := objGen.Next()
rawData, err := obj.Marshal()
require.NoError(b, err, "marshaling object")
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index e34f5a76b..930ac8431 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -41,7 +41,7 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- for i := 0; i < c.workersCount; i++ {
+ for range c.workersCount {
c.wg.Add(1)
go c.workerFlushSmall(ctx)
}
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 8da9d868a..4c269bcbd 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -439,7 +439,7 @@ func TestPrepare_CorrectNR(t *testing.T) {
)
for _, test := range tests {
- for i := 0; i < 1; i++ { // run tests against 3 and 4 witness NR
+ for i := range 1 { // run tests against 3 and 4 witness NR
for _, dummyMultisig := range []bool{true, false} { // run tests against empty and dummy multisig/Notary witness
additionalWitness := i == 0
nr := correctNR(script(test.hash, test.method, test.args...), dummyMultisig, additionalWitness)
diff --git a/pkg/morph/timer/block_test.go b/pkg/morph/timer/block_test.go
index 93bb04de5..7929754c1 100644
--- a/pkg/morph/timer/block_test.go
+++ b/pkg/morph/timer/block_test.go
@@ -208,7 +208,7 @@ func TestBlockTimer_TickSameHeight(t *testing.T) {
require.NoError(t, bt.Reset())
check := func(t *testing.T, h uint32, base, delta int) {
- for i := 0; i < 2*int(blockDur); i++ {
+ for range 2 * int(blockDur) {
bt.Tick(h)
require.Equal(t, base, baseCounter)
require.Equal(t, delta, deltaCounter)
diff --git a/pkg/network/tls_test.go b/pkg/network/tls_test.go
index 6c352484b..14729f4c2 100644
--- a/pkg/network/tls_test.go
+++ b/pkg/network/tls_test.go
@@ -37,7 +37,7 @@ func BenchmarkAddressTLSEnabled(b *testing.B) {
b.ReportAllocs()
var enabled bool
- for i := 0; i < b.N; i++ {
+ for range b.N {
enabled = addr.IsTLSEnabled()
}
require.True(b, enabled)
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
index 0ba8be765..300cb9dc9 100644
--- a/pkg/services/control/server/evacuate.go
+++ b/pkg/services/control/server/evacuate.go
@@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
nodes := placement.FlattenNodes(ns)
bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := 0; i < len(nodes); i++ {
+ for i := range len(nodes) {
if bytes.Equal(nodes[i].PublicKey(), bs) {
copy(nodes[i:], nodes[i+1:])
nodes = nodes[:len(nodes)-1]
diff --git a/pkg/services/object/acl/v2/util_test.go b/pkg/services/object/acl/v2/util_test.go
index 394feef4e..435339683 100644
--- a/pkg/services/object/acl/v2/util_test.go
+++ b/pkg/services/object/acl/v2/util_test.go
@@ -33,7 +33,7 @@ func TestOriginalTokens(t *testing.T) {
var sTokenV2 session.Token
sToken.WriteToV2(&sTokenV2)
- for i := 0; i < 10; i++ {
+ for i := range 10 {
metaHeaders := testGenerateMetaHeader(uint32(i), &bTokenV2, &sTokenV2)
res, err := originalSessionToken(metaHeaders)
require.NoError(t, err)
diff --git a/pkg/services/object/get/get_test.go b/pkg/services/object/get/get_test.go
index 1fc6b7b20..6827018dc 100644
--- a/pkg/services/object/get/get_test.go
+++ b/pkg/services/object/get/get_test.go
@@ -470,7 +470,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
@@ -508,7 +508,7 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
ids := make([]oid.ID, 0, ln)
payload := make([]byte, 0, ln*10)
- for i := 0; i < ln; i++ {
+ for i := range ln {
ids = append(ids, curID)
addr.SetObject(curID)
@@ -1750,7 +1750,7 @@ func TestGetRange(t *testing.T) {
},
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
@@ -1811,7 +1811,7 @@ func TestGetRange(t *testing.T) {
},
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), payload)
diff --git a/pkg/services/object/get/getrangeec_test.go b/pkg/services/object/get/getrangeec_test.go
index b8497d7d1..a6882d4a8 100644
--- a/pkg/services/object/get/getrangeec_test.go
+++ b/pkg/services/object/get/getrangeec_test.go
@@ -131,7 +131,7 @@ func TestGetRangeEC(t *testing.T) {
clients: clients,
})
- for from := 0; from < totalSize-1; from++ {
+ for from := range totalSize - 1 {
for to := from; to < totalSize; to++ {
t.Run(fmt.Sprintf("from=%d,to=%d", from, to), func(t *testing.T) {
testGetRange(t, svc, addr, uint64(from), uint64(to), obj.Payload())
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/put/ec.go
index 1fadf65fe..9980f6d61 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/put/ec.go
@@ -276,7 +276,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
}
// try to save to any node not visited by current part
- for i := 0; i < len(nodes); i++ {
+ for i := range len(nodes) {
select {
case <-ctx.Done():
return ctx.Err()
diff --git a/pkg/services/object/search/search_test.go b/pkg/services/object/search/search_test.go
index 679380402..44abcfe5b 100644
--- a/pkg/services/object/search/search_test.go
+++ b/pkg/services/object/search/search_test.go
@@ -151,7 +151,7 @@ func testSHA256() (cs [sha256.Size]byte) {
func generateIDs(num int) []oid.ID {
res := make([]oid.ID, num)
- for i := 0; i < num; i++ {
+ for i := range num {
res[i].SetSHA256(testSHA256())
}
@@ -232,7 +232,7 @@ func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
ns := make([]netmap.NodeInfo, dim[i])
as := make([]string, dim[i])
- for j := 0; j < dim[i]; j++ {
+ for j := range dim[i] {
a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
strconv.Itoa(i),
strconv.Itoa(60000+j),
diff --git a/pkg/services/object_manager/placement/cache_test.go b/pkg/services/object_manager/placement/cache_test.go
index 07e9340dc..a890d5357 100644
--- a/pkg/services/object_manager/placement/cache_test.go
+++ b/pkg/services/object_manager/placement/cache_test.go
@@ -64,7 +64,7 @@ func TestContainerNodesCache(t *testing.T) {
nm2 := nm(1, nodes[1:2])
cnr := [size * 2]cid.ID{}
res := [size * 2][][]netmapSDK.NodeInfo{}
- for i := 0; i < size*2; i++ {
+ for i := range size * 2 {
cnr[i] = cidtest.ID()
var err error
@@ -77,7 +77,7 @@ func TestContainerNodesCache(t *testing.T) {
require.NoError(t, err)
require.Equal(t, res[i], r)
}
- for i := 0; i < size; i++ {
+ for i := range size {
r, err := c.ContainerNodes(nm2, cnr[i], pp)
require.NoError(t, err)
require.NotEqual(t, res[i], r)
diff --git a/pkg/services/object_manager/placement/traverser.go b/pkg/services/object_manager/placement/traverser.go
index 9a5877c52..4e790628f 100644
--- a/pkg/services/object_manager/placement/traverser.go
+++ b/pkg/services/object_manager/placement/traverser.go
@@ -136,7 +136,7 @@ func defaultCopiesVector(policy netmap.PlacementPolicy) []int {
replNum := policy.NumberOfReplicas()
copyVector := make([]int, 0, replNum)
- for i := 0; i < replNum; i++ {
+ for i := range replNum {
copyVector = append(copyVector, int(policy.ReplicaDescriptor(i).NumberOfObjects()+policy.ReplicaDescriptor(i).GetECDataCount()+policy.ReplicaDescriptor(i).GetECParityCount()))
}
@@ -212,7 +212,7 @@ func (t *Traverser) Next() []Node {
nodes := make([]Node, count)
- for i := 0; i < count; i++ {
+ for i := range count {
err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
if err != nil {
return nil
diff --git a/pkg/services/object_manager/placement/traverser_test.go b/pkg/services/object_manager/placement/traverser_test.go
index f5731c81e..b3b57677d 100644
--- a/pkg/services/object_manager/placement/traverser_test.go
+++ b/pkg/services/object_manager/placement/traverser_test.go
@@ -48,7 +48,7 @@ func testPlacement(ss, rs []int) ([][]netmap.NodeInfo, container.Container) {
for i := range ss {
ns := make([]netmap.NodeInfo, 0, ss[i])
- for j := 0; j < ss[i]; j++ {
+ for range ss[i] {
ns = append(ns, testNode(num))
num++
}
@@ -125,7 +125,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
)
require.NoError(t, err)
- for i := 0; i < len(nodes[0]); i++ {
+ for range len(nodes[0]) {
require.NotNil(t, tr.Next())
}
@@ -164,7 +164,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Empty(t, tr.Next())
require.False(t, tr.Success())
- for i := 0; i < replicas[curVector]; i++ {
+ for range replicas[curVector] {
tr.SubmitSuccess()
}
}
diff --git a/pkg/services/session/storage/persistent/executor_test.go b/pkg/services/session/storage/persistent/executor_test.go
index 39cb14359..124d36930 100644
--- a/pkg/services/session/storage/persistent/executor_test.go
+++ b/pkg/services/session/storage/persistent/executor_test.go
@@ -39,7 +39,7 @@ func TestTokenStore(t *testing.T) {
tokens := make([]tok, 0, tokenNumber)
- for i := 0; i < tokenNumber; i++ {
+ for i := range tokenNumber {
req.SetExpiration(uint64(i))
res, err := ts.Create(context.Background(), req)
diff --git a/pkg/services/tree/getsubtree_test.go b/pkg/services/tree/getsubtree_test.go
index 677431889..95bdda34b 100644
--- a/pkg/services/tree/getsubtree_test.go
+++ b/pkg/services/tree/getsubtree_test.go
@@ -62,7 +62,7 @@ func TestGetSubTree(t *testing.T) {
loop:
for i := 1; i < len(acc.seen); i++ {
parent := acc.seen[i].Body.ParentId
- for j := 0; j < i; j++ {
+ for j := range i {
if acc.seen[j].Body.NodeId[0] == parent[0] {
continue loop
}
diff --git a/pkg/services/tree/replicator.go b/pkg/services/tree/replicator.go
index 346198b3c..95c8f8013 100644
--- a/pkg/services/tree/replicator.go
+++ b/pkg/services/tree/replicator.go
@@ -133,7 +133,7 @@ func (s *Service) ReplicateTreeOp(ctx context.Context, n netmapSDK.NodeInfo, req
}
func (s *Service) replicateLoop(ctx context.Context) {
- for i := 0; i < s.replicatorWorkerCount; i++ {
+ for range s.replicatorWorkerCount {
go s.replicationWorker(ctx)
go s.localReplicationWorker(ctx)
}
diff --git a/pkg/util/sync/key_locker_test.go b/pkg/util/sync/key_locker_test.go
index 3b3e6a694..259064ecf 100644
--- a/pkg/util/sync/key_locker_test.go
+++ b/pkg/util/sync/key_locker_test.go
@@ -13,7 +13,7 @@ func TestKeyLocker(t *testing.T) {
taken := false
eg, _ := errgroup.WithContext(context.Background())
keyLocker := NewKeyLocker[int]()
- for i := 0; i < 100; i++ {
+ for range 100 {
eg.Go(func() error {
keyLocker.Lock(0)
defer keyLocker.Unlock(0)
From a4fb7f085b4cd2c5d7bb2ec91e6e626238dead54 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 3 Sep 2024 11:39:02 +0300
Subject: [PATCH 016/655] [#1348] go.mod: Update api-go and sdk-go
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/internal/client/client.go | 26 -----
cmd/frostfs-cli/modules/container/get_eacl.go | 68 -------------
cmd/frostfs-cli/modules/container/root.go | 2 -
cmd/frostfs-cli/modules/util/ape.go | 6 --
go.mod | 4 +-
go.sum | 8 +-
.../transport/container/grpc/service.go | 15 ---
pkg/services/container/ape.go | 12 ---
pkg/services/container/ape_test.go | 95 -------------------
pkg/services/container/audit.go | 11 ---
pkg/services/container/executor.go | 14 ---
pkg/services/container/morph/executor.go | 37 --------
pkg/services/container/server.go | 1 -
pkg/services/container/sign.go | 9 --
14 files changed, 6 insertions(+), 302 deletions(-)
delete mode 100644 cmd/frostfs-cli/modules/container/get_eacl.go
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 57bcf5620..03a987a57 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -17,7 +17,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -192,31 +191,6 @@ func DeleteContainer(ctx context.Context, prm DeleteContainerPrm) (res DeleteCon
return
}
-// EACLPrm groups parameters of EACL operation.
-type EACLPrm struct {
- Client *client.Client
- ClientParams client.PrmContainerEACL
-}
-
-// EACLRes groups the resulting values of EACL operation.
-type EACLRes struct {
- cliRes *client.ResContainerEACL
-}
-
-// EACL returns requested eACL table.
-func (x EACLRes) EACL() eacl.Table {
- return x.cliRes.Table()
-}
-
-// EACL reads eACL table from FrostFS by container ID.
-//
-// Returns any error which prevented the operation from completing correctly in error return.
-func EACL(ctx context.Context, prm EACLPrm) (res EACLRes, err error) {
- res.cliRes, err = prm.Client.ContainerEACL(ctx, prm.ClientParams)
-
- return
-}
-
// NetworkInfoPrm groups parameters of NetworkInfo operation.
type NetworkInfoPrm struct {
Client *client.Client
diff --git a/cmd/frostfs-cli/modules/container/get_eacl.go b/cmd/frostfs-cli/modules/container/get_eacl.go
deleted file mode 100644
index 4ed1c82e1..000000000
--- a/cmd/frostfs-cli/modules/container/get_eacl.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package container
-
-import (
- "os"
-
- internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
- commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
- "github.com/spf13/cobra"
-)
-
-var getExtendedACLCmd = &cobra.Command{
- Use: "get-eacl",
- Short: "Get extended ACL table of container",
- Long: `Get extended ACL table of container`,
- Run: func(cmd *cobra.Command, _ []string) {
- id := parseContainerID(cmd)
- pk := key.GetOrGenerate(cmd)
- cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
-
- eaclPrm := internalclient.EACLPrm{
- Client: cli,
- ClientParams: client.PrmContainerEACL{
- ContainerID: &id,
- },
- }
-
- res, err := internalclient.EACL(cmd.Context(), eaclPrm)
- commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
-
- eaclTable := res.EACL()
-
- if containerPathTo == "" {
- cmd.Println("eACL: ")
- common.PrettyPrintJSON(cmd, &eaclTable, "eACL")
-
- return
- }
-
- var data []byte
-
- if containerJSON {
- data, err = eaclTable.MarshalJSON()
- commonCmd.ExitOnErr(cmd, "can't encode to JSON: %w", err)
- } else {
- data, err = eaclTable.Marshal()
- commonCmd.ExitOnErr(cmd, "can't encode to binary: %w", err)
- }
-
- cmd.Println("dumping data to file:", containerPathTo)
-
- err = os.WriteFile(containerPathTo, data, 0o644)
- commonCmd.ExitOnErr(cmd, "could not write eACL to file: %w", err)
- },
-}
-
-func initContainerGetEACLCmd() {
- commonflags.Init(getExtendedACLCmd)
-
- flags := getExtendedACLCmd.Flags()
-
- flags.StringVar(&containerID, commonflags.CIDFlag, "", commonflags.CIDFlagUsage)
- flags.StringVar(&containerPathTo, "to", "", "Path to dump encoded container (default: binary encoded)")
- flags.BoolVar(&containerJSON, commonflags.JSON, false, "Encode EACL table in json format")
-}
diff --git a/cmd/frostfs-cli/modules/container/root.go b/cmd/frostfs-cli/modules/container/root.go
index d5f0fd776..2da21e767 100644
--- a/cmd/frostfs-cli/modules/container/root.go
+++ b/cmd/frostfs-cli/modules/container/root.go
@@ -25,7 +25,6 @@ func init() {
deleteContainerCmd,
listContainerObjectsCmd,
getContainerInfoCmd,
- getExtendedACLCmd,
containerNodesCmd,
policyPlaygroundCmd,
}
@@ -37,7 +36,6 @@ func init() {
initContainerDeleteCmd()
initContainerListObjectsCmd()
initContainerInfoCmd()
- initContainerGetEACLCmd()
initContainerNodesCmd()
initContainerPolicyPlaygroundCmd()
diff --git a/cmd/frostfs-cli/modules/util/ape.go b/cmd/frostfs-cli/modules/util/ape.go
index 9af57434a..73c368510 100644
--- a/cmd/frostfs-cli/modules/util/ape.go
+++ b/cmd/frostfs-cli/modules/util/ape.go
@@ -258,10 +258,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
return []string{nativeschema.MethodDeleteContainer}, false, nil
case "container.get":
return []string{nativeschema.MethodGetContainer}, false, nil
- case "container.setcontainereacl":
- return []string{nativeschema.MethodSetContainerEACL}, false, nil
- case "container.getcontainereacl":
- return []string{nativeschema.MethodGetContainerEACL}, false, nil
case "container.list":
return []string{nativeschema.MethodListContainers}, false, nil
case "container.*":
@@ -269,8 +265,6 @@ func parseAction(lexeme string) ([]string, bool, error) {
nativeschema.MethodPutContainer,
nativeschema.MethodDeleteContainer,
nativeschema.MethodGetContainer,
- nativeschema.MethodSetContainerEACL,
- nativeschema.MethodGetContainerEACL,
nativeschema.MethodListContainers,
}, false, nil
default:
diff --git a/go.mod b/go.mod
index 358370201..19bf7852f 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index be82bff70..8ebd59157 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61 h1:bw9EVGWnfY9awFb5XYR52AGbzgg3o04gZF66yHob48c=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240828085308-5e1c6a908f61/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7 h1:9eZidZMT4tHOdc6GZRPlZR12IToKqHhUd5wzevdDUqo=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240827083309-f0b9493ce3f7/go.mod h1:VzVYcwo/eXjkdo5ktPdZeAE4fsnZX6zEun3g+5E2weo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go
index f0206dd5c..9fae22b45 100644
--- a/pkg/network/transport/container/grpc/service.go
+++ b/pkg/network/transport/container/grpc/service.go
@@ -80,18 +80,3 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con
return resp.ToGRPCMessage().(*containerGRPC.ListResponse), nil
}
-
-// GetExtendedACL converts gRPC GetExtendedACLRequest message and passes it to internal Container service.
-func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExtendedACLRequest) (*containerGRPC.GetExtendedACLResponse, error) {
- getEACLReq := new(container.GetExtendedACLRequest)
- if err := getEACLReq.FromGRPCMessage(req); err != nil {
- return nil, err
- }
-
- resp, err := s.srv.GetExtendedACL(ctx, getEACLReq)
- if err != nil {
- return nil, err
- }
-
- return resp.ToGRPCMessage().(*containerGRPC.GetExtendedACLResponse), nil
-}
diff --git a/pkg/services/container/ape.go b/pkg/services/container/ape.go
index 6f8a8e0e6..d92ecf58b 100644
--- a/pkg/services/container/ape.go
+++ b/pkg/services/container/ape.go
@@ -102,18 +102,6 @@ func (ac *apeChecker) Get(ctx context.Context, req *container.GetRequest) (*cont
return ac.next.Get(ctx, req)
}
-func (ac *apeChecker) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.GetExtendedACL")
- defer span.End()
-
- if err := ac.validateContainerBoundedOperation(ctx, req.GetBody().GetContainerID(), req.GetMetaHeader(), req.GetVerificationHeader(),
- nativeschema.MethodGetContainerEACL); err != nil {
- return nil, err
- }
-
- return ac.next.GetExtendedACL(ctx, req)
-}
-
func (ac *apeChecker) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "apeChecker.List")
defer span.End()
diff --git a/pkg/services/container/ape_test.go b/pkg/services/container/ape_test.go
index 68c1158a6..d6f9b75ef 100644
--- a/pkg/services/container/ape_test.go
+++ b/pkg/services/container/ape_test.go
@@ -49,7 +49,6 @@ func TestAPE(t *testing.T) {
t.Run("deny get container by user claim tag", testDenyGetContainerByUserClaimTag)
t.Run("deny get container by IP", testDenyGetContainerByIP)
t.Run("deny get container by group id", testDenyGetContainerByGroupID)
- t.Run("deny get container eACL for IR with session token", testDenyGetContainerEACLForIRSessionToken)
t.Run("deny put container for others with session token", testDenyPutContainerForOthersSessionToken)
t.Run("deny put container, read namespace from frostfsID", testDenyPutContainerReadNamespaceFromFrostfsID)
t.Run("deny put container with invlaid namespace", testDenyPutContainerInvalidNamespace)
@@ -663,95 +662,6 @@ func testDenyGetContainerByGroupID(t *testing.T) {
require.ErrorAs(t, err, &errAccessDenied)
}
-func testDenyGetContainerEACLForIRSessionToken(t *testing.T) {
- t.Parallel()
- srv := &srvStub{
- calls: map[string]int{},
- }
- router := inmemory.NewInMemory()
- contRdr := &containerStub{
- c: map[cid.ID]*containercore.Container{},
- }
- ir := &irStub{
- keys: [][]byte{},
- }
- nm := &netmapStub{}
- frostfsIDSubjectReader := &frostfsidStub{
- subjects: map[util.Uint160]*client.Subject{},
- }
- apeSrv := NewAPEServer(router, contRdr, ir, nm, frostfsIDSubjectReader, srv)
-
- contID := cidtest.ID()
- testContainer := containertest.Container()
- pp := netmap.PlacementPolicy{}
- require.NoError(t, pp.DecodeString("REP 1"))
- testContainer.SetPlacementPolicy(pp)
- contRdr.c[contID] = &containercore.Container{Value: testContainer}
-
- nm.currentEpoch = 100
- nm.netmaps = map[uint64]*netmap.NetMap{}
- var testNetmap netmap.NetMap
- testNetmap.SetEpoch(nm.currentEpoch)
- testNetmap.SetNodes([]netmap.NodeInfo{{}})
- nm.netmaps[nm.currentEpoch] = &testNetmap
- nm.netmaps[nm.currentEpoch-1] = &testNetmap
-
- _, _, err := router.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.ContainerTarget(contID.EncodeToString()), &chain.Chain{
- Rules: []chain.Rule{
- {
- Status: chain.AccessDenied,
- Actions: chain.Actions{
- Names: []string{
- nativeschema.MethodGetContainerEACL,
- },
- },
- Resources: chain.Resources{
- Names: []string{
- fmt.Sprintf(nativeschema.ResourceFormatRootContainer, contID.EncodeToString()),
- },
- },
- Condition: []chain.Condition{
- {
- Kind: chain.KindRequest,
- Key: nativeschema.PropertyKeyActorRole,
- Value: nativeschema.PropertyValueContainerRoleIR,
- Op: chain.CondStringEquals,
- },
- },
- },
- },
- })
- require.NoError(t, err)
-
- req := &container.GetExtendedACLRequest{}
- req.SetBody(&container.GetExtendedACLRequestBody{})
- var refContID refs.ContainerID
- contID.WriteToV2(&refContID)
- req.GetBody().SetContainerID(&refContID)
-
- pk, err := keys.NewPrivateKey()
- require.NoError(t, err)
- require.NoError(t, signature.SignServiceMessage(&pk.PrivateKey, req))
-
- sessionPK, err := keys.NewPrivateKey()
- require.NoError(t, err)
- sToken := sessiontest.ContainerSigned()
- sToken.ApplyOnlyTo(contID)
- require.NoError(t, sToken.Sign(sessionPK.PrivateKey))
- var sTokenV2 session.Token
- sToken.WriteToV2(&sTokenV2)
- metaHeader := new(session.RequestMetaHeader)
- metaHeader.SetSessionToken(&sTokenV2)
- req.SetMetaHeader(metaHeader)
-
- ir.keys = append(ir.keys, sessionPK.PublicKey().Bytes())
-
- resp, err := apeSrv.GetExtendedACL(context.Background(), req)
- require.Nil(t, resp)
- var errAccessDenied *apistatus.ObjectAccessDenied
- require.ErrorAs(t, err, &errAccessDenied)
-}
-
func testDenyPutContainerForOthersSessionToken(t *testing.T) {
t.Parallel()
srv := &srvStub{
@@ -1164,11 +1074,6 @@ func (s *srvStub) Get(context.Context, *container.GetRequest) (*container.GetRes
return &container.GetResponse{}, nil
}
-func (s *srvStub) GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- s.calls["GetExtendedACL"]++
- return &container.GetExtendedACLResponse{}, nil
-}
-
func (s *srvStub) List(context.Context, *container.ListRequest) (*container.ListResponse, error) {
s.calls["List"]++
return &container.ListResponse{}, nil
diff --git a/pkg/services/container/audit.go b/pkg/services/container/audit.go
index 34fd5923f..b257272f5 100644
--- a/pkg/services/container/audit.go
+++ b/pkg/services/container/audit.go
@@ -52,17 +52,6 @@ func (a *auditService) Get(ctx context.Context, req *container.GetRequest) (*con
return res, err
}
-// GetExtendedACL implements Server.
-func (a *auditService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- res, err := a.next.GetExtendedACL(ctx, req)
- if !a.enabled.Load() {
- return res, err
- }
- audit.LogRequest(a.log, container_grpc.ContainerService_GetExtendedACL_FullMethodName, req,
- audit.TargetFromRef(req.GetBody().GetContainerID(), &cid.ID{}), err == nil)
- return res, err
-}
-
// List implements Server.
func (a *auditService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) {
res, err := a.next.List(ctx, req)
diff --git a/pkg/services/container/executor.go b/pkg/services/container/executor.go
index b64963e25..0917e3bd0 100644
--- a/pkg/services/container/executor.go
+++ b/pkg/services/container/executor.go
@@ -14,7 +14,6 @@ type ServiceExecutor interface {
Delete(context.Context, *session.Token, *container.DeleteRequestBody) (*container.DeleteResponseBody, error)
Get(context.Context, *container.GetRequestBody) (*container.GetResponseBody, error)
List(context.Context, *container.ListRequestBody) (*container.ListResponseBody, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error)
}
type executorSvc struct {
@@ -94,16 +93,3 @@ func (s *executorSvc) List(ctx context.Context, req *container.ListRequest) (*co
s.respSvc.SetMeta(resp)
return resp, nil
}
-
-func (s *executorSvc) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- respBody, err := s.exec.GetExtendedACL(ctx, req.GetBody())
- if err != nil {
- return nil, fmt.Errorf("could not execute GetEACL request: %w", err)
- }
-
- resp := new(container.GetExtendedACLResponse)
- resp.SetBody(respBody)
-
- s.respSvc.SetMeta(resp)
- return resp, nil
-}
diff --git a/pkg/services/container/morph/executor.go b/pkg/services/container/morph/executor.go
index 57dac32f0..05d8749cf 100644
--- a/pkg/services/container/morph/executor.go
+++ b/pkg/services/container/morph/executor.go
@@ -201,40 +201,3 @@ func (s *morphExecutor) List(_ context.Context, body *container.ListRequestBody)
return res, nil
}
-
-func (s *morphExecutor) GetExtendedACL(_ context.Context, body *container.GetExtendedACLRequestBody) (*container.GetExtendedACLResponseBody, error) {
- idV2 := body.GetContainerID()
- if idV2 == nil {
- return nil, errors.New("missing container ID")
- }
-
- var id cid.ID
-
- err := id.ReadFromV2(*idV2)
- if err != nil {
- return nil, fmt.Errorf("invalid container ID: %w", err)
- }
-
- eaclInfo, err := s.rdr.GetEACL(id)
- if err != nil {
- return nil, err
- }
-
- var sigV2 refs.Signature
- eaclInfo.Signature.WriteToV2(&sigV2)
-
- var tokV2 *sessionV2.Token
-
- if eaclInfo.Session != nil {
- tokV2 = new(sessionV2.Token)
-
- eaclInfo.Session.WriteToV2(tokV2)
- }
-
- res := new(container.GetExtendedACLResponseBody)
- res.SetEACL(eaclInfo.Value.ToV2())
- res.SetSignature(&sigV2)
- res.SetSessionToken(tokV2)
-
- return res, nil
-}
diff --git a/pkg/services/container/server.go b/pkg/services/container/server.go
index d714d7f02..a19d83c56 100644
--- a/pkg/services/container/server.go
+++ b/pkg/services/container/server.go
@@ -12,5 +12,4 @@ type Server interface {
Get(context.Context, *container.GetRequest) (*container.GetResponse, error)
Delete(context.Context, *container.DeleteRequest) (*container.DeleteResponse, error)
List(context.Context, *container.ListRequest) (*container.ListResponse, error)
- GetExtendedACL(context.Context, *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error)
}
diff --git a/pkg/services/container/sign.go b/pkg/services/container/sign.go
index 62aa3fe27..f7f5d6486 100644
--- a/pkg/services/container/sign.go
+++ b/pkg/services/container/sign.go
@@ -56,12 +56,3 @@ func (s *signService) List(ctx context.Context, req *container.ListRequest) (*co
resp, err := util.EnsureNonNilResponse(s.svc.List(ctx, req))
return resp, s.sigSvc.SignResponse(resp, err)
}
-
-func (s *signService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) {
- if err := s.sigSvc.VerifyRequest(req); err != nil {
- resp := new(container.GetExtendedACLResponse)
- return resp, s.sigSvc.SignResponse(resp, err)
- }
- resp, err := util.EnsureNonNilResponse(s.svc.GetExtendedACL(ctx, req))
- return resp, s.sigSvc.SignResponse(resp, err)
-}
From b9043433a098ae242c559a79afe6f1ced5068e68 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 15 Jul 2024 14:07:32 +0300
Subject: [PATCH 017/655] [#1223] scripts: Add script to populate metabase
Signed-off-by: Aleksey Savchuk
---
.../populate-metabase/internal/generate.go | 132 +++++++++
.../populate-metabase/internal/populate.go | 263 ++++++++++++++++++
scripts/populate-metabase/main.go | 159 +++++++++++
3 files changed, 554 insertions(+)
create mode 100644 scripts/populate-metabase/internal/generate.go
create mode 100644 scripts/populate-metabase/internal/populate.go
create mode 100644 scripts/populate-metabase/main.go
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
new file mode 100644
index 000000000..d2004b673
--- /dev/null
+++ b/scripts/populate-metabase/internal/generate.go
@@ -0,0 +1,132 @@
+package internal
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ objecttest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "golang.org/x/exp/rand"
+)
+
+func GeneratePayloadPool(count uint, size uint) [][]byte {
+ pool := [][]byte{}
+ for i := uint(0); i < count; i++ {
+ payload := make([]byte, size)
+ _, _ = rand.Read(payload)
+
+ pool = append(pool, payload)
+ }
+ return pool
+}
+
+func GenerateAttributePool(count uint) []objectSDK.Attribute {
+ pool := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
+ for j := uint(0); j < count; j++ {
+ attr := *objectSDK.NewAttribute()
+ attr.SetKey(fmt.Sprintf("key%d", i))
+ attr.SetValue(fmt.Sprintf("value%d", j))
+ pool = append(pool, attr)
+ }
+ }
+ return pool
+}
+
+func GenerateOwnerPool(count uint) []user.ID {
+ pool := []user.ID{}
+ for i := uint(0); i < count; i++ {
+ pool = append(pool, usertest.ID())
+ }
+ return pool
+}
+
+type ObjectOption func(obj *objectSDK.Object)
+
+func GenerateObject(options ...ObjectOption) *objectSDK.Object {
+ var ver version.Version
+ ver.SetMajor(2)
+ ver.SetMinor(1)
+
+ payload := make([]byte, 0)
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cidtest.ID())
+
+ header := objecttest.Object().GetECHeader()
+ header.SetParent(oidtest.ID())
+ obj.SetECHeader(header)
+
+ obj.SetVersion(&ver)
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ for _, option := range options {
+ option(obj)
+ }
+
+ return obj
+}
+
+func WithContainerID(cid cid.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetContainerID(cid)
+ }
+}
+
+func WithType(typ objectSDK.Type) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetType(typ)
+ }
+}
+
+func WithPayloadFromPool(pool [][]byte) ObjectOption {
+ payload := pool[rand.Intn(len(pool))]
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(payload))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ return func(obj *objectSDK.Object) {
+ obj.SetPayload(payload)
+ obj.SetPayloadSize(uint64(len(payload)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+ }
+}
+
+func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ attrs := []objectSDK.Attribute{}
+ for i := uint(0); i < count; i++ {
+ attrs = append(attrs, pool[rand.Intn(len(pool))])
+ }
+ obj.SetAttributes(attrs...)
+ }
+}
+
+func WithOwnerIDFromPool(pool []user.ID) ObjectOption {
+ return func(obj *objectSDK.Object) {
+ obj.SetOwnerID(pool[rand.Intn(len(pool))])
+ }
+}
diff --git a/scripts/populate-metabase/internal/populate.go b/scripts/populate-metabase/internal/populate.go
new file mode 100644
index 000000000..390c1cdc0
--- /dev/null
+++ b/scripts/populate-metabase/internal/populate.go
@@ -0,0 +1,263 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "sync"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "golang.org/x/sync/errgroup"
+)
+
+// EpochState is a stub meta.EpochState implementation that pins the
+// current epoch to zero; the populate script has no real network state.
+type EpochState struct{}
+
+// CurrentEpoch always reports epoch 0.
+func (s EpochState) CurrentEpoch() uint64 {
+	return 0
+}
+
+// PopulateWithObjects schedules count metabase Put operations on group,
+// one per object produced by factory. Each object is stored under a random
+// three-level storage ID of the form "d/d/d" (decimal digits).
+func PopulateWithObjects(
+	ctx context.Context,
+	db *meta.DB,
+	group *errgroup.Group,
+	count uint,
+	factory func() *objectSDK.Object,
+) {
+	digits := "0123456789"
+
+	for i := uint(0); i < count; i++ {
+		obj := factory()
+
+		// Random storage ID, e.g. "1/4/9".
+		id := []byte(fmt.Sprintf(
+			"%c/%c/%c",
+			digits[rand.Int()%len(digits)],
+			digits[rand.Int()%len(digits)],
+			digits[rand.Int()%len(digits)],
+		))
+
+		prm := meta.PutPrm{}
+		prm.SetObject(obj)
+		prm.SetStorageID(id)
+
+		group.Go(func() error {
+			if _, err := db.Put(ctx, prm); err != nil {
+				return fmt.Errorf("couldn't put an object: %w", err)
+			}
+			return nil
+		})
+	}
+}
+
+// PopulateWithBigObjects schedules count jobs on group, each storing one
+// multipart ("big") object via populateWithBigObject.
+func PopulateWithBigObjects(
+	ctx context.Context,
+	db *meta.DB,
+	group *errgroup.Group,
+	count uint,
+	factory func() *objectSDK.Object,
+) {
+	for i := uint(0); i < count; i++ {
+		group.Go(func() error {
+			if err := populateWithBigObject(ctx, db, factory); err != nil {
+				return fmt.Errorf("couldn't put a big object: %w", err)
+			}
+			return nil
+		})
+	}
+}
+
+// populateWithBigObject stores a single object split into several parts:
+// a 30-byte payload pushed through a size limiter with MaxSize 10, so the
+// transformer emits multiple linked child objects plus a parent, all of
+// which end up in the metabase via target.WriteObject.
+func populateWithBigObject(
+	ctx context.Context,
+	db *meta.DB,
+	factory func() *objectSDK.Object,
+) error {
+	t := &target{db: db}
+
+	// The key error was previously discarded; a failed generation would
+	// leave pk nil and panic inside the transformer.
+	pk, err := keys.NewPrivateKey()
+	if err != nil {
+		return fmt.Errorf("couldn't generate a private key: %w", err)
+	}
+	p := transformer.NewPayloadSizeLimiter(transformer.Params{
+		Key: &pk.PrivateKey,
+		NextTargetInit: func() transformer.ObjectWriter { return t },
+		NetworkState: EpochState{},
+		MaxSize: 10,
+	})
+
+	obj := factory()
+	payload := make([]byte, 30)
+
+	if err := p.WriteHeader(ctx, obj); err != nil {
+		return err
+	}
+
+	if _, err := p.Write(ctx, payload); err != nil {
+		return err
+	}
+
+	_, err = p.Close(ctx)
+	return err
+}
+
+// target is a transformer.ObjectWriter that stores every produced object
+// directly into the metabase.
+type target struct {
+	db *meta.DB
+}
+
+// WriteObject implements transformer.ObjectWriter by issuing a metabase Put.
+func (t *target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+	prm := meta.PutPrm{}
+	prm.SetObject(obj)
+
+	_, err := t.db.Put(ctx, prm)
+	return err
+}
+
+// PopulateGraveyard stores one tombstone object plus count regular objects
+// and then inhumes every stored object with that tombstone.
+//
+// Pipeline: group tasks put objects and push their addresses into a channel
+// of size workBufferSize; a separate goroutine drains the channel and
+// schedules an Inhume call per address. The channel is closed only after
+// every put has completed, which in turn terminates the consumer loop.
+func PopulateGraveyard(
+	ctx context.Context,
+	db *meta.DB,
+	group *errgroup.Group,
+	workBufferSize int,
+	count uint,
+	factory func() *objectSDK.Object,
+) {
+	// Shared tombstone that all inhumed objects will reference.
+	ts := factory()
+	ts.SetType(objectSDK.TypeTombstone)
+
+	prm := meta.PutPrm{}
+	prm.SetObject(ts)
+
+	group.Go(func() error {
+		if _, err := db.Put(ctx, prm); err != nil {
+			return fmt.Errorf("couldn't put a tombstone object: %w", err)
+		}
+		return nil
+	})
+
+	cID, _ := ts.ContainerID()
+	oID, _ := ts.ID()
+
+	var tsAddr oid.Address
+
+	tsAddr.SetContainer(cID)
+	tsAddr.SetObject(oID)
+
+	addrs := make(chan oid.Address, workBufferSize)
+
+	// Producer: put objects concurrently, forwarding addresses of stored
+	// objects; close addrs only after all puts are done.
+	go func() {
+		defer close(addrs)
+
+		wg := &sync.WaitGroup{}
+		wg.Add(int(count))
+
+		for i := uint(0); i < count; i++ {
+			obj := factory()
+
+			prm := meta.PutPrm{}
+			prm.SetObject(obj)
+
+			group.Go(func() error {
+				defer wg.Done()
+
+				if _, err := db.Put(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't put an object: %w", err)
+				}
+
+				cID, _ := obj.ContainerID()
+				oID, _ := obj.ID()
+
+				var addr oid.Address
+				addr.SetContainer(cID)
+				addr.SetObject(oID)
+
+				addrs <- addr
+				return nil
+			})
+		}
+		wg.Wait()
+	}()
+
+	// Consumer: inhume each stored object with the shared tombstone.
+	go func() {
+		for addr := range addrs {
+			prm := meta.InhumePrm{}
+			prm.SetAddresses(addr)
+			prm.SetTombstoneAddress(tsAddr)
+
+			group.Go(func() error {
+				if _, err := db.Inhume(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't inhume an object: %w", err)
+				}
+				return nil
+			})
+		}
+	}()
+}
+
+// PopulateLocked stores a single lock object plus count regular objects in
+// the metabase and locks every stored object with it.
+//
+// Pipeline mirrors PopulateGraveyard: group tasks put objects and forward
+// their IDs through a channel of size workBufferSize; a second goroutine
+// drains the channel and schedules a Lock call per ID.
+func PopulateLocked(
+	ctx context.Context,
+	db *meta.DB,
+	group *errgroup.Group,
+	workBufferSize int,
+	count uint,
+	factory func() *objectSDK.Object,
+) {
+	locker := factory()
+	locker.SetType(objectSDK.TypeLock)
+
+	prm := meta.PutPrm{}
+	prm.SetObject(locker)
+
+	group.Go(func() error {
+		if _, err := db.Put(ctx, prm); err != nil {
+			return fmt.Errorf("couldn't put a locker object: %w", err)
+		}
+		return nil
+	})
+
+	ids := make(chan oid.ID, workBufferSize)
+
+	go func() {
+		defer close(ids)
+
+		wg := &sync.WaitGroup{}
+		wg.Add(int(count))
+
+		for i := uint(0); i < count; i++ {
+			obj := factory()
+
+			prm := meta.PutPrm{}
+			prm.SetObject(obj)
+
+			group.Go(func() error {
+				// Done must be deferred inside the task. The previous code
+				// deferred it in the loop body of this goroutine, so every
+				// Done was postponed until the goroutine returned — which it
+				// never did, because wg.Wait() below blocked forever
+				// (deadlock).
+				defer wg.Done()
+
+				if _, err := db.Put(ctx, prm); err != nil {
+					return fmt.Errorf("couldn't put an object: %w", err)
+				}
+
+				id, _ := obj.ID()
+				ids <- id
+				return nil
+			})
+		}
+		wg.Wait()
+	}()
+
+	go func() {
+		// The locker address is loop-invariant; compute it once.
+		lockerCID, _ := locker.ContainerID()
+		lockerOID, _ := locker.ID()
+
+		for id := range ids {
+			group.Go(func() error {
+				if err := db.Lock(ctx, lockerCID, lockerOID, []oid.ID{id}); err != nil {
+					return fmt.Errorf("couldn't lock an object: %w", err)
+				}
+				return nil
+			})
+		}
+	}()
+}
diff --git a/scripts/populate-metabase/main.go b/scripts/populate-metabase/main.go
new file mode 100644
index 000000000..2bc7a5553
--- /dev/null
+++ b/scripts/populate-metabase/main.go
@@ -0,0 +1,159 @@
+package main
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/scripts/populate-metabase/internal"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ path string
+ force bool
+ jobs uint
+
+ numContainers,
+ numObjects,
+ numAttributesPerObj,
+ numOwners,
+ numPayloads,
+ numAttributes uint
+)
+
+// main parses CLI flags, validates them, (re)creates the metabase file at
+// -path and fills it with synthetic data.
+func main() {
+	flag.StringVar(&path, "path", "", "Path to metabase")
+	flag.BoolVar(&force, "force", false, "Rewrite existing database")
+	flag.UintVar(&jobs, "j", 10000, "Number of jobs to run")
+
+	flag.UintVar(&numContainers, "containers", 0, "Number of containers to be created")
+	flag.UintVar(&numObjects, "objects", 0, "Number of objects per container")
+	flag.UintVar(&numAttributesPerObj, "attributes", 0, "Number of attributes per object")
+
+	flag.UintVar(&numOwners, "distinct-owners", 10, "Number of distinct owners to be used")
+	flag.UintVar(&numPayloads, "distinct-payloads", 10, "Number of distinct payloads to be used")
+	flag.UintVar(&numAttributes, "distinct-attributes", 10, "Number of distinct attributes to be used")
+
+	flag.Parse()
+
+	// Sanity checks: pools must be non-empty and the requested attribute
+	// count per object cannot exceed the distinct attribute pool size.
+	exitIf(numPayloads == 0, "must have payloads\n")
+	exitIf(numAttributes == 0, "must have attributes\n")
+	exitIf(numOwners == 0, "must have owners\n")
+	exitIf(len(path) == 0, "path to metabase not specified\n")
+	exitIf(
+		numAttributesPerObj > numAttributes,
+		"object can't have more attributes than available\n",
+	)
+
+	info, err := os.Stat(path)
+	exitIf(
+		err != nil && !errors.Is(err, os.ErrNotExist),
+		"couldn't get path info: %s\n", err,
+	)
+
+	// The path already exists: refuse directories and require -force to
+	// overwrite a regular file.
+	if err == nil {
+		exitIf(info.IsDir(), "path is a directory\n")
+		exitIf(!force, "couldn't rewrite existing file, use '-force' flag\n")
+
+		err = os.Remove(path)
+		exitIf(err != nil, "couldn't remove existing file: %s\n", err)
+	}
+
+	err = populate()
+	exitIf(err != nil, "couldn't populate the metabase: %s\n", err)
+}
+
+// getObjectFactory returns a factory that produces random objects with the
+// given options pre-applied.
+func getObjectFactory(opts ...internal.ObjectOption) func() *objectSDK.Object {
+	return func() *objectSDK.Object {
+		return internal.GenerateObject(opts...)
+	}
+}
+
+// populate opens the metabase at the configured path, fills it with
+// containers, objects, graveyard and lock records, and waits for all
+// scheduled jobs to finish. The named return lets the deferred Close
+// error be joined with the primary one.
+func populate() (err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	db := meta.New([]meta.Option{
+		meta.WithPath(path),
+		meta.WithPermissions(0o600),
+		meta.WithEpochState(internal.EpochState{}),
+	}...)
+
+	if err = db.Open(ctx, mode.ReadWrite); err != nil {
+		return fmt.Errorf("couldn't open the metabase: %w", err)
+	}
+	defer func() {
+		// Use the captured close error. The previous code passed a second
+		// db.Close() call into fmt.Errorf, closing the database twice and
+		// reporting the wrong error.
+		if errOnClose := db.Close(); errOnClose != nil {
+			err = errors.Join(
+				err,
+				fmt.Errorf("couldn't close the metabase: %w", errOnClose),
+			)
+		}
+	}()
+
+	if err = db.Init(); err != nil {
+		return fmt.Errorf("couldn't init the metabase: %w", err)
+	}
+
+	// Shared pools of random data reused across all generated objects.
+	payloads := internal.GeneratePayloadPool(numPayloads, 32)
+	attributes := internal.GenerateAttributePool(numAttributes)
+	owners := internal.GenerateOwnerPool(numOwners)
+
+	types := []objectSDK.Type{
+		objectSDK.TypeRegular,
+		objectSDK.TypeLock,
+		objectSDK.TypeTombstone,
+	}
+
+	eg, ctx := errgroup.WithContext(ctx)
+	eg.SetLimit(int(jobs))
+
+	for i := uint(0); i < numContainers; i++ {
+		cid := cidtest.ID()
+
+		for _, typ := range types {
+			internal.PopulateWithObjects(ctx, db, eg, numObjects, getObjectFactory(
+				internal.WithContainerID(cid),
+				internal.WithType(typ),
+				internal.WithPayloadFromPool(payloads),
+				internal.WithOwnerIDFromPool(owners),
+				internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			))
+		}
+		internal.PopulateWithBigObjects(ctx, db, eg, numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+		internal.PopulateGraveyard(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+		internal.PopulateLocked(ctx, db, eg, int(jobs), numObjects, getObjectFactory(
+			internal.WithContainerID(cid),
+			internal.WithType(objectSDK.TypeRegular),
+			internal.WithAttributesFromPool(attributes, numAttributesPerObj),
+			internal.WithOwnerIDFromPool(owners),
+		))
+	}
+
+	return eg.Wait()
+}
+
+// exitIf prints the formatted message to stderr and terminates the process
+// with a non-zero status when cond holds; otherwise it is a no-op.
+func exitIf(cond bool, format string, args ...any) {
+	if !cond {
+		return
+	}
+	fmt.Fprintf(os.Stderr, format, args...)
+	os.Exit(1)
+}
From 1ae86f35a8d95a3f2258eaefb772482d0af873f6 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 18 Jul 2024 18:26:11 +0300
Subject: [PATCH 018/655] [#1223] lens/tui: Add metabase schema
Signed-off-by: Aleksey Savchuk
---
.../internal/schema/common/format.go | 43 +++
.../internal/schema/common/raw.go | 29 ++
.../internal/schema/common/schema.go | 81 ++++++
.../schema/metabase/buckets/detailed.go | 29 ++
.../schema/metabase/buckets/filter.go | 81 ++++++
.../schema/metabase/buckets/parsers.go | 111 ++++++++
.../schema/metabase/buckets/prefix.go | 53 ++++
.../schema/metabase/buckets/string.go | 48 ++++
.../internal/schema/metabase/buckets/types.go | 166 ++++++++++++
.../internal/schema/metabase/parser.go | 29 ++
.../schema/metabase/records/detailed.go | 65 +++++
.../schema/metabase/records/filter.go | 145 ++++++++++
.../schema/metabase/records/parsers.go | 251 ++++++++++++++++++
.../schema/metabase/records/string.go | 135 ++++++++++
.../internal/schema/metabase/records/types.go | 82 ++++++
.../internal/schema/metabase/records/util.go | 20 ++
go.mod | 5 +-
go.sum | 9 +
18 files changed, 1381 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/schema/common/format.go
create mode 100644 cmd/frostfs-lens/internal/schema/common/raw.go
create mode 100644 cmd/frostfs-lens/internal/schema/common/schema.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/parser.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/filter.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/string.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/types.go
create mode 100644 cmd/frostfs-lens/internal/schema/metabase/records/util.go
diff --git a/cmd/frostfs-lens/internal/schema/common/format.go b/cmd/frostfs-lens/internal/schema/common/format.go
new file mode 100644
index 000000000..4ed7e96f2
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/format.go
@@ -0,0 +1,43 @@
+package common
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/gdamore/tcell/v2"
+)
+
+// FormatOptions describes the visual style applied by Format: a foreground
+// color plus any combination of independent text attributes.
+type FormatOptions struct {
+	Color tcell.Color
+
+	Bold,
+	Italic,
+	Underline,
+	StrikeThrough bool
+}
+
+// Format wraps s in a tview-style color/attribute tag built from opts and
+// closes it with a reset tag.
+func Format(s string, opts FormatOptions) string {
+	var boldTag, italicTag, underlineTag, strikeThroughTag string
+
+	// The attributes are independent flags, so each one must be checked
+	// separately. The previous switch applied only the first enabled
+	// attribute and silently dropped the rest.
+	if opts.Bold {
+		boldTag = "b"
+	}
+	if opts.Italic {
+		italicTag = "i"
+	}
+	if opts.Underline {
+		underlineTag = "u"
+	}
+	if opts.StrikeThrough {
+		strikeThroughTag = "s"
+	}
+
+	attrs := fmt.Sprintf(
+		"%s%s%s%s", boldTag, italicTag, underlineTag, strikeThroughTag,
+	)
+	color := strconv.FormatInt(int64(opts.Color.Hex()), 16)
+
+	return fmt.Sprintf("[#%06s::%s]%s[-::-]", color, attrs, s)
+}
+
+// FormatSimple colors s without any extra text attributes.
+func FormatSimple(s string, c tcell.Color) string {
+	return Format(s, FormatOptions{Color: c})
+}
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
new file mode 100644
index 000000000..0990e24c3
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -0,0 +1,29 @@
+package common
+
+import (
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/mr-tron/base58"
+)
+
+// RawEntry is the fallback schema entry for key-value pairs that no typed
+// parser recognizes; it keeps the raw bytes as-is.
+type RawEntry struct {
+	key, value []byte
+}
+
+// RawParser accepts any entry and hands itself back for nested levels.
+var RawParser Parser = rawParser
+
+func rawParser(key, value []byte) (SchemaEntry, Parser, error) {
+	return &RawEntry{key: key, value: value}, rawParser, nil
+}
+
+// String renders only the key, base58-encoded and colored red to signal an
+// unparsed entry.
+func (r *RawEntry) String() string {
+	return FormatSimple(base58.Encode(r.key), tcell.ColorRed)
+}
+
+func (r *RawEntry) DetailedString() string {
+	return spew.Sdump(r)
+}
+
+// Filter never matches: raw entries cannot be searched by typed filters.
+func (r *RawEntry) Filter(string, any) FilterResult {
+	return No
+}
diff --git a/cmd/frostfs-lens/internal/schema/common/schema.go b/cmd/frostfs-lens/internal/schema/common/schema.go
new file mode 100644
index 000000000..9bad19032
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/common/schema.go
@@ -0,0 +1,81 @@
+package common
+
+import (
+ "errors"
+ "fmt"
+)
+
+// FilterResult is a ternary match verdict produced when filtering schema
+// entries.
+type FilterResult byte
+
+const (
+	No FilterResult = iota // definitely does not match
+	Maybe                  // may match; descend further to decide
+	Yes                    // definitely matches
+)
+
+func IfThenElse(condition bool, onSuccess, onFailure FilterResult) FilterResult {
+ var res FilterResult
+ if condition {
+ res = onSuccess
+ } else {
+ res = onFailure
+ }
+ return res
+}
+
+type SchemaEntry interface {
+ String() string
+ DetailedString() string
+ Filter(typ string, val any) FilterResult
+}
+
+type (
+ Parser func(key, value []byte) (SchemaEntry, Parser, error)
+ FallbackParser func(key, value []byte) (SchemaEntry, Parser)
+)
+
+// Any tries each parser in order and returns the first successful result.
+// When every parser fails, it returns all of their errors joined together.
+func Any(parsers ...Parser) Parser {
+	return func(key, value []byte) (SchemaEntry, Parser, error) {
+		var errs error
+		for _, parser := range parsers {
+			ret, next, err := parser(key, value)
+			if err == nil {
+				return ret, next, nil
+			}
+			errs = errors.Join(errs, err)
+		}
+		return nil, nil, fmt.Errorf("no parser succeeded: %w", errs)
+	}
+}
+
+// WithFallback wraps parser so that any parse failure is retried with
+// fallback; the fallback is also propagated to every nested parser returned
+// on success. A nil parser degenerates to the fallback alone.
+func WithFallback(parser Parser, fallback FallbackParser) Parser {
+	if parser == nil {
+		return fallback.ToParser()
+	}
+	return func(key, value []byte) (SchemaEntry, Parser, error) {
+		entry, next, err := parser(key, value)
+		if err == nil {
+			return entry, WithFallback(next, fallback), nil
+		}
+		return fallback.ToParser()(key, value)
+	}
+}
+
+// ToParser adapts a FallbackParser to the Parser signature; it never fails.
+func (fp FallbackParser) ToParser() Parser {
+	return func(key, value []byte) (SchemaEntry, Parser, error) {
+		entry, next := fp(key, value)
+		return entry, next, nil
+	}
+}
+
+// ToFallbackParser adapts a Parser for use as a fallback. The wrapped parser
+// is expected to always succeed; an error here is a programming bug, hence
+// the panic.
+func (p Parser) ToFallbackParser() FallbackParser {
+	return func(key, value []byte) (SchemaEntry, Parser) {
+		entry, next, err := p(key, value)
+		if err != nil {
+			panic(fmt.Errorf(
+				"couldn't use that parser as a fallback parser, it returned an error: %w", err,
+			))
+		}
+		return entry, next
+	}
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
new file mode 100644
index 000000000..6a08a723e
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/detailed.go
@@ -0,0 +1,29 @@
+package buckets
+
+import (
+ "github.com/davecgh/go-spew/spew"
+)
+
+func (b *PrefixBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *PrefixContainerBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *ContainerBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserAttributeKeyBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *UserAttributeValueBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
new file mode 100644
index 000000000..891c4004f
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/filter.go
@@ -0,0 +1,81 @@
+package buckets
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+func (b *PrefixBucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return b.resolvers.cidResolver(false)
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *PrefixContainerBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return b.resolvers.cidResolver(b.id.Equals(id))
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *UserBucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return b.resolvers.cidResolver(false)
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *ContainerBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return b.resolvers.cidResolver(b.id.Equals(id))
+ case "oid":
+ return b.resolvers.oidResolver(false)
+ default:
+ return common.No
+ }
+}
+
+func (b *UserAttributeKeyBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(b.id.Equals(id), common.Yes, common.No)
+ case "oid":
+ return common.Maybe
+ case "key":
+ key := val.(string)
+ return common.IfThenElse(b.key == key, common.Yes, common.No)
+ case "value":
+ return common.Maybe
+ default:
+ return common.No
+ }
+}
+
+func (b *UserAttributeValueBucket) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ return common.Maybe
+ case "value":
+ value := val.(string)
+ return common.IfThenElse(b.value == value, common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
new file mode 100644
index 000000000..24cc0e52d
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/parsers.go
@@ -0,0 +1,111 @@
+package buckets
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/records"
+)
+
+var (
+ GraveyardParser = NewPrefixBucketParser(Graveyard, records.GraveyardRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ })
+
+ GarbageParser = NewPrefixBucketParser(Garbage, records.GarbageRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ })
+
+ ContainerVolumeParser = NewPrefixBucketParser(ContainerVolume, records.ContainerVolumeRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: StrictResolver,
+ })
+
+ LockedParser = NewPrefixBucketParser(
+ Locked,
+ NewContainerBucketParser(
+ records.LockedRecordParser,
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ ),
+ Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: LenientResolver,
+ },
+ )
+
+ ShardInfoParser = NewPrefixBucketParser(ShardInfo, records.ShardInfoRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ PrimaryParser = NewPrefixContainerBucketParser(Primary, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ LockersParser = NewPrefixContainerBucketParser(Lockers, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ TombstoneParser = NewPrefixContainerBucketParser(Tombstone, records.ObjectRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ SmallParser = NewPrefixContainerBucketParser(Small, records.SmallRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ RootParser = NewPrefixContainerBucketParser(Root, records.RootRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ OwnerParser = NewPrefixContainerBucketParser(
+ Owner,
+ NewUserBucketParser(
+ records.OwnerRecordParser,
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ ),
+ Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ },
+ )
+
+ UserAttributeParser = NewUserAttributeKeyBucketParser(
+ NewUserAttributeValueBucketParser(records.UserAttributeRecordParser),
+ )
+
+ PayloadHashParser = NewPrefixContainerBucketParser(PayloadHash, records.PayloadHashRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ParentParser = NewPrefixContainerBucketParser(Parent, records.ParentRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+
+ SplitParser = NewPrefixContainerBucketParser(Split, records.SplitRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ContainerCountersParser = NewPrefixBucketParser(ContainerCounters, records.ContainerCountersRecordParser, Resolvers{
+ cidResolver: LenientResolver,
+ oidResolver: StrictResolver,
+ })
+
+ ECInfoParser = NewPrefixContainerBucketParser(ECInfo, records.ECInfoRecordParser, Resolvers{
+ cidResolver: StrictResolver,
+ oidResolver: LenientResolver,
+ })
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
new file mode 100644
index 000000000..2fb122940
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/prefix.go
@@ -0,0 +1,53 @@
+package buckets
+
+type Prefix byte
+
+const (
+ Graveyard Prefix = iota
+ Garbage
+ ToMoveIt
+ ContainerVolume
+ Locked
+ ShardInfo
+ Primary
+ Lockers
+ _
+ Tombstone
+ Small
+ Root
+ Owner
+ UserAttribute
+ PayloadHash
+ Parent
+ Split
+ ContainerCounters
+ ECInfo
+)
+
+// prefixNames maps each known bucket prefix to its human-readable name.
+// (Renamed from the meaningless package-level identifier `x`.)
+var prefixNames = map[Prefix]string{
+	Graveyard:         "Graveyard",
+	Garbage:           "Garbage",
+	ToMoveIt:          "To Move It",
+	ContainerVolume:   "Container Volume",
+	Locked:            "Locked",
+	ShardInfo:         "Shard Info",
+	Primary:           "Primary",
+	Lockers:           "Lockers",
+	Tombstone:         "Tombstone",
+	Small:             "Small",
+	Root:              "Root",
+	Owner:             "Owner",
+	UserAttribute:     "User Attribute",
+	PayloadHash:       "Payload Hash",
+	Parent:            "Parent",
+	Split:             "Split",
+	ContainerCounters: "Container Counters",
+	ECInfo:            "EC Info",
+}
+
+// String implements fmt.Stringer; unknown prefixes get a placeholder name.
+func (p Prefix) String() string {
+	if s, ok := prefixNames[p]; ok {
+		return s
+	}
+	return "Unknown Prefix"
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
new file mode 100644
index 000000000..db90bddbd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/string.go
@@ -0,0 +1,48 @@
+package buckets
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+)
+
+func (b *PrefixBucket) String() string {
+ return common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ )
+}
+
+func (b *PrefixContainerBucket) String() string {
+ return fmt.Sprintf(
+ "%s CID %s",
+ common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ ),
+ common.FormatSimple(b.id.String(), tcell.ColorAqua),
+ )
+}
+
+func (b *UserBucket) String() string {
+ return "UID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
+}
+
+func (b *ContainerBucket) String() string {
+ return "CID " + common.FormatSimple(b.id.String(), tcell.ColorAqua)
+}
+
+func (b *UserAttributeKeyBucket) String() string {
+ return fmt.Sprintf("%s CID %s ATTR-KEY %s",
+ common.FormatSimple(
+ fmt.Sprintf("(%2d %-18s)", b.prefix, b.prefix), tcell.ColorLime,
+ ),
+ common.FormatSimple(
+ fmt.Sprintf("%-44s", b.id), tcell.ColorAqua,
+ ),
+ common.FormatSimple(b.key, tcell.ColorAqua),
+ )
+}
+
+func (b *UserAttributeValueBucket) String() string {
+ return "ATTR-VALUE " + common.FormatSimple(b.value, tcell.ColorAqua)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
new file mode 100644
index 000000000..82b47dd85
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/buckets/types.go
@@ -0,0 +1,166 @@
+package buckets
+
+import (
+ "errors"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "github.com/mr-tron/base58"
+)
+
+type (
+ PrefixBucket struct {
+ prefix Prefix
+ resolvers Resolvers
+ }
+
+ PrefixContainerBucket struct {
+ prefix Prefix
+ id cid.ID
+ resolvers Resolvers
+ }
+
+ ContainerBucket struct {
+ id cid.ID
+ resolvers Resolvers
+ }
+
+ UserBucket struct {
+ id user.ID
+ resolvers Resolvers
+ }
+
+ UserAttributeKeyBucket struct {
+ prefix Prefix
+ id cid.ID
+ key string
+ }
+
+ UserAttributeValueBucket struct {
+ value string
+ }
+)
+
+type (
+ FilterResolver = func(result bool) common.FilterResult
+
+ Resolvers struct {
+ cidResolver FilterResolver
+ oidResolver FilterResolver
+ }
+)
+
+var (
+ StrictResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.No) }
+ LenientResolver = func(x bool) common.FilterResult { return common.IfThenElse(x, common.Yes, common.Maybe) }
+)
+
+var (
+ ErrNotBucket = errors.New("not a bucket")
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
+)
+
+func NewPrefixBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 1 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b PrefixBucket
+ if b.prefix = Prefix(key[0]); b.prefix != prefix {
+ return nil, nil, ErrInvalidPrefix
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewPrefixContainerBucketParser(prefix Prefix, next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 33 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b PrefixContainerBucket
+ if b.prefix = Prefix(key[0]); b.prefix != prefix {
+ return nil, nil, ErrInvalidPrefix
+ }
+ if err := b.id.Decode(key[1:]); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewUserBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ var b UserBucket
+ if err := b.id.DecodeString(base58.Encode(key)); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewContainerBucketParser(next common.Parser, resolvers Resolvers) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b ContainerBucket
+ if err := b.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ b.resolvers = resolvers
+ return &b, next, nil
+ }
+}
+
+func NewUserAttributeKeyBucketParser(next common.Parser) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) < 34 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b UserAttributeKeyBucket
+ if b.prefix = Prefix(key[0]); b.prefix != UserAttribute {
+ return nil, nil, ErrInvalidPrefix
+ }
+ if err := b.id.Decode(key[1:33]); err != nil {
+ return nil, nil, err
+ }
+ b.key = string(key[33:])
+ return &b, next, nil
+ }
+}
+
+func NewUserAttributeValueBucketParser(next common.Parser) common.Parser {
+ return func(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, ErrNotBucket
+ }
+ if len(key) == 0 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var b UserAttributeValueBucket
+ b.value = string(key)
+ return &b, next, nil
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/parser.go b/cmd/frostfs-lens/internal/schema/metabase/parser.go
new file mode 100644
index 000000000..ea095e207
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/parser.go
@@ -0,0 +1,29 @@
+package metabase
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase/buckets"
+)
+
+var MetabaseParser = common.WithFallback(
+ common.Any(
+ buckets.GraveyardParser,
+ buckets.GarbageParser,
+ buckets.ContainerVolumeParser,
+ buckets.LockedParser,
+ buckets.ShardInfoParser,
+ buckets.PrimaryParser,
+ buckets.LockersParser,
+ buckets.TombstoneParser,
+ buckets.SmallParser,
+ buckets.RootParser,
+ buckets.OwnerParser,
+ buckets.UserAttributeParser,
+ buckets.PayloadHashParser,
+ buckets.ParentParser,
+ buckets.SplitParser,
+ buckets.ContainerCountersParser,
+ buckets.ECInfoParser,
+ ),
+ common.RawParser.ToFallbackParser(),
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
new file mode 100644
index 000000000..2dda15b4f
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/detailed.go
@@ -0,0 +1,65 @@
+package records
+
+import (
+ "github.com/davecgh/go-spew/spew"
+)
+
+func (r *GraveyardRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *GarbageRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ContainerVolumeRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *LockedRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ShardInfoRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ObjectRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *SmallRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *RootRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *OwnerRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *UserAttributeRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *PayloadHashRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ParentRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *SplitRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ContainerCountersRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *ECInfoRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/filter.go b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
new file mode 100644
index 000000000..880a7a8ff
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/filter.go
@@ -0,0 +1,145 @@
+package records
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+func (r *GraveyardRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.object.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.object.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *GarbageRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ContainerVolumeRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ShardInfoRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *LockedRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ObjectRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *SmallRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *RootRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *OwnerRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *UserAttributeRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *PayloadHashRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *ParentRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.parent.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *SplitRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *ContainerCountersRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
+
+func (r *ECInfoRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.id.Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
new file mode 100644
index 000000000..1b070e2a0
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/parsers.go
@@ -0,0 +1,251 @@
+package records
+
+import (
+ "encoding/binary"
+ "errors"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+var (
+ ErrInvalidKeyLength = errors.New("invalid key length")
+ ErrInvalidValueLength = errors.New("invalid value length")
+ ErrInvalidPrefix = errors.New("invalid prefix")
+)
+
+func GraveyardRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 64 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ if len(value) != 64 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ var (
+ cnr cid.ID
+ obj oid.ID
+ r GraveyardRecord
+ )
+
+ _ = cnr.Decode(key[:32])
+ _ = obj.Decode(key[32:])
+
+ r.object.SetContainer(cnr)
+ r.object.SetObject(obj)
+
+ _ = cnr.Decode(value[:32])
+ _ = obj.Decode(value[32:])
+
+ r.tombstone.SetContainer(cnr)
+ r.tombstone.SetObject(obj)
+
+ return &r, nil, nil
+}
+
+func GarbageRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 64 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var (
+ cnr cid.ID
+ obj oid.ID
+ r GarbageRecord
+ )
+
+ _ = cnr.Decode(key[:32])
+ _ = obj.Decode(key[32:])
+
+ r.addr.SetContainer(cnr)
+ r.addr.SetObject(obj)
+
+ return &r, nil, nil
+}
+
+func ContainerVolumeRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ if len(value) != 8 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ var r ContainerVolumeRecord
+
+ _ = r.id.Decode(key)
+ r.volume = binary.LittleEndian.Uint64(value)
+
+ return &r, nil, nil
+}
+
+func LockedRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ r LockedRecord
+ err error
+ )
+
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func ShardInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) == 0 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+
+ var r ShardInfoRecord
+ if string(key) == "id" {
+ r.label = string(key)
+ r.value = shard.ID(value).String()
+
+ return &r, nil, nil
+ }
+
+ if len(value) != 8 {
+ return nil, nil, ErrInvalidValueLength
+ }
+ r.label = string(key)
+ r.value = strconv.FormatUint(binary.LittleEndian.Uint64(value), 10)
+
+ return &r, nil, nil
+}
+
+func ObjectRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var r ObjectRecord
+
+ _ = r.id.Decode(key)
+ if err := r.object.Unmarshal(value); err != nil {
+ return nil, nil, err
+ }
+
+ return &r, nil, nil
+}
+
+func SmallRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r SmallRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if len(value) != 0 {
+ x := string(value)
+ r.storageID = &x
+ }
+ return &r, nil, nil
+}
+
+func RootRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r RootRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if len(value) == 0 {
+ return &r, nil, nil
+ }
+ r.info = &objectSDK.SplitInfo{}
+ if err := r.info.Unmarshal(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func OwnerRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ var r OwnerRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func UserAttributeRecordParser(key, _ []byte) (common.SchemaEntry, common.Parser, error) {
+ var r UserAttributeRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func PayloadHashRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(key) != 32 {
+ return nil, nil, ErrInvalidKeyLength
+ }
+ var (
+ err error
+ r PayloadHashRecord
+ )
+
+ r.checksum.SetSHA256([32]byte(key))
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func ParentRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ r ParentRecord
+ err error
+ )
+ if err = r.parent.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func SplitRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ err error
+ r SplitRecord
+ )
+ if err = r.id.UnmarshalBinary(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
+
+func ContainerCountersRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if len(value) != 24 {
+ return nil, nil, ErrInvalidValueLength
+ }
+
+ var r ContainerCountersRecord
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+
+ r.logical = binary.LittleEndian.Uint64(value[:8])
+ r.physical = binary.LittleEndian.Uint64(value[8:16])
+ r.user = binary.LittleEndian.Uint64(value[16:24])
+
+ return &r, nil, nil
+}
+
+func ECInfoRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var (
+ r ECInfoRecord
+ err error
+ )
+
+ if err := r.id.Decode(key); err != nil {
+ return nil, nil, err
+ }
+ if r.ids, err = DecodeOIDs(value); err != nil {
+ return nil, nil, err
+ }
+ return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
new file mode 100644
index 000000000..a6c70d537
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -0,0 +1,135 @@
+package records
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+func (r *GraveyardRecord) String() string {
+ return fmt.Sprintf(
+ "Object CID %s OID %s %c Tombstone CID %s OID %s",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.object.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.object.Object()), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.tombstone.Object()), tcell.ColorAqua),
+ )
+}
+
+func (r *GarbageRecord) String() string {
+ return fmt.Sprintf(
+ "CID %-44s OID %-44s",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
+ )
+}
+
+func (r *ContainerVolumeRecord) String() string {
+ return fmt.Sprintf(
+ "CID %-44s %c %d",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ r.volume,
+ )
+}
+
+func (r *LockedRecord) String() string {
+ return fmt.Sprintf(
+ "Locker OID %s %c Locked [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ShardInfoRecord) String() string {
+ return fmt.Sprintf("%-13s %c %s", r.label, tview.Borders.Vertical, r.value)
+}
+
+func (r *ObjectRecord) String() string {
+ return fmt.Sprintf(
+ "OID %s %c Object {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+}
+
+func (r *SmallRecord) String() string {
+ s := fmt.Sprintf(
+ "OID %s %c",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+ if r.storageID != nil {
+ s = fmt.Sprintf("%s %s", s, *r.storageID)
+ }
+ return s
+}
+
+func (r *RootRecord) String() string {
+ s := fmt.Sprintf(
+ "Root OID %s %c",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+ if r.info != nil {
+ s += " Split info {...}"
+ }
+ return s
+}
+
+func (r *OwnerRecord) String() string {
+ return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
+}
+
+func (r *UserAttributeRecord) String() string {
+ return "OID " + common.FormatSimple(r.id.String(), tcell.ColorAqua)
+}
+
+func (r *PayloadHashRecord) String() string {
+ return fmt.Sprintf(
+ "Checksum %s %c [%d]OID {...}",
+ common.FormatSimple(r.checksum.String(), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ParentRecord) String() string {
+ return fmt.Sprintf(
+ "Parent OID %s %c [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.parent), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *SplitRecord) String() string {
+ return fmt.Sprintf(
+ "Split ID %s %c [%d]OID {...}",
+ common.FormatSimple(r.id.String(), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
+
+func (r *ContainerCountersRecord) String() string {
+ return fmt.Sprintf(
+ "CID %s %c logical %d, physical %d, user %d",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ r.logical, r.physical, r.user,
+ )
+}
+
+func (r *ECInfoRecord) String() string {
+ return fmt.Sprintf(
+ "OID %s %c [%d]OID {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ len(r.ids),
+ )
+}
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/types.go b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
new file mode 100644
index 000000000..34c1c29fd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/types.go
@@ -0,0 +1,82 @@
+package records
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/google/uuid"
+)
+
+type (
+ GraveyardRecord struct {
+ object, tombstone oid.Address
+ }
+
+ GarbageRecord struct {
+ addr oid.Address
+ }
+
+ ContainerVolumeRecord struct {
+ id cid.ID
+ volume uint64
+ }
+
+ LockedRecord struct {
+ id oid.ID
+ ids []oid.ID
+ }
+
+ ShardInfoRecord struct {
+ label string
+ value string
+ }
+
+ ObjectRecord struct {
+ id oid.ID
+ object objectSDK.Object
+ }
+
+ SmallRecord struct {
+ id oid.ID
+ storageID *string // optional
+ }
+
+ RootRecord struct {
+ id oid.ID
+ info *objectSDK.SplitInfo // optional
+ }
+
+ OwnerRecord struct {
+ id oid.ID
+ }
+
+ UserAttributeRecord struct {
+ id oid.ID
+ }
+
+ PayloadHashRecord struct {
+ checksum checksum.Checksum
+ ids []oid.ID
+ }
+
+ ParentRecord struct {
+ parent oid.ID
+ ids []oid.ID
+ }
+
+ SplitRecord struct {
+ id uuid.UUID
+ ids []oid.ID
+ }
+
+ ContainerCountersRecord struct {
+ id cid.ID
+ logical, physical, user uint64
+ }
+
+ ECInfoRecord struct {
+ id oid.ID
+ ids []oid.ID
+ }
+)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/util.go b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
new file mode 100644
index 000000000..f50ebe951
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/util.go
@@ -0,0 +1,20 @@
+package records
+
+import (
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/nspcc-dev/neo-go/pkg/io"
+)
+
+func DecodeOIDs(data []byte) ([]oid.ID, error) {
+ r := io.NewBinReaderFromBuf(data)
+
+ size := r.ReadVarUint()
+ oids := make([]oid.ID, size)
+
+ for i := uint64(0); i < size; i++ {
+ if err := oids[i].Decode(r.ReadVarBytes()); err != nil {
+ return nil, err
+ }
+ }
+ return oids, nil
+}
diff --git a/go.mod b/go.mod
index 19bf7852f..be3c6e74d 100644
--- a/go.mod
+++ b/go.mod
@@ -17,7 +17,9 @@ require (
github.com/VictoriaMetrics/easyproto v0.1.4
github.com/cheggaaa/pb v1.0.29
github.com/chzyer/readline v1.5.1
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568
+ github.com/gdamore/tcell/v2 v2.7.4
github.com/go-pkgz/expirable-cache/v3 v3.0.0
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
@@ -65,10 +67,10 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
- github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/davidmz/go-pageant v1.0.2 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -85,6 +87,7 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
github.com/klauspost/reedsolomon v1.12.1 // indirect
+ github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
diff --git a/go.sum b/go.sum
index 8ebd59157..d0218a348 100644
--- a/go.sum
+++ b/go.sum
@@ -75,6 +75,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
+github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -142,6 +146,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -218,6 +224,7 @@ github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5E
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
@@ -352,6 +359,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -359,6 +367,7 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
From 9cbd32bce8524e32711287f86196382517f37562 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 16 Aug 2024 14:33:03 +0300
Subject: [PATCH 019/655] [#1223] lens/tui: Add writecache schema
Signed-off-by: Aleksey Savchuk
---
.../internal/schema/writecache/parsers.go | 63 ++++++++++++++++++
.../internal/schema/writecache/types.go | 66 +++++++++++++++++++
2 files changed, 129 insertions(+)
create mode 100644 cmd/frostfs-lens/internal/schema/writecache/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/writecache/types.go
diff --git a/cmd/frostfs-lens/internal/schema/writecache/parsers.go b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
new file mode 100644
index 000000000..7d70b27b2
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/writecache/parsers.go
@@ -0,0 +1,63 @@
+package writecache
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+var WritecacheParser = common.WithFallback(
+ DefaultBucketParser,
+ common.RawParser.ToFallbackParser(),
+)
+
+func DefaultBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, errors.New("not a bucket")
+ }
+ if !bytes.Equal(key, []byte{0}) {
+ return nil, nil, errors.New("invalid key")
+ }
+ return &DefaultBucket{}, DefaultRecordParser, nil
+}
+
+func DefaultRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ parts := strings.Split(string(key), "/")
+
+ if len(parts) != 2 {
+ return nil, nil, errors.New("invalid key, expected address string <CID>/<OID>")
+ }
+
+ cnrRaw, err := base58.Decode(parts[0])
+ if err != nil {
+ return nil, nil, errors.New("can't decode CID string")
+ }
+ objRaw, err := base58.Decode(parts[1])
+ if err != nil {
+ return nil, nil, errors.New("can't decode OID string")
+ }
+
+ cnr := cid.ID{}
+ if err := cnr.Decode(cnrRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode CID: %w", err)
+ }
+ obj := oid.ID{}
+ if err := obj.Decode(objRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode OID: %w", err)
+ }
+
+ var r DefaultRecord
+
+ r.addr.SetContainer(cnr)
+ r.addr.SetObject(obj)
+
+ r.data = value[:]
+
+ return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
new file mode 100644
index 000000000..3f71c5366
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -0,0 +1,66 @@
+package writecache
+
+import (
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type (
+ DefaultBucket struct{}
+
+ DefaultRecord struct {
+ addr oid.Address
+ data []byte
+ }
+)
+
+func (b *DefaultBucket) String() string {
+ return common.FormatSimple("0 Default", tcell.ColorLime)
+}
+
+func (r *DefaultRecord) String() string {
+ return fmt.Sprintf(
+ "CID %s OID %s %c Data {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+}
+
+func (b *DefaultBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (r *DefaultRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (b *DefaultBucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return common.Maybe
+ case "oid":
+ return common.Maybe
+ default:
+ return common.No
+ }
+}
+
+func (r *DefaultRecord) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
From ed396448acbd3792da3ae0b756233e8c8fe67fee Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 11 Jul 2024 19:39:54 +0300
Subject: [PATCH 020/655] [#1223] lens/tui: Add TUI app to explore metabase
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/meta/root.go | 1 +
cmd/frostfs-lens/internal/meta/tui.go | 82 ++++
cmd/frostfs-lens/internal/tui/buckets.go | 257 ++++++++++
cmd/frostfs-lens/internal/tui/db.go | 160 +++++++
cmd/frostfs-lens/internal/tui/detailed.go | 24 +
cmd/frostfs-lens/internal/tui/filter.go | 44 ++
cmd/frostfs-lens/internal/tui/input.go | 77 +++
cmd/frostfs-lens/internal/tui/loading.go | 72 +++
cmd/frostfs-lens/internal/tui/records.go | 271 +++++++++++
cmd/frostfs-lens/internal/tui/types.go | 18 +
cmd/frostfs-lens/internal/tui/ui.go | 548 ++++++++++++++++++++++
cmd/frostfs-lens/internal/tui/util.go | 97 ++++
go.mod | 3 +-
go.sum | 6 +-
14 files changed, 1657 insertions(+), 3 deletions(-)
create mode 100644 cmd/frostfs-lens/internal/meta/tui.go
create mode 100644 cmd/frostfs-lens/internal/tui/buckets.go
create mode 100644 cmd/frostfs-lens/internal/tui/db.go
create mode 100644 cmd/frostfs-lens/internal/tui/detailed.go
create mode 100644 cmd/frostfs-lens/internal/tui/filter.go
create mode 100644 cmd/frostfs-lens/internal/tui/input.go
create mode 100644 cmd/frostfs-lens/internal/tui/loading.go
create mode 100644 cmd/frostfs-lens/internal/tui/records.go
create mode 100644 cmd/frostfs-lens/internal/tui/types.go
create mode 100644 cmd/frostfs-lens/internal/tui/ui.go
create mode 100644 cmd/frostfs-lens/internal/tui/util.go
diff --git a/cmd/frostfs-lens/internal/meta/root.go b/cmd/frostfs-lens/internal/meta/root.go
index 6741abd0c..351d1ce80 100644
--- a/cmd/frostfs-lens/internal/meta/root.go
+++ b/cmd/frostfs-lens/internal/meta/root.go
@@ -32,6 +32,7 @@ func init() {
inspectCMD,
listGraveyardCMD,
listGarbageCMD,
+ tuiCMD,
)
}
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
new file mode 100644
index 000000000..00e8bf117
--- /dev/null
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -0,0 +1,82 @@
+package meta
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+var tuiCMD = &cobra.Command{
+ Use: "explore",
+ Short: "Metabase exploration with a terminal UI",
+ Long: `Launch a terminal UI to explore metabase and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+- attr key[/value]
+`,
+ Run: tuiFunc,
+}
+
+var initialPrompt string
+
+func init() {
+ common.AddComponentPathFlag(tuiCMD, &vPath)
+
+ tuiCMD.Flags().StringVar(
+ &initialPrompt,
+ "filter",
+ "",
+ "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+ )
+}
+
+func tuiFunc(cmd *cobra.Command, _ []string) {
+ common.ExitOnErr(cmd, runTUI(cmd))
+}
+
+func runTUI(cmd *cobra.Command) error {
+ db, err := openDB(false)
+ if err != nil {
+ return fmt.Errorf("couldn't open database: %w", err)
+ }
+ defer db.Close()
+
+ // Need if app was stopped with Ctrl-C.
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ app := tview.NewApplication()
+ ui := tui.NewUI(ctx, app, db, schema.MetabaseParser, nil)
+
+ _ = ui.AddFilter("cid", tui.CIDParser, "CID")
+ _ = ui.AddFilter("oid", tui.OIDParser, "OID")
+ _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+ _ = ui.AddCompositeFilter("attr", tui.AttributeParser, "key[/value]")
+
+ err = ui.WithPrompt(initialPrompt)
+ if err != nil {
+ return fmt.Errorf("invalid filter prompt: %w", err)
+ }
+
+ app.SetRoot(ui, true).SetFocus(ui)
+ return app.Run()
+}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/buckets.go b/cmd/frostfs-lens/internal/tui/buckets.go
new file mode 100644
index 000000000..3f5088e7a
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/buckets.go
@@ -0,0 +1,257 @@
+package tui
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type BucketsView struct {
+ *tview.Box
+
+ mu sync.Mutex
+
+ view *tview.TreeView
+ nodeToUpdate *tview.TreeNode
+
+ ui *UI
+ filter *Filter
+}
+
+type bucketNode struct {
+ bucket *Bucket
+ filter *Filter
+}
+
+func NewBucketsView(ui *UI, filter *Filter) *BucketsView {
+ return &BucketsView{
+ Box: tview.NewBox(),
+ view: tview.NewTreeView(),
+ ui: ui,
+ filter: filter,
+ }
+}
+
+func (v *BucketsView) Mount(_ context.Context) error {
+ root := tview.NewTreeNode(".")
+ root.SetExpanded(false)
+ root.SetSelectable(false)
+ root.SetReference(&bucketNode{
+ bucket: &Bucket{NextParser: v.ui.rootParser},
+ filter: v.filter,
+ })
+
+ v.nodeToUpdate = root
+
+ v.view.SetRoot(root)
+ v.view.SetCurrentNode(root)
+
+ return nil
+}
+
+func (v *BucketsView) Update(ctx context.Context) error {
+ if v.nodeToUpdate == nil {
+ return nil
+ }
+ defer func() { v.nodeToUpdate = nil }()
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ ready := make(chan struct{})
+ errCh := make(chan error)
+
+ tmp := tview.NewTreeNode(v.nodeToUpdate.GetText())
+ tmp.SetReference(v.nodeToUpdate.GetReference())
+
+ node := v.nodeToUpdate.GetReference().(*bucketNode)
+
+ go func() {
+ defer close(ready)
+
+ hasBuckets, err := HasBuckets(ctx, v.ui.db, node.bucket.Path)
+ if err != nil {
+ errCh <- err
+ }
+
+ // Show the selected bucket's records instead.
+ if !hasBuckets && node.bucket.NextParser != nil {
+ v.ui.moveNextPage(NewRecordsView(v.ui, node.bucket, node.filter))
+ }
+
+ if v.nodeToUpdate.IsExpanded() {
+ return
+ }
+
+ err = v.loadNodeChildren(ctx, tmp, node.filter)
+ if err != nil {
+ errCh <- err
+ }
+ }()
+
+ select {
+ case <-ctx.Done():
+ case <-ready:
+ v.mu.Lock()
+ v.nodeToUpdate.SetChildren(tmp.GetChildren())
+ v.nodeToUpdate.SetExpanded(!v.nodeToUpdate.IsExpanded())
+ v.mu.Unlock()
+ case err := <-errCh:
+ return err
+ }
+
+ return nil
+}
+
+func (v *BucketsView) Unmount() {
+}
+
+func (v *BucketsView) Draw(screen tcell.Screen) {
+ x, y, width, height := v.GetInnerRect()
+ v.view.SetRect(x, y, width, height)
+
+ v.view.Draw(screen)
+}
+
+func (v *BucketsView) loadNodeChildren(
+ ctx context.Context, node *tview.TreeNode, filter *Filter,
+) error {
+ parentBucket := node.GetReference().(*bucketNode).bucket
+
+ path := parentBucket.Path
+ parser := parentBucket.NextParser
+
+ buffer, err := LoadBuckets(ctx, v.ui.db, path, v.ui.loadBufferSize)
+ if err != nil {
+ return err
+ }
+
+ for item := range buffer {
+ if item.err != nil {
+ return item.err
+ }
+ bucket := item.val
+
+ bucket.Entry, bucket.NextParser, err = parser(bucket.Name, nil)
+ if err != nil {
+ return err
+ }
+
+ satisfies, err := v.bucketSatisfiesFilter(ctx, bucket, filter)
+ if err != nil {
+ return err
+ }
+ if !satisfies {
+ continue
+ }
+
+ child := tview.NewTreeNode(bucket.Entry.String()).
+ SetSelectable(true).
+ SetExpanded(false).
+ SetReference(&bucketNode{
+ bucket: bucket,
+ filter: filter.Apply(bucket.Entry),
+ })
+
+ node.AddChild(child)
+ }
+
+ return nil
+}
+
+func (v *BucketsView) bucketSatisfiesFilter(
+ ctx context.Context, bucket *Bucket, filter *Filter,
+) (bool, error) {
+ // Does the current bucket satisfies the filter?
+ filter = filter.Apply(bucket.Entry)
+
+ if filter.Result() == common.Yes {
+ return true, nil
+ }
+
+ if filter.Result() == common.No {
+ return false, nil
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Check the current bucket's nested buckets if exist
+ bucketsBuffer, err := LoadBuckets(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return false, err
+ }
+
+ for item := range bucketsBuffer {
+ if item.err != nil {
+ return false, item.err
+ }
+ b := item.val
+
+ b.Entry, b.NextParser, err = bucket.NextParser(b.Name, nil)
+ if err != nil {
+ return false, err
+ }
+
+ satisfies, err := v.bucketSatisfiesFilter(ctx, b, filter)
+ if err != nil {
+ return false, err
+ }
+ if satisfies {
+ return true, nil
+ }
+ }
+
+ // Check the current bucket's nested records if exist
+ recordsBuffer, err := LoadRecords(ctx, v.ui.db, bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return false, err
+ }
+
+ for item := range recordsBuffer {
+ if item.err != nil {
+ return false, item.err
+ }
+ r := item.val
+
+ r.Entry, _, err = bucket.NextParser(r.Key, r.Value)
+ if err != nil {
+ return false, err
+ }
+
+ if filter.Apply(r.Entry).Result() == common.Yes {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func (v *BucketsView) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return v.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ currentNode := v.view.GetCurrentNode()
+ if currentNode == nil {
+ return
+ }
+
+ switch event.Key() {
+ case tcell.KeyEnter:
+ // Expand or collapse the selected bucket's nested buckets,
+ // otherwise, navigate to that bucket's records.
+ v.nodeToUpdate = currentNode
+ case tcell.KeyCtrlR:
+ // Navigate to the selected bucket's records.
+ bucketNode := currentNode.GetReference().(*bucketNode)
+ v.ui.moveNextPage(NewRecordsView(v.ui, bucketNode.bucket, bucketNode.filter))
+ case tcell.KeyCtrlD:
+ // Navigate to the selected bucket's detailed view.
+ bucketNode := currentNode.GetReference().(*bucketNode)
+ v.ui.moveNextPage(NewDetailedView(bucketNode.bucket.Entry.DetailedString()))
+ default:
+ v.view.InputHandler()(event, func(tview.Primitive) {})
+ }
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/db.go b/cmd/frostfs-lens/internal/tui/db.go
new file mode 100644
index 000000000..d0cf611d4
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/db.go
@@ -0,0 +1,160 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "go.etcd.io/bbolt"
+)
+
+type Item[T any] struct {
+ val T
+ err error
+}
+
+func resolvePath(tx *bbolt.Tx, path [][]byte) (*bbolt.Bucket, error) {
+ if len(path) == 0 {
+ return nil, errors.New("can't find bucket without path")
+ }
+
+ name := path[0]
+ bucket := tx.Bucket(name)
+ if bucket == nil {
+ return nil, fmt.Errorf("no bucket with name %s", name)
+ }
+ for _, name := range path[1:] {
+ bucket = bucket.Bucket(name)
+ if bucket == nil {
+ return nil, fmt.Errorf("no bucket with name %s", name)
+ }
+ }
+ return bucket, nil
+}
+
+func load[T any](
+ ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+ filter func(key, value []byte) bool, transform func(key, value []byte) T,
+) (<-chan Item[T], error) {
+ buffer := make(chan Item[T], bufferSize)
+
+ go func() {
+ defer close(buffer)
+
+ err := db.View(func(tx *bbolt.Tx) error {
+ var cursor *bbolt.Cursor
+ if len(path) == 0 {
+ cursor = tx.Cursor()
+ } else {
+ bucket, err := resolvePath(tx, path)
+ if err != nil {
+ buffer <- Item[T]{err: fmt.Errorf("can't find bucket: %w", err)}
+ return nil
+ }
+ cursor = bucket.Cursor()
+ }
+
+ key, value := cursor.First()
+ for {
+ if key == nil {
+ return nil
+ }
+ if filter != nil && !filter(key, value) {
+ key, value = cursor.Next()
+ continue
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil
+ case buffer <- Item[T]{val: transform(key, value)}:
+ key, value = cursor.Next()
+ }
+ }
+ })
+ if err != nil {
+ buffer <- Item[T]{err: err}
+ }
+ }()
+
+ return buffer, nil
+}
+
+func LoadBuckets(
+ ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+) (<-chan Item[*Bucket], error) {
+ buffer, err := load(
+ ctx, db, path, bufferSize,
+ func(_, value []byte) bool {
+ return value == nil
+ },
+ func(key, _ []byte) *Bucket {
+ base := make([][]byte, 0, len(path))
+ base = append(base, path...)
+
+ return &Bucket{
+ Name: key,
+ Path: append(base, key),
+ }
+ },
+ )
+ if err != nil {
+ return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+ }
+
+ return buffer, nil
+}
+
+func LoadRecords(
+ ctx context.Context, db *bbolt.DB, path [][]byte, bufferSize int,
+) (<-chan Item[*Record], error) {
+ buffer, err := load(
+ ctx, db, path, bufferSize,
+ func(_, value []byte) bool {
+ return value != nil
+ },
+ func(key, value []byte) *Record {
+ base := make([][]byte, 0, len(path))
+ base = append(base, path...)
+
+ return &Record{
+ Key: key,
+ Value: value,
+ Path: append(base, key),
+ }
+ },
+ )
+ if err != nil {
+ return nil, fmt.Errorf("can't start iterating bucket: %w", err)
+ }
+
+ return buffer, nil
+}
+
+// HasBuckets checks if a bucket has nested buckets. It relies on assumption
+// that a bucket can have either nested buckets or records but not both.
+func HasBuckets(ctx context.Context, db *bbolt.DB, path [][]byte) (bool, error) {
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ buffer, err := load(
+ ctx, db, path, 1,
+ nil,
+ func(_, value []byte) []byte { return value },
+ )
+ if err != nil {
+ return false, err
+ }
+
+ x, ok := <-buffer
+ if !ok {
+ return false, nil
+ }
+ if x.err != nil {
+ return false, x.err
+ }
+ if x.val != nil {
+ return false, nil
+ }
+ return true, nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/detailed.go b/cmd/frostfs-lens/internal/tui/detailed.go
new file mode 100644
index 000000000..b2d897230
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/detailed.go
@@ -0,0 +1,24 @@
+package tui
+
+import (
+ "context"
+
+ "github.com/rivo/tview"
+)
+
+type DetailedView struct {
+ *tview.TextView
+}
+
+func NewDetailedView(detailed string) *DetailedView {
+ v := &DetailedView{
+ TextView: tview.NewTextView(),
+ }
+ v.SetDynamicColors(true)
+ v.SetText(detailed)
+ return v
+}
+
+func (v *DetailedView) Mount(_ context.Context) error { return nil }
+func (v *DetailedView) Update(_ context.Context) error { return nil }
+func (v *DetailedView) Unmount() {}
diff --git a/cmd/frostfs-lens/internal/tui/filter.go b/cmd/frostfs-lens/internal/tui/filter.go
new file mode 100644
index 000000000..e7879eca7
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/filter.go
@@ -0,0 +1,44 @@
+package tui
+
+import (
+ "maps"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+)
+
+type Filter struct {
+ values map[string]any
+ results map[string]common.FilterResult
+}
+
+func NewFilter(values map[string]any) *Filter {
+ f := &Filter{
+ values: maps.Clone(values),
+ results: make(map[string]common.FilterResult),
+ }
+ for tag := range values {
+ f.results[tag] = common.No
+ }
+ return f
+}
+
+func (f *Filter) Apply(e common.SchemaEntry) *Filter {
+ filter := &Filter{
+ values: f.values,
+ results: maps.Clone(f.results),
+ }
+
+ for tag, value := range filter.values {
+ filter.results[tag] = max(filter.results[tag], e.Filter(tag, value))
+ }
+
+ return filter
+}
+
+func (f *Filter) Result() common.FilterResult {
+ current := common.Yes
+ for _, r := range f.results {
+ current = min(r, current)
+ }
+ return current
+}
diff --git a/cmd/frostfs-lens/internal/tui/input.go b/cmd/frostfs-lens/internal/tui/input.go
new file mode 100644
index 000000000..4fdf97119
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/input.go
@@ -0,0 +1,77 @@
+package tui
+
+import (
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type InputFieldWithHistory struct {
+ *tview.InputField
+ history []string
+ historyLimit int
+ historyPointer int
+ currentContent string
+}
+
+func NewInputFieldWithHistory(historyLimit int) *InputFieldWithHistory {
+ return &InputFieldWithHistory{
+ InputField: tview.NewInputField(),
+ historyLimit: historyLimit,
+ }
+}
+
+func (f *InputFieldWithHistory) AddToHistory(s string) {
+ // Stop scrolling history on history change, need to start scrolling again.
+ defer func() { f.historyPointer = len(f.history) }()
+
+ // Used history data for search prompt, so just make that data recent.
+ if f.historyPointer != len(f.history) && s == f.history[f.historyPointer] {
+ f.history = append(f.history[:f.historyPointer], f.history[f.historyPointer+1:]...)
+ // The unconditional append below re-adds it once as the most recent entry.
+ }
+
+ if len(f.history) == f.historyLimit {
+ f.history = f.history[1:]
+ }
+ f.history = append(f.history, s)
+}
+
+func (f *InputFieldWithHistory) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return f.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ switch event.Key() {
+ case tcell.KeyDown:
+ if len(f.history) == 0 {
+ return
+ }
+ // Need to start iterating before.
+ if f.historyPointer == len(f.history) {
+ return
+ }
+ // Iterate to most recent prompts.
+ f.historyPointer++
+ // Stop iterating over history.
+ if f.historyPointer == len(f.history) {
+ f.InputField.SetText(f.currentContent)
+ return
+ }
+ f.InputField.SetText(f.history[f.historyPointer])
+ case tcell.KeyUp:
+ if len(f.history) == 0 {
+ return
+ }
+ // Start iterating over history.
+ if f.historyPointer == len(f.history) {
+ f.currentContent = f.InputField.GetText()
+ }
+ // End of history.
+ if f.historyPointer == 0 {
+ return
+ }
+ // Iterate to least recent prompts.
+ f.historyPointer--
+ f.InputField.SetText(f.history[f.historyPointer])
+ default:
+ f.InputField.InputHandler()(event, func(tview.Primitive) {})
+ }
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/loading.go b/cmd/frostfs-lens/internal/tui/loading.go
new file mode 100644
index 000000000..4b9384ad4
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/loading.go
@@ -0,0 +1,72 @@
+package tui
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type LoadingBar struct {
+ *tview.Box
+ view *tview.TextView
+ secondsElapsed atomic.Int64
+ needDrawFunc func()
+ reset func()
+}
+
+func NewLoadingBar(needDrawFunc func()) *LoadingBar {
+ b := &LoadingBar{
+ Box: tview.NewBox(),
+ view: tview.NewTextView(),
+ needDrawFunc: needDrawFunc,
+ }
+ b.view.SetBackgroundColor(tview.Styles.PrimaryTextColor)
+ b.view.SetTextColor(b.GetBackgroundColor())
+
+ return b
+}
+
+func (b *LoadingBar) Start(ctx context.Context) {
+ ctx, b.reset = context.WithCancel(ctx)
+
+ go func() {
+ ticker := time.NewTicker(1 * time.Second)
+ defer ticker.Stop()
+
+ b.secondsElapsed.Store(0)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ b.secondsElapsed.Add(1)
+ b.needDrawFunc()
+ }
+ }
+ }()
+}
+
+func (b *LoadingBar) Stop() {
+ b.reset()
+}
+
+func (b *LoadingBar) Draw(screen tcell.Screen) {
+ seconds := b.secondsElapsed.Load()
+
+ var time string
+ switch {
+ case seconds < 60:
+ time = fmt.Sprintf("%ds", seconds)
+ default:
+ time = fmt.Sprintf("%dm%ds", seconds/60, seconds%60)
+ }
+ b.view.SetText(fmt.Sprintf(" Loading... %s (press Escape to cancel) ", time))
+
+ x, y, width, _ := b.GetInnerRect()
+ b.view.SetRect(x, y, width, 1)
+ b.view.Draw(screen)
+}
diff --git a/cmd/frostfs-lens/internal/tui/records.go b/cmd/frostfs-lens/internal/tui/records.go
new file mode 100644
index 000000000..5f53ed287
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/records.go
@@ -0,0 +1,271 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type updateType int
+
+const (
+ other updateType = iota
+ moveToPrevPage
+ moveToNextPage
+ moveUp
+ moveDown
+ moveHome
+ moveEnd
+)
+
+type RecordsView struct {
+ *tview.Box
+
+ mu sync.RWMutex
+
+ onUnmount func()
+
+ bucket *Bucket
+ records []*Record
+
+ buffer chan *Record
+
+ firstRecordIndex int
+ lastRecordIndex int
+ selectedRecordIndex int
+
+ updateType updateType
+
+ ui *UI
+ filter *Filter
+}
+
+func NewRecordsView(ui *UI, bucket *Bucket, filter *Filter) *RecordsView {
+ return &RecordsView{
+ Box: tview.NewBox(),
+ bucket: bucket,
+ ui: ui,
+ filter: filter,
+ }
+}
+
+func (v *RecordsView) Mount(ctx context.Context) error {
+ if v.onUnmount != nil {
+ return errors.New("try to mount already mounted component")
+ }
+
+ ctx, v.onUnmount = context.WithCancel(ctx)
+
+ tempBuffer, err := LoadRecords(ctx, v.ui.db, v.bucket.Path, v.ui.loadBufferSize)
+ if err != nil {
+ return err
+ }
+
+ v.buffer = make(chan *Record, v.ui.loadBufferSize)
+ go func() {
+ defer close(v.buffer)
+
+ for item := range tempBuffer {
+ if item.err != nil {
+ v.ui.stopOnError(item.err)
+ break
+ }
+ record := item.val
+
+ record.Entry, _, err = v.bucket.NextParser(record.Key, record.Value)
+ if err != nil {
+ v.ui.stopOnError(err)
+ break
+ }
+
+ if v.filter.Apply(record.Entry).Result() != common.Yes {
+ continue
+ }
+
+ v.buffer <- record
+ }
+ }()
+
+ return nil
+}
+
+func (v *RecordsView) Unmount() {
+ if v.onUnmount == nil {
+ panic("try to unmount not mounted component")
+ }
+ v.onUnmount()
+ v.onUnmount = nil
+}
+
+func (v *RecordsView) Update(ctx context.Context) error {
+ _, _, _, recordsPerPage := v.GetInnerRect()
+ firstRecordIndex, lastRecordIndex, selectedRecordIndex := v.getNewIndexes()
+
+loop:
+ for len(v.records) < lastRecordIndex {
+ select {
+ case <-ctx.Done():
+ return nil
+ case record, ok := <-v.buffer:
+ if !ok {
+ break loop
+ }
+ v.records = append(v.records, record)
+ }
+ }
+
+ // Set the update type to its default value after some specific key event
+ // has been handled.
+ v.updateType = other
+
+ firstRecordIndex = max(0, min(firstRecordIndex, len(v.records)-recordsPerPage))
+ lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
+ selectedRecordIndex = min(selectedRecordIndex, lastRecordIndex-1)
+
+ v.mu.Lock()
+ v.firstRecordIndex = firstRecordIndex
+ v.lastRecordIndex = lastRecordIndex
+ v.selectedRecordIndex = selectedRecordIndex
+ v.mu.Unlock()
+
+ return nil
+}
+
+func (v *RecordsView) getNewIndexes() (int, int, int) {
+ v.mu.RLock()
+ firstRecordIndex := v.firstRecordIndex
+ lastRecordIndex := v.lastRecordIndex
+ selectedRecordIndex := v.selectedRecordIndex
+ v.mu.RUnlock()
+
+ _, _, _, recordsPerPage := v.GetInnerRect()
+
+ switch v.updateType {
+ case moveUp:
+ if selectedRecordIndex != firstRecordIndex {
+ selectedRecordIndex--
+ break
+ }
+ firstRecordIndex = max(0, firstRecordIndex-1)
+ lastRecordIndex = min(firstRecordIndex+recordsPerPage, len(v.records))
+ selectedRecordIndex = firstRecordIndex
+ case moveToPrevPage:
+ if selectedRecordIndex != firstRecordIndex {
+ selectedRecordIndex = firstRecordIndex
+ break
+ }
+ firstRecordIndex = max(0, firstRecordIndex-recordsPerPage)
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = firstRecordIndex
+ case moveDown:
+ if selectedRecordIndex != lastRecordIndex-1 {
+ selectedRecordIndex++
+ break
+ }
+ firstRecordIndex++
+ lastRecordIndex++
+ selectedRecordIndex++
+ case moveToNextPage:
+ if selectedRecordIndex != lastRecordIndex-1 {
+ selectedRecordIndex = lastRecordIndex - 1
+ break
+ }
+ firstRecordIndex += recordsPerPage
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = lastRecordIndex - 1
+ case moveHome:
+ firstRecordIndex = 0
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ selectedRecordIndex = 0
+ case moveEnd:
+ lastRecordIndex = math.MaxInt32
+ firstRecordIndex = lastRecordIndex - recordsPerPage
+ selectedRecordIndex = lastRecordIndex - 1
+ default:
+ lastRecordIndex = firstRecordIndex + recordsPerPage
+ }
+
+ return firstRecordIndex, lastRecordIndex, selectedRecordIndex
+}
+
+func (v *RecordsView) GetInnerRect() (int, int, int, int) {
+ x, y, width, height := v.Box.GetInnerRect()
+
+ // Left padding.
+ x = min(x+3, x+width-1)
+ width = max(width-3, 0)
+
+ return x, y, width, height
+}
+
+func (v *RecordsView) Draw(screen tcell.Screen) {
+ v.mu.RLock()
+ firstRecordIndex := v.firstRecordIndex
+ lastRecordIndex := v.lastRecordIndex
+ selectedRecordIndex := v.selectedRecordIndex
+ records := v.records
+ v.mu.RUnlock()
+
+ v.DrawForSubclass(screen, v)
+
+ x, y, width, height := v.GetInnerRect()
+ if height == 0 {
+ return
+ }
+
+ // No records in that bucket.
+ if firstRecordIndex == lastRecordIndex {
+ tview.Print(
+ screen, "Empty Bucket", x, y, width, tview.AlignCenter, tview.Styles.PrimaryTextColor,
+ )
+ return
+ }
+
+ for index := firstRecordIndex; index < lastRecordIndex; index++ {
+ result := records[index].Entry
+ text := result.String()
+
+ if index == selectedRecordIndex {
+ text = fmt.Sprintf("[:white]%s[:-]", text)
+ tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimitiveBackgroundColor)
+ } else {
+ tview.Print(screen, text, x, y, width, tview.AlignLeft, tview.Styles.PrimaryTextColor)
+ }
+
+ y++
+ }
+}
+
+func (v *RecordsView) InputHandler() func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+ return v.WrapInputHandler(func(event *tcell.EventKey, _ func(p tview.Primitive)) {
+ switch m, k := event.Modifiers(), event.Key(); {
+ case m == 0 && k == tcell.KeyPgUp:
+ v.updateType = moveToPrevPage
+ case m == 0 && k == tcell.KeyPgDn:
+ v.updateType = moveToNextPage
+ case m == 0 && k == tcell.KeyUp:
+ v.updateType = moveUp
+ case m == 0 && k == tcell.KeyDown:
+ v.updateType = moveDown
+ case m == 0 && k == tcell.KeyHome:
+ v.updateType = moveHome
+ case m == 0 && k == tcell.KeyEnd:
+ v.updateType = moveEnd
+ case k == tcell.KeyEnter:
+ v.mu.RLock()
+ selectedRecordIndex := v.selectedRecordIndex
+ records := v.records
+ v.mu.RUnlock()
+ if len(records) != 0 {
+ current := records[selectedRecordIndex]
+ v.ui.moveNextPage(NewDetailedView(current.Entry.DetailedString()))
+ }
+ }
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/types.go b/cmd/frostfs-lens/internal/tui/types.go
new file mode 100644
index 000000000..4a227fe64
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/types.go
@@ -0,0 +1,18 @@
+package tui
+
+import (
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+)
+
+type Bucket struct {
+ Name []byte
+ Path [][]byte
+ Entry common.SchemaEntry
+ NextParser common.Parser
+}
+
+type Record struct {
+ Key, Value []byte
+ Path [][]byte
+ Entry common.SchemaEntry
+}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
new file mode 100644
index 000000000..701f2b331
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -0,0 +1,548 @@
+package tui
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+ "go.etcd.io/bbolt"
+)
+
+type Config struct {
+ LoadBufferSize int
+ SearchHistorySize int
+ LoadingIndicatorLag time.Duration
+}
+
+var DefaultConfig = Config{
+ LoadBufferSize: 100,
+ SearchHistorySize: 100,
+ LoadingIndicatorLag: 500 * time.Millisecond,
+}
+
+type Primitive interface {
+ tview.Primitive
+
+ Mount(ctx context.Context) error
+ Update(ctx context.Context) error
+ Unmount()
+}
+
+type UI struct {
+ *tview.Box
+
+ // Need to use context while updating pages those read data from a database.
+ // Context should be shared among all mount and updates. Current TUI library
+ // doesn't use contexts at all, so I do that feature by myself.
+ //nolint:containedctx
+ ctx context.Context
+ onStop func()
+
+ app *tview.Application
+ db *bbolt.DB
+
+ pageHistory []Primitive
+ mountedPage Primitive
+
+ pageToMount Primitive
+
+ pageStub tview.Primitive
+
+ infoBar *tview.TextView
+ searchBar *InputFieldWithHistory
+ loadingBar *LoadingBar
+ helpBar *tview.TextView
+
+ searchErrorBar *tview.TextView
+
+ isSearching bool
+ isLoading atomic.Bool
+ isShowingError bool
+ isShowingHelp bool
+
+ loadBufferSize int
+
+ rootParser common.Parser
+
+ loadingIndicatorLag time.Duration
+
+ cancelLoading func()
+
+ filters map[string]func(string) (any, error)
+ compositeFilters map[string]func(string) (map[string]any, error)
+ filterHints map[string]string
+}
+
+func NewUI(
+ ctx context.Context,
+ app *tview.Application,
+ db *bbolt.DB,
+ rootParser common.Parser,
+ cfg *Config,
+) *UI {
+ spew.Config.DisableMethods = true
+
+ if cfg == nil {
+ cfg = &DefaultConfig
+ }
+
+ ui := &UI{
+ Box: tview.NewBox(),
+
+ app: app,
+ db: db,
+ rootParser: rootParser,
+
+ filters: make(map[string]func(string) (any, error)),
+ compositeFilters: make(map[string]func(string) (map[string]any, error)),
+ filterHints: make(map[string]string),
+
+ loadBufferSize: cfg.LoadBufferSize,
+ loadingIndicatorLag: cfg.LoadingIndicatorLag,
+ }
+
+ ui.ctx, ui.onStop = context.WithCancel(ctx)
+
+ backgroundColor := ui.GetBackgroundColor()
+ textColor := tview.Styles.PrimaryTextColor
+
+ inverseBackgroundColor := textColor
+ inverseTextColor := backgroundColor
+
+ alertTextColor := tcell.ColorRed
+
+ ui.pageStub = tview.NewBox()
+
+ ui.infoBar = tview.NewTextView()
+ ui.infoBar.SetBackgroundColor(inverseBackgroundColor)
+ ui.infoBar.SetTextColor(inverseTextColor)
+ ui.infoBar.SetText(
+ fmt.Sprintf(" %s (press h for help, q to quit) ", db.Path()),
+ )
+
+ ui.searchBar = NewInputFieldWithHistory(cfg.SearchHistorySize)
+ ui.searchBar.SetFieldBackgroundColor(backgroundColor)
+ ui.searchBar.SetFieldTextColor(textColor)
+ ui.searchBar.SetLabelColor(textColor)
+ ui.searchBar.Focus(nil)
+ ui.searchBar.SetLabel("/")
+
+ ui.searchErrorBar = tview.NewTextView()
+ ui.searchErrorBar.SetBackgroundColor(backgroundColor)
+ ui.searchErrorBar.SetTextColor(alertTextColor)
+
+ ui.helpBar = tview.NewTextView()
+ ui.helpBar.SetBackgroundColor(inverseBackgroundColor)
+ ui.helpBar.SetTextColor(inverseTextColor)
+ ui.helpBar.SetText(" Press Enter for next page or Escape to exit help ")
+
+ ui.loadingBar = NewLoadingBar(ui.triggerDraw)
+
+ ui.pageToMount = NewBucketsView(ui, NewFilter(nil))
+
+ return ui
+}
+
+func (ui *UI) checkFilterExists(typ string) bool {
+ if _, ok := ui.filters[typ]; ok {
+ return true
+ }
+ if _, ok := ui.compositeFilters[typ]; ok {
+ return true
+ }
+ return false
+}
+
+func (ui *UI) AddFilter(
+ typ string,
+ parser func(string) (any, error),
+ helpHint string,
+) error {
+ if ui.checkFilterExists(typ) {
+ return fmt.Errorf("filter %s already exists", typ)
+ }
+ ui.filters[typ] = parser
+ ui.filterHints[typ] = helpHint
+ return nil
+}
+
+func (ui *UI) AddCompositeFilter(
+ typ string,
+ parser func(string) (map[string]any, error),
+ helpHint string,
+) error {
+ if ui.checkFilterExists(typ) {
+ return fmt.Errorf("filter %s already exists", typ)
+ }
+ ui.compositeFilters[typ] = parser
+ ui.filterHints[typ] = helpHint
+ return nil
+}
+
+func (ui *UI) stopOnError(err error) {
+ if err != nil {
+ ui.onStop()
+ ui.app.QueueEvent(tcell.NewEventError(err))
+ }
+}
+
+func (ui *UI) stop() {
+ ui.onStop()
+ ui.app.Stop()
+}
+
+func (ui *UI) movePrevPage() {
+ if len(ui.pageHistory) != 0 {
+ ui.mountedPage.Unmount()
+ ui.mountedPage = ui.pageHistory[len(ui.pageHistory)-1]
+ ui.pageHistory = ui.pageHistory[:len(ui.pageHistory)-1]
+ ui.triggerDraw()
+ }
+}
+
+func (ui *UI) moveNextPage(page Primitive) {
+ ui.pageToMount = page
+ ui.triggerDraw()
+}
+
+func (ui *UI) triggerDraw() {
+ go ui.app.QueueUpdateDraw(func() {})
+}
+
+func (ui *UI) Draw(screen tcell.Screen) {
+ if ui.isLoading.Load() {
+ ui.draw(screen)
+ return
+ }
+
+ ui.isLoading.Store(true)
+
+ ctx, cancel := context.WithCancel(ui.ctx)
+
+ ready := make(chan struct{})
+ go func() {
+ ui.load(ctx)
+
+ cancel()
+ close(ready)
+ ui.isLoading.Store(false)
+ }()
+
+ select {
+ case <-ready:
+ case <-time.After(ui.loadingIndicatorLag):
+ ui.loadingBar.Start(ui.ctx)
+ ui.cancelLoading = cancel
+
+ go func() {
+ <-ready
+ ui.loadingBar.Stop()
+ ui.triggerDraw()
+ }()
+ }
+
+ ui.draw(screen)
+}
+
+func (ui *UI) load(ctx context.Context) {
+ if ui.mountedPage == nil && ui.pageToMount == nil {
+ ui.stop()
+ return
+ }
+
+ if ui.pageToMount != nil {
+ ui.mountAndUpdate(ctx)
+ } else {
+ ui.update(ctx)
+ }
+}
+
+func (ui *UI) draw(screen tcell.Screen) {
+ ui.DrawForSubclass(screen, ui)
+ x, y, width, height := ui.GetInnerRect()
+
+ var (
+ pageToDraw tview.Primitive
+ barToDraw tview.Primitive
+ )
+
+ switch {
+ case ui.isShowingHelp:
+ pageToDraw = ui.pageStub
+ case ui.mountedPage != nil:
+ pageToDraw = ui.mountedPage
+ default:
+ pageToDraw = ui.pageStub
+ }
+
+ pageToDraw.SetRect(x, y, width, height-1)
+ pageToDraw.Draw(screen)
+
+ // Search bar uses cursor and we need to hide it when another bar is drawn.
+ screen.HideCursor()
+
+ switch {
+ case ui.isLoading.Load():
+ barToDraw = ui.loadingBar
+ case ui.isSearching:
+ barToDraw = ui.searchBar
+ case ui.isShowingError:
+ barToDraw = ui.searchErrorBar
+ case ui.isShowingHelp:
+ barToDraw = ui.helpBar
+ default:
+ barToDraw = ui.infoBar
+ }
+
+ barToDraw.SetRect(x, y+height-1, width, 1)
+ barToDraw.Draw(screen)
+}
+
+func (ui *UI) mountAndUpdate(ctx context.Context) {
+ defer func() {
+ // Operation succeeded or was canceled, either way reset page to mount.
+ ui.pageToMount = nil
+ }()
+
+ // Mount should use app global context.
+ //nolint:contextcheck
+ err := ui.pageToMount.Mount(ui.ctx)
+ if err != nil {
+ ui.stopOnError(err)
+ return
+ }
+
+ x, y, width, height := ui.GetInnerRect()
+ ui.pageToMount.SetRect(x, y, width, height-1)
+
+ s := loadOp(ctx, ui.pageToMount.Update)
+ if s.err != nil {
+ ui.pageToMount.Unmount()
+ ui.stopOnError(s.err)
+ return
+ }
+ // Update was canceled.
+ if !s.done {
+ ui.pageToMount.Unmount()
+ return
+ }
+
+ if ui.mountedPage != nil {
+ ui.pageHistory = append(ui.pageHistory, ui.mountedPage)
+ }
+ ui.mountedPage = ui.pageToMount
+}
+
+func (ui *UI) update(ctx context.Context) {
+ x, y, width, height := ui.GetInnerRect()
+ ui.mountedPage.SetRect(x, y, width, height-1)
+
+ s := loadOp(ctx, ui.mountedPage.Update)
+ if s.err != nil {
+ ui.stopOnError(s.err)
+ return
+ }
+}
+
+type status struct {
+ done bool
+ err error
+}
+
+func loadOp(ctx context.Context, op func(ctx context.Context) error) status {
+ errCh := make(chan error)
+ go func() {
+ errCh <- op(ctx)
+ }()
+
+ select {
+ case <-ctx.Done():
+ return status{done: false, err: nil}
+ case err := <-errCh:
+ return status{done: true, err: err}
+ }
+}
+
+func (ui *UI) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return ui.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ switch {
+ case ui.isLoading.Load():
+ ui.handleInputOnLoading(event)
+ case ui.isShowingHelp:
+ ui.handleInputOnShowingHelp(event)
+ case ui.isShowingError:
+ ui.handleInputOnShowingError()
+ case ui.isSearching:
+ ui.handleInputOnSearching(event)
+ default:
+ ui.handleInput(event)
+ }
+ })
+}
+
+func (ui *UI) handleInput(event *tcell.EventKey) {
+ m, k, r := event.Modifiers(), event.Key(), event.Rune()
+
+ switch {
+ case k == tcell.KeyEsc:
+ ui.movePrevPage()
+ case m == 0 && k == tcell.KeyRune && r == 'h':
+ ui.isShowingHelp = true
+ case m == 0 && k == tcell.KeyRune && r == '/':
+ ui.isSearching = true
+ case m == 0 && k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ default:
+ if ui.mountedPage != nil {
+ ui.mountedPage.InputHandler()(event, func(tview.Primitive) {})
+ }
+ }
+}
+
+func (ui *UI) handleInputOnLoading(event *tcell.EventKey) {
+ switch k, r := event.Key(), event.Rune(); {
+ case k == tcell.KeyEsc:
+ ui.cancelLoading()
+ case k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ }
+}
+
+func (ui *UI) handleInputOnShowingError() {
+ ui.isShowingError = false
+ ui.isSearching = true
+}
+
+func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
+ k, r := event.Key(), event.Rune()
+
+ switch {
+ case k == tcell.KeyEsc:
+ ui.isShowingHelp = false
+ case k == tcell.KeyRune && r == 'q':
+ ui.stop()
+ default:
+ }
+}
+
+func (ui *UI) handleInputOnSearching(event *tcell.EventKey) {
+ m, k := event.Modifiers(), event.Key()
+
+ switch {
+ case k == tcell.KeyEnter:
+ prompt := ui.searchBar.GetText()
+
+ res, err := ui.processPrompt(prompt)
+ if err != nil {
+ ui.isShowingError = true
+ ui.isSearching = false
+ ui.searchErrorBar.SetText(err.Error() + " (press any key to continue)")
+ return
+ }
+
+ switch ui.mountedPage.(type) {
+ case *BucketsView:
+ ui.moveNextPage(NewBucketsView(ui, res))
+ case *RecordsView:
+ bucket := ui.mountedPage.(*RecordsView).bucket
+ ui.moveNextPage(NewRecordsView(ui, bucket, res))
+ }
+
+ if ui.searchBar.GetText() != "" {
+ ui.searchBar.AddToHistory(ui.searchBar.GetText())
+ }
+
+ ui.searchBar.SetText("")
+ ui.isSearching = false
+ case k == tcell.KeyEsc:
+ ui.isSearching = false
+ case (k == tcell.KeyBackspace2 || m&tcell.ModCtrl != 0 && k == tcell.KeyETB) && len(ui.searchBar.GetText()) == 0:
+ ui.isSearching = false
+ default:
+ ui.searchBar.InputHandler()(event, func(tview.Primitive) {})
+ }
+
+ ui.Box.MouseHandler()
+}
+
+func (ui *UI) WithPrompt(prompt string) error {
+ filter, err := ui.processPrompt(prompt)
+ if err != nil {
+ return err
+ }
+
+ ui.pageToMount = NewBucketsView(ui, filter)
+
+ if prompt != "" {
+ ui.searchBar.AddToHistory(prompt)
+ }
+
+ return nil
+}
+
+func (ui *UI) processPrompt(prompt string) (filter *Filter, err error) {
+ if prompt == "" {
+ return NewFilter(nil), nil
+ }
+
+ filterMap := make(map[string]any)
+
+ for _, filterString := range strings.Split(prompt, "+") {
+ parts := strings.Split(filterString, ":")
+ if len(parts) != 2 {
+ return nil, errors.New("expected 'tag:value [+ tag:value]...'")
+ }
+
+ filterTag := strings.TrimSpace(parts[0])
+ filterValueString := strings.TrimSpace(parts[1])
+
+ if _, exists := filterMap[filterTag]; exists {
+ return nil, fmt.Errorf("duplicate filter tag '%s'", filterTag)
+ }
+
+ parser, ok := ui.filters[filterTag]
+ if ok {
+ filterValue, err := parser(filterValueString)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse '%s' filter value: %w", filterTag, err)
+ }
+
+ filterMap[filterTag] = filterValue
+ continue
+ }
+
+ compositeParser, ok := ui.compositeFilters[filterTag]
+ if ok {
+ compositeFilterValue, err := compositeParser(filterValueString)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "can't parse '%s' filter value '%s': %w",
+ filterTag, filterValueString, err,
+ )
+ }
+
+ for tag, value := range compositeFilterValue {
+ if _, exists := filterMap[tag]; exists {
+ return nil, fmt.Errorf(
+ "found duplicate filter tag '%s' while processing composite filter with tag '%s'",
+ tag, filterTag,
+ )
+ }
+
+ filterMap[tag] = value
+ }
+ continue
+ }
+
+ return nil, fmt.Errorf("unknown filter tag '%s'", filterTag)
+ }
+
+ return NewFilter(filterMap), nil
+}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
new file mode 100644
index 000000000..d4e13b2a9
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -0,0 +1,97 @@
+package tui
+
+import (
+ "errors"
+ "strings"
+
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+func CIDParser(s string) (any, error) {
+ data, err := base58.Decode(s)
+ if err != nil {
+ return nil, err
+ }
+ var id cid.ID
+ if err = id.Decode(data); err != nil {
+ return nil, err
+ }
+ return id, nil
+}
+
+func OIDParser(s string) (any, error) {
+ data, err := base58.Decode(s)
+ if err != nil {
+ return nil, err
+ }
+ var id oid.ID
+ if err = id.Decode(data); err != nil {
+ return nil, err
+ }
+ return id, nil
+}
+
+func AddressParser(s string) (map[string]any, error) {
+ m := make(map[string]any)
+
+ parts := strings.Split(s, "/")
+ if len(parts) != 2 {
+ return nil, errors.New("expected /")
+ }
+ cnr, err := CIDParser(parts[0])
+ if err != nil {
+ return nil, err
+ }
+ obj, err := OIDParser(parts[1])
+ if err != nil {
+ return nil, err
+ }
+
+ m["cid"] = cnr
+ m["oid"] = obj
+
+ return m, nil
+}
+
+func keyParser(s string) (any, error) {
+ if s == "" {
+ return nil, errors.New("empty attribute key")
+ }
+ return s, nil
+}
+
+func valueParser(s string) (any, error) {
+ if s == "" {
+ return nil, errors.New("empty attribute value")
+ }
+ return s, nil
+}
+
+func AttributeParser(s string) (map[string]any, error) {
+ m := make(map[string]any)
+
+ parts := strings.Split(s, "/")
+ if len(parts) != 1 && len(parts) != 2 {
+ return nil, errors.New("expected or /")
+ }
+
+ key, err := keyParser(parts[0])
+ if err != nil {
+ return nil, err
+ }
+ m["key"] = key
+
+ if len(parts) == 1 {
+ return m, nil
+ }
+
+ value, err := valueParser(parts[1])
+ if err != nil {
+ return nil, err
+ }
+ m["value"] = value
+
+ return m, nil
+}
diff --git a/go.mod b/go.mod
index be3c6e74d..93eef5b8c 100644
--- a/go.mod
+++ b/go.mod
@@ -32,6 +32,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
+ github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130
github.com/spf13/cast v1.6.0
github.com/spf13/cobra v1.8.1
github.com/spf13/pflag v1.0.5
@@ -106,7 +107,7 @@ require (
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
- github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
diff --git a/go.sum b/go.sum
index d0218a348..102501484 100644
--- a/go.sum
+++ b/go.sum
@@ -223,10 +223,12 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130 h1:o1CYtoFOm6xJK3DvDAEG5wDJPLj+SoxUtUDFaQgt1iY=
+github.com/rivo/tview v0.0.0-20240625185742-b0a7293b8130/go.mod h1:02iFIz7K/A9jGCvrizLPvoqr4cEIx7q54RH5Qudkrss=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
From e6553363905ef350b9faf12e7a42d52cf624815c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 16 Aug 2024 17:27:35 +0300
Subject: [PATCH 021/655] [#1223] lens/tui: Add app help
Signed-off-by: Aleksey Savchuk
---
.../internal/tui/help-pages/hotkeys.txt | 38 +++++++
.../internal/tui/help-pages/searching.txt | 26 +++++
cmd/frostfs-lens/internal/tui/help.go | 101 ++++++++++++++++++
cmd/frostfs-lens/internal/tui/ui.go | 15 ++-
4 files changed, 179 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
create mode 100644 cmd/frostfs-lens/internal/tui/help-pages/searching.txt
create mode 100644 cmd/frostfs-lens/internal/tui/help.go
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
new file mode 100644
index 000000000..c371b34e9
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help-pages/hotkeys.txt
@@ -0,0 +1,38 @@
+[green::b]HOTKEYS[-::-]
+
+ [green::b]Navigation[-::-]
+
+ [yellow::b]Down Arrow[-::-] / [yellow::b]j[-::-]
+ Scroll down.
+
+ [yellow::b]Up Arrow[-::-] / [yellow::b]k[-::-]
+ Scroll up.
+
+ [yellow::b]Page Down[-::-] / [yellow::b]Ctrl-f[-::-]
+ Scroll down by a full page.
+
+ [yellow::b]Page Up[-::-] / [yellow::b]Ctrl-b[-::-]
+ Scroll up by a full page.
+
+ [green::b]Actions[-::-]
+
+ [yellow::b]Enter[-::-]
+ Perform actions based on the current context:
+ - In Buckets View:
+ - Expand/collapse the selected bucket to show/hide its nested buckets.
+ - If no nested buckets exist, navigate to the selected bucket's records.
+ - In Records View: Open the detailed view of the selected record.
+
+ [yellow::b]Escape[-::-]
+ Return to the previous page, opposite of [yellow::b]Enter[-::-].
+
+ Refer to the [green::b]SEARCHING[-::-] section for more specific actions.
+
+
+ [green::b]Alternative Action Hotkeys[-::-]
+
+ [yellow::b]Ctrl-r[-::-]
+ Directly navigate to the selected bucket's records.
+
+ [yellow::b]Ctrl-d[-::-]
+ Access the detailed view of the selected bucket.
diff --git a/cmd/frostfs-lens/internal/tui/help-pages/searching.txt b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
new file mode 100644
index 000000000..bc2be512b
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help-pages/searching.txt
@@ -0,0 +1,26 @@
+[green::b]SEARCHING[-::-]
+
+ [green::b]Hotkeys[-::-]
+
+ [yellow::b]/[-::-]
+ Initiate the search prompt.
+ - The prompt follows this syntax: [yellow::b]tag:value [+ tag:value]...[-::-]
+ - Multiple filters can be combined with [yellow::b]+[-::-]; the result is an intersection of those filters' result sets.
+ - Any leading and trailing whitespace will be ignored.
+ - An empty prompt will return all results with no filters applied.
+ - Refer to the [green::b]Available Search Filters[-::-] section below for a list of valid filter tags.
+
+ [yellow::b]Enter[-::-]
+ Execute the search based on the entered prompt.
+ - If the prompt is invalid, an error message will be displayed.
+
+ [yellow::b]Escape[-::-]
+ Exit the search prompt without performing a search.
+
+ [yellow::b]Down Arrow[-::-], [yellow::b]Up Arrow[-::-]
+ Scroll through the search history.
+
+
+ [green::b]Available Search Filters[-::-]
+
+%s
diff --git a/cmd/frostfs-lens/internal/tui/help.go b/cmd/frostfs-lens/internal/tui/help.go
new file mode 100644
index 000000000..3ab8fede0
--- /dev/null
+++ b/cmd/frostfs-lens/internal/tui/help.go
@@ -0,0 +1,101 @@
+package tui
+
+import (
+ _ "embed"
+ "fmt"
+ "strings"
+
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+var (
+ //go:embed help-pages/hotkeys.txt
+ hotkeysHelpText string
+
+ //go:embed help-pages/searching.txt
+ searchingHelpText string
+)
+
+type HelpPage struct {
+ *tview.Box
+ pages []*tview.TextView
+ currentPage int
+
+ filters []string
+ filterHints map[string]string
+}
+
+func NewHelpPage(filters []string, hints map[string]string) *HelpPage {
+ hp := &HelpPage{
+ Box: tview.NewBox(),
+ filters: filters,
+ filterHints: hints,
+ }
+
+ page := tview.NewTextView().
+ SetDynamicColors(true).
+ SetText(hotkeysHelpText)
+ hp.addPage(page)
+
+ page = tview.NewTextView().
+ SetDynamicColors(true).
+ SetText(fmt.Sprintf(searchingHelpText, hp.getFiltersText()))
+ hp.addPage(page)
+
+ return hp
+}
+
+func (hp *HelpPage) addPage(page *tview.TextView) {
+ hp.pages = append(hp.pages, page)
+}
+
+func (hp *HelpPage) getFiltersText() string {
+ if len(hp.filters) == 0 {
+ return "\t\tNo filters defined.\n"
+ }
+
+ filtersText := strings.Builder{}
+ gapSize := 4
+
+ tagMaxWidth := 3
+ for _, filter := range hp.filters {
+ tagMaxWidth = max(tagMaxWidth, len(filter))
+ }
+ filtersText.WriteString("\t\t[yellow::b]Tag")
+ filtersText.WriteString(strings.Repeat(" ", gapSize))
+ filtersText.WriteString("\tValue[-::-]\n\n")
+
+ for _, filter := range hp.filters {
+ filtersText.WriteString("\t\t")
+ filtersText.WriteString(filter)
+ filtersText.WriteString(strings.Repeat(" ", tagMaxWidth-len(filter)+gapSize))
+ filtersText.WriteString(hp.filterHints[filter])
+ filtersText.WriteRune('\n')
+ }
+
+ return filtersText.String()
+}
+
+func (hp *HelpPage) Draw(screen tcell.Screen) {
+ x, y, width, height := hp.GetInnerRect()
+ hp.pages[hp.currentPage].SetRect(x+1, y+1, width-2, height-2)
+ hp.pages[hp.currentPage].Draw(screen)
+}
+
+func (hp *HelpPage) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
+ return hp.WrapInputHandler(func(event *tcell.EventKey, _ func(tview.Primitive)) {
+ if event.Key() == tcell.KeyEnter {
+ hp.currentPage++
+ hp.currentPage %= len(hp.pages)
+ return
+ }
+ hp.pages[hp.currentPage].InputHandler()(event, func(tview.Primitive) {})
+ })
+}
+
+func (hp *HelpPage) MouseHandler() func(action tview.MouseAction, event *tcell.EventMouse, setFocus func(p tview.Primitive)) (consumed bool, capture tview.Primitive) {
+ return hp.WrapMouseHandler(func(action tview.MouseAction, event *tcell.EventMouse, _ func(tview.Primitive)) (consumed bool, capture tview.Primitive) {
+ return hp.pages[hp.currentPage].MouseHandler()(action, event, func(tview.Primitive) {})
+ })
+}
diff --git a/cmd/frostfs-lens/internal/tui/ui.go b/cmd/frostfs-lens/internal/tui/ui.go
index 701f2b331..bcc082821 100644
--- a/cmd/frostfs-lens/internal/tui/ui.go
+++ b/cmd/frostfs-lens/internal/tui/ui.go
@@ -60,6 +60,8 @@ type UI struct {
loadingBar *LoadingBar
helpBar *tview.TextView
+ helpPage *HelpPage
+
searchErrorBar *tview.TextView
isSearching bool
@@ -275,7 +277,17 @@ func (ui *UI) draw(screen tcell.Screen) {
switch {
case ui.isShowingHelp:
- pageToDraw = ui.pageStub
+ if ui.helpPage == nil {
+ var filters []string
+ for f := range ui.filters {
+ filters = append(filters, f)
+ }
+ for f := range ui.compositeFilters {
+ filters = append(filters, f)
+ }
+ ui.helpPage = NewHelpPage(filters, ui.filterHints)
+ }
+ pageToDraw = ui.helpPage
case ui.mountedPage != nil:
pageToDraw = ui.mountedPage
default:
@@ -429,6 +441,7 @@ func (ui *UI) handleInputOnShowingHelp(event *tcell.EventKey) {
case k == tcell.KeyRune && r == 'q':
ui.stop()
default:
+ ui.helpPage.InputHandler()(event, func(tview.Primitive) {})
}
}
From 371d97f61adc7cc74da815764a75a5438e865eda Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 19 Aug 2024 18:02:11 +0300
Subject: [PATCH 022/655] [#1223] lens/tui: Add TUI app for write cache
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/writecache/root.go | 2 +-
cmd/frostfs-lens/internal/writecache/tui.go | 79 ++++++++++++++++++++
2 files changed, 80 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/writecache/tui.go
diff --git a/cmd/frostfs-lens/internal/writecache/root.go b/cmd/frostfs-lens/internal/writecache/root.go
index eb3b325b6..d7d6db240 100644
--- a/cmd/frostfs-lens/internal/writecache/root.go
+++ b/cmd/frostfs-lens/internal/writecache/root.go
@@ -17,5 +17,5 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD)
+ Root.AddCommand(listCMD, inspectCMD, tuiCMD)
}
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
new file mode 100644
index 000000000..6b7532b08
--- /dev/null
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -0,0 +1,79 @@
+package writecache
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/writecache"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+var tuiCMD = &cobra.Command{
+ Use: "explore",
+ Short: "Write cache exploration with a terminal UI",
+ Long: `Launch a terminal UI to explore write cache and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+`,
+ Run: tuiFunc,
+}
+
+var initialPrompt string
+
+func init() {
+ common.AddComponentPathFlag(tuiCMD, &vPath)
+
+ tuiCMD.Flags().StringVar(
+ &initialPrompt,
+ "filter",
+ "",
+ "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+ )
+}
+
+func tuiFunc(cmd *cobra.Command, _ []string) {
+ common.ExitOnErr(cmd, runTUI(cmd))
+}
+
+func runTUI(cmd *cobra.Command) error {
+ db, err := openDB(false)
+ if err != nil {
+ return fmt.Errorf("couldn't open database: %w", err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ app := tview.NewApplication()
+ ui := tui.NewUI(ctx, app, db, schema.WritecacheParser, nil)
+
+ _ = ui.AddFilter("cid", tui.CIDParser, "CID")
+ _ = ui.AddFilter("oid", tui.OIDParser, "OID")
+ _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+
+ err = ui.WithPrompt(initialPrompt)
+ if err != nil {
+ return fmt.Errorf("invalid filter prompt: %w", err)
+ }
+
+ app.SetRoot(ui, true).SetFocus(ui)
+ return app.Run()
+}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
From 7768a482b595e578570f6b3c705b6f7754705fab Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 22 Aug 2024 15:07:51 +0300
Subject: [PATCH 023/655] [#1223] lens/tui: Add TUI app for blobovnicza
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/blobovnicza/root.go | 2 +-
cmd/frostfs-lens/internal/blobovnicza/tui.go | 79 ++++++++++++++
.../internal/schema/blobovnicza/parsers.go | 96 +++++++++++++++++
.../internal/schema/blobovnicza/types.go | 101 ++++++++++++++++++
4 files changed, 277 insertions(+), 1 deletion(-)
create mode 100644 cmd/frostfs-lens/internal/blobovnicza/tui.go
create mode 100644 cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
create mode 100644 cmd/frostfs-lens/internal/schema/blobovnicza/types.go
diff --git a/cmd/frostfs-lens/internal/blobovnicza/root.go b/cmd/frostfs-lens/internal/blobovnicza/root.go
index 0a0cd955d..9d8ef3dad 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/root.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/root.go
@@ -19,7 +19,7 @@ var Root = &cobra.Command{
}
func init() {
- Root.AddCommand(listCMD, inspectCMD)
+ Root.AddCommand(listCMD, inspectCMD, tuiCMD)
}
func openBlobovnicza(cmd *cobra.Command) *blobovnicza.Blobovnicza {
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
new file mode 100644
index 000000000..eb4a5ff59
--- /dev/null
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -0,0 +1,79 @@
+package blobovnicza
+
+import (
+ "context"
+ "fmt"
+
+ common "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal"
+ schema "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/blobovnicza"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
+ "github.com/rivo/tview"
+ "github.com/spf13/cobra"
+ "go.etcd.io/bbolt"
+)
+
+var tuiCMD = &cobra.Command{
+ Use: "explore",
+ Short: "Blobovnicza exploration with a terminal UI",
+ Long: `Launch a terminal UI to explore blobovnicza and search for data.
+
+Available search filters:
+- cid CID
+- oid OID
+- addr CID/OID
+`,
+ Run: tuiFunc,
+}
+
+var initialPrompt string
+
+func init() {
+ common.AddComponentPathFlag(tuiCMD, &vPath)
+
+ tuiCMD.Flags().StringVar(
+ &initialPrompt,
+ "filter",
+ "",
+ "Filter prompt to start with, format 'tag:value [+ tag:value]...'",
+ )
+}
+
+func tuiFunc(cmd *cobra.Command, _ []string) {
+ common.ExitOnErr(cmd, runTUI(cmd))
+}
+
+func runTUI(cmd *cobra.Command) error {
+ db, err := openDB(false)
+ if err != nil {
+ return fmt.Errorf("couldn't open database: %w", err)
+ }
+ defer db.Close()
+
+ ctx, cancel := context.WithCancel(cmd.Context())
+ defer cancel()
+
+ app := tview.NewApplication()
+ ui := tui.NewUI(ctx, app, db, schema.BlobovniczaParser, nil)
+
+ _ = ui.AddFilter("cid", tui.CIDParser, "CID")
+ _ = ui.AddFilter("oid", tui.OIDParser, "OID")
+ _ = ui.AddCompositeFilter("addr", tui.AddressParser, "CID/OID")
+
+ err = ui.WithPrompt(initialPrompt)
+ if err != nil {
+ return fmt.Errorf("invalid filter prompt: %w", err)
+ }
+
+ app.SetRoot(ui, true).SetFocus(ui)
+ return app.Run()
+}
+
+func openDB(writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
new file mode 100644
index 000000000..02b6cf414
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/blobovnicza/parsers.go
@@ -0,0 +1,96 @@
+package blobovnicza
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/mr-tron/base58"
+)
+
+var BlobovniczaParser = common.WithFallback(
+ common.Any(
+ MetaBucketParser,
+ BucketParser,
+ ),
+ common.RawParser.ToFallbackParser(),
+)
+
+func MetaBucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, errors.New("not a bucket")
+ }
+
+ if string(key) != "META" {
+ return nil, nil, errors.New("invalid bucket name")
+ }
+
+ return &MetaBucket{}, MetaRecordParser, nil
+}
+
+func MetaRecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ var r MetaRecord
+
+ if len(key) == 0 {
+ return nil, nil, errors.New("invalid key")
+ }
+
+ r.label = string(key)
+ r.count = binary.LittleEndian.Uint64(value)
+
+ return &r, nil, nil
+}
+
+func BucketParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ if value != nil {
+ return nil, nil, errors.New("not a bucket")
+ }
+
+ size, n := binary.Varint(key)
+ if n <= 0 {
+ return nil, nil, errors.New("invalid size")
+ }
+
+ return &Bucket{size: size}, RecordParser, nil
+}
+
+func RecordParser(key, value []byte) (common.SchemaEntry, common.Parser, error) {
+ parts := strings.Split(string(key), "/")
+
+ if len(parts) != 2 {
+ return nil, nil, errors.New("invalid key, expected address string /")
+ }
+
+ cnrRaw, err := base58.Decode(parts[0])
+ if err != nil {
+ return nil, nil, errors.New("can't decode CID string")
+ }
+ objRaw, err := base58.Decode(parts[1])
+ if err != nil {
+ return nil, nil, errors.New("can't decode OID string")
+ }
+
+ cnr := cid.ID{}
+ if err := cnr.Decode(cnrRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode CID: %w", err)
+ }
+ obj := oid.ID{}
+ if err := obj.Decode(objRaw); err != nil {
+ return nil, nil, fmt.Errorf("can't decode OID: %w", err)
+ }
+
+ var r Record
+
+ r.addr.SetContainer(cnr)
+ r.addr.SetObject(obj)
+
+ if err := r.object.Unmarshal(value); err != nil {
+ return nil, nil, errors.New("can't unmarshal object")
+ }
+
+ return &r, nil, nil
+}
diff --git a/cmd/frostfs-lens/internal/schema/blobovnicza/types.go b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
new file mode 100644
index 000000000..c7ed08cdd
--- /dev/null
+++ b/cmd/frostfs-lens/internal/schema/blobovnicza/types.go
@@ -0,0 +1,101 @@
+package blobovnicza
+
+import (
+ "fmt"
+ "strconv"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/schema/common"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/gdamore/tcell/v2"
+ "github.com/rivo/tview"
+)
+
+type (
+ MetaBucket struct{}
+
+ MetaRecord struct {
+ label string
+ count uint64
+ }
+
+ Bucket struct {
+ size int64
+ }
+
+ Record struct {
+ addr oid.Address
+ object objectSDK.Object
+ }
+)
+
+func (b *MetaBucket) String() string {
+ return common.FormatSimple("META", tcell.ColorLime)
+}
+
+func (b *MetaBucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *MetaBucket) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (r *MetaRecord) String() string {
+ return fmt.Sprintf("%-11s %c %d", r.label, tview.Borders.Vertical, r.count)
+}
+
+func (r *MetaRecord) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *MetaRecord) Filter(string, any) common.FilterResult {
+ return common.No
+}
+
+func (b *Bucket) String() string {
+ return common.FormatSimple(strconv.FormatInt(b.size, 10), tcell.ColorLime)
+}
+
+func (b *Bucket) DetailedString() string {
+ return spew.Sdump(*b)
+}
+
+func (b *Bucket) Filter(typ string, _ any) common.FilterResult {
+ switch typ {
+ case "cid":
+ return common.Maybe
+ case "oid":
+ return common.Maybe
+ default:
+ return common.No
+ }
+}
+
+func (r *Record) String() string {
+ return fmt.Sprintf(
+ "CID %s OID %s %c Object {...}",
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Container()), tcell.ColorAqua),
+ common.FormatSimple(fmt.Sprintf("%-44s", r.addr.Object()), tcell.ColorAqua),
+ tview.Borders.Vertical,
+ )
+}
+
+func (r *Record) DetailedString() string {
+ return spew.Sdump(*r)
+}
+
+func (r *Record) Filter(typ string, val any) common.FilterResult {
+ switch typ {
+ case "cid":
+ id := val.(cid.ID)
+ return common.IfThenElse(r.addr.Container().Equals(id), common.Yes, common.No)
+ case "oid":
+ id := val.(oid.ID)
+ return common.IfThenElse(r.addr.Object().Equals(id), common.Yes, common.No)
+ default:
+ return common.No
+ }
+}
From b3deb893ba26aa3ec9ce93213cef16243cc0f58d Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Fri, 30 Aug 2024 12:09:14 +0300
Subject: [PATCH 024/655] [#1310] object: Move target initialization to
separate package
* Split the logic of write target initialization to different packages;
* Refactor patch and put services: since both services initialize the target
themselves.
Signed-off-by: Airat Arifullin
---
cmd/frostfs-node/cache.go | 6 +-
cmd/frostfs-node/object.go | 17 +-
.../object/{put => common/target}/builder.go | 2 +-
.../object/{put => common/target}/pool.go | 2 +-
pkg/services/object/common/target/target.go | 170 +++++++++++
.../{put => common/target}/validation.go | 2 +-
.../object/{put => common/writer}/common.go | 42 +--
.../writer.go => common/writer/dispatcher.go} | 2 +-
.../{put => common/writer}/distributed.go | 70 ++---
.../object/{put => common/writer}/ec.go | 104 +++----
.../object/{put => common/writer}/local.go | 14 +-
.../object/{put => common/writer}/remote.go | 12 +-
pkg/services/object/common/writer/writer.go | 183 +++++++++++
pkg/services/object/patch/service.go | 25 +-
pkg/services/object/patch/streamer.go | 28 +-
pkg/services/object/patch/util.go | 19 --
pkg/services/object/put/service.go | 114 ++-----
pkg/services/object/put/single.go | 64 ++--
pkg/services/object/put/streamer.go | 289 ++----------------
pkg/services/object/put/v2/streamer.go | 9 +-
pkg/services/replicator/process.go | 4 +-
pkg/services/replicator/replicator.go | 6 +-
22 files changed, 599 insertions(+), 585 deletions(-)
rename pkg/services/object/{put => common/target}/builder.go (98%)
rename pkg/services/object/{put => common/target}/pool.go (96%)
create mode 100644 pkg/services/object/common/target/target.go
rename pkg/services/object/{put => common/target}/validation.go (99%)
rename pkg/services/object/{put => common/writer}/common.go (65%)
rename pkg/services/object/{put/writer.go => common/writer/dispatcher.go} (97%)
rename pkg/services/object/{put => common/writer}/distributed.go (57%)
rename pkg/services/object/{put => common/writer}/ec.go (69%)
rename pkg/services/object/{put => common/writer}/local.go (81%)
rename pkg/services/object/{put => common/writer}/remote.go (92%)
create mode 100644 pkg/services/object/common/writer/writer.go
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index 81d552729..57f65d873 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -7,7 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -305,11 +305,11 @@ type ttlMaxObjectSizeCache struct {
mtx sync.RWMutex
lastUpdated time.Time
lastSize uint64
- src putsvc.MaxSizeSource
+ src objectwriter.MaxSizeSource
metrics cacheMetrics
}
-func newCachedMaxObjectSizeSource(src putsvc.MaxSizeSource) putsvc.MaxSizeSource {
+func newCachedMaxObjectSizeSource(src objectwriter.MaxSizeSource) objectwriter.MaxSizeSource {
return &ttlMaxObjectSizeCache{
src: src,
metrics: metrics.NewCacheMetrics("max_object_size"),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 467c5901b..610e2c363 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -24,6 +24,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl"
v2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/acl/v2"
objectAPE "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/ape"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
deletesvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete"
deletesvcV2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/delete/v2"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
@@ -188,7 +189,7 @@ func initObjectService(c *cfg) {
sDeleteV2 := createDeleteServiceV2(sDelete)
- sPatch := createPatchSvc(sGet, sPut, keyStorage)
+ sPatch := createPatchSvc(sGet, sPut)
// build service pipeline
// grpc | audit | | signature | response | acl | ape | split
@@ -326,7 +327,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
),
replicator.WithLocalStorage(ls),
replicator.WithRemoteSender(
- putsvc.NewRemoteSender(keyStorage, cache),
+ objectwriter.NewRemoteSender(keyStorage, cache),
),
replicator.WithRemoteGetter(
getsvc.NewRemoteGetter(c.clientCache, c.netMapSource, keyStorage),
@@ -338,7 +339,7 @@ func createReplicator(c *cfg, keyStorage *util.KeyStorage, cache *cache.ClientCa
func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetcher) *putsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
- var os putsvc.ObjectStorage = engineWithoutNotifications{
+ var os objectwriter.ObjectStorage = engineWithoutNotifications{
engine: ls,
}
@@ -352,9 +353,9 @@ func createPutSvc(c *cfg, keyStorage *util.KeyStorage, irFetcher *cachedIRFetche
c,
c.cfgNetmap.state,
irFetcher,
- putsvc.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
- putsvc.WithLogger(c.log),
- putsvc.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
+ objectwriter.WithWorkerPools(c.cfgObject.pool.putRemote, c.cfgObject.pool.putLocal),
+ objectwriter.WithLogger(c.log),
+ objectwriter.WithVerifySessionTokenIssuer(!c.cfgObject.skipSessionTokenIssuerVerification),
)
}
@@ -362,8 +363,8 @@ func createPutSvcV2(sPut *putsvc.Service, keyStorage *util.KeyStorage) *putsvcV2
return putsvcV2.NewService(sPut, keyStorage)
}
-func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service, keyStorage *util.KeyStorage) *patchsvc.Service {
- return patchsvc.NewService(keyStorage, sGet, sPut)
+func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Service {
+ return patchsvc.NewService(sPut.Config, sGet)
}
func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
diff --git a/pkg/services/object/put/builder.go b/pkg/services/object/common/target/builder.go
similarity index 98%
rename from pkg/services/object/put/builder.go
rename to pkg/services/object/common/target/builder.go
index 64baf4e05..ea68365a7 100644
--- a/pkg/services/object/put/builder.go
+++ b/pkg/services/object/common/target/builder.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"context"
diff --git a/pkg/services/object/put/pool.go b/pkg/services/object/common/target/pool.go
similarity index 96%
rename from pkg/services/object/put/pool.go
rename to pkg/services/object/common/target/pool.go
index ebe214caf..71da305ad 100644
--- a/pkg/services/object/put/pool.go
+++ b/pkg/services/object/common/target/pool.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"sync"
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
new file mode 100644
index 000000000..00080ace6
--- /dev/null
+++ b/pkg/services/object/common/target/target.go
@@ -0,0 +1,170 @@
+package target
+
+import (
+ "errors"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+)
+
+func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ // prepare needed put parameters
+ if err := preparePrm(prm); err != nil {
+ return nil, fmt.Errorf("could not prepare put parameters: %w", err)
+ }
+
+ if prm.Header.Signature() != nil {
+ return newUntrustedTarget(prm)
+ }
+ return newTrustedTarget(prm)
+}
+
+func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ nodeKey, err := prm.Config.KeyStorage.GetKey(nil)
+ if err != nil {
+ return nil, err
+ }
+ prm.SignRequestPrivateKey = nodeKey
+ }
+
+ // prepare untrusted-Put object target
+ return &validatingPreparedTarget{
+ nextTarget: newInMemoryObjectBuilder(objectwriter.New(prm)),
+ fmt: prm.Config.FormatValidator,
+
+ maxPayloadSz: maxPayloadSz,
+ }, nil
+}
+
+func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
+ if maxPayloadSz == 0 {
+ return nil, errors.New("could not obtain max object size parameter")
+ }
+
+ sToken := prm.Common.SessionToken()
+
+ // prepare trusted-Put object target
+
+ // get private token from local storage
+ var sessionInfo *util.SessionInfo
+
+ if sToken != nil {
+ sessionInfo = &util.SessionInfo{
+ ID: sToken.ID(),
+ Owner: sToken.Issuer(),
+ }
+ }
+
+ key, err := prm.Config.KeyStorage.GetKey(sessionInfo)
+ if err != nil {
+ return nil, fmt.Errorf("could not receive session key: %w", err)
+ }
+
+ // In case session token is missing, the line above returns the default key.
+ // If it isn't owner key, replication attempts will fail, thus this check.
+ ownerObj := prm.Header.OwnerID()
+ if ownerObj.IsEmpty() {
+ return nil, errors.New("missing object owner")
+ }
+
+ if sToken == nil {
+ var ownerSession user.ID
+ user.IDFromKey(&ownerSession, key.PublicKey)
+
+ if !ownerObj.Equals(ownerSession) {
+ return nil, errors.New("session token is missing but object owner id is different from the default key")
+ }
+ } else {
+ if !ownerObj.Equals(sessionInfo.Owner) {
+ return nil, fmt.Errorf("different token issuer and object owner identifiers %s/%s", sessionInfo.Owner, ownerObj)
+ }
+ }
+
+ if prm.SignRequestPrivateKey == nil {
+ prm.SignRequestPrivateKey = key
+ }
+
+ return &validatingTarget{
+ fmt: prm.Config.FormatValidator,
+ nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
+ Key: key,
+ NextTargetInit: func() transformer.ObjectWriter { return objectwriter.New(prm) },
+ NetworkState: prm.Config.NetworkState,
+ MaxSize: maxPayloadSz,
+ WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.Container),
+ SessionToken: sToken,
+ }),
+ }, nil
+}
+
+func preparePrm(prm *objectwriter.Params) error {
+ var err error
+
+ // get latest network map
+ nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
+ if err != nil {
+ //return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
+ return fmt.Errorf("could not get latest network map: %w", err)
+ }
+
+ idCnr, ok := prm.Header.ContainerID()
+ if !ok {
+ return errors.New("missing container ID")
+ }
+
+ // get container to store the object
+ cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
+ if err != nil {
+ //return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
+ return fmt.Errorf("could not get container by ID: %w", err)
+ }
+
+ prm.Container = cnrInfo.Value
+
+ // add common options
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set processing container
+ placement.ForContainer(prm.Container),
+ )
+
+ if ech := prm.Header.ECHeader(); ech != nil {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(ech.Parent()),
+ )
+ } else if id, ok := prm.Header.ID(); ok {
+ prm.TraverseOpts = append(prm.TraverseOpts,
+ // set identifier of the processing object
+ placement.ForObject(id),
+ )
+ }
+
+ // create placement builder from network map
+ builder := placement.NewNetworkMapBuilder(nm)
+
+ if prm.Common.LocalOnly() {
+ // restrict success count to 1 stored copy (to local storage)
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.SuccessAfter(1))
+
+ // use local-only placement builder
+ builder = util.NewLocalPlacement(builder, prm.Config.NetmapKeys)
+ }
+
+ // set placement builder
+ prm.TraverseOpts = append(prm.TraverseOpts, placement.UseBuilder(builder))
+
+ return nil
+}
diff --git a/pkg/services/object/put/validation.go b/pkg/services/object/common/target/validation.go
similarity index 99%
rename from pkg/services/object/put/validation.go
rename to pkg/services/object/common/target/validation.go
index c2b078ef5..b29721d01 100644
--- a/pkg/services/object/put/validation.go
+++ b/pkg/services/object/common/target/validation.go
@@ -1,4 +1,4 @@
-package putsvc
+package target
import (
"bytes"
diff --git a/pkg/services/object/put/common.go b/pkg/services/object/common/writer/common.go
similarity index 65%
rename from pkg/services/object/put/common.go
rename to pkg/services/object/common/writer/common.go
index cbb7f5f33..6689557ee 100644
--- a/pkg/services/object/put/common.go
+++ b/pkg/services/object/common/writer/common.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -13,23 +13,23 @@ import (
"go.uber.org/zap"
)
-type nodeIterator struct {
- traversal
- cfg *cfg
+type NodeIterator struct {
+ Traversal
+ cfg *Config
}
-func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
- return &nodeIterator{
- traversal: traversal{
- opts: opts,
- mExclude: make(map[string]*bool),
+func (c *Config) NewNodeIterator(opts []placement.Option) *NodeIterator {
+ return &NodeIterator{
+ Traversal: Traversal{
+ Opts: opts,
+ Exclude: make(map[string]*bool),
},
cfg: c,
}
}
-func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
- traverser, err := placement.NewTraverser(n.traversal.opts...)
+func (n *NodeIterator) ForEachNode(ctx context.Context, f func(context.Context, NodeDescriptor) error) error {
+ traverser, err := placement.NewTraverser(n.Traversal.Opts...)
if err != nil {
return fmt.Errorf("could not create object placement traverser: %w", err)
}
@@ -56,10 +56,10 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context,
}
// perform additional container broadcast if needed
- if n.traversal.submitPrimaryPlacementFinish() {
- err := n.forEachNode(ctx, f)
+ if n.Traversal.submitPrimaryPlacementFinish() {
+ err := n.ForEachNode(ctx, f)
if err != nil {
- n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
+ n.cfg.Logger.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
// we don't fail primary operation because of broadcast failure
}
}
@@ -67,11 +67,11 @@ func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context,
return nil
}
-func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
+func (n *NodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, NodeDescriptor) error, resErr *atomic.Value) bool {
var wg sync.WaitGroup
for _, addr := range addrs {
- if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
+ if ok := n.Exclude[string(addr.PublicKey())]; ok != nil {
if *ok {
traverser.SubmitSuccess()
}
@@ -86,10 +86,10 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
if err := workerPool.Submit(func() {
defer wg.Done()
- err := f(ctx, nodeDesc{local: isLocal, info: addr})
+ err := f(ctx, NodeDescriptor{Local: isLocal, Info: addr})
if err != nil {
resErr.Store(err)
- svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
+ svcutil.LogServiceError(n.cfg.Logger, "PUT", addr.Addresses(), err)
return
}
@@ -97,7 +97,7 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
*item = true
}); err != nil {
wg.Done()
- svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
+ svcutil.LogWorkerPoolError(n.cfg.Logger, "PUT", err)
return true
}
@@ -105,7 +105,7 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
// in subsequent container broadcast. Note that we don't
// process this node during broadcast if primary placement
// on it failed.
- n.traversal.submitProcessed(addr, item)
+ n.Traversal.submitProcessed(addr, item)
}
wg.Wait()
@@ -113,6 +113,6 @@ func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.
return false
}
-func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
+func NeedAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}
diff --git a/pkg/services/object/put/writer.go b/pkg/services/object/common/writer/dispatcher.go
similarity index 97%
rename from pkg/services/object/put/writer.go
rename to pkg/services/object/common/writer/dispatcher.go
index 53eee6006..bb9a54ce9 100644
--- a/pkg/services/object/put/writer.go
+++ b/pkg/services/object/common/writer/dispatcher.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
diff --git a/pkg/services/object/put/distributed.go b/pkg/services/object/common/writer/distributed.go
similarity index 57%
rename from pkg/services/object/put/distributed.go
rename to pkg/services/object/common/writer/distributed.go
index 5176f7a54..f62934bed 100644
--- a/pkg/services/object/put/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -13,47 +13,47 @@ type preparedObjectTarget interface {
WriteObject(context.Context, *objectSDK.Object, object.ContentMeta) error
}
-type distributedTarget struct {
+type distributedWriter struct {
+ cfg *Config
+
placementOpts []placement.Option
obj *objectSDK.Object
objMeta object.ContentMeta
- *cfg
+ nodeTargetInitializer func(NodeDescriptor) preparedObjectTarget
- nodeTargetInitializer func(nodeDesc) preparedObjectTarget
-
- relay func(context.Context, nodeDesc) error
+ relay func(context.Context, NodeDescriptor) error
resetSuccessAfterOnBroadcast bool
}
-// parameters and state of container traversal.
-type traversal struct {
- opts []placement.Option
+// Traversal represents parameters and state of container traversal.
+type Traversal struct {
+ Opts []placement.Option
// need of additional broadcast after the object is saved
- extraBroadcastEnabled bool
+ ExtraBroadcastEnabled bool
// container nodes which was processed during the primary object placement
- mExclude map[string]*bool
+ Exclude map[string]*bool
- resetSuccessAfterOnBroadcast bool
+ ResetSuccessAfterOnBroadcast bool
}
// updates traversal parameters after the primary placement finish and
// returns true if additional container broadcast is needed.
-func (x *traversal) submitPrimaryPlacementFinish() bool {
- if x.extraBroadcastEnabled {
+func (x *Traversal) submitPrimaryPlacementFinish() bool {
+ if x.ExtraBroadcastEnabled {
// do not track success during container broadcast (best-effort)
- x.opts = append(x.opts, placement.WithoutSuccessTracking())
+ x.Opts = append(x.Opts, placement.WithoutSuccessTracking())
- if x.resetSuccessAfterOnBroadcast {
- x.opts = append(x.opts, placement.ResetSuccessAfter())
+ if x.ResetSuccessAfterOnBroadcast {
+ x.Opts = append(x.Opts, placement.ResetSuccessAfter())
}
// avoid 2nd broadcast
- x.extraBroadcastEnabled = false
+ x.ExtraBroadcastEnabled = false
return true
}
@@ -62,22 +62,22 @@ func (x *traversal) submitPrimaryPlacementFinish() bool {
}
// marks the container node as processed during the primary object placement.
-func (x *traversal) submitProcessed(n placement.Node, item *bool) {
- if x.extraBroadcastEnabled {
+func (x *Traversal) submitProcessed(n placement.Node, item *bool) {
+ if x.ExtraBroadcastEnabled {
key := string(n.PublicKey())
- if x.mExclude == nil {
- x.mExclude = make(map[string]*bool, 1)
+ if x.Exclude == nil {
+ x.Exclude = make(map[string]*bool, 1)
}
- x.mExclude[key] = item
+ x.Exclude[key] = item
}
}
-type nodeDesc struct {
- local bool
+type NodeDescriptor struct {
+ Local bool
- info placement.Node
+ Info placement.Node
}
// errIncompletePut is returned if processing on a container fails.
@@ -96,19 +96,19 @@ func (x errIncompletePut) Error() string {
}
// WriteObject implements the transformer.ObjectWriter interface.
-func (t *distributedTarget) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+func (t *distributedWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
t.obj = obj
var err error
- if t.objMeta, err = t.fmtValidator.ValidateContent(t.obj); err != nil {
+ if t.objMeta, err = t.cfg.FormatValidator.ValidateContent(t.obj); err != nil {
return fmt.Errorf("(%T) could not validate payload content: %w", t, err)
}
return t.iteratePlacement(ctx)
}
-func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
- if !node.local && t.relay != nil {
+func (t *distributedWriter) sendObject(ctx context.Context, node NodeDescriptor) error {
+ if !node.Local && t.relay != nil {
return t.relay(ctx, node)
}
@@ -121,11 +121,11 @@ func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error
return nil
}
-func (t *distributedTarget) iteratePlacement(ctx context.Context) error {
+func (t *distributedWriter) iteratePlacement(ctx context.Context) error {
id, _ := t.obj.ID()
- iter := t.cfg.newNodeIterator(append(t.placementOpts, placement.ForObject(id)))
- iter.extraBroadcastEnabled = needAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
- iter.resetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
- return iter.forEachNode(ctx, t.sendObject)
+ iter := t.cfg.NewNodeIterator(append(t.placementOpts, placement.ForObject(id)))
+ iter.ExtraBroadcastEnabled = NeedAdditionalBroadcast(t.obj, false /* Distributed target is for cluster-wide PUT */)
+ iter.ResetSuccessAfterOnBroadcast = t.resetSuccessAfterOnBroadcast
+ return iter.ForEachNode(ctx, t.sendObject)
}
diff --git a/pkg/services/object/put/ec.go b/pkg/services/object/common/writer/ec.go
similarity index 69%
rename from pkg/services/object/put/ec.go
rename to pkg/services/object/common/writer/ec.go
index 9980f6d61..fb0a8e4e5 100644
--- a/pkg/services/object/put/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -23,23 +23,23 @@ import (
"golang.org/x/sync/errgroup"
)
-var _ transformer.ObjectWriter = (*ecWriter)(nil)
+var _ transformer.ObjectWriter = (*ECWriter)(nil)
var errUnsupportedECObject = errors.New("object is not supported for erasure coding")
-type ecWriter struct {
- cfg *cfg
- placementOpts []placement.Option
- container containerSDK.Container
- key *ecdsa.PrivateKey
- commonPrm *svcutil.CommonPrm
- relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+type ECWriter struct {
+ Config *Config
+ PlacementOpts []placement.Option
+ Container containerSDK.Container
+ Key *ecdsa.PrivateKey
+ CommonPrm *svcutil.CommonPrm
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
- objMeta object.ContentMeta
- objMetaValid bool
+ ObjectMeta object.ContentMeta
+ ObjectMetaValid bool
}
-func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
relayed, err := e.relayIfNotContainerNode(ctx, obj)
if err != nil {
return err
@@ -53,11 +53,11 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
return errUnsupportedECObject
}
- if !e.objMetaValid {
- if e.objMeta, err = e.cfg.fmtValidator.ValidateContent(obj); err != nil {
+ if !e.ObjectMetaValid {
+ if e.ObjectMeta, err = e.Config.FormatValidator.ValidateContent(obj); err != nil {
return fmt.Errorf("(%T) could not validate payload content: %w", e, err)
}
- e.objMetaValid = true
+ e.ObjectMetaValid = true
}
if obj.ECHeader() != nil {
@@ -66,8 +66,8 @@ func (e *ecWriter) WriteObject(ctx context.Context, obj *objectSDK.Object) error
return e.writeRawObject(ctx, obj)
}
-func (e *ecWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
- if e.relay == nil {
+func (e *ECWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.Object) (bool, error) {
+ if e.Relay == nil {
return false, nil
}
currentNodeIsContainerNode, err := e.currentNodeIsContainerNode()
@@ -90,8 +90,8 @@ func (e *ecWriter) relayIfNotContainerNode(ctx context.Context, obj *objectSDK.O
return true, nil
}
-func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
- t, err := placement.NewTraverser(e.placementOpts...)
+func (e *ECWriter) currentNodeIsContainerNode() (bool, error) {
+ t, err := placement.NewTraverser(e.PlacementOpts...)
if err != nil {
return false, err
}
@@ -101,7 +101,7 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
break
}
for _, node := range nodes {
- if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
return true, nil
}
}
@@ -109,8 +109,8 @@ func (e *ecWriter) currentNodeIsContainerNode() (bool, error) {
return false, nil
}
-func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...)
+func (e *ECWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index uint32) error {
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -126,18 +126,18 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
var info client.NodeInfo
client.NodeInfoFromNetmapElement(&info, node)
- c, err := e.cfg.clientConstructor.Get(info)
+ c, err := e.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
completed := make(chan interface{})
- if poolErr := e.cfg.remotePool.Submit(func() {
+ if poolErr := e.Config.RemotePool.Submit(func() {
defer close(completed)
- err = e.relay(ctx, info, c)
+ err = e.Relay(ctx, info, c)
}); poolErr != nil {
close(completed)
- svcutil.LogWorkerPoolError(e.cfg.log, "PUT", poolErr)
+ svcutil.LogWorkerPoolError(e.Config.Logger, "PUT", poolErr)
return poolErr
}
<-completed
@@ -145,7 +145,7 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
if err == nil {
return nil
}
- e.cfg.log.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
+ e.Config.Logger.Logger.Warn(logs.ECFailedToSendToContainerNode, zap.Stringers("address_group", info.AddressGroup()))
lastErr = err
}
}
@@ -157,12 +157,12 @@ func (e *ecWriter) relayToContainerNode(ctx context.Context, objID oid.ID, index
}
}
-func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
- if e.commonPrm.LocalOnly() {
+func (e *ECWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error {
+ if e.CommonPrm.LocalOnly() {
return e.writePartLocal(ctx, obj)
}
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(obj.ECHeader().Parent()))...)
if err != nil {
return err
}
@@ -187,18 +187,18 @@ func (e *ecWriter) writeECPart(ctx context.Context, obj *objectSDK.Object) error
return nil
}
-func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) error {
// now only single EC policy is supported
- c, err := erasurecode.NewConstructor(policy.ECDataCount(e.container.PlacementPolicy()), policy.ECParityCount(e.container.PlacementPolicy()))
+ c, err := erasurecode.NewConstructor(policy.ECDataCount(e.Container.PlacementPolicy()), policy.ECParityCount(e.Container.PlacementPolicy()))
if err != nil {
return err
}
- parts, err := c.Split(obj, e.key)
+ parts, err := c.Split(obj, e.Key)
if err != nil {
return err
}
objID, _ := obj.ID()
- t, err := placement.NewTraverser(append(e.placementOpts, placement.ForObject(objID))...)
+ t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
@@ -230,7 +230,7 @@ func (e *ecWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
return nil
}
-func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
+func (e *ECWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx int, nodes []placement.Node, visited []atomic.Bool) error {
select {
case <-ctx.Done():
return ctx.Err()
@@ -243,7 +243,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())), zap.Error(err))
@@ -267,7 +267,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -291,7 +291,7 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
if err == nil {
return nil
}
- e.cfg.log.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
+ e.Config.Logger.Warn(logs.ECFailedToSaveECPart, zap.Stringer("part_address", object.AddressOf(obj)),
zap.Stringer("parent_address", obj.ECHeader().Parent()), zap.Int("part_index", partIdx),
zap.String("node", hex.EncodeToString(node.PublicKey())),
zap.Error(err))
@@ -300,22 +300,22 @@ func (e *ecWriter) writePart(ctx context.Context, obj *objectSDK.Object, partIdx
return fmt.Errorf("failed to save EC chunk %s to any node", object.AddressOf(obj))
}
-func (e *ecWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
- if e.cfg.netmapKeys.IsLocalKey(node.PublicKey()) {
+func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+ if e.Config.NetmapKeys.IsLocalKey(node.PublicKey()) {
return e.writePartLocal(ctx, obj)
}
return e.writePartRemote(ctx, obj, node)
}
-func (e *ecWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
+func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
var err error
- localTarget := localTarget{
- storage: e.cfg.localStore,
+ localTarget := LocalTarget{
+ Storage: e.Config.LocalStore,
}
completed := make(chan interface{})
- if poolErr := e.cfg.localPool.Submit(func() {
+ if poolErr := e.Config.LocalPool.Submit(func() {
defer close(completed)
- err = localTarget.WriteObject(ctx, obj, e.objMeta)
+ err = localTarget.WriteObject(ctx, obj, e.ObjectMeta)
}); poolErr != nil {
close(completed)
return poolErr
@@ -324,22 +324,22 @@ func (e *ecWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) er
return err
}
-func (e *ecWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
+func (e *ECWriter) writePartRemote(ctx context.Context, obj *objectSDK.Object, node placement.Node) error {
var clientNodeInfo client.NodeInfo
client.NodeInfoFromNetmapElement(&clientNodeInfo, node)
- remoteTaget := remoteTarget{
- privateKey: e.key,
- clientConstructor: e.cfg.clientConstructor,
- commonPrm: e.commonPrm,
+ remoteTarget := remoteWriter{
+ privateKey: e.Key,
+ clientConstructor: e.Config.ClientConstructor,
+ commonPrm: e.CommonPrm,
 nodeInfo: clientNodeInfo,
 }
 var err error
 completed := make(chan interface{})
- if poolErr := e.cfg.remotePool.Submit(func() {
+ if poolErr := e.Config.RemotePool.Submit(func() {
 defer close(completed)
- err = remoteTaget.WriteObject(ctx, obj, e.objMeta)
+ err = remoteTarget.WriteObject(ctx, obj, e.ObjectMeta)
}); poolErr != nil {
close(completed)
return poolErr
diff --git a/pkg/services/object/put/local.go b/pkg/services/object/common/writer/local.go
similarity index 81%
rename from pkg/services/object/put/local.go
rename to pkg/services/object/common/writer/local.go
index 54649adc7..02fd25b9e 100644
--- a/pkg/services/object/put/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -24,19 +24,19 @@ type ObjectStorage interface {
IsLocked(context.Context, oid.Address) (bool, error)
}
-type localTarget struct {
- storage ObjectStorage
+type LocalTarget struct {
+ Storage ObjectStorage
}
-func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
switch meta.Type() {
case objectSDK.TypeTombstone:
- err := t.storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not delete objects from tombstone locally: %w", err)
}
case objectSDK.TypeLock:
- err := t.storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
+ err := t.Storage.Lock(ctx, objectCore.AddressOf(obj), meta.Objects())
if err != nil {
return fmt.Errorf("could not lock object from lock objects locally: %w", err)
}
@@ -44,7 +44,7 @@ func (t localTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.storage.Put(ctx, obj); err != nil {
+ if err := t.Storage.Put(ctx, obj); err != nil {
return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
}
return nil
diff --git a/pkg/services/object/put/remote.go b/pkg/services/object/common/writer/remote.go
similarity index 92%
rename from pkg/services/object/put/remote.go
rename to pkg/services/object/common/writer/remote.go
index ee8d64e7a..697613ff7 100644
--- a/pkg/services/object/put/remote.go
+++ b/pkg/services/object/common/writer/remote.go
@@ -1,4 +1,4 @@
-package putsvc
+package writer
import (
"context"
@@ -16,7 +16,7 @@ import (
"google.golang.org/grpc/status"
)
-type remoteTarget struct {
+type remoteWriter struct {
privateKey *ecdsa.PrivateKey
commonPrm *util.CommonPrm
@@ -41,7 +41,7 @@ type RemotePutPrm struct {
obj *objectSDK.Object
}
-func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
+func (t *remoteWriter) WriteObject(ctx context.Context, obj *objectSDK.Object, _ objectcore.ContentMeta) error {
c, err := t.clientConstructor.Get(t.nodeInfo)
if err != nil {
return fmt.Errorf("(%T) could not create SDK client %s: %w", t, t.nodeInfo, err)
@@ -64,7 +64,7 @@ func (t *remoteTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, _
return t.putStream(ctx, prm)
}
-func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteWriter) putStream(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObject(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -72,7 +72,7 @@ func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObje
return nil
}
-func (t *remoteTarget) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
+func (t *remoteWriter) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) error {
_, err := internalclient.PutObjectSingle(ctx, prm)
if err != nil {
return fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
@@ -113,7 +113,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
return err
}
- t := &remoteTarget{
+ t := &remoteWriter{
privateKey: key,
clientConstructor: s.clientConstructor,
}
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
new file mode 100644
index 000000000..3d50da988
--- /dev/null
+++ b/pkg/services/object/common/writer/writer.go
@@ -0,0 +1,183 @@
+package writer
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
+ objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
+)
+
+type MaxSizeSource interface {
+ // MaxObjectSize returns maximum payload size
+ // of physically stored object in system.
+ //
+ // Must return 0 if value can not be obtained.
+ MaxObjectSize() uint64
+}
+
+type ClientConstructor interface {
+ Get(client.NodeInfo) (client.MultiAddressClient, error)
+}
+
+type InnerRing interface {
+ InnerRingKeys() ([][]byte, error)
+}
+
+type FormatValidatorConfig interface {
+ VerifySessionTokenIssuer() bool
+}
+
+// Config represents a set of static parameters that are established during
+// the initialization phase of all services.
+type Config struct {
+ KeyStorage *objutil.KeyStorage
+
+ MaxSizeSrc MaxSizeSource
+
+ LocalStore ObjectStorage
+
+ ContainerSource container.Source
+
+ NetmapSource netmap.Source
+
+ RemotePool, LocalPool util.WorkerPool
+
+ NetmapKeys netmap.AnnouncedKeys
+
+ FormatValidator *object.FormatValidator
+
+ NetworkState netmap.State
+
+ ClientConstructor ClientConstructor
+
+ Logger *logger.Logger
+
+ VerifySessionTokenIssuer bool
+}
+
+type Option func(*Config)
+
+func WithWorkerPools(remote, local util.WorkerPool) Option {
+ return func(c *Config) {
+ c.RemotePool, c.LocalPool = remote, local
+ }
+}
+
+func WithLogger(l *logger.Logger) Option {
+ return func(c *Config) {
+ c.Logger = l
+ }
+}
+
+func WithVerifySessionTokenIssuer(v bool) Option {
+ return func(c *Config) {
+ c.VerifySessionTokenIssuer = v
+ }
+}
+
+func (c *Config) getWorkerPool(pub []byte) (util.WorkerPool, bool) {
+ if c.NetmapKeys.IsLocalKey(pub) {
+ return c.LocalPool, true
+ }
+ return c.RemotePool, false
+}
+
+type Params struct {
+ Config *Config
+
+ Common *objutil.CommonPrm
+
+ Header *objectSDK.Object
+
+ Container containerSDK.Container
+
+ TraverseOpts []placement.Option
+
+ Relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
+
+ SignRequestPrivateKey *ecdsa.PrivateKey
+}
+
+func New(prm *Params) transformer.ObjectWriter {
+ if container.IsECContainer(prm.Container) && object.IsECSupported(prm.Header) {
+ return newECWriter(prm)
+ }
+ return newDefaultObjectWriter(prm, false)
+}
+
+func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.ObjectWriter {
+ var relay func(context.Context, NodeDescriptor) error
+ if prm.Relay != nil {
+ relay = func(ctx context.Context, node NodeDescriptor) error {
+ var info client.NodeInfo
+
+ client.NodeInfoFromNetmapElement(&info, node.Info)
+
+ c, err := prm.Config.ClientConstructor.Get(info)
+ if err != nil {
+ return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
+ }
+
+ return prm.Relay(ctx, info, c)
+ }
+ }
+
+ var resetSuccessAfterOnBroadcast bool
+ traverseOpts := prm.TraverseOpts
+ if forECPlacement && !prm.Common.LocalOnly() {
+ // save non-regular and linking object to EC container.
+ // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
+ traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.Container.PlacementPolicy())+1)))
+ resetSuccessAfterOnBroadcast = true
+ }
+
+ return &distributedWriter{
+ cfg: prm.Config,
+ placementOpts: traverseOpts,
+ resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
+ nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
+ if node.Local {
+ return LocalTarget{
+ Storage: prm.Config.LocalStore,
+ }
+ }
+
+ rt := &remoteWriter{
+ privateKey: prm.SignRequestPrivateKey,
+ commonPrm: prm.Common,
+ clientConstructor: prm.Config.ClientConstructor,
+ }
+
+ client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.Info)
+
+ return rt
+ },
+ relay: relay,
+ }
+}
+
+func newECWriter(prm *Params) transformer.ObjectWriter {
+ return &objectWriterDispatcher{
+ ecWriter: &ECWriter{
+ Config: prm.Config,
+ PlacementOpts: append(prm.TraverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC
+ Container: prm.Container,
+ Key: prm.SignRequestPrivateKey,
+ CommonPrm: prm.Common,
+ Relay: prm.Relay,
+ },
+ repWriter: newDefaultObjectWriter(prm, true),
+ }
+}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index c4ab15abf..f1082dfff 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -2,43 +2,40 @@ package patchsvc
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
)
// Service implements Put operation of Object service v2.
type Service struct {
- keyStorage *util.KeyStorage
+ *objectwriter.Config
getSvc *getsvc.Service
-
- putSvc *putsvc.Service
}
// NewService constructs Service instance from provided options.
-func NewService(ks *util.KeyStorage, getSvc *getsvc.Service, putSvc *putsvc.Service) *Service {
+//
+// Patch service can use the same objectwriter.Config initialized by Put service.
+func NewService(cfg *objectwriter.Config,
+ getSvc *getsvc.Service,
+) *Service {
return &Service{
- keyStorage: ks,
+ Config: cfg,
getSvc: getSvc,
-
- putSvc: putSvc,
}
}
// Put calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
- nodeKey, err := s.keyStorage.GetKey(nil)
+ nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return nil, err
}
return &Streamer{
- getSvc: s.getSvc,
-
- putSvc: s.putSvc,
-
+ Config: s.Config,
+ getSvc: s.getSvc,
localNodeKey: nodeKey,
}, nil
}
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 84363530e..85c28cda0 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -9,8 +9,9 @@ import (
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
refsV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -21,6 +22,8 @@ import (
// Streamer for the patch handler is a pipeline that merges two incoming streams of patches
// and original object payload chunks. The merged result is fed to Put stream target.
type Streamer struct {
+ *objectwriter.Config
+
// Patcher must be initialized at first Streamer.Send call.
patcher patcher.PatchApplier
@@ -28,8 +31,6 @@ type Streamer struct {
getSvc *getsvc.Service
- putSvc *putsvc.Service
-
localNodeKey *ecdsa.PrivateKey
}
@@ -78,11 +79,6 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
localNodeKey: s.localNodeKey,
}
- putstm, err := s.putSvc.Put()
- if err != nil {
- return err
- }
-
hdr := hdrWithSig.GetHeader()
oV2 := new(objectV2.Object)
hV2 := new(objectV2.Header)
@@ -97,14 +93,14 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- prm, err := s.putInitPrm(req, oV2)
+ target, err := target.New(&objectwriter.Params{
+ Config: s.Config,
+ Common: commonPrm,
+ Header: objectSDK.NewFromV2(oV2),
+ SignRequestPrivateKey: s.localNodeKey,
+ })
if err != nil {
- return err
- }
-
- err = putstm.Init(ctx, prm)
- if err != nil {
- return err
+ return fmt.Errorf("target creation: %w", err)
}
patcherPrm := patcher.Params{
@@ -112,7 +108,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
RangeProvider: rangeProvider,
- ObjectWriter: putstm.Target(),
+ ObjectWriter: target,
}
s.patcher = patcher.New(patcherPrm)
diff --git a/pkg/services/object/patch/util.go b/pkg/services/object/patch/util.go
index 1218d6694..4f3c3ef17 100644
--- a/pkg/services/object/patch/util.go
+++ b/pkg/services/object/patch/util.go
@@ -6,31 +6,12 @@ import (
"errors"
"fmt"
- objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
-// putInitPrm initializes put paramerer for Put stream.
-func (s *Streamer) putInitPrm(req *objectV2.PatchRequest, obj *objectV2.Object) (*putsvc.PutInitPrm, error) {
- commonPrm, err := util.CommonPrmFromV2(req)
- if err != nil {
- return nil, err
- }
-
- prm := new(putsvc.PutInitPrm)
- prm.WithObject(objectSDK.NewFromV2(obj)).
- WithCommonPrm(commonPrm).
- WithPrivateKey(s.localNodeKey)
-
- return prm, nil
-}
-
func newOwnerID(vh *session.RequestVerificationHeader) (*refs.OwnerID, error) {
for vh.GetOrigin() != nil {
vh = vh.GetOrigin()
diff --git a/pkg/services/object/put/service.go b/pkg/services/object/put/service.go
index a93873738..8cf4f0d62 100644
--- a/pkg/services/object/put/service.go
+++ b/pkg/services/object/put/service.go
@@ -1,132 +1,66 @@
package putsvc
import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
objutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
-type MaxSizeSource interface {
- // MaxObjectSize returns maximum payload size
- // of physically stored object in system.
- //
- // Must return 0 if value can not be obtained.
- MaxObjectSize() uint64
-}
-
type Service struct {
- *cfg
-}
-
-type Option func(*cfg)
-
-type ClientConstructor interface {
- Get(client.NodeInfo) (client.MultiAddressClient, error)
-}
-
-type InnerRing interface {
- InnerRingKeys() ([][]byte, error)
-}
-
-type FormatValidatorConfig interface {
- VerifySessionTokenIssuer() bool
-}
-
-type cfg struct {
- keyStorage *objutil.KeyStorage
-
- maxSizeSrc MaxSizeSource
-
- localStore ObjectStorage
-
- cnrSrc container.Source
-
- netMapSrc netmap.Source
-
- remotePool, localPool util.WorkerPool
-
- netmapKeys netmap.AnnouncedKeys
-
- fmtValidator *object.FormatValidator
-
- networkState netmap.State
-
- clientConstructor ClientConstructor
-
- log *logger.Logger
-
- verifySessionTokenIssuer bool
+ *objectwriter.Config
}
func NewService(ks *objutil.KeyStorage,
- cc ClientConstructor,
- ms MaxSizeSource,
- os ObjectStorage,
+ cc objectwriter.ClientConstructor,
+ ms objectwriter.MaxSizeSource,
+ os objectwriter.ObjectStorage,
cs container.Source,
ns netmap.Source,
nk netmap.AnnouncedKeys,
nst netmap.State,
- ir InnerRing,
- opts ...Option,
+ ir objectwriter.InnerRing,
+ opts ...objectwriter.Option,
) *Service {
- c := &cfg{
- remotePool: util.NewPseudoWorkerPool(),
- localPool: util.NewPseudoWorkerPool(),
- log: &logger.Logger{Logger: zap.L()},
- keyStorage: ks,
- clientConstructor: cc,
- maxSizeSrc: ms,
- localStore: os,
- cnrSrc: cs,
- netMapSrc: ns,
- netmapKeys: nk,
- networkState: nst,
+ c := &objectwriter.Config{
+ RemotePool: util.NewPseudoWorkerPool(),
+ LocalPool: util.NewPseudoWorkerPool(),
+ Logger: &logger.Logger{Logger: zap.L()},
+ KeyStorage: ks,
+ ClientConstructor: cc,
+ MaxSizeSrc: ms,
+ LocalStore: os,
+ ContainerSource: cs,
+ NetmapSource: ns,
+ NetmapKeys: nk,
+ NetworkState: nst,
}
for i := range opts {
opts[i](c)
}
- c.fmtValidator = object.NewFormatValidator(
+ c.FormatValidator = object.NewFormatValidator(
object.WithLockSource(os),
object.WithNetState(nst),
object.WithInnerRing(ir),
object.WithNetmapSource(ns),
object.WithContainersSource(cs),
- object.WithVerifySessionTokenIssuer(c.verifySessionTokenIssuer),
- object.WithLogger(c.log),
+ object.WithVerifySessionTokenIssuer(c.VerifySessionTokenIssuer),
+ object.WithLogger(c.Logger),
)
return &Service{
- cfg: c,
+ Config: c,
}
}
func (p *Service) Put() (*Streamer, error) {
return &Streamer{
- cfg: p.cfg,
+ Config: p.Config,
}, nil
}
-
-func WithWorkerPools(remote, local util.WorkerPool) Option {
- return func(c *cfg) {
- c.remotePool, c.localPool = remote, local
- }
-}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
-func WithVerifySessionTokenIssuer(v bool) Option {
- return func(c *cfg) {
- c.verifySessionTokenIssuer = v
- }
-}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 3cc8518f5..9b4163268 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -21,6 +21,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
@@ -97,12 +99,12 @@ func (s *Service) validatePutSingle(ctx context.Context, obj *objectSDK.Object)
func (s *Service) validarePutSingleSize(obj *objectSDK.Object) error {
if uint64(len(obj.Payload())) != obj.PayloadSize() {
- return ErrWrongPayloadSize
+ return target.ErrWrongPayloadSize
}
- maxAllowedSize := s.maxSizeSrc.MaxObjectSize()
+ maxAllowedSize := s.Config.MaxSizeSrc.MaxObjectSize()
if obj.PayloadSize() > maxAllowedSize {
- return ErrExceedingMaxSize
+ return target.ErrExceedingMaxSize
}
return nil
@@ -137,11 +139,11 @@ func (s *Service) validatePutSingleChecksum(obj *objectSDK.Object) error {
}
func (s *Service) validatePutSingleObject(ctx context.Context, obj *objectSDK.Object) (object.ContentMeta, error) {
- if err := s.fmtValidator.Validate(ctx, obj, false); err != nil {
+ if err := s.FormatValidator.Validate(ctx, obj, false); err != nil {
return object.ContentMeta{}, fmt.Errorf("coud not validate object format: %w", err)
}
- meta, err := s.fmtValidator.ValidateContent(obj)
+ meta, err := s.FormatValidator.ValidateContent(obj)
if err != nil {
return object.ContentMeta{}, fmt.Errorf("could not validate payload content: %w", err)
}
@@ -164,17 +166,17 @@ func (s *Service) saveToNodes(ctx context.Context, obj *objectSDK.Object, req *o
}
func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlacement, obj *objectSDK.Object, localOnly bool, req *objectAPI.PutSingleRequest, meta object.ContentMeta) error {
- iter := s.cfg.newNodeIterator(placement.placementOptions)
- iter.extraBroadcastEnabled = needAdditionalBroadcast(obj, localOnly)
- iter.resetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
+ iter := s.Config.NewNodeIterator(placement.placementOptions)
+ iter.ExtraBroadcastEnabled = objectwriter.NeedAdditionalBroadcast(obj, localOnly)
+ iter.ResetSuccessAfterOnBroadcast = placement.resetSuccessAfterOnBroadcast
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.keyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
- return iter.forEachNode(ctx, func(ctx context.Context, nd nodeDesc) error {
+ return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
})
}
@@ -184,25 +186,25 @@ func (s *Service) saveToECReplicas(ctx context.Context, placement putSinglePlace
if err != nil {
return err
}
- key, err := s.cfg.keyStorage.GetKey(nil)
+ key, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
return err
}
signer := &putSingleRequestSigner{
req: req,
- keyStorage: s.keyStorage,
+ keyStorage: s.Config.KeyStorage,
signer: &sync.Once{},
}
- w := ecWriter{
- cfg: s.cfg,
- placementOpts: placement.placementOptions,
- objMeta: meta,
- objMetaValid: true,
- commonPrm: commonPrm,
- container: placement.container,
- key: key,
- relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
+ w := objectwriter.ECWriter{
+ Config: s.Config,
+ PlacementOpts: placement.placementOptions,
+ ObjectMeta: meta,
+ ObjectMetaValid: true,
+ CommonPrm: commonPrm,
+ Container: placement.container,
+ Key: key,
+ Relay: func(ctx context.Context, ni client.NodeInfo, mac client.MultiAddressClient) error {
return s.redirectPutSingleRequest(ctx, signer, obj, ni, mac)
},
}
@@ -223,7 +225,7 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
if !ok {
return result, errors.New("missing container ID")
}
- cnrInfo, err := s.cnrSrc.Get(cnrID)
+ cnrInfo, err := s.Config.ContainerSource.Get(cnrID)
if err != nil {
return result, fmt.Errorf("could not get container by ID: %w", err)
}
@@ -247,31 +249,31 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
result.placementOptions = append(result.placementOptions, placement.ForObject(objID))
- latestNetmap, err := netmap.GetLatestNetworkMap(s.netMapSrc)
+ latestNetmap, err := netmap.GetLatestNetworkMap(s.Config.NetmapSource)
if err != nil {
return result, fmt.Errorf("could not get latest network map: %w", err)
}
builder := placement.NewNetworkMapBuilder(latestNetmap)
if localOnly {
result.placementOptions = append(result.placementOptions, placement.SuccessAfter(1))
- builder = svcutil.NewLocalPlacement(builder, s.netmapKeys)
+ builder = svcutil.NewLocalPlacement(builder, s.Config.NetmapKeys)
}
result.placementOptions = append(result.placementOptions, placement.UseBuilder(builder))
return result, nil
}
-func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, obj *objectSDK.Object,
+func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
signer *putSingleRequestSigner, meta object.ContentMeta,
) error {
- if nodeDesc.local {
+ if nodeDesc.Local {
return s.saveLocal(ctx, obj, meta)
}
var info client.NodeInfo
- client.NodeInfoFromNetmapElement(&info, nodeDesc.info)
+ client.NodeInfoFromNetmapElement(&info, nodeDesc.Info)
- c, err := s.clientConstructor.Get(info)
+ c, err := s.Config.ClientConstructor.Get(info)
if err != nil {
return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
}
@@ -280,8 +282,8 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *nodeDesc, o
}
func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
- localTarget := &localTarget{
- storage: s.localStore,
+ localTarget := &objectwriter.LocalTarget{
+ Storage: s.Config.LocalStore,
}
return localTarget.WriteObject(ctx, obj, meta)
}
@@ -314,7 +316,7 @@ func (s *Service) redirectPutSingleRequest(ctx context.Context,
if err != nil {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
- s.log.Warn(logs.PutSingleRedirectFailure,
+ s.Config.Logger.Warn(logs.PutSingleRedirectFailure,
zap.Error(err),
zap.Stringer("address", addr),
zap.Stringer("object_id", objID),
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index 6b396ec96..f3803d433 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -2,33 +2,21 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/policy"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
- pkgutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
- containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
type Streamer struct {
- *cfg
-
- privateKey *ecdsa.PrivateKey
+ *objectwriter.Config
target transformer.ChunkedObjectWriter
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- maxPayloadSz uint64 // network config
}
var errNotInit = errors.New("stream not initialized")
@@ -36,8 +24,23 @@ var errNotInit = errors.New("stream not initialized")
var errInitRecall = errors.New("init recall")
func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
+ if p.target != nil {
+ return errInitRecall
+ }
+
// initialize destination target
- if err := p.initTarget(prm); err != nil {
+ prmTarget := &objectwriter.Params{
+ Config: p.Config,
+ Common: prm.common,
+ Header: prm.hdr,
+ Container: prm.cnr,
+ TraverseOpts: prm.traverseOpts,
+ Relay: p.relay,
+ }
+
+ var err error
+ p.target, err = target.New(prmTarget)
+ if err != nil {
return fmt.Errorf("(%T) could not initialize object target: %w", p, err)
}
@@ -47,253 +50,6 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
return nil
}
-// Target accesses underlying target chunked object writer.
-func (p *Streamer) Target() transformer.ChunkedObjectWriter {
- return p.target
-}
-
-// MaxObjectSize returns maximum payload size for the streaming session.
-//
-// Must be called after the successful Init.
-func (p *Streamer) MaxObjectSize() uint64 {
- return p.maxPayloadSz
-}
-
-func (p *Streamer) initTarget(prm *PutInitPrm) error {
- // prevent re-calling
- if p.target != nil {
- return errInitRecall
- }
-
- // prepare needed put parameters
- if err := p.preparePrm(prm); err != nil {
- return fmt.Errorf("(%T) could not prepare put parameters: %w", p, err)
- }
-
- p.maxPayloadSz = p.maxSizeSrc.MaxObjectSize()
- if p.maxPayloadSz == 0 {
- return fmt.Errorf("(%T) could not obtain max object size parameter", p)
- }
-
- if prm.hdr.Signature() != nil {
- return p.initUntrustedTarget(prm)
- }
- return p.initTrustedTarget(prm)
-}
-
-func (p *Streamer) initUntrustedTarget(prm *PutInitPrm) error {
- p.relay = prm.relay
-
- if prm.privateKey != nil {
- p.privateKey = prm.privateKey
- } else {
- nodeKey, err := p.cfg.keyStorage.GetKey(nil)
- if err != nil {
- return err
- }
- p.privateKey = nodeKey
- }
-
- // prepare untrusted-Put object target
- p.target = &validatingPreparedTarget{
- nextTarget: newInMemoryObjectBuilder(p.newObjectWriter(prm)),
- fmt: p.fmtValidator,
-
- maxPayloadSz: p.maxPayloadSz,
- }
-
- return nil
-}
-
-func (p *Streamer) initTrustedTarget(prm *PutInitPrm) error {
- sToken := prm.common.SessionToken()
-
- // prepare trusted-Put object target
-
- // get private token from local storage
- var sessionInfo *util.SessionInfo
-
- if sToken != nil {
- sessionInfo = &util.SessionInfo{
- ID: sToken.ID(),
- Owner: sToken.Issuer(),
- }
- }
-
- key, err := p.keyStorage.GetKey(sessionInfo)
- if err != nil {
- return fmt.Errorf("(%T) could not receive session key: %w", p, err)
- }
-
- // In case session token is missing, the line above returns the default key.
- // If it isn't owner key, replication attempts will fail, thus this check.
- ownerObj := prm.hdr.OwnerID()
- if ownerObj.IsEmpty() {
- return errors.New("missing object owner")
- }
-
- if sToken == nil {
- var ownerSession user.ID
- user.IDFromKey(&ownerSession, key.PublicKey)
-
- if !ownerObj.Equals(ownerSession) {
- return fmt.Errorf("(%T) session token is missing but object owner id is different from the default key", p)
- }
- } else {
- if !ownerObj.Equals(sessionInfo.Owner) {
- return fmt.Errorf("(%T) different token issuer and object owner identifiers %s/%s", p, sessionInfo.Owner, ownerObj)
- }
- }
-
- if prm.privateKey != nil {
- p.privateKey = prm.privateKey
- } else {
- p.privateKey = key
- }
- p.target = &validatingTarget{
- fmt: p.fmtValidator,
- nextTarget: transformer.NewPayloadSizeLimiter(transformer.Params{
- Key: key,
- NextTargetInit: func() transformer.ObjectWriter { return p.newObjectWriter(prm) },
- NetworkState: p.networkState,
- MaxSize: p.maxPayloadSz,
- WithoutHomomorphicHash: containerSDK.IsHomomorphicHashingDisabled(prm.cnr),
- SessionToken: sToken,
- }),
- }
-
- return nil
-}
-
-func (p *Streamer) preparePrm(prm *PutInitPrm) error {
- var err error
-
- // get latest network map
- nm, err := netmap.GetLatestNetworkMap(p.netMapSrc)
- if err != nil {
- return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
- }
-
- idCnr, ok := prm.hdr.ContainerID()
- if !ok {
- return errors.New("missing container ID")
- }
-
- // get container to store the object
- cnrInfo, err := p.cnrSrc.Get(idCnr)
- if err != nil {
- return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
- }
-
- prm.cnr = cnrInfo.Value
-
- // add common options
- prm.traverseOpts = append(prm.traverseOpts,
- // set processing container
- placement.ForContainer(prm.cnr),
- )
-
- if ech := prm.hdr.ECHeader(); ech != nil {
- prm.traverseOpts = append(prm.traverseOpts,
- // set identifier of the processing object
- placement.ForObject(ech.Parent()),
- )
- } else if id, ok := prm.hdr.ID(); ok {
- prm.traverseOpts = append(prm.traverseOpts,
- // set identifier of the processing object
- placement.ForObject(id),
- )
- }
-
- // create placement builder from network map
- builder := placement.NewNetworkMapBuilder(nm)
-
- if prm.common.LocalOnly() {
- // restrict success count to 1 stored copy (to local storage)
- prm.traverseOpts = append(prm.traverseOpts, placement.SuccessAfter(1))
-
- // use local-only placement builder
- builder = util.NewLocalPlacement(builder, p.netmapKeys)
- }
-
- // set placement builder
- prm.traverseOpts = append(prm.traverseOpts, placement.UseBuilder(builder))
-
- return nil
-}
-
-func (p *Streamer) newObjectWriter(prm *PutInitPrm) transformer.ObjectWriter {
- if container.IsECContainer(prm.cnr) && object.IsECSupported(prm.hdr) {
- return p.newECWriter(prm)
- }
- return p.newDefaultObjectWriter(prm, false)
-}
-
-func (p *Streamer) newDefaultObjectWriter(prm *PutInitPrm, forECPlacement bool) transformer.ObjectWriter {
- var relay func(context.Context, nodeDesc) error
- if p.relay != nil {
- relay = func(ctx context.Context, node nodeDesc) error {
- var info client.NodeInfo
-
- client.NodeInfoFromNetmapElement(&info, node.info)
-
- c, err := p.clientConstructor.Get(info)
- if err != nil {
- return fmt.Errorf("could not create SDK client %s: %w", info.AddressGroup(), err)
- }
-
- return p.relay(ctx, info, c)
- }
- }
-
- var resetSuccessAfterOnBroadcast bool
- traverseOpts := prm.traverseOpts
- if forECPlacement && !prm.common.LocalOnly() {
- // save non-regular and linking object to EC container.
- // EC 2.1 -> REP 2, EC 2.2 -> REP 3 etc.
- traverseOpts = append(traverseOpts, placement.SuccessAfter(uint32(policy.ECParityCount(prm.cnr.PlacementPolicy())+1)))
- resetSuccessAfterOnBroadcast = true
- }
-
- return &distributedTarget{
- cfg: p.cfg,
- placementOpts: traverseOpts,
- resetSuccessAfterOnBroadcast: resetSuccessAfterOnBroadcast,
- nodeTargetInitializer: func(node nodeDesc) preparedObjectTarget {
- if node.local {
- return localTarget{
- storage: p.localStore,
- }
- }
-
- rt := &remoteTarget{
- privateKey: p.privateKey,
- commonPrm: prm.common,
- clientConstructor: p.clientConstructor,
- }
-
- client.NodeInfoFromNetmapElement(&rt.nodeInfo, node.info)
-
- return rt
- },
- relay: relay,
- }
-}
-
-func (p *Streamer) newECWriter(prm *PutInitPrm) transformer.ObjectWriter {
- return &objectWriterDispatcher{
- ecWriter: &ecWriter{
- cfg: p.cfg,
- placementOpts: append(prm.traverseOpts, placement.WithCopyNumbers(nil)), // copies number ignored for EC
- container: prm.cnr,
- key: p.privateKey,
- commonPrm: prm.common,
- relay: p.relay,
- },
- repWriter: p.newDefaultObjectWriter(prm, true),
- }
-}
-
func (p *Streamer) SendChunk(ctx context.Context, prm *PutChunkPrm) error {
if p.target == nil {
return errNotInit
@@ -327,10 +83,3 @@ func (p *Streamer) Close(ctx context.Context) (*PutResponse, error) {
id: ids.SelfID,
}, nil
}
-
-func (c *cfg) getWorkerPool(pub []byte) (pkgutil.WorkerPool, bool) {
- if c.netmapKeys.IsLocalKey(pub) {
- return c.localPool, true
- }
- return c.remotePool, false
-}
diff --git a/pkg/services/object/put/v2/streamer.go b/pkg/services/object/put/v2/streamer.go
index 9c6de4ca8..5bf15b4cd 100644
--- a/pkg/services/object/put/v2/streamer.go
+++ b/pkg/services/object/put/v2/streamer.go
@@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
@@ -55,7 +56,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
s.saveChunks = v.GetSignature() != nil
if s.saveChunks {
- maxSz := s.stream.MaxObjectSize()
+ maxSz := s.stream.MaxSizeSrc.MaxObjectSize()
s.sizes = &sizes{
payloadSz: uint64(v.GetHeader().GetPayloadLength()),
@@ -63,7 +64,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
// check payload size limit overflow
if s.payloadSz > maxSz {
- return putsvc.ErrExceedingMaxSize
+ return target.ErrExceedingMaxSize
}
s.init = req
@@ -74,7 +75,7 @@ func (s *streamer) Send(ctx context.Context, req *object.PutRequest) (err error)
// check payload size overflow
if s.writtenPayload > s.payloadSz {
- return putsvc.ErrWrongPayloadSize
+ return target.ErrWrongPayloadSize
}
}
@@ -117,7 +118,7 @@ func (s *streamer) CloseAndRecv(ctx context.Context) (*object.PutResponse, error
if s.saveChunks {
// check payload size correctness
if s.writtenPayload != s.payloadSz {
- return nil, putsvc.ErrWrongPayloadSize
+ return nil, target.ErrWrongPayloadSize
}
}
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 3d04b7084..7e5c6e093 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -5,7 +5,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
@@ -52,7 +52,7 @@ func (p *Replicator) HandleReplicationTask(ctx context.Context, task Task, res T
}
}
- prm := new(putsvc.RemotePutPrm).
+ prm := new(objectwriter.RemotePutPrm).
WithObject(task.Obj)
for i := 0; task.NumCopies > 0 && i < len(task.Nodes); i++ {
diff --git a/pkg/services/replicator/replicator.go b/pkg/services/replicator/replicator.go
index a67f2e766..f2f86daf0 100644
--- a/pkg/services/replicator/replicator.go
+++ b/pkg/services/replicator/replicator.go
@@ -4,8 +4,8 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
- putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -24,7 +24,7 @@ type cfg struct {
log *logger.Logger
- remoteSender *putsvc.RemoteSender
+ remoteSender *objectwriter.RemoteSender
remoteGetter *getsvc.RemoteGetter
@@ -67,7 +67,7 @@ func WithLogger(v *logger.Logger) Option {
}
// WithRemoteSender returns option to set remote object sender of Replicator.
-func WithRemoteSender(v *putsvc.RemoteSender) Option {
+func WithRemoteSender(v *objectwriter.RemoteSender) Option {
return func(c *cfg) {
c.remoteSender = v
}
From 108e4e07be5d75f852fee90f8c0d9e17952be3b5 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 3 Sep 2024 12:18:10 +0300
Subject: [PATCH 025/655] [#1349] node: Evacuate objects without setting mode
to `MAINTENANCE`
Signed-off-by: Anton Nikiforov
---
.../modules/control/shards_list.go | 18 ++++----
docs/evacuation.md | 6 +++
internal/metrics/engine.go | 8 ++++
pkg/local_object_storage/engine/evacuate.go | 10 +++++
.../engine/evacuate_test.go | 28 ++++++++++++
pkg/local_object_storage/engine/metrics.go | 1 +
pkg/local_object_storage/engine/shards.go | 4 ++
pkg/local_object_storage/shard/exists.go | 4 ++
pkg/local_object_storage/shard/get.go | 14 +++++-
pkg/local_object_storage/shard/head.go | 7 +++
pkg/local_object_storage/shard/info.go | 3 ++
.../shard/metrics_test.go | 3 ++
pkg/local_object_storage/shard/range.go | 4 ++
pkg/local_object_storage/shard/shard.go | 11 +++++
pkg/services/control/server/evacuate_async.go | 3 ++
pkg/services/control/server/list_shards.go | 1 +
pkg/services/control/types.proto | 3 ++
pkg/services/control/types_frostfs.pb.go | 45 ++++++++++++++++---
18 files changed, 156 insertions(+), 17 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/shards_list.go b/cmd/frostfs-cli/modules/control/shards_list.go
index e9e49bb29..a81034a9e 100644
--- a/cmd/frostfs-cli/modules/control/shards_list.go
+++ b/cmd/frostfs-cli/modules/control/shards_list.go
@@ -65,13 +65,14 @@ func prettyPrintShardsJSON(cmd *cobra.Command, ii []control.ShardInfo) {
out := make([]map[string]any, 0, len(ii))
for _, i := range ii {
out = append(out, map[string]any{
- "shard_id": base58.Encode(i.GetShard_ID()),
- "mode": shardModeToString(i.GetMode()),
- "metabase": i.GetMetabasePath(),
- "blobstor": i.GetBlobstor(),
- "writecache": i.GetWritecachePath(),
- "pilorama": i.GetPiloramaPath(),
- "error_count": i.GetErrorCount(),
+ "shard_id": base58.Encode(i.GetShard_ID()),
+ "mode": shardModeToString(i.GetMode()),
+ "metabase": i.GetMetabasePath(),
+ "blobstor": i.GetBlobstor(),
+ "writecache": i.GetWritecachePath(),
+ "pilorama": i.GetPiloramaPath(),
+ "error_count": i.GetErrorCount(),
+ "evacuation_in_progress": i.GetEvacuationInProgress(),
})
}
@@ -105,7 +106,8 @@ func prettyPrintShards(cmd *cobra.Command, ii []control.ShardInfo) {
sb.String()+
pathPrinter("Write-cache", i.GetWritecachePath())+
pathPrinter("Pilorama", i.GetPiloramaPath())+
- fmt.Sprintf("Error count: %d\n", i.GetErrorCount()),
+ fmt.Sprintf("Error count: %d\n", i.GetErrorCount())+
+ fmt.Sprintf("Evacuation in progress: %t\n", i.GetEvacuationInProgress()),
base58.Encode(i.GetShard_ID()),
shardModeToString(i.GetMode()),
)
diff --git a/docs/evacuation.md b/docs/evacuation.md
index 9db514a9e..885ce169a 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -10,6 +10,12 @@ First of all, by the evacuation the data is transferred to other shards of the s
Only one running evacuation process is allowed on the node at a time.
+It is not necessary to turn maintenance mode on for the storage node.
+
+Once evacuation from a shard has started, reading data from it via the public API is impossible, unless the evacuation is stopped manually or the node is restarted.
+
+This is needed to prevent the policer from removing objects with policy `REP 1 ...` from remote nodes while evacuation is in progress.
+
`frostfs-cli` utility is used to manage evacuation.
## Commands
diff --git a/internal/metrics/engine.go b/internal/metrics/engine.go
index e37777e40..1d01c95ed 100644
--- a/internal/metrics/engine.go
+++ b/internal/metrics/engine.go
@@ -27,6 +27,7 @@ type EngineMetrics interface {
IncRefillObjectsCount(shardID, path string, size int, success bool)
SetRefillPercent(shardID, path string, percent uint32)
SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
WriteCache() WriteCacheMetrics
GC() GCMetrics
@@ -45,6 +46,7 @@ type engineMetrics struct {
refillObjCounter *prometheus.GaugeVec
refillPayloadCounter *prometheus.GaugeVec
refillPercentCounter *prometheus.GaugeVec
+ evacuationInProgress *shardIDModeValue
gc *gcMetrics
writeCache *writeCacheMetrics
@@ -72,6 +74,7 @@ func newEngineMetrics() *engineMetrics {
refillObjCounter: newEngineGaugeVector("resync_metabase_objects_total", "Count of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
refillPayloadCounter: newEngineGaugeVector("resync_metabase_objects_size_bytes", "Size of objects resynced from blobstore to metabase", []string{shardIDLabel, pathLabel, successLabel}),
refillPercentCounter: newEngineGaugeVector("resync_metabase_complete_percent", "Percent of resynced from blobstore to metabase completeness", []string{shardIDLabel, pathLabel}),
+ evacuationInProgress: newShardIDMode(engineSubsystem, "evacuation_in_progress", "Shard evacuation in progress"),
}
}
@@ -124,6 +127,7 @@ func (m *engineMetrics) DeleteShardMetrics(shardID string) {
m.refillPercentCounter.DeletePartialMatch(prometheus.Labels{shardIDLabel: shardID})
m.mode.Delete(shardID)
m.refillStatus.DeleteByShardID(shardID)
+ m.evacuationInProgress.Delete(shardID)
}
func (m *engineMetrics) AddToObjectCounter(shardID, objectType string, delta int) {
@@ -213,3 +217,7 @@ func (m *engineMetrics) SetRefillPercent(shardID, path string, percent uint32) {
func (m *engineMetrics) SetRefillStatus(shardID, path, status string) {
m.refillStatus.SetMode(shardID, path, status)
}
+
+func (m *engineMetrics) SetEvacuationInProgress(shardID string, value bool) {
+ m.evacuationInProgress.SetMode(shardID, strconv.FormatBool(value))
+}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 04e427e49..7bef6edfb 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -366,6 +366,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string
listPrm.WithCount(defaultEvacuateBatchSize)
sh := shardsToEvacuate[shardID]
+ sh.SetEvacuationInProgress(true)
var c *meta.Cursor
for {
@@ -655,6 +656,7 @@ func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, to
var getPrm shard.GetPrm
getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
getRes, err := sh.Get(ctx, getPrm)
if err != nil {
@@ -765,3 +767,11 @@ func (e *StorageEngine) ResetEvacuationStatus(ctx context.Context) error {
return e.evacuateLimiter.ResetEvacuationStatus()
}
+
+func (e *StorageEngine) ResetEvacuationStatusForShards() {
+ e.mtx.RLock()
+ defer e.mtx.RUnlock()
+ for _, sh := range e.shards {
+ sh.SetEvacuationInProgress(false)
+ }
+}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 8d25dad4a..28529fab9 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -125,6 +125,34 @@ func TestEvacuateShardObjects(t *testing.T) {
// Second case ensures that all objects are indeed moved and available.
checkHasObjects(t)
+ // Objects on evacuated shards should be logically unavailable, but persisted on disk.
+ // This is necessary to prevent removing it by policer in case of `REP 1` policy.
+ for _, obj := range objects[len(objects)-objPerShard:] {
+ var prmGet shard.GetPrm
+ prmGet.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.Error(t, err)
+
+ prmGet.SkipEvacCheck(true)
+ _, err = e.shards[evacuateShardID].Get(context.Background(), prmGet)
+ require.NoError(t, err)
+
+ var prmHead shard.HeadPrm
+ prmHead.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].Head(context.Background(), prmHead)
+ require.Error(t, err)
+
+ var existsPrm shard.ExistsPrm
+ existsPrm.Address = objectCore.AddressOf(obj)
+ _, err = e.shards[evacuateShardID].Exists(context.Background(), existsPrm)
+ require.Error(t, err)
+
+ var rngPrm shard.RngPrm
+ rngPrm.SetAddress(objectCore.AddressOf(obj))
+ _, err = e.shards[evacuateShardID].GetRange(context.Background(), rngPrm)
+ require.Error(t, err)
+ }
+
// Calling it again is OK, but all objects are already moved, so no new PUTs should be done.
res, err = e.Evacuate(context.Background(), prm)
require.NoError(t, err)
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 72b5ae252..1c088c754 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -30,6 +30,7 @@ type MetricRegister interface {
IncRefillObjectsCount(shardID, path string, size int, success bool)
SetRefillPercent(shardID, path string, percent uint32)
SetRefillStatus(shardID, path, status string)
+ SetEvacuationInProgress(shardID string, value bool)
WriteCache() metrics.WriteCacheMetrics
GC() metrics.GCMetrics
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 980b38a63..40584149e 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -97,6 +97,10 @@ func (m *metricsWithID) SetRefillStatus(path string, status string) {
m.mw.SetRefillStatus(m.id, path, status)
}
+func (m *metricsWithID) SetEvacuationInProgress(value bool) {
+ m.mw.SetEvacuationInProgress(m.id, value)
+}
+
// AddShard adds a new shard to the storage engine.
//
// Returns any error encountered that did not allow adding a shard.
diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go
index b5a9604b4..784bf293a 100644
--- a/pkg/local_object_storage/shard/exists.go
+++ b/pkg/local_object_storage/shard/exists.go
@@ -5,7 +5,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -60,6 +62,8 @@ func (s *Shard) Exists(ctx context.Context, prm ExistsPrm) (ExistsRes, error) {
if s.info.Mode.Disabled() {
return ExistsRes{}, ErrShardDisabled
+ } else if s.info.EvacuationInProgress {
+ return ExistsRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
} else if s.info.Mode.NoMetabase() {
var p common.ExistsPrm
p.Address = prm.Address
diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go
index 2e7c84bcd..d1c393613 100644
--- a/pkg/local_object_storage/shard/get.go
+++ b/pkg/local_object_storage/shard/get.go
@@ -27,8 +27,9 @@ type storFetcher = func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object,
// GetPrm groups the parameters of Get operation.
type GetPrm struct {
- addr oid.Address
- skipMeta bool
+ addr oid.Address
+ skipMeta bool
+ skipEvacCheck bool
}
// GetRes groups the resulting values of Get operation.
@@ -50,6 +51,11 @@ func (p *GetPrm) SetIgnoreMeta(ignore bool) {
p.skipMeta = ignore
}
+// SkipEvacCheck is a Get option that instructs to skip the check for whether evacuation is in progress.
+func (p *GetPrm) SkipEvacCheck(val bool) {
+ p.skipEvacCheck = val
+}
+
// Object returns the requested object.
func (r GetRes) Object() *objectSDK.Object {
return r.obj
@@ -85,6 +91,10 @@ func (s *Shard) Get(ctx context.Context, prm GetPrm) (GetRes, error) {
return GetRes{}, ErrShardDisabled
}
+ if s.info.EvacuationInProgress && !prm.skipEvacCheck {
+ return GetRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
cb := func(stor *blobstor.BlobStor, id []byte) (*objectSDK.Object, error) {
var getPrm common.GetPrm
getPrm.Address = prm.addr
diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go
index 9d5d31260..ff57e3bf9 100644
--- a/pkg/local_object_storage/shard/head.go
+++ b/pkg/local_object_storage/shard/head.go
@@ -4,7 +4,9 @@ import (
"context"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.opentelemetry.io/otel/attribute"
@@ -70,6 +72,11 @@ func (s *Shard) Head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
res, err = s.Get(ctx, getPrm)
obj = res.Object()
} else {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ if s.info.EvacuationInProgress {
+ return HeadRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
var headParams meta.GetPrm
headParams.SetAddress(prm.addr)
headParams.SetRaw(prm.raw)
diff --git a/pkg/local_object_storage/shard/info.go b/pkg/local_object_storage/shard/info.go
index 1051ab3db..f01796ec7 100644
--- a/pkg/local_object_storage/shard/info.go
+++ b/pkg/local_object_storage/shard/info.go
@@ -16,6 +16,9 @@ type Info struct {
// Shard mode.
Mode mode.Mode
+ // True when evacuation is in progress.
+ EvacuationInProgress bool
+
// Information about the metabase.
MetaBaseInfo meta.Info
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 1ef849c02..01a85da97 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -192,6 +192,9 @@ func (m *metricsStore) SetRefillStatus(_ string, status string) {
m.refillStatus = status
}
+func (m *metricsStore) SetEvacuationInProgress(bool) {
+}
+
func TestCounters(t *testing.T) {
t.Parallel()
diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go
index 9491543c4..701268820 100644
--- a/pkg/local_object_storage/shard/range.go
+++ b/pkg/local_object_storage/shard/range.go
@@ -87,6 +87,10 @@ func (s *Shard) GetRange(ctx context.Context, prm RngPrm) (RngRes, error) {
s.m.RLock()
defer s.m.RUnlock()
+ if s.info.EvacuationInProgress {
+ return RngRes{}, logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+
if s.info.Mode.Disabled() {
return RngRes{}, ErrShardDisabled
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d11bcc36b..ac389b506 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -97,6 +97,8 @@ type MetricsWriter interface {
SetRefillPercent(path string, percent uint32)
// SetRefillStatus sets refill status.
SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status
+ SetEvacuationInProgress(value bool)
}
type cfg struct {
@@ -579,3 +581,12 @@ func (s *Shard) DeleteShardMetrics() {
s.cfg.metricsWriter.DeleteShardMetrics()
}
}
+
+func (s *Shard) SetEvacuationInProgress(val bool) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ s.info.EvacuationInProgress = val
+ if s.metricsWriter != nil {
+ s.metricsWriter.SetEvacuationInProgress(val)
+ }
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index b829573ec..aacebe9e3 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -101,6 +101,9 @@ func (s *Server) StopShardEvacuation(ctx context.Context, req *control.StopShard
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
+
+ s.s.ResetEvacuationStatusForShards()
+
return resp, nil
}
diff --git a/pkg/services/control/server/list_shards.go b/pkg/services/control/server/list_shards.go
index 56bd9fc1f..efe2754ea 100644
--- a/pkg/services/control/server/list_shards.go
+++ b/pkg/services/control/server/list_shards.go
@@ -53,6 +53,7 @@ func (s *Server) ListShards(_ context.Context, req *control.ListShardsRequest) (
si.SetMode(m)
si.SetErrorCount(sh.ErrorCount)
+ si.SetEvacuationInProgress(sh.EvacuationInProgress)
shardInfos = append(shardInfos, *si)
}
diff --git a/pkg/services/control/types.proto b/pkg/services/control/types.proto
index 55636d88a..d8135ed64 100644
--- a/pkg/services/control/types.proto
+++ b/pkg/services/control/types.proto
@@ -142,6 +142,9 @@ message ShardInfo {
// Path to shard's pilorama storage.
string pilorama_path = 7 [ json_name = "piloramaPath" ];
+
+ // Evacuation status.
+ bool evacuation_in_progress = 8 [ json_name = "evacuationInProgress" ];
}
// Blobstor component description.
diff --git a/pkg/services/control/types_frostfs.pb.go b/pkg/services/control/types_frostfs.pb.go
index 42c1afa52..f92106589 100644
--- a/pkg/services/control/types_frostfs.pb.go
+++ b/pkg/services/control/types_frostfs.pb.go
@@ -954,13 +954,14 @@ func (x *Netmap) UnmarshalEasyJSON(in *jlexer.Lexer) {
}
type ShardInfo struct {
- Shard_ID []byte `json:"shardID"`
- MetabasePath string `json:"metabasePath"`
- Blobstor []BlobstorInfo `json:"blobstor"`
- WritecachePath string `json:"writecachePath"`
- Mode ShardMode `json:"mode"`
- ErrorCount uint32 `json:"errorCount"`
- PiloramaPath string `json:"piloramaPath"`
+ Shard_ID []byte `json:"shardID"`
+ MetabasePath string `json:"metabasePath"`
+ Blobstor []BlobstorInfo `json:"blobstor"`
+ WritecachePath string `json:"writecachePath"`
+ Mode ShardMode `json:"mode"`
+ ErrorCount uint32 `json:"errorCount"`
+ PiloramaPath string `json:"piloramaPath"`
+ EvacuationInProgress bool `json:"evacuationInProgress"`
}
var (
@@ -986,6 +987,7 @@ func (x *ShardInfo) StableSize() (size int) {
size += proto.EnumSize(5, int32(x.Mode))
size += proto.UInt32Size(6, x.ErrorCount)
size += proto.StringSize(7, x.PiloramaPath)
+ size += proto.BoolSize(8, x.EvacuationInProgress)
return size
}
@@ -1023,6 +1025,9 @@ func (x *ShardInfo) EmitProtobuf(mm *easyproto.MessageMarshaler) {
if len(x.PiloramaPath) != 0 {
mm.AppendString(7, x.PiloramaPath)
}
+ if x.EvacuationInProgress {
+ mm.AppendBool(8, x.EvacuationInProgress)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -1080,6 +1085,12 @@ func (x *ShardInfo) UnmarshalProtobuf(src []byte) (err error) {
return fmt.Errorf("cannot unmarshal field %s", "PiloramaPath")
}
x.PiloramaPath = data
+ case 8: // EvacuationInProgress
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "EvacuationInProgress")
+ }
+ x.EvacuationInProgress = data
}
}
return nil
@@ -1147,6 +1158,15 @@ func (x *ShardInfo) GetPiloramaPath() string {
func (x *ShardInfo) SetPiloramaPath(v string) {
x.PiloramaPath = v
}
+func (x *ShardInfo) GetEvacuationInProgress() bool {
+ if x != nil {
+ return x.EvacuationInProgress
+ }
+ return false
+}
+func (x *ShardInfo) SetEvacuationInProgress(v bool) {
+ x.EvacuationInProgress = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *ShardInfo) MarshalJSON() ([]byte, error) {
@@ -1202,6 +1222,11 @@ func (x *ShardInfo) MarshalEasyJSON(out *jwriter.Writer) {
out.RawString(prefix)
out.String(x.PiloramaPath)
}
+ {
+ const prefix string = ",\"evacuationInProgress\":"
+ out.RawString(prefix)
+ out.Bool(x.EvacuationInProgress)
+ }
out.RawByte('}')
}
@@ -1296,6 +1321,12 @@ func (x *ShardInfo) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.String()
x.PiloramaPath = f
}
+ case "evacuationInProgress":
+ {
+ var f bool
+ f = in.Bool()
+ x.EvacuationInProgress = f
+ }
}
in.WantComma()
}
From 273980cfb99a29eef0dbe4bd013c7a0041b6db79 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Thu, 5 Sep 2024 16:40:32 +0300
Subject: [PATCH 026/655] [#1310] object: Remove irrelevant comments
Signed-off-by: Airat Arifullin
---
pkg/services/object/common/target/target.go | 2 --
1 file changed, 2 deletions(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 00080ace6..980c4c6bd 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -116,7 +116,6 @@ func preparePrm(prm *objectwriter.Params) error {
// get latest network map
nm, err := netmap.GetLatestNetworkMap(prm.Config.NetmapSource)
if err != nil {
- //return fmt.Errorf("(%T) could not get latest network map: %w", p, err)
return fmt.Errorf("could not get latest network map: %w", err)
}
@@ -128,7 +127,6 @@ func preparePrm(prm *objectwriter.Params) error {
// get container to store the object
cnrInfo, err := prm.Config.ContainerSource.Get(idCnr)
if err != nil {
- //return fmt.Errorf("(%T) could not get container by ID: %w", p, err)
return fmt.Errorf("could not get container by ID: %w", err)
}
From f652518c241f405db22259c753be7f8685f39cc7 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 6 Sep 2024 13:09:58 +0300
Subject: [PATCH 027/655] [#1357] go: Fix panic caused by using range over
slice len
If slice is altered in `for` loop, we cannot use range over its
length: it may cause panic if slice gets shorter.
Signed-off-by: Ekaterina Lebedeva
---
pkg/services/control/server/evacuate.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/control/server/evacuate.go b/pkg/services/control/server/evacuate.go
index 300cb9dc9..ae3413373 100644
--- a/pkg/services/control/server/evacuate.go
+++ b/pkg/services/control/server/evacuate.go
@@ -169,7 +169,7 @@ func (s *Server) getContainerNodes(contID cid.ID) ([]netmap.NodeInfo, error) {
nodes := placement.FlattenNodes(ns)
bs := (*keys.PublicKey)(&s.key.PublicKey).Bytes()
- for i := range len(nodes) {
+ for i := 0; i < len(nodes); i++ { // don't use range, slice mutates in body
if bytes.Equal(nodes[i].PublicKey(), bs) {
copy(nodes[i:], nodes[i+1:])
nodes = nodes[:len(nodes)-1]
From 007827255ee65e9d7fd61c0f6bdd6bb59479bac1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 27 Aug 2024 15:51:55 +0300
Subject: [PATCH 028/655] [#1337] blobovniczatree: Add .rebuild temp files
This reduces the number of DB open/close operations needed to check for incomplete rebuilds.
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 1 +
.../blobstor/blobovniczatree/blobovnicza.go | 2 +-
.../blobstor/blobovniczatree/control.go | 6 +--
.../blobstor/blobovniczatree/iterate.go | 21 ++++++++--
.../blobstor/blobovniczatree/rebuild.go | 38 ++++++++++++++++++-
.../blobovniczatree/rebuild_failover_test.go | 12 ++++++
6 files changed, 71 insertions(+), 9 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 78bcd0c0e..97b189529 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -543,4 +543,5 @@ const (
WritecacheSealCompletedAsync = "writecache seal completed successfully"
FailedToSealWritecacheAsync = "failed to seal writecache async"
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
+ BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
index 952203367..c909113c7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/blobovnicza.go
@@ -135,7 +135,7 @@ func getBlobovniczaMaxIndex(directory string) (bool, uint64, error) {
var hasDBs bool
var maxIdx uint64
for _, e := range entries {
- if e.IsDir() {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
continue
}
hasDBs = true
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/control.go b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
index a31e9d6cb..681cf876c 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/control.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/control.go
@@ -2,6 +2,7 @@ package blobovniczatree
import (
"context"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -41,10 +42,9 @@ func (b *Blobovniczas) initializeDBs(ctx context.Context) error {
eg, egCtx := errgroup.WithContext(ctx)
eg.SetLimit(b.blzInitWorkerCount)
- visited := make(map[string]struct{})
- err = b.iterateExistingDBPaths(egCtx, func(p string) (bool, error) {
- visited[p] = struct{}{}
+ err = b.iterateIncompletedRebuildDBPaths(egCtx, func(p string) (bool, error) {
eg.Go(func() error {
+ p = strings.TrimSuffix(p, rebuildSuffix)
shBlz := b.getBlobovniczaWithoutCaching(p)
blz, err := shBlz.Open()
if err != nil {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
index af3d9e720..f6acb46aa 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/iterate.go
@@ -188,11 +188,11 @@ func (b *Blobovniczas) iterateExistingDBPaths(ctx context.Context, f func(string
b.dbFilesGuard.RLock()
defer b.dbFilesGuard.RUnlock()
- _, err := b.iterateExistingDBPathsDFS(ctx, "", f)
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return !strings.HasSuffix(path, rebuildSuffix) })
return err
}
-func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path string, f func(string) (bool, error)) (bool, error) {
+func (b *Blobovniczas) iterateExistingPathsDFS(ctx context.Context, path string, f func(string) (bool, error), fileFilter func(path string) bool) (bool, error) {
sysPath := filepath.Join(b.rootPath, path)
entries, err := os.ReadDir(sysPath)
if os.IsNotExist(err) && b.readOnly && path == "" { // non initialized tree in read only mode
@@ -208,7 +208,7 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
default:
}
if entry.IsDir() {
- stop, err := b.iterateExistingDBPathsDFS(ctx, filepath.Join(path, entry.Name()), f)
+ stop, err := b.iterateExistingPathsDFS(ctx, filepath.Join(path, entry.Name()), f, fileFilter)
if err != nil {
return false, err
}
@@ -216,6 +216,9 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
return true, nil
}
} else {
+ if !fileFilter(entry.Name()) {
+ continue
+ }
stop, err := f(filepath.Join(path, entry.Name()))
if err != nil {
return false, err
@@ -228,6 +231,15 @@ func (b *Blobovniczas) iterateExistingDBPathsDFS(ctx context.Context, path strin
return false, nil
}
+// iterateIncompletedRebuildDBPaths iterates, in no particular order, over the paths of Blobovniczas that have incomplete rebuild files.
+func (b *Blobovniczas) iterateIncompletedRebuildDBPaths(ctx context.Context, f func(string) (bool, error)) error {
+ b.dbFilesGuard.RLock()
+ defer b.dbFilesGuard.RUnlock()
+
+ _, err := b.iterateExistingPathsDFS(ctx, "", f, func(path string) bool { return strings.HasSuffix(path, rebuildSuffix) })
+ return err
+}
+
func (b *Blobovniczas) iterateSortedDBPaths(ctx context.Context, addr oid.Address, f func(string) (bool, error)) error {
b.dbFilesGuard.RLock()
defer b.dbFilesGuard.RUnlock()
@@ -249,6 +261,9 @@ func (b *Blobovniczas) iterateSordedDBPathsInternal(ctx context.Context, path st
var dirIdxs []uint64
for _, entry := range entries {
+ if strings.HasSuffix(entry.Name(), rebuildSuffix) {
+ continue
+ }
idx := u64FromHexString(entry.Name())
if entry.IsDir() {
dirIdxs = append(dirIdxs, idx)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index cfc17cfae..058fe1fb6 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -6,6 +6,7 @@ import (
"errors"
"os"
"path/filepath"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -19,6 +20,8 @@ import (
"golang.org/x/sync/errgroup"
)
+const rebuildSuffix = ".rebuild"
+
var (
errRebuildInProgress = errors.New("rebuild is in progress, the operation cannot be performed")
errBatchFull = errors.New("batch full")
@@ -124,15 +127,36 @@ func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.M
}
shDB.Close()
}()
-
+ dropTempFile, err := b.addRebuildTempFile(path)
+ if err != nil {
+ return 0, err
+ }
migratedObjects, err := b.moveObjects(ctx, blz, shDB.SystemPath(), meta, limiter)
if err != nil {
return migratedObjects, err
}
shDBClosed, err = b.dropDB(ctx, path, shDB)
+ if err == nil {
+ // drop only on success to continue rebuild on error
+ dropTempFile()
+ }
return migratedObjects, err
}
+func (b *Blobovniczas) addRebuildTempFile(path string) (func(), error) {
+ sysPath := filepath.Join(b.rootPath, path)
+ sysPath = sysPath + rebuildSuffix
+ _, err := os.OpenFile(sysPath, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, b.perm)
+ if err != nil {
+ return nil, err
+ }
+ return func() {
+ if err := os.Remove(sysPath); err != nil {
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ }, nil
+}
+
func (b *Blobovniczas) moveObjects(ctx context.Context, blz *blobovnicza.Blobovnicza, blzPath string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
var result atomic.Uint64
batch := make(map[oid.Address][]byte)
@@ -256,7 +280,10 @@ func (b *Blobovniczas) dropDirectoryIfEmpty(path string) error {
func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore common.MetaStorage) (uint64, error) {
var count uint64
- return count, b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
+ var rebuildTempFilesToRemove []string
+ err := b.iterateIncompletedRebuildDBPaths(ctx, func(s string) (bool, error) {
+ rebuildTmpFilePath := s
+ s = strings.TrimSuffix(s, rebuildSuffix)
shDB := b.getBlobovnicza(s)
blz, err := shDB.Open()
if err != nil {
@@ -276,8 +303,15 @@ func (b *Blobovniczas) completeIncompletedMove(ctx context.Context, metaStore co
count++
}
+ rebuildTempFilesToRemove = append(rebuildTempFilesToRemove, rebuildTmpFilePath)
return false, nil
})
+ for _, tmp := range rebuildTempFilesToRemove {
+ if err := os.Remove(filepath.Join(b.rootPath, tmp)); err != nil {
+ b.log.Warn(logs.BlobovniczatreeFailedToRemoveRebuildTempFile, zap.Error(err))
+ }
+ }
+ return count, err
}
func (b *Blobovniczas) performMove(ctx context.Context, source *blobovnicza.Blobovnicza, sourcePath string,
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index a6afed60c..9fec795ca 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -3,6 +3,7 @@ package blobovniczatree
import (
"bytes"
"context"
+ "os"
"path/filepath"
"sync"
"testing"
@@ -53,6 +54,8 @@ func testRebuildFailoverOnlyMoveInfoSaved(t *testing.T) {
}))
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
testRebuildFailoverValidate(t, dir, obj, true)
}
@@ -82,6 +85,9 @@ func testRebuildFailoverObjectSavedToTarget(t *testing.T) {
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
@@ -113,6 +119,9 @@ func testRebuildFailoverObjectDeletedFromSource(t *testing.T) {
require.NoError(t, blz.Close())
+ _, err = os.OpenFile(filepath.Join(dir, "0", "0", "1.db.rebuild"), os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, defaultPerm)
+ require.NoError(t, err)
+
blz = blobovnicza.New(blobovnicza.WithPath(filepath.Join(dir, "0", "0", "0.db")))
require.NoError(t, blz.Open())
require.NoError(t, blz.Init())
@@ -194,4 +203,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
}
require.NoError(t, blz.Close())
+
+ _, err = os.Stat(filepath.Join(dir, "0", "0", "1.db.rebuild"))
+ require.True(t, os.IsNotExist(err))
}
From d508da8397026fac3fd2a5c77846f1a5ec61a665 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 11:34:18 +0300
Subject: [PATCH 029/655] [#1337] blobovniczatree: Add rebuild by fill percent
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/blobovnicza/sizes.go | 4 +
.../blobstor/blobovniczatree/rebuild.go | 93 ++++++++-
.../blobstor/blobovniczatree/rebuild_test.go | 195 +++++++++++++++++-
.../blobstor/common/rebuild.go | 8 +
pkg/local_object_storage/blobstor/rebuild.go | 3 +-
pkg/local_object_storage/shard/rebuild.go | 173 ++++++++++++++++
.../shard/rebuild_limiter.go | 13 --
pkg/local_object_storage/shard/rebuilder.go | 98 ---------
pkg/local_object_storage/shard/shard.go | 2 +-
9 files changed, 470 insertions(+), 119 deletions(-)
create mode 100644 pkg/local_object_storage/shard/rebuild.go
delete mode 100644 pkg/local_object_storage/shard/rebuild_limiter.go
delete mode 100644 pkg/local_object_storage/shard/rebuilder.go
diff --git a/pkg/local_object_storage/blobovnicza/sizes.go b/pkg/local_object_storage/blobovnicza/sizes.go
index 1dff75aed..9bbed0db5 100644
--- a/pkg/local_object_storage/blobovnicza/sizes.go
+++ b/pkg/local_object_storage/blobovnicza/sizes.go
@@ -57,3 +57,7 @@ func (b *Blobovnicza) itemDeleted(itemSize uint64) {
func (b *Blobovnicza) IsFull() bool {
return b.dataSize.Load() >= b.fullSizeLimit
}
+
+func (b *Blobovnicza) FillPercent() int {
+ return int(100.0 * (float64(b.dataSize.Load()) / float64(b.fullSizeLimit)))
+}
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index 058fe1fb6..b7f20822e 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -59,7 +60,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
- dbsToMigrate, err := b.getDBsToRebuild(ctx)
+ dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.Action)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
@@ -93,7 +94,33 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
return res, nil
}
-func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) {
+func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, action common.RebuildAction) ([]string, error) {
+ schemaChange := make(map[string]struct{})
+ fillPercent := make(map[string]struct{})
+ var err error
+ if action.SchemaChange {
+ schemaChange, err = b.selectDBsDoNotMatchSchema(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if action.FillPercent {
+ fillPercent, err = b.selectDBsDoNotMatchFillPercent(ctx, action.FillPercentValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for k := range fillPercent {
+ schemaChange[k] = struct{}{}
+ }
+ result := make([]string, 0, len(schemaChange))
+ for db := range schemaChange {
+ result = append(result, db)
+ }
+ return result, nil
+}
+
+func (b *Blobovniczas) selectDBsDoNotMatchSchema(ctx context.Context) (map[string]struct{}, error) {
dbsToMigrate := make(map[string]struct{})
if err := b.iterateExistingDBPaths(ctx, func(s string) (bool, error) {
dbsToMigrate[s] = struct{}{}
@@ -107,13 +134,69 @@ func (b *Blobovniczas) getDBsToRebuild(ctx context.Context) ([]string, error) {
}); err != nil {
return nil, err
}
- result := make([]string, 0, len(dbsToMigrate))
- for db := range dbsToMigrate {
- result = append(result, db)
+ return dbsToMigrate, nil
+}
+
+func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, target int) (map[string]struct{}, error) {
+ if target <= 0 || target > 100 {
+ return nil, fmt.Errorf("invalid fill percent value %d: must be (0; 100]", target)
+ }
+ result := make(map[string]struct{})
+ if err := b.iterateDeepest(ctx, oid.Address{}, func(lvlPath string) (bool, error) {
+ dir := filepath.Join(b.rootPath, lvlPath)
+ entries, err := os.ReadDir(dir)
+ if os.IsNotExist(err) { // tree not initialized yet
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ hasDBs := false
+ // the db with maxIdx could be the active one, so it should not be rebuilt
+ var maxIdx uint64
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ hasDBs = true
+ maxIdx = max(u64FromHexString(e.Name()), maxIdx)
+ }
+ if !hasDBs {
+ return false, nil
+ }
+ for _, e := range entries {
+ if e.IsDir() || strings.HasSuffix(e.Name(), rebuildSuffix) {
+ continue
+ }
+ if u64FromHexString(e.Name()) == maxIdx {
+ continue
+ }
+ path := filepath.Join(lvlPath, e.Name())
+ resettlementRequired, err := b.fillPercentIsLow(path, target)
+ if err != nil {
+ return false, err
+ }
+ if resettlementRequired {
+ result[path] = struct{}{}
+ }
+ }
+ return false, nil
+ }); err != nil {
+ return nil, err
}
return result, nil
}
+func (b *Blobovniczas) fillPercentIsLow(path string, target int) (bool, error) {
+ shDB := b.getBlobovnicza(path)
+ blz, err := shDB.Open()
+ if err != nil {
+ return false, err
+ }
+ defer shDB.Close()
+ return blz.FillPercent() < target, nil
+}
+
func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 4a51fd86a..62ae9ea90 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -15,7 +15,7 @@ import (
"golang.org/x/sync/errgroup"
)
-func TestBlobovniczaTreeRebuild(t *testing.T) {
+func TestBlobovniczaTreeSchemaRebuild(t *testing.T) {
t.Parallel()
t.Run("width increased", func(t *testing.T) {
@@ -39,6 +39,197 @@ func TestBlobovniczaTreeRebuild(t *testing.T) {
})
}
+func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
+ t.Parallel()
+
+ t.Run("no rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ {
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 60,
+ },
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+
+ t.Run("no rebuild single db", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB soft limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ obj := blobstortest.NewObject(64 * 1024) // 64KB object
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 90, // 64KB / 100KB = 64%
+ },
+ })
+ require.NoError(t, err)
+ dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
+ require.False(t, dataMigrated)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+
+ t.Run("rebuild by fill percent", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ toDelete := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ if i%2 == 1 {
+ toDelete[prm.Address] = res.StorageID
+ }
+ }
+ for addr, storageID := range toDelete {
+ var prm common.DeletePrm
+ prm.Address = addr
+ prm.StorageID = storageID
+ _, err := b.Delete(context.Background(), prm)
+ require.NoError(t, err)
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 80,
+ },
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(49), rRes.ObjectsMoved) // 49 DBs with 1 object each
+ require.Equal(t, uint64(49), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ if _, found := toDelete[addr]; found {
+ continue
+ }
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
+}
+
func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
t.Parallel()
@@ -92,6 +283,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Action = common.RebuildAction{SchemaChange: true}
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -180,6 +372,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
+ rPrm.Action = common.RebuildAction{SchemaChange: true}
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 9f629ef8c..020d9d022 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -11,9 +11,17 @@ type RebuildRes struct {
FilesRemoved uint64
}
+type RebuildAction struct {
+ SchemaChange bool
+
+ FillPercent bool
+ FillPercentValue int
+}
+
type RebuildPrm struct {
MetaStorage MetaStorage
WorkerLimiter ConcurrentWorkersLimiter
+ Action RebuildAction
}
type MetaStorage interface {
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 101c60752..31bc2d167 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -18,13 +18,14 @@ type ConcurrentWorkersLimiter interface {
ReleaseWorkSlot()
}
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, action common.RebuildAction) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
MetaStorage: upd,
WorkerLimiter: limiter,
+ Action: action,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
new file mode 100644
index 000000000..998fcf08b
--- /dev/null
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -0,0 +1,173 @@
+package shard
+
+import (
+ "context"
+ "errors"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.uber.org/zap"
+)
+
+type RebuildWorkerLimiter interface {
+ AcquireWorkSlot(ctx context.Context) error
+ ReleaseWorkSlot()
+}
+
+type rebuildLimiter struct {
+ semaphore chan struct{}
+}
+
+func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
+ return &rebuildLimiter{
+ semaphore: make(chan struct{}, workersCount),
+ }
+}
+
+func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
+ select {
+ case l.semaphore <- struct{}{}:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (l *rebuildLimiter) ReleaseWorkSlot() {
+ <-l.semaphore
+}
+
+type rebuildTask struct {
+ limiter RebuildWorkerLimiter
+ action common.RebuildAction
+}
+
+type rebuilder struct {
+ mtx *sync.Mutex
+ wg *sync.WaitGroup
+ cancel func()
+ limiter RebuildWorkerLimiter
+ done chan struct{}
+ tasks chan rebuildTask
+}
+
+func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
+ return &rebuilder{
+ mtx: &sync.Mutex{},
+ wg: &sync.WaitGroup{},
+ limiter: l,
+ tasks: make(chan rebuildTask, 10),
+ }
+}
+
+func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.done != nil {
+ return // already started
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ r.cancel = cancel
+ r.done = make(chan struct{})
+ r.wg.Add(1)
+ go func() {
+ defer r.wg.Done()
+ for {
+ select {
+ case <-r.done:
+ return
+ case t, ok := <-r.tasks:
+ if !ok {
+ continue
+ }
+ runRebuild(ctx, bs, mb, log, t.action, t.limiter)
+ }
+ }
+ }()
+ select {
+ case <-ctx.Done():
+ return
+ case r.tasks <- rebuildTask{
+ limiter: r.limiter,
+ action: common.RebuildAction{
+ SchemaChange: true,
+ },
+ }:
+ return
+ }
+}
+
+func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
+ action common.RebuildAction, limiter RebuildWorkerLimiter,
+) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ log.Info(logs.BlobstoreRebuildStarted)
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, action); err != nil {
+ log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
+ } else {
+ log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
+ }
+}
+
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, action common.RebuildAction,
+) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case r.tasks <- rebuildTask{
+ limiter: limiter,
+ action: action,
+ }:
+ return nil
+ }
+}
+
+func (r *rebuilder) Stop(log *logger.Logger) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.done != nil {
+ close(r.done)
+ }
+ if r.cancel != nil {
+ r.cancel()
+ }
+ r.wg.Wait()
+ r.cancel = nil
+ r.done = nil
+ log.Info(logs.BlobstoreRebuildStopped)
+}
+
+var errMBIsNotAvailable = errors.New("metabase is not available")
+
+type mbStorageIDUpdate struct {
+ mb *meta.DB
+}
+
+func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if u.mb == nil {
+ return errMBIsNotAvailable
+ }
+
+ var prm meta.UpdateStorageIDPrm
+ prm.SetAddress(addr)
+ prm.SetStorageID(storageID)
+ _, err := u.mb.UpdateStorageID(ctx, prm)
+ return err
+}
diff --git a/pkg/local_object_storage/shard/rebuild_limiter.go b/pkg/local_object_storage/shard/rebuild_limiter.go
deleted file mode 100644
index efc21837c..000000000
--- a/pkg/local_object_storage/shard/rebuild_limiter.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package shard
-
-import "context"
-
-type RebuildWorkerLimiter interface {
- AcquireWorkSlot(ctx context.Context) error
- ReleaseWorkSlot()
-}
-
-type noopRebuildLimiter struct{}
-
-func (l *noopRebuildLimiter) AcquireWorkSlot(context.Context) error { return nil }
-func (l *noopRebuildLimiter) ReleaseWorkSlot() {}
diff --git a/pkg/local_object_storage/shard/rebuilder.go b/pkg/local_object_storage/shard/rebuilder.go
deleted file mode 100644
index f18573c57..000000000
--- a/pkg/local_object_storage/shard/rebuilder.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package shard
-
-import (
- "context"
- "errors"
- "sync"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.uber.org/zap"
-)
-
-type rebuilder struct {
- mtx *sync.Mutex
- wg *sync.WaitGroup
- cancel func()
- limiter RebuildWorkerLimiter
-}
-
-func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
- return &rebuilder{
- mtx: &sync.Mutex{},
- wg: &sync.WaitGroup{},
- cancel: nil,
- limiter: l,
- }
-}
-
-func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.start(ctx, bs, mb, log)
-}
-
-func (r *rebuilder) start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger) {
- if r.cancel != nil {
- r.stop(log)
- }
- ctx, cancel := context.WithCancel(ctx)
- r.cancel = cancel
- r.wg.Add(1)
- go func() {
- defer r.wg.Done()
-
- log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, r.limiter); err != nil {
- log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
- } else {
- log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
- }
- }()
-}
-
-func (r *rebuilder) Stop(log *logger.Logger) {
- r.mtx.Lock()
- defer r.mtx.Unlock()
-
- r.stop(log)
-}
-
-func (r *rebuilder) stop(log *logger.Logger) {
- if r.cancel == nil {
- return
- }
-
- r.cancel()
- r.wg.Wait()
- r.cancel = nil
- log.Info(logs.BlobstoreRebuildStopped)
-}
-
-var errMBIsNotAvailable = errors.New("metabase is not available")
-
-type mbStorageIDUpdate struct {
- mb *meta.DB
-}
-
-func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Address, storageID []byte) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- if u.mb == nil {
- return errMBIsNotAvailable
- }
-
- var prm meta.UpdateStorageIDPrm
- prm.SetAddress(addr)
- prm.SetStorageID(storageID)
- _, err := u.mb.UpdateStorageID(ctx, prm)
- return err
-}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index ac389b506..1eaee8815 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -151,7 +151,7 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildLimiter: &noopRebuildLimiter{},
+ rebuildLimiter: newRebuildLimiter(1),
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
From 6b6eabe41cd5750257adc3041f6a1c28df8197c7 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 12:11:19 +0300
Subject: [PATCH 030/655] [#1337] cli: Add `control shards rebuild` command
Signed-off-by: Dmitrii Stepanov
---
.../modules/control/rebuild_shards.go | 88 ++
cmd/frostfs-cli/modules/control/shards.go | 1 +
pkg/local_object_storage/engine/rebuild.go | 90 ++
pkg/local_object_storage/shard/rebuild.go | 33 +
pkg/services/control/rpc.go | 14 +
pkg/services/control/server/rebuild.go | 59 ++
pkg/services/control/service.proto | 29 +
pkg/services/control/service_frostfs.pb.go | 918 ++++++++++++++++++
pkg/services/control/service_grpc.pb.go | 39 +
9 files changed, 1271 insertions(+)
create mode 100644 cmd/frostfs-cli/modules/control/rebuild_shards.go
create mode 100644 pkg/local_object_storage/engine/rebuild.go
create mode 100644 pkg/services/control/server/rebuild.go
diff --git a/cmd/frostfs-cli/modules/control/rebuild_shards.go b/cmd/frostfs-cli/modules/control/rebuild_shards.go
new file mode 100644
index 000000000..e2b408712
--- /dev/null
+++ b/cmd/frostfs-cli/modules/control/rebuild_shards.go
@@ -0,0 +1,88 @@
+package control
+
+import (
+ "fmt"
+
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
+ commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "github.com/mr-tron/base58"
+ "github.com/spf13/cobra"
+)
+
+const (
+ fillPercentFlag = "fill_percent"
+)
+
+var shardsRebuildCmd = &cobra.Command{
+ Use: "rebuild",
+ Short: "Rebuild shards",
+ Long: "Rebuild reclaims storage occupied by dead objects and adjusts the storage structure according to the configuration (for blobovnicza only now)",
+ Run: shardsRebuild,
+}
+
+func shardsRebuild(cmd *cobra.Command, _ []string) {
+ pk := key.Get(cmd)
+
+ req := &control.StartShardRebuildRequest{
+ Body: &control.StartShardRebuildRequest_Body{
+ Shard_ID: getShardIDList(cmd),
+ TargetFillPercent: getFillPercentValue(cmd),
+ ConcurrencyLimit: getConcurrencyValue(cmd),
+ },
+ }
+
+ signRequest(cmd, pk, req)
+
+ cli := getClient(cmd, pk)
+
+ var resp *control.StartShardRebuildResponse
+ var err error
+ err = cli.ExecRaw(func(client *rawclient.Client) error {
+ resp, err = control.StartShardRebuild(client, req)
+ return err
+ })
+ commonCmd.ExitOnErr(cmd, "rpc error: %w", err)
+
+ verifyResponse(cmd, resp.GetSignature(), resp.GetBody())
+
+ var success, failed uint
+ for _, res := range resp.GetBody().GetResults() {
+ if res.GetSuccess() {
+ success++
+ cmd.Printf("Shard %s: OK\n", base58.Encode(res.GetShard_ID()))
+ } else {
+ failed++
+ cmd.Printf("Shard %s: failed with error %q\n", base58.Encode(res.GetShard_ID()), res.GetError())
+ }
+ }
+ cmd.Printf("Total: %d success, %d failed\n", success, failed)
+}
+
+func getFillPercentValue(cmd *cobra.Command) uint32 {
+ v, _ := cmd.Flags().GetUint32(fillPercentFlag)
+ if v <= 0 || v > 100 {
+ commonCmd.ExitOnErr(cmd, "invalid fill_percent value", fmt.Errorf("fill_percent value must be (0, 100], current value: %d", v))
+ }
+ return v
+}
+
+func getConcurrencyValue(cmd *cobra.Command) uint32 {
+ v, _ := cmd.Flags().GetUint32(concurrencyFlag)
+ if v <= 0 || v > 10000 {
+ commonCmd.ExitOnErr(cmd, "invalid concurrency value", fmt.Errorf("concurrency value must be (0, 10 000], current value: %d", v))
+ }
+ return v
+}
+
+func initControlShardRebuildCmd() {
+ initControlFlags(shardsRebuildCmd)
+
+ flags := shardsRebuildCmd.Flags()
+ flags.StringSlice(shardIDFlag, nil, "List of shard IDs in base58 encoding")
+ flags.Bool(shardAllFlag, false, "Process all shards")
+ flags.Uint32(fillPercentFlag, 80, "Target fill percent to reclaim space")
+ flags.Uint32(concurrencyFlag, 20, "Maximum count of concurrently rebuilding files")
+ setShardModeCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
+}
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index d8198c426..d6c2a0b9b 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -28,4 +28,5 @@ func initControlShardsCmd() {
initControlDoctorCmd()
initControlShardsWritecacheCmd()
initControlShardsDetachCmd()
+ initControlShardRebuildCmd()
}
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
new file mode 100644
index 000000000..3970aae89
--- /dev/null
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -0,0 +1,90 @@
+package engine
+
+import (
+ "context"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+ "golang.org/x/sync/errgroup"
+)
+
+type RebuildPrm struct {
+ ShardIDs []*shard.ID
+ ConcurrencyLimit uint32
+ TargetFillPercent uint32
+}
+
+type ShardRebuildResult struct {
+ ShardID *shard.ID
+ Success bool
+ ErrorMsg string
+}
+
+type RebuildRes struct {
+ ShardResults []ShardRebuildResult
+}
+
+func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Rebuild",
+ trace.WithAttributes(
+ attribute.Int("shard_id_count", len(prm.ShardIDs)),
+ attribute.Int64("target_fill_percent", int64(prm.TargetFillPercent)),
+ attribute.Int64("concurrency_limit", int64(prm.ConcurrencyLimit)),
+ ))
+ defer span.End()
+
+ res := RebuildRes{
+ ShardResults: make([]ShardRebuildResult, 0, len(prm.ShardIDs)),
+ }
+ resGuard := &sync.Mutex{}
+
+ limiter := newRebuildLimiter(prm.ConcurrencyLimit)
+
+ eg, egCtx := errgroup.WithContext(ctx)
+ for _, shardID := range prm.ShardIDs {
+ eg.Go(func() error {
+ e.mtx.RLock()
+ sh, ok := e.shards[shardID.String()]
+ e.mtx.RUnlock()
+
+ if !ok {
+ resGuard.Lock()
+ defer resGuard.Unlock()
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: errShardNotFound.Error(),
+ })
+ return nil
+ }
+
+ err := sh.ScheduleRebuild(egCtx, shard.RebuildPrm{
+ ConcurrencyLimiter: limiter,
+ TargetFillPercent: prm.TargetFillPercent,
+ })
+
+ resGuard.Lock()
+ defer resGuard.Unlock()
+
+ if err != nil {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ ErrorMsg: err.Error(),
+ })
+ } else {
+ res.ShardResults = append(res.ShardResults, ShardRebuildResult{
+ ShardID: shardID,
+ Success: true,
+ })
+ }
+ return nil
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return RebuildRes{}, err
+ }
+ return res, nil
+}
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 998fcf08b..f8051999e 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -10,7 +10,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
)
@@ -171,3 +174,33 @@ func (u *mbStorageIDUpdate) UpdateStorageID(ctx context.Context, addr oid.Addres
_, err := u.mb.UpdateStorageID(ctx, prm)
return err
}
+
+type RebuildPrm struct {
+ ConcurrencyLimiter RebuildWorkerLimiter
+ TargetFillPercent uint32
+}
+
+func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "Shard.ScheduleRebuild",
+ trace.WithAttributes(
+ attribute.String("shard_id", s.ID().String()),
+ attribute.Int64("target_fill_percent", int64(p.TargetFillPercent)),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.ReadOnly() {
+ return ErrReadOnlyMode
+ }
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, common.RebuildAction{
+ SchemaChange: true,
+ FillPercent: true,
+ FillPercentValue: int(p.TargetFillPercent),
+ })
+}
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index a90e58a65..80aece008 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -30,6 +30,7 @@ const (
rpcSealWriteCache = "SealWriteCache"
rpcListTargetsLocalOverrides = "ListTargetsLocalOverrides"
rpcDetachShards = "DetachShards"
+ rpcStartShardRebuild = "StartShardRebuild"
)
// HealthCheck executes ControlService.HealthCheck RPC.
@@ -361,3 +362,16 @@ func DetachShards(
return wResp.message, nil
}
+
+// StartShardRebuild executes ControlService.StartShardRebuild RPC.
+func StartShardRebuild(cli *client.Client, req *StartShardRebuildRequest, opts ...client.CallOption) (*StartShardRebuildResponse, error) {
+ wResp := newResponseWrapper[StartShardRebuildResponse]()
+ wReq := &requestWrapper{m: req}
+
+ err := client.SendUnary(cli, common.CallMethodInfoUnary(serviceName, rpcStartShardRebuild), wReq, wResp, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return wResp.message, nil
+}
diff --git a/pkg/services/control/server/rebuild.go b/pkg/services/control/server/rebuild.go
new file mode 100644
index 000000000..6ddfb8bf4
--- /dev/null
+++ b/pkg/services/control/server/rebuild.go
@@ -0,0 +1,59 @@
+package control
+
+import (
+ "context"
+ "fmt"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/server/ctrlmessage"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+func (s *Server) StartShardRebuild(ctx context.Context, req *control.StartShardRebuildRequest) (*control.StartShardRebuildResponse, error) {
+ err := s.isValidRequest(req)
+ if err != nil {
+ return nil, status.Error(codes.PermissionDenied, err.Error())
+ }
+
+ if req.GetBody().GetConcurrencyLimit() == 0 || req.GetBody().GetConcurrencyLimit() > 10000 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("concurrency limit must be in range (0; 10 000], current value %d", req.GetBody().GetConcurrencyLimit()))
+ }
+
+ if req.GetBody().GetTargetFillPercent() == 0 || req.GetBody().GetTargetFillPercent() > 100 {
+ return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("fill percent must be in range (0; 100], current value %d", req.GetBody().GetTargetFillPercent()))
+ }
+
+ prm := engine.RebuildPrm{
+ ShardIDs: s.getShardIDList(req.GetBody().GetShard_ID()),
+ ConcurrencyLimit: req.GetBody().GetConcurrencyLimit(),
+ TargetFillPercent: req.GetBody().GetTargetFillPercent(),
+ }
+
+ res, err := s.s.Rebuild(ctx, prm)
+ if err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+
+ resp := &control.StartShardRebuildResponse{Body: &control.StartShardRebuildResponse_Body{}}
+ for _, r := range res.ShardResults {
+ if r.Success {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Success: true,
+ })
+ } else {
+ resp.Body.Results = append(resp.GetBody().GetResults(), control.StartShardRebuildResponse_Body_Status{
+ Shard_ID: *r.ShardID,
+ Error: r.ErrorMsg,
+ })
+ }
+ }
+
+ err = ctrlmessage.Sign(s.key, resp)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index d6639cb48..04994328a 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -91,6 +91,9 @@ service ControlService {
// DetachShards detaches and closes shards.
rpc DetachShards(DetachShardsRequest) returns (DetachShardsResponse);
+
+ // StartShardRebuild starts shard rebuild process.
+ rpc StartShardRebuild(StartShardRebuildRequest) returns (StartShardRebuildResponse);
}
// Health check request.
@@ -699,3 +702,29 @@ message DetachShardsResponse {
Signature signature = 2;
}
+
+message StartShardRebuildRequest {
+ message Body {
+ repeated bytes shard_ID = 1;
+ uint32 target_fill_percent = 2;
+ uint32 concurrency_limit = 3;
+ }
+
+ Body body = 1;
+ Signature signature = 2;
+}
+
+message StartShardRebuildResponse {
+ message Body {
+ message Status {
+ bytes shard_ID = 1;
+ bool success = 2;
+ string error = 3;
+ }
+ repeated Status results = 1;
+ }
+
+ Body body = 1;
+
+ Signature signature = 2;
+}
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index eb0d95c64..019cac290 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -15023,3 +15023,921 @@ func (x *DetachShardsResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
in.Consumed()
}
}
+
+type StartShardRebuildRequest_Body struct {
+ Shard_ID [][]byte `json:"shardID"`
+ TargetFillPercent uint32 `json:"targetFillPercent"`
+ ConcurrencyLimit uint32 `json:"concurrencyLimit"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.RepeatedBytesSize(1, x.Shard_ID)
+ size += proto.UInt32Size(2, x.TargetFillPercent)
+ size += proto.UInt32Size(3, x.ConcurrencyLimit)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for j := range x.Shard_ID {
+ mm.AppendBytes(1, x.Shard_ID[j])
+ }
+ if x.TargetFillPercent != 0 {
+ mm.AppendUint32(2, x.TargetFillPercent)
+ }
+ if x.ConcurrencyLimit != 0 {
+ mm.AppendUint32(3, x.ConcurrencyLimit)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = append(x.Shard_ID, data)
+ case 2: // TargetFillPercent
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "TargetFillPercent")
+ }
+ x.TargetFillPercent = data
+ case 3: // ConcurrencyLimit
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ConcurrencyLimit")
+ }
+ x.ConcurrencyLimit = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) GetShard_ID() [][]byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest_Body) SetShard_ID(v [][]byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildRequest_Body) GetTargetFillPercent() uint32 {
+ if x != nil {
+ return x.TargetFillPercent
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetTargetFillPercent(v uint32) {
+ x.TargetFillPercent = v
+}
+func (x *StartShardRebuildRequest_Body) GetConcurrencyLimit() uint32 {
+ if x != nil {
+ return x.ConcurrencyLimit
+ }
+ return 0
+}
+func (x *StartShardRebuildRequest_Body) SetConcurrencyLimit(v uint32) {
+ x.ConcurrencyLimit = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Shard_ID {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ out.Base64Bytes(x.Shard_ID[i])
+ }
+ out.RawByte(']')
+ }
+ {
+ const prefix string = ",\"targetFillPercent\":"
+ out.RawString(prefix)
+ out.Uint32(x.TargetFillPercent)
+ }
+ {
+ const prefix string = ",\"concurrencyLimit\":"
+ out.RawString(prefix)
+ out.Uint32(x.ConcurrencyLimit)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ var list [][]byte
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = in.Bytes()
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Shard_ID = list
+ in.Delim(']')
+ }
+ case "targetFillPercent":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.TargetFillPercent = f
+ }
+ case "concurrencyLimit":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ConcurrencyLimit = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildRequest struct {
+ Body *StartShardRebuildRequest_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildRequest)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Marshaler = (*StartShardRebuildRequest)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildRequest)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildRequest) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the request signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildRequest) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildRequest) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildRequest) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildRequest) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildRequest")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildRequest_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) GetBody() *StartShardRebuildRequest_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetBody(v *StartShardRebuildRequest_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildRequest) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildRequest) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildRequest) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildRequest) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildRequest) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildRequest) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildRequest_Body
+ f = new(StartShardRebuildRequest_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse_Body_Status struct {
+ Shard_ID []byte `json:"shardID"`
+ Success bool `json:"success"`
+ Error string `json:"error"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body_Status)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body_Status) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.BytesSize(1, x.Shard_ID)
+ size += proto.BoolSize(2, x.Success)
+ size += proto.StringSize(3, x.Error)
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body_Status) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if len(x.Shard_ID) != 0 {
+ mm.AppendBytes(1, x.Shard_ID)
+ }
+ if x.Success {
+ mm.AppendBool(2, x.Success)
+ }
+ if len(x.Error) != 0 {
+ mm.AppendString(3, x.Error)
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body_Status")
+ }
+ switch fc.FieldNum {
+ case 1: // Shard_ID
+ data, ok := fc.Bytes()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Shard_ID")
+ }
+ x.Shard_ID = data
+ case 2: // Success
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Success")
+ }
+ x.Success = data
+ case 3: // Error
+ data, ok := fc.String()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Error")
+ }
+ x.Error = data
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) GetShard_ID() []byte {
+ if x != nil {
+ return x.Shard_ID
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body_Status) SetShard_ID(v []byte) {
+ x.Shard_ID = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetSuccess() bool {
+ if x != nil {
+ return x.Success
+ }
+ return false
+}
+func (x *StartShardRebuildResponse_Body_Status) SetSuccess(v bool) {
+ x.Success = v
+}
+func (x *StartShardRebuildResponse_Body_Status) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+func (x *StartShardRebuildResponse_Body_Status) SetError(v string) {
+ x.Error = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body_Status) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"shardID\":"
+ out.RawString(prefix[1:])
+ out.Base64Bytes(x.Shard_ID)
+ }
+ {
+ const prefix string = ",\"success\":"
+ out.RawString(prefix)
+ out.Bool(x.Success)
+ }
+ {
+ const prefix string = ",\"error\":"
+ out.RawString(prefix)
+ out.String(x.Error)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body_Status) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "shardID":
+ {
+ var f []byte
+ f = in.Bytes()
+ x.Shard_ID = f
+ }
+ case "success":
+ {
+ var f bool
+ f = in.Bool()
+ x.Success = f
+ }
+ case "error":
+ {
+ var f string
+ f = in.String()
+ x.Error = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse_Body struct {
+ Results []StartShardRebuildResponse_Body_Status `json:"results"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse_Body)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse_Body)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse_Body) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ for i := range x.Results {
+ size += proto.NestedStructureSizeUnchecked(1, &x.Results[i])
+ }
+ return size
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse_Body) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ for i := range x.Results {
+ x.Results[i].EmitProtobuf(mm.AppendMessage(1))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse_Body")
+ }
+ switch fc.FieldNum {
+ case 1: // Results
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Results")
+ }
+ x.Results = append(x.Results, StartShardRebuildResponse_Body_Status{})
+ ff := &x.Results[len(x.Results)-1]
+ if err := ff.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) GetResults() []StartShardRebuildResponse_Body_Status {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse_Body) SetResults(v []StartShardRebuildResponse_Body_Status) {
+ x.Results = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse_Body) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse_Body) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"results\":"
+ out.RawString(prefix[1:])
+ out.RawByte('[')
+ for i := range x.Results {
+ if i != 0 {
+ out.RawByte(',')
+ }
+ x.Results[i].MarshalEasyJSON(out)
+ }
+ out.RawByte(']')
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse_Body) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "results":
+ {
+ var f StartShardRebuildResponse_Body_Status
+ var list []StartShardRebuildResponse_Body_Status
+ in.Delim('[')
+ for !in.IsDelim(']') {
+ f = StartShardRebuildResponse_Body_Status{}
+ f.UnmarshalEasyJSON(in)
+ list = append(list, f)
+ in.WantComma()
+ }
+ x.Results = list
+ in.Delim(']')
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
+
+type StartShardRebuildResponse struct {
+ Body *StartShardRebuildResponse_Body `json:"body"`
+ Signature *Signature `json:"signature"`
+}
+
+var (
+ _ encoding.ProtoMarshaler = (*StartShardRebuildResponse)(nil)
+ _ encoding.ProtoUnmarshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Marshaler = (*StartShardRebuildResponse)(nil)
+ _ json.Unmarshaler = (*StartShardRebuildResponse)(nil)
+)
+
+// StableSize returns the size of x in protobuf format.
+//
+// Structures with the same field values have the same binary size.
+func (x *StartShardRebuildResponse) StableSize() (size int) {
+ if x == nil {
+ return 0
+ }
+ size += proto.NestedStructureSize(1, x.Body)
+ size += proto.NestedStructureSize(2, x.Signature)
+ return size
+}
+
+// SignedDataSize returns size of the response signed data in bytes.
+//
+// Structures with the same field values have the same signed data size.
+func (x *StartShardRebuildResponse) SignedDataSize() int {
+	return x.GetBody().StableSize()
+}
+
+// ReadSignedData fills buf with signed data of x.
+// If buffer length is less than x.SignedDataSize(), new buffer is allocated.
+//
+// Returns any error encountered which did not allow writing the data completely.
+// Otherwise, returns the buffer in which the data is written.
+//
+// Structures with the same field values have the same signed data.
+func (x *StartShardRebuildResponse) ReadSignedData(buf []byte) ([]byte, error) {
+	return x.GetBody().MarshalProtobuf(buf), nil
+}
+
+// MarshalProtobuf implements the encoding.ProtoMarshaler interface.
+func (x *StartShardRebuildResponse) MarshalProtobuf(dst []byte) []byte {
+ m := pool.MarshalerPool.Get()
+ defer pool.MarshalerPool.Put(m)
+ x.EmitProtobuf(m.MessageMarshaler())
+ dst = m.Marshal(dst)
+ return dst
+}
+
+func (x *StartShardRebuildResponse) EmitProtobuf(mm *easyproto.MessageMarshaler) {
+ if x == nil {
+ return
+ }
+ if x.Body != nil {
+ x.Body.EmitProtobuf(mm.AppendMessage(1))
+ }
+ if x.Signature != nil {
+ x.Signature.EmitProtobuf(mm.AppendMessage(2))
+ }
+}
+
+// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalProtobuf(src []byte) (err error) {
+ var fc easyproto.FieldContext
+ for len(src) > 0 {
+ src, err = fc.NextField(src)
+ if err != nil {
+ return fmt.Errorf("cannot read next field in %s", "StartShardRebuildResponse")
+ }
+ switch fc.FieldNum {
+ case 1: // Body
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Body")
+ }
+ x.Body = new(StartShardRebuildResponse_Body)
+ if err := x.Body.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ case 2: // Signature
+ data, ok := fc.MessageData()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "Signature")
+ }
+ x.Signature = new(Signature)
+ if err := x.Signature.UnmarshalProtobuf(data); err != nil {
+ return fmt.Errorf("unmarshal: %w", err)
+ }
+ }
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) GetBody() *StartShardRebuildResponse_Body {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetBody(v *StartShardRebuildResponse_Body) {
+ x.Body = v
+}
+func (x *StartShardRebuildResponse) GetSignature() *Signature {
+ if x != nil {
+ return x.Signature
+ }
+ return nil
+}
+func (x *StartShardRebuildResponse) SetSignature(v *Signature) {
+ x.Signature = v
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (x *StartShardRebuildResponse) MarshalJSON() ([]byte, error) {
+ w := jwriter.Writer{}
+ x.MarshalEasyJSON(&w)
+ return w.Buffer.BuildBytes(), w.Error
+}
+func (x *StartShardRebuildResponse) MarshalEasyJSON(out *jwriter.Writer) {
+ if x == nil {
+ out.RawString("null")
+ return
+ }
+ out.RawByte('{')
+ {
+ const prefix string = ",\"body\":"
+ out.RawString(prefix[1:])
+ x.Body.MarshalEasyJSON(out)
+ }
+ {
+ const prefix string = ",\"signature\":"
+ out.RawString(prefix)
+ x.Signature.MarshalEasyJSON(out)
+ }
+ out.RawByte('}')
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (x *StartShardRebuildResponse) UnmarshalJSON(data []byte) error {
+ r := jlexer.Lexer{Data: data}
+ x.UnmarshalEasyJSON(&r)
+ return r.Error()
+}
+func (x *StartShardRebuildResponse) UnmarshalEasyJSON(in *jlexer.Lexer) {
+ isTopLevel := in.IsStart()
+ if in.IsNull() {
+ if isTopLevel {
+ in.Consumed()
+ }
+ in.Skip()
+ return
+ }
+ in.Delim('{')
+ for !in.IsDelim('}') {
+ key := in.UnsafeFieldName(false)
+ in.WantColon()
+ if in.IsNull() {
+ in.Skip()
+ in.WantComma()
+ continue
+ }
+ switch key {
+ case "body":
+ {
+ var f *StartShardRebuildResponse_Body
+ f = new(StartShardRebuildResponse_Body)
+ f.UnmarshalEasyJSON(in)
+ x.Body = f
+ }
+ case "signature":
+ {
+ var f *Signature
+ f = new(Signature)
+ f.UnmarshalEasyJSON(in)
+ x.Signature = f
+ }
+ }
+ in.WantComma()
+ }
+ in.Delim('}')
+ if isTopLevel {
+ in.Consumed()
+ }
+}
diff --git a/pkg/services/control/service_grpc.pb.go b/pkg/services/control/service_grpc.pb.go
index fa9de974a..f5cfefa85 100644
--- a/pkg/services/control/service_grpc.pb.go
+++ b/pkg/services/control/service_grpc.pb.go
@@ -41,6 +41,7 @@ const (
ControlService_ListTargetsLocalOverrides_FullMethodName = "/control.ControlService/ListTargetsLocalOverrides"
ControlService_SealWriteCache_FullMethodName = "/control.ControlService/SealWriteCache"
ControlService_DetachShards_FullMethodName = "/control.ControlService/DetachShards"
+ ControlService_StartShardRebuild_FullMethodName = "/control.ControlService/StartShardRebuild"
)
// ControlServiceClient is the client API for ControlService service.
@@ -97,6 +98,8 @@ type ControlServiceClient interface {
SealWriteCache(ctx context.Context, in *SealWriteCacheRequest, opts ...grpc.CallOption) (*SealWriteCacheResponse, error)
// DetachShards detaches and closes shards.
DetachShards(ctx context.Context, in *DetachShardsRequest, opts ...grpc.CallOption) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error)
}
type controlServiceClient struct {
@@ -305,6 +308,15 @@ func (c *controlServiceClient) DetachShards(ctx context.Context, in *DetachShard
return out, nil
}
+func (c *controlServiceClient) StartShardRebuild(ctx context.Context, in *StartShardRebuildRequest, opts ...grpc.CallOption) (*StartShardRebuildResponse, error) {
+ out := new(StartShardRebuildResponse)
+ err := c.cc.Invoke(ctx, ControlService_StartShardRebuild_FullMethodName, in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// ControlServiceServer is the server API for ControlService service.
// All implementations should embed UnimplementedControlServiceServer
// for forward compatibility
@@ -359,6 +371,8 @@ type ControlServiceServer interface {
SealWriteCache(context.Context, *SealWriteCacheRequest) (*SealWriteCacheResponse, error)
// DetachShards detaches and closes shards.
DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error)
+ // StartShardRebuild starts shard rebuild process.
+ StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error)
}
// UnimplementedControlServiceServer should be embedded to have forward compatible implementations.
@@ -431,6 +445,9 @@ func (UnimplementedControlServiceServer) SealWriteCache(context.Context, *SealWr
func (UnimplementedControlServiceServer) DetachShards(context.Context, *DetachShardsRequest) (*DetachShardsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DetachShards not implemented")
}
+func (UnimplementedControlServiceServer) StartShardRebuild(context.Context, *StartShardRebuildRequest) (*StartShardRebuildResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method StartShardRebuild not implemented")
+}
// UnsafeControlServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ControlServiceServer will
@@ -839,6 +856,24 @@ func _ControlService_DetachShards_Handler(srv interface{}, ctx context.Context,
return interceptor(ctx, in, info, handler)
}
+func _ControlService_StartShardRebuild_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StartShardRebuildRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ControlService_StartShardRebuild_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServiceServer).StartShardRebuild(ctx, req.(*StartShardRebuildRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// ControlService_ServiceDesc is the grpc.ServiceDesc for ControlService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -934,6 +969,10 @@ var ControlService_ServiceDesc = grpc.ServiceDesc{
MethodName: "DetachShards",
Handler: _ControlService_DetachShards_Handler,
},
+ {
+ MethodName: "StartShardRebuild",
+ Handler: _ControlService_StartShardRebuild_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "pkg/services/control/service.proto",
From a61201a98753c4522a1169abe5b42f2631f639ad Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 29 Aug 2024 13:51:09 +0300
Subject: [PATCH 031/655] [#1337] config: Move `rebuild_worker_count` to shard
section
This makes it simpler to limit rebuild-related performance degradation
on a per-shard basis.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/control/shards.go | 1 +
cmd/frostfs-node/config.go | 7 ++---
cmd/frostfs-node/config/engine/config.go | 11 --------
cmd/frostfs-node/config/engine/config_test.go | 4 +--
.../config/engine/shard/config.go | 15 +++++++++++
config/example/node.env | 2 +-
config/example/node.json | 2 +-
config/example/node.yaml | 2 +-
docs/storage-node-configuration.md | 1 +
pkg/local_object_storage/engine/engine.go | 16 ++----------
pkg/local_object_storage/engine/rebuild.go | 2 +-
.../engine/rebuild_limiter.go | 26 -------------------
pkg/local_object_storage/engine/shards.go | 1 -
pkg/local_object_storage/shard/control.go | 2 +-
pkg/local_object_storage/shard/rebuild.go | 2 +-
pkg/local_object_storage/shard/shard.go | 10 +++----
16 files changed, 36 insertions(+), 68 deletions(-)
delete mode 100644 pkg/local_object_storage/engine/rebuild_limiter.go
diff --git a/cmd/frostfs-cli/modules/control/shards.go b/cmd/frostfs-cli/modules/control/shards.go
index d6c2a0b9b..329cb9100 100644
--- a/cmd/frostfs-cli/modules/control/shards.go
+++ b/cmd/frostfs-cli/modules/control/shards.go
@@ -19,6 +19,7 @@ func initControlShardsCmd() {
shardsCmd.AddCommand(doctorCmd)
shardsCmd.AddCommand(writecacheShardCmd)
shardsCmd.AddCommand(shardsDetachCmd)
+ shardsCmd.AddCommand(shardsRebuildCmd)
initControlShardsListCmd()
initControlSetShardModeCmd()
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 110281418..b59518d14 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -109,7 +109,6 @@ type applicationConfiguration struct {
shardPoolSize uint32
shards []shardCfg
lowMem bool
- rebuildWorkers uint32
}
// if need to run node in compatibility with other versions mode
@@ -127,6 +126,8 @@ type shardCfg struct {
refillMetabaseWorkersCount int
mode shardmode.Mode
+ rebuildWorkersCount uint32
+
metaCfg struct {
path string
perm fs.FileMode
@@ -230,7 +231,6 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
a.EngineCfg.shardPoolSize = engineconfig.ShardPoolSize(c)
a.EngineCfg.lowMem = engineconfig.EngineLowMemoryConsumption(c)
- a.EngineCfg.rebuildWorkers = engineconfig.EngineRebuildWorkersCount(c)
return engineconfig.IterateShards(c, false, func(sc *shardconfig.Config) error { return a.updateShardConfig(c, sc) })
}
@@ -240,6 +240,7 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
+ newConfig.rebuildWorkersCount = oldConfig.RebuildWorkerCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
@@ -835,7 +836,6 @@ func (c *cfg) engineOpts() []engine.Option {
engine.WithErrorThreshold(c.EngineCfg.errorThreshold),
engine.WithLogger(c.log),
engine.WithLowMemoryConsumption(c.EngineCfg.lowMem),
- engine.WithRebuildWorkersCount(c.EngineCfg.rebuildWorkers),
)
if c.metricsCollector != nil {
@@ -998,6 +998,7 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
+ shard.WithRebuildWorkersCount(shCfg.rebuildWorkersCount),
shard.WithMode(shCfg.mode),
shard.WithBlobStorOptions(blobstoreOpts...),
shard.WithMetaBaseOptions(mbOptions...),
diff --git a/cmd/frostfs-node/config/engine/config.go b/cmd/frostfs-node/config/engine/config.go
index baa4e3c9d..c944d1c58 100644
--- a/cmd/frostfs-node/config/engine/config.go
+++ b/cmd/frostfs-node/config/engine/config.go
@@ -15,9 +15,6 @@ const (
// ShardPoolSizeDefault is a default value of routine pool size per-shard to
// process object PUT operations in a storage engine.
ShardPoolSizeDefault = 20
- // RebuildWorkersCountDefault is a default value of the workers count to
- // process storage rebuild operations in a storage engine.
- RebuildWorkersCountDefault = 100
)
// ErrNoShardConfigured is returned when at least 1 shard is required but none are found.
@@ -91,11 +88,3 @@ func ShardErrorThreshold(c *config.Config) uint32 {
func EngineLowMemoryConsumption(c *config.Config) bool {
return config.BoolSafe(c.Sub(subsection), "low_mem")
}
-
-// EngineRebuildWorkersCount returns value of "rebuild_workers_count" config parmeter from "storage" section.
-func EngineRebuildWorkersCount(c *config.Config) uint32 {
- if v := config.Uint32Safe(c.Sub(subsection), "rebuild_workers_count"); v > 0 {
- return v
- }
- return RebuildWorkersCountDefault
-}
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index d53207ccc..464d72556 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -39,7 +39,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 0, engineconfig.ShardErrorThreshold(empty))
require.EqualValues(t, engineconfig.ShardPoolSizeDefault, engineconfig.ShardPoolSize(empty))
require.EqualValues(t, mode.ReadWrite, shardconfig.From(empty).Mode())
- require.EqualValues(t, engineconfig.RebuildWorkersCountDefault, engineconfig.EngineRebuildWorkersCount(empty))
})
const path = "../../../../config/example/node"
@@ -49,7 +48,6 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 100, engineconfig.ShardErrorThreshold(c))
require.EqualValues(t, 15, engineconfig.ShardPoolSize(c))
- require.EqualValues(t, uint32(1000), engineconfig.EngineRebuildWorkersCount(c))
err := engineconfig.IterateShards(c, true, func(sc *shardconfig.Config) error {
defer func() {
@@ -121,6 +119,7 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
+ require.Equal(t, uint32(1000), sc.RebuildWorkerCount())
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -176,6 +175,7 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
+ require.Equal(t, uint32(shardconfig.RebuildWorkersCountDefault), sc.RebuildWorkerCount())
}
return nil
})
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index 0620c9f63..ec9df0e89 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -19,6 +19,7 @@ const (
SmallSizeLimitDefault = 1 << 20
EstimateCompressibilityThresholdDefault = 0.1
RefillMetabaseWorkersCountDefault = 500
+ RebuildWorkersCountDefault = 5
)
// From wraps config section into Config.
@@ -149,6 +150,20 @@ func (x *Config) RefillMetabaseWorkersCount() int {
return RefillMetabaseWorkersCountDefault
}
+// RebuildWorkerCount returns the value of "rebuild_worker_count" config parameter.
+//
+// Returns RebuildWorkersCountDefault if the value is not a positive number.
+func (x *Config) RebuildWorkerCount() uint32 {
+ v := config.Uint32Safe(
+ (*config.Config)(x),
+ "rebuild_worker_count",
+ )
+ if v > 0 {
+ return v
+ }
+ return RebuildWorkersCountDefault
+}
+
// Mode return the value of "mode" config parameter.
//
// Panics if read the value is not one of predefined
diff --git a/config/example/node.env b/config/example/node.env
index b39423ffb..1eccd8a5d 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -90,11 +90,11 @@ FROSTFS_OBJECT_DELETE_TOMBSTONE_LIFETIME=10
# Storage engine section
FROSTFS_STORAGE_SHARD_POOL_SIZE=15
FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
-FROSTFS_STORAGE_REBUILD_WORKERS_COUNT=1000
## 0 shard
### Flag to refill Metabase from BlobStor
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100
+FROSTFS_STORAGE_SHARD_0_REBUILD_WORKER_COUNT=1000
### Flag to set shard mode
FROSTFS_STORAGE_SHARD_0_MODE=read-only
### Write cache config
diff --git a/config/example/node.json b/config/example/node.json
index fe2de0e01..be7ced77a 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -135,12 +135,12 @@
"storage": {
"shard_pool_size": 15,
"shard_ro_error_threshold": 100,
- "rebuild_workers_count": 1000,
"shard": {
"0": {
"mode": "read-only",
"resync_metabase": false,
"resync_metabase_worker_count": 100,
+ "rebuild_worker_count": 1000,
"writecache": {
"enabled": false,
"no_sync": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index cc339a427..4b9720655 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -117,7 +117,6 @@ storage:
# note: shard configuration can be omitted for relay node (see `node.relay`)
shard_pool_size: 15 # size of per-shard worker pools used for PUT operations
shard_ro_error_threshold: 100 # amount of errors to occur before shard is made read-only (default: 0, ignore errors)
- rebuild_workers_count: 1000 # count of rebuild storage concurrent workers
shard:
default: # section with the default shard parameters
@@ -165,6 +164,7 @@ storage:
# disabled (do not work with the shard, allows to not remove it from the config)
resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding
resync_metabase_worker_count: 100
+ rebuild_worker_count: 1000 # count of rebuild storage concurrent workers
writecache:
enabled: false
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 5bf35cd65..f390d84a4 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -189,6 +189,7 @@ The following table describes configuration for each shard.
| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
+| `rebuild_worker_count` | `int` | `5` | Count of concurrent workers to rebuild blobstore. |
| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index b87d77e6c..5e883a641 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -40,7 +40,6 @@ type StorageEngine struct {
err error
}
evacuateLimiter *evacuationLimiter
- rebuildLimiter *rebuildLimiter
}
type shardWrapper struct {
@@ -213,16 +212,13 @@ type cfg struct {
lowMem bool
- rebuildWorkersCount uint32
-
containerSource atomic.Pointer[containerSource]
}
func defaultCfg() *cfg {
res := &cfg{
- log: &logger.Logger{Logger: zap.L()},
- shardPoolSize: 20,
- rebuildWorkersCount: 100,
+ log: &logger.Logger{Logger: zap.L()},
+ shardPoolSize: 20,
}
res.containerSource.Store(&containerSource{})
return res
@@ -243,7 +239,6 @@ func New(opts ...Option) *StorageEngine {
closeCh: make(chan struct{}),
setModeCh: make(chan setModeRequest),
evacuateLimiter: &evacuationLimiter{},
- rebuildLimiter: newRebuildLimiter(c.rebuildWorkersCount),
}
}
@@ -282,13 +277,6 @@ func WithLowMemoryConsumption(lowMemCons bool) Option {
}
}
-// WithRebuildWorkersCount returns an option to set the count of concurrent rebuild workers.
-func WithRebuildWorkersCount(count uint32) Option {
- return func(c *cfg) {
- c.rebuildWorkersCount = count
- }
-}
-
// SetContainerSource sets container source.
func (e *StorageEngine) SetContainerSource(cs container.Source) {
e.containerSource.Store(&containerSource{cs: cs})
diff --git a/pkg/local_object_storage/engine/rebuild.go b/pkg/local_object_storage/engine/rebuild.go
index 3970aae89..83c6a54ed 100644
--- a/pkg/local_object_storage/engine/rebuild.go
+++ b/pkg/local_object_storage/engine/rebuild.go
@@ -41,7 +41,7 @@ func (e *StorageEngine) Rebuild(ctx context.Context, prm RebuildPrm) (RebuildRes
}
resGuard := &sync.Mutex{}
- limiter := newRebuildLimiter(prm.ConcurrencyLimit)
+ limiter := shard.NewRebuildLimiter(prm.ConcurrencyLimit)
eg, egCtx := errgroup.WithContext(ctx)
for _, shardID := range prm.ShardIDs {
diff --git a/pkg/local_object_storage/engine/rebuild_limiter.go b/pkg/local_object_storage/engine/rebuild_limiter.go
deleted file mode 100644
index 28b02b0a3..000000000
--- a/pkg/local_object_storage/engine/rebuild_limiter.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package engine
-
-import "context"
-
-type rebuildLimiter struct {
- semaphore chan struct{}
-}
-
-func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
- return &rebuildLimiter{
- semaphore: make(chan struct{}, workersCount),
- }
-}
-
-func (l *rebuildLimiter) AcquireWorkSlot(ctx context.Context) error {
- select {
- case l.semaphore <- struct{}{}:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func (l *rebuildLimiter) ReleaseWorkSlot() {
- <-l.semaphore
-}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 40584149e..2ad6859e4 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -137,7 +137,6 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
shard.WithReportErrorFunc(e.reportShardErrorBackground),
- shard.WithRebuildWorkerLimiter(e.rebuildLimiter),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 6efe4ec37..5e9639a7b 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -129,7 +129,7 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
- s.rb = newRebuilder(s.rebuildLimiter)
+ s.rb = newRebuilder(NewRebuildLimiter(s.rebuildWorkersCount))
if !m.NoMetabase() && !s.rebuildDisabled {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index f8051999e..2eef456be 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -26,7 +26,7 @@ type rebuildLimiter struct {
semaphore chan struct{}
}
-func newRebuildLimiter(workersCount uint32) *rebuildLimiter {
+func NewRebuildLimiter(workersCount uint32) RebuildWorkerLimiter {
return &rebuildLimiter{
semaphore: make(chan struct{}, workersCount),
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 1eaee8815..1e2bb7900 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -140,7 +140,7 @@ type cfg struct {
reportErrorFunc func(selfID string, message string, err error)
- rebuildLimiter RebuildWorkerLimiter
+ rebuildWorkersCount uint32
rebuildDisabled bool
}
@@ -151,7 +151,7 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildLimiter: newRebuildLimiter(1),
+ rebuildWorkersCount: 1,
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
@@ -384,11 +384,11 @@ func WithExpiredCollectorWorkerCount(count int) Option {
}
}
-// WithRebuildWorkerLimiter return option to set concurrent
+// WithRebuildWorkersCount return option to set concurrent
// workers count of storage rebuild operation.
-func WithRebuildWorkerLimiter(l RebuildWorkerLimiter) Option {
+func WithRebuildWorkersCount(count uint32) Option {
return func(c *cfg) {
- c.rebuildLimiter = l
+ c.rebuildWorkersCount = count
}
}
From edb1747af7765fe685d9d4736626c409fbde7c79 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Sun, 1 Sep 2024 12:29:02 +0300
Subject: [PATCH 032/655] [#1337] blobovniczatree: Add rebuild by overflow
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/blobovniczatree/rebuild.go | 11 ++-
.../blobstor/blobovniczatree/rebuild_test.go | 74 +++++++++++++++++++
2 files changed, 82 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index b7f20822e..b7b1dfd4b 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -172,7 +172,7 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
continue
}
path := filepath.Join(lvlPath, e.Name())
- resettlementRequired, err := b.fillPercentIsLow(path, target)
+ resettlementRequired, err := b.rebuildBySize(path, target)
if err != nil {
return false, err
}
@@ -187,14 +187,19 @@ func (b *Blobovniczas) selectDBsDoNotMatchFillPercent(ctx context.Context, targe
return result, nil
}
-func (b *Blobovniczas) fillPercentIsLow(path string, target int) (bool, error) {
+func (b *Blobovniczas) rebuildBySize(path string, targetFillPercent int) (bool, error) {
shDB := b.getBlobovnicza(path)
blz, err := shDB.Open()
if err != nil {
return false, err
}
defer shDB.Close()
- return blz.FillPercent() < target, nil
+ fp := blz.FillPercent()
+ // the accepted fill percent is defined as
+ // |----|+++++++++++++++++|+++++++++++++++++|---------------
+ // 0% target 100% 100+(100 - target)
+ // where `+` - accepted fill percent, `-` - not accepted fill percent
+ return fp < targetFillPercent || fp > 100+(100-targetFillPercent), nil
}
func (b *Blobovniczas) rebuildDB(ctx context.Context, path string, meta common.MetaStorage, limiter common.ConcurrentWorkersLimiter) (uint64, error) {
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index 62ae9ea90..e6da1c553 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -228,6 +228,80 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
require.NoError(t, b.Close())
})
+
+ t.Run("rebuild by overflow", func(t *testing.T) {
+ t.Parallel()
+
+ dir := t.TempDir()
+ b := NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1), // single directory
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(100*1024), // 100 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ storageIDs := make(map[oid.Address][]byte)
+ for i := 0; i < 100; i++ { // 2 objects for one blobovnicza, so 50 DBs total will be created
+ obj := blobstortest.NewObject(64 * 1024)
+ data, err := obj.Marshal()
+ require.NoError(t, err)
+ var prm common.PutPrm
+ prm.Address = object.AddressOf(obj)
+ prm.RawData = data
+ res, err := b.Put(context.Background(), prm)
+ require.NoError(t, err)
+ storageIDs[prm.Address] = res.StorageID
+ }
+ metaStub := &storageIDUpdateStub{
+ storageIDs: storageIDs,
+ guard: &sync.Mutex{},
+ }
+ require.NoError(t, b.Close())
+ b = NewBlobovniczaTree(
+ context.Background(),
+ WithLogger(test.NewLogger(t)),
+ WithObjectSizeLimit(64*1024),
+ WithBlobovniczaShallowWidth(1),
+ WithBlobovniczaShallowDepth(1),
+ WithRootPath(dir),
+ WithBlobovniczaSize(50*1024), // 50 KB limit for each blobovnicza
+ WithWaitBeforeDropDB(0),
+ WithOpenedCacheSize(1000),
+ WithMoveBatchSize(3))
+ require.NoError(t, b.Open(mode.ComponentReadWrite))
+ require.NoError(t, b.Init())
+
+ rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
+ MetaStorage: metaStub,
+ WorkerLimiter: &rebuildLimiterStub{},
+ Action: common.RebuildAction{
+ SchemaChange: false,
+ FillPercent: true,
+ FillPercentValue: 80,
+ },
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(49), rRes.FilesRemoved)
+ require.Equal(t, uint64(98), rRes.ObjectsMoved) // 49 DBs with 2 objects
+ require.Equal(t, uint64(98), metaStub.updatedCount)
+
+ for addr, storageID := range storageIDs {
+ var gPrm common.GetPrm
+ gPrm.Address = addr
+ gPrm.StorageID = storageID
+ _, err := b.Get(context.Background(), gPrm)
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, b.Close())
+ })
}
func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
From d3b209c8e19edfdeb13035e1d8f4b4815cf77f08 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 4 Sep 2024 09:30:56 +0300
Subject: [PATCH 033/655] [#1337] shard: Disable background rebuild
Since `frostfs-cli control shards rebuild` command was added,
there is no need for background rebuild now.
For failover tests, value 1 is used to rebuild only on schema change.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 4 --
cmd/frostfs-node/config/engine/config_test.go | 2 -
.../config/engine/shard/config.go | 15 -----
config/example/node.env | 1 -
config/example/node.json | 1 -
config/example/node.yaml | 1 -
docs/storage-node-configuration.md | 1 -
.../blobstor/blobovniczatree/rebuild.go | 31 ++++------
.../blobovniczatree/rebuild_failover_test.go | 3 +-
.../blobstor/blobovniczatree/rebuild_test.go | 28 ++-------
.../blobstor/common/rebuild.go | 9 +--
pkg/local_object_storage/blobstor/rebuild.go | 4 +-
pkg/local_object_storage/shard/control.go | 6 +-
pkg/local_object_storage/shard/gc_test.go | 2 +-
pkg/local_object_storage/shard/rebuild.go | 58 +++++++------------
pkg/local_object_storage/shard/shard.go | 21 -------
16 files changed, 49 insertions(+), 138 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index b59518d14..16f49a082 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -126,8 +126,6 @@ type shardCfg struct {
refillMetabaseWorkersCount int
mode shardmode.Mode
- rebuildWorkersCount uint32
-
metaCfg struct {
path string
perm fs.FileMode
@@ -240,7 +238,6 @@ func (a *applicationConfiguration) updateShardConfig(c *config.Config, oldConfig
newConfig.refillMetabase = oldConfig.RefillMetabase()
newConfig.refillMetabaseWorkersCount = oldConfig.RefillMetabaseWorkersCount()
- newConfig.rebuildWorkersCount = oldConfig.RebuildWorkerCount()
newConfig.mode = oldConfig.Mode()
newConfig.compress = oldConfig.Compress()
newConfig.estimateCompressibility = oldConfig.EstimateCompressibility()
@@ -998,7 +995,6 @@ func (c *cfg) getShardOpts(ctx context.Context, shCfg shardCfg) shardOptsWithID
shard.WithLogger(c.log),
shard.WithRefillMetabase(shCfg.refillMetabase),
shard.WithRefillMetabaseWorkersCount(shCfg.refillMetabaseWorkersCount),
- shard.WithRebuildWorkersCount(shCfg.rebuildWorkersCount),
shard.WithMode(shCfg.mode),
shard.WithBlobStorOptions(blobstoreOpts...),
shard.WithMetaBaseOptions(mbOptions...),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index 464d72556..ef6bf7f74 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -119,7 +119,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, sc.RefillMetabase())
require.Equal(t, mode.ReadOnly, sc.Mode())
require.Equal(t, 100, sc.RefillMetabaseWorkersCount())
- require.Equal(t, uint32(1000), sc.RebuildWorkerCount())
case 1:
require.Equal(t, "tmp/1/blob/pilorama.db", pl.Path())
require.Equal(t, fs.FileMode(0o644), pl.Perm())
@@ -175,7 +174,6 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, sc.RefillMetabase())
require.Equal(t, mode.ReadWrite, sc.Mode())
require.Equal(t, shardconfig.RefillMetabaseWorkersCountDefault, sc.RefillMetabaseWorkersCount())
- require.Equal(t, uint32(shardconfig.RebuildWorkersCountDefault), sc.RebuildWorkerCount())
}
return nil
})
diff --git a/cmd/frostfs-node/config/engine/shard/config.go b/cmd/frostfs-node/config/engine/shard/config.go
index ec9df0e89..0620c9f63 100644
--- a/cmd/frostfs-node/config/engine/shard/config.go
+++ b/cmd/frostfs-node/config/engine/shard/config.go
@@ -19,7 +19,6 @@ const (
SmallSizeLimitDefault = 1 << 20
EstimateCompressibilityThresholdDefault = 0.1
RefillMetabaseWorkersCountDefault = 500
- RebuildWorkersCountDefault = 5
)
// From wraps config section into Config.
@@ -150,20 +149,6 @@ func (x *Config) RefillMetabaseWorkersCount() int {
return RefillMetabaseWorkersCountDefault
}
-// RebuildWorkerCount returns the value of "rebuild_worker_count" config parameter.
-//
-// Returns RebuildWorkersCountDefault if the value is not a positive number.
-func (x *Config) RebuildWorkerCount() uint32 {
- v := config.Uint32Safe(
- (*config.Config)(x),
- "rebuild_worker_count",
- )
- if v > 0 {
- return v
- }
- return RebuildWorkersCountDefault
-}
-
// Mode return the value of "mode" config parameter.
//
// Panics if read the value is not one of predefined
diff --git a/config/example/node.env b/config/example/node.env
index 1eccd8a5d..82553745e 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -94,7 +94,6 @@ FROSTFS_STORAGE_SHARD_RO_ERROR_THRESHOLD=100
### Flag to refill Metabase from BlobStor
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE=false
FROSTFS_STORAGE_SHARD_0_RESYNC_METABASE_WORKER_COUNT=100
-FROSTFS_STORAGE_SHARD_0_REBUILD_WORKER_COUNT=1000
### Flag to set shard mode
FROSTFS_STORAGE_SHARD_0_MODE=read-only
### Write cache config
diff --git a/config/example/node.json b/config/example/node.json
index be7ced77a..da108c692 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -140,7 +140,6 @@
"mode": "read-only",
"resync_metabase": false,
"resync_metabase_worker_count": 100,
- "rebuild_worker_count": 1000,
"writecache": {
"enabled": false,
"no_sync": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 4b9720655..a79f48226 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -164,7 +164,6 @@ storage:
# disabled (do not work with the shard, allows to not remove it from the config)
resync_metabase: false # sync metabase with blobstor on start, expensive, leave false until complete understanding
resync_metabase_worker_count: 100
- rebuild_worker_count: 1000 # count of rebuild storage concurrent workers
writecache:
enabled: false
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index f390d84a4..5bf35cd65 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -189,7 +189,6 @@ The following table describes configuration for each shard.
| `mode` | `string` | `read-write` | Shard Mode.
Possible values: `read-write`, `read-only`, `degraded`, `degraded-read-only`, `disabled` |
| `resync_metabase` | `bool` | `false` | Flag to enable metabase resync on start. |
| `resync_metabase_worker_count` | `int` | `1000` | Count of concurrent workers to resync metabase. |
-| `rebuild_worker_count` | `int` | `5` | Count of concurrent workers to rebuild blobstore. |
| `writecache` | [Writecache config](#writecache-subsection) | | Write-cache configuration. |
| `metabase` | [Metabase config](#metabase-subsection) | | Metabase configuration. |
| `blobstor` | [Blobstor config](#blobstor-subsection) | | Blobstor configuration. |
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
index b7b1dfd4b..202d38cd7 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild.go
@@ -60,7 +60,7 @@ func (b *Blobovniczas) Rebuild(ctx context.Context, prm common.RebuildPrm) (comm
b.log.Debug(logs.BlobovniczaTreeCompletedPreviousRebuildSuccess)
b.log.Debug(logs.BlobovniczaTreeCollectingDBToRebuild)
- dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.Action)
+ dbsToMigrate, err := b.getDBsToRebuild(ctx, prm.FillPercent)
if err != nil {
b.log.Warn(logs.BlobovniczaTreeCollectingDBToRebuildFailed, zap.Error(err))
success = false
@@ -94,27 +94,20 @@ func (b *Blobovniczas) migrateDBs(ctx context.Context, dbs []string, prm common.
return res, nil
}
-func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, action common.RebuildAction) ([]string, error) {
- schemaChange := make(map[string]struct{})
- fillPercent := make(map[string]struct{})
- var err error
- if action.SchemaChange {
- schemaChange, err = b.selectDBsDoNotMatchSchema(ctx)
- if err != nil {
- return nil, err
- }
+func (b *Blobovniczas) getDBsToRebuild(ctx context.Context, fillPercent int) ([]string, error) {
+ withSchemaChange, err := b.selectDBsDoNotMatchSchema(ctx)
+ if err != nil {
+ return nil, err
}
- if action.FillPercent {
- fillPercent, err = b.selectDBsDoNotMatchFillPercent(ctx, action.FillPercentValue)
- if err != nil {
- return nil, err
- }
+ withFillPercent, err := b.selectDBsDoNotMatchFillPercent(ctx, fillPercent)
+ if err != nil {
+ return nil, err
}
- for k := range fillPercent {
- schemaChange[k] = struct{}{}
+ for k := range withFillPercent {
+ withSchemaChange[k] = struct{}{}
}
- result := make([]string, 0, len(schemaChange))
- for db := range schemaChange {
+ result := make([]string, 0, len(withSchemaChange))
+ for db := range withSchemaChange {
result = append(result, db)
}
return result, nil
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
index 9fec795ca..b177d20fc 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_failover_test.go
@@ -145,7 +145,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
WithBlobovniczaShallowWidth(2),
WithBlobovniczaShallowDepth(2),
WithRootPath(dir),
- WithBlobovniczaSize(100*1024*1024),
+ WithBlobovniczaSize(10*1024),
WithWaitBeforeDropDB(0),
WithOpenedCacheSize(1000))
require.NoError(t, b.Open(mode.ComponentReadWrite))
@@ -164,6 +164,7 @@ func testRebuildFailoverValidate(t *testing.T, dir string, obj *objectSDK.Object
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
+ FillPercent: 1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), rRes.ObjectsMoved)
diff --git a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
index e6da1c553..dfd928aaf 100644
--- a/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
+++ b/pkg/local_object_storage/blobstor/blobovniczatree/rebuild_test.go
@@ -79,11 +79,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 60,
- },
+ FillPercent: 60,
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -135,11 +131,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 90, // 64KB / 100KB = 64%
- },
+ FillPercent: 90, // 64KB / 100KB = 64%
})
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -204,11 +196,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 80,
- },
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -281,11 +269,7 @@ func TestBlobovniczaTreeFillPercentRebuild(t *testing.T) {
rRes, err := b.Rebuild(context.Background(), common.RebuildPrm{
MetaStorage: metaStub,
WorkerLimiter: &rebuildLimiterStub{},
- Action: common.RebuildAction{
- SchemaChange: false,
- FillPercent: true,
- FillPercentValue: 80,
- },
+ FillPercent: 80,
})
require.NoError(t, err)
require.Equal(t, uint64(49), rRes.FilesRemoved)
@@ -357,7 +341,7 @@ func TestBlobovniczaTreeRebuildLargeObject(t *testing.T) {
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
- rPrm.Action = common.RebuildAction{SchemaChange: true}
+ rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
@@ -446,7 +430,7 @@ func testBlobovniczaTreeRebuildHelper(t *testing.T, sourceDepth, sourceWidth, ta
var rPrm common.RebuildPrm
rPrm.MetaStorage = metaStub
rPrm.WorkerLimiter = &rebuildLimiterStub{}
- rPrm.Action = common.RebuildAction{SchemaChange: true}
+ rPrm.FillPercent = 1
rRes, err := b.Rebuild(context.Background(), rPrm)
require.NoError(t, err)
dataMigrated := rRes.ObjectsMoved > 0 || rRes.FilesRemoved > 0 || metaStub.updatedCount > 0
diff --git a/pkg/local_object_storage/blobstor/common/rebuild.go b/pkg/local_object_storage/blobstor/common/rebuild.go
index 020d9d022..19e181ee7 100644
--- a/pkg/local_object_storage/blobstor/common/rebuild.go
+++ b/pkg/local_object_storage/blobstor/common/rebuild.go
@@ -11,17 +11,10 @@ type RebuildRes struct {
FilesRemoved uint64
}
-type RebuildAction struct {
- SchemaChange bool
-
- FillPercent bool
- FillPercentValue int
-}
-
type RebuildPrm struct {
MetaStorage MetaStorage
WorkerLimiter ConcurrentWorkersLimiter
- Action RebuildAction
+ FillPercent int
}
type MetaStorage interface {
diff --git a/pkg/local_object_storage/blobstor/rebuild.go b/pkg/local_object_storage/blobstor/rebuild.go
index 31bc2d167..7b2786ba2 100644
--- a/pkg/local_object_storage/blobstor/rebuild.go
+++ b/pkg/local_object_storage/blobstor/rebuild.go
@@ -18,14 +18,14 @@ type ConcurrentWorkersLimiter interface {
ReleaseWorkSlot()
}
-func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, action common.RebuildAction) error {
+func (b *BlobStor) Rebuild(ctx context.Context, upd StorageIDUpdate, limiter ConcurrentWorkersLimiter, fillPercent int) error {
var summary common.RebuildRes
var rErr error
for _, storage := range b.storage {
res, err := storage.Storage.Rebuild(ctx, common.RebuildPrm{
MetaStorage: upd,
WorkerLimiter: limiter,
- Action: action,
+ FillPercent: fillPercent,
})
summary.FilesRemoved += res.FilesRemoved
summary.ObjectsMoved += res.ObjectsMoved
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 5e9639a7b..de881654a 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -129,8 +129,8 @@ func (s *Shard) Init(ctx context.Context) error {
s.gc.init(ctx)
- s.rb = newRebuilder(NewRebuildLimiter(s.rebuildWorkersCount))
- if !m.NoMetabase() && !s.rebuildDisabled {
+ s.rb = newRebuilder()
+ if !m.NoMetabase() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}
s.writecacheSealCancel.Store(dummyCancel)
@@ -398,7 +398,7 @@ func (s *Shard) Reload(ctx context.Context, opts ...Option) error {
defer unlock()
s.rb.Stop(s.log)
- if !s.info.Mode.NoMetabase() && !s.rebuildDisabled {
+ if !s.info.Mode.NoMetabase() {
defer func() {
s.rb.Start(ctx, s.blobStor, s.metaBase, s.log)
}()
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go
index 1c0ef1c2e..90958cd35 100644
--- a/pkg/local_object_storage/shard/gc_test.go
+++ b/pkg/local_object_storage/shard/gc_test.go
@@ -187,7 +187,7 @@ func TestGCDropsObjectInhumedFromWritecache(t *testing.T) {
func testGCDropsObjectInhumedFromWritecache(t *testing.T, flushbeforeInhume bool) {
sh := newCustomShard(t, true, shardOptions{
- additionalShardOptions: []Option{WithDisabledGC(), WithDisabledRebuild()},
+ additionalShardOptions: []Option{WithDisabledGC()},
wcOpts: []writecache.Option{writecache.WithDisableBackgroundFlush()},
})
defer func() { require.NoError(t, sh.Close()) }()
diff --git a/pkg/local_object_storage/shard/rebuild.go b/pkg/local_object_storage/shard/rebuild.go
index 2eef456be..0d83caa0c 100644
--- a/pkg/local_object_storage/shard/rebuild.go
+++ b/pkg/local_object_storage/shard/rebuild.go
@@ -7,7 +7,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -17,6 +16,8 @@ import (
"go.uber.org/zap"
)
+var ErrRebuildInProgress = errors.New("shard rebuild in progress")
+
type RebuildWorkerLimiter interface {
AcquireWorkSlot(ctx context.Context) error
ReleaseWorkSlot()
@@ -46,25 +47,23 @@ func (l *rebuildLimiter) ReleaseWorkSlot() {
}
type rebuildTask struct {
- limiter RebuildWorkerLimiter
- action common.RebuildAction
+ limiter RebuildWorkerLimiter
+ fillPercent int
}
type rebuilder struct {
- mtx *sync.Mutex
- wg *sync.WaitGroup
- cancel func()
- limiter RebuildWorkerLimiter
- done chan struct{}
- tasks chan rebuildTask
+ mtx *sync.Mutex
+ wg *sync.WaitGroup
+ cancel func()
+ done chan struct{}
+ tasks chan rebuildTask
}
-func newRebuilder(l RebuildWorkerLimiter) *rebuilder {
+func newRebuilder() *rebuilder {
return &rebuilder{
- mtx: &sync.Mutex{},
- wg: &sync.WaitGroup{},
- limiter: l,
- tasks: make(chan rebuildTask, 10),
+ mtx: &sync.Mutex{},
+ wg: &sync.WaitGroup{},
+ tasks: make(chan rebuildTask),
}
}
@@ -89,25 +88,14 @@ func (r *rebuilder) Start(ctx context.Context, bs *blobstor.BlobStor, mb *meta.D
if !ok {
continue
}
- runRebuild(ctx, bs, mb, log, t.action, t.limiter)
+ runRebuild(ctx, bs, mb, log, t.fillPercent, t.limiter)
}
}
}()
- select {
- case <-ctx.Done():
- return
- case r.tasks <- rebuildTask{
- limiter: r.limiter,
- action: common.RebuildAction{
- SchemaChange: true,
- },
- }:
- return
- }
}
func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *logger.Logger,
- action common.RebuildAction, limiter RebuildWorkerLimiter,
+ fillPercent int, limiter RebuildWorkerLimiter,
) {
select {
case <-ctx.Done():
@@ -115,23 +103,25 @@ func runRebuild(ctx context.Context, bs *blobstor.BlobStor, mb *meta.DB, log *lo
default:
}
log.Info(logs.BlobstoreRebuildStarted)
- if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, action); err != nil {
+ if err := bs.Rebuild(ctx, &mbStorageIDUpdate{mb: mb}, limiter, fillPercent); err != nil {
log.Warn(logs.FailedToRebuildBlobstore, zap.Error(err))
} else {
log.Info(logs.BlobstoreRebuildCompletedSuccessfully)
}
}
-func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, action common.RebuildAction,
+func (r *rebuilder) ScheduleRebuild(ctx context.Context, limiter RebuildWorkerLimiter, fillPercent int,
) error {
select {
case <-ctx.Done():
return ctx.Err()
case r.tasks <- rebuildTask{
- limiter: limiter,
- action: action,
+ limiter: limiter,
+ fillPercent: fillPercent,
}:
return nil
+ default:
+ return ErrRebuildInProgress
}
}
@@ -198,9 +188,5 @@ func (s *Shard) ScheduleRebuild(ctx context.Context, p RebuildPrm) error {
return ErrDegradedMode
}
- return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, common.RebuildAction{
- SchemaChange: true,
- FillPercent: true,
- FillPercentValue: int(p.TargetFillPercent),
- })
+ return s.rb.ScheduleRebuild(ctx, p.ConcurrencyLimiter, int(p.TargetFillPercent))
}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 1e2bb7900..7496fc352 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -139,10 +139,6 @@ type cfg struct {
metricsWriter MetricsWriter
reportErrorFunc func(selfID string, message string, err error)
-
- rebuildWorkersCount uint32
-
- rebuildDisabled bool
}
func defaultCfg() *cfg {
@@ -151,7 +147,6 @@ func defaultCfg() *cfg {
log: &logger.Logger{Logger: zap.L()},
gcCfg: defaultGCCfg(),
reportErrorFunc: func(string, string, error) {},
- rebuildWorkersCount: 1,
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
}
@@ -384,14 +379,6 @@ func WithExpiredCollectorWorkerCount(count int) Option {
}
}
-// WithRebuildWorkersCount return option to set concurrent
-// workers count of storage rebuild operation.
-func WithRebuildWorkersCount(count uint32) Option {
- return func(c *cfg) {
- c.rebuildWorkersCount = count
- }
-}
-
// WithDisabledGC disables GC.
// For testing purposes only.
func WithDisabledGC() Option {
@@ -414,14 +401,6 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
-// WithDisabledRebuild returns an option to disable a shard rebuild.
-// For testing purposes only.
-func WithDisabledRebuild() Option {
- return func(c *cfg) {
- c.rebuildDisabled = true
- }
-}
-
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From 654d970fadfe7eb47a5ce566be81ad15119df6a0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 6 Sep 2024 11:16:12 +0300
Subject: [PATCH 034/655] [#1355] adm: Run `metabase upgrade` concurrently
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/upgrade.go | 33 +++++++++++++------
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 83e085df4..96cb62f10 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -3,6 +3,7 @@ package metabase
import (
"errors"
"fmt"
+ "sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
@@ -11,6 +12,7 @@ import (
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"github.com/spf13/cobra"
+ "golang.org/x/sync/errgroup"
)
const (
@@ -57,17 +59,28 @@ func upgrade(cmd *cobra.Command, _ []string) error {
cmd.Println(i+1, ":", path)
}
result := make(map[string]bool)
+ var resultGuard sync.Mutex
+ eg, ctx := errgroup.WithContext(cmd.Context())
for _, path := range paths {
- cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(cmd.Context(), path, !noCompact, func(a ...any) {
- cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
- }); err != nil {
- result[path] = false
- cmd.Println("error: failed to upgrade metabase", path, ":", err)
- } else {
- result[path] = true
- cmd.Println("metabase", path, "upgraded successfully")
- }
+ eg.Go(func() error {
+ var success bool
+ cmd.Println("upgrading metabase", path, "...")
+ if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
+ cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
+ }); err != nil {
+ cmd.Println("error: failed to upgrade metabase", path, ":", err)
+ } else {
+ success = true
+ cmd.Println("metabase", path, "upgraded successfully")
+ }
+ resultGuard.Lock()
+ result[path] = success
+ resultGuard.Unlock()
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return err
}
for mb, ok := range result {
if ok {
From 4668efc0bfdfac3750307d0863effb991baa1a4b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 6 Sep 2024 11:17:02 +0300
Subject: [PATCH 035/655] [#1355] metabase: Upgrade improvements
Do not fail when the metabase is already at the latest version, so that compaction can still run on an already-upgraded metabase.
Use NoSync on compact.
Log every batch on bucket delete stage.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index e9abd746c..f677dcf8e 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -27,6 +27,10 @@ const (
var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
2: upgradeFromV2ToV3,
+ 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
+ log("metabase already upgraded")
+ return nil
+ },
}
func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
@@ -86,6 +90,7 @@ func compactDB(db *bbolt.DB) error {
}
dst, err := bbolt.Open(tmpFileName, f.Mode(), &bbolt.Options{
Timeout: 100 * time.Millisecond,
+ NoSync: true,
})
if err != nil {
return fmt.Errorf("can't open new metabase to compact: %w", err)
@@ -93,6 +98,9 @@ func compactDB(db *bbolt.DB) error {
if err := bbolt.Compact(dst, db, compactMaxTxSize); err != nil {
return fmt.Errorf("compact metabase: %w", errors.Join(err, dst.Close(), os.Remove(tmpFileName)))
}
+ if err := dst.Sync(); err != nil {
+ return fmt.Errorf("sync compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
+ }
if err := dst.Close(); err != nil {
return fmt.Errorf("close compacted metabase: %w", errors.Join(err, os.Remove(tmpFileName)))
}
@@ -369,8 +377,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed with an error:", err)
return err
}
- if count += uint64(len(keys)); count%upgradeLogFrequency == 0 {
- log("deleted", count, "buckets")
- }
+ count += uint64(len(keys))
+ log("deleted", count, "buckets")
}
}
From 92fe5d90f50a4d0f3c3b5265a32e0127848559e4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 9 Sep 2024 18:39:22 +0300
Subject: [PATCH 036/655] [#1359] writecache: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/writecache/cachebbolt.go | 8 --------
pkg/local_object_storage/writecache/put.go | 5 -----
2 files changed, 13 deletions(-)
diff --git a/pkg/local_object_storage/writecache/cachebbolt.go b/pkg/local_object_storage/writecache/cachebbolt.go
index cdd4ed442..f1e6a619a 100644
--- a/pkg/local_object_storage/writecache/cachebbolt.go
+++ b/pkg/local_object_storage/writecache/cachebbolt.go
@@ -18,16 +18,9 @@ import (
type cache struct {
options
- // mtx protects statistics, counters and compressFlags.
- mtx sync.RWMutex
-
mode mode.Mode
modeMtx sync.RWMutex
- // compressFlags maps address of a big object to boolean value indicating
- // whether object should be compressed.
- compressFlags map[string]struct{}
-
// flushCh is a channel with objects to flush.
flushCh chan objectInfo
// cancel is cancel function, protected by modeMtx in Close.
@@ -66,7 +59,6 @@ func New(opts ...Option) Cache {
flushCh: make(chan objectInfo),
mode: mode.Disabled,
- compressFlags: make(map[string]struct{}),
options: options{
log: &logger.Logger{Logger: zap.NewNop()},
maxObjectSize: defaultMaxObjectSize,
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index 150399de8..ae0e8b77a 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -115,11 +115,6 @@ func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) erro
return err
}
- if compressor := c.blobstor.Compressor(); compressor != nil && compressor.NeedsCompression(prm.Object) {
- c.mtx.Lock()
- c.compressFlags[addr] = struct{}{}
- c.mtx.Unlock()
- }
storagelog.Write(c.log,
storagelog.AddressField(addr),
storagelog.StorageTypeField(wcStorageType),
From a812932984531162648fdbfa985a6f496fdbd80e Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 10 Sep 2024 11:15:30 +0300
Subject: [PATCH 037/655] [#1362] ape: Move common APE check logic to separate
package
* Tree and object services have the same logic for checking APE, so
this check should be moved to a common package.
Signed-off-by: Airat Arifullin
---
pkg/services/common/ape/checker.go | 167 +++++++++++++++++++++++++++++
pkg/services/object/ape/checker.go | 139 +++++-------------------
pkg/services/tree/ape.go | 116 ++------------------
pkg/services/tree/service.go | 5 +
4 files changed, 205 insertions(+), 222 deletions(-)
create mode 100644 pkg/services/common/ape/checker.go
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
new file mode 100644
index 000000000..f24d22124
--- /dev/null
+++ b/pkg/services/common/ape/checker.go
@@ -0,0 +1,167 @@
+package ape
+
+import (
+ "crypto/ecdsa"
+ "errors"
+ "fmt"
+
+ aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
+ frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+)
+
+var (
+ errInvalidTargetType = errors.New("bearer token defines non-container target override")
+ errBearerExpired = errors.New("bearer token has expired")
+ errBearerInvalidSignature = errors.New("bearer token has invalid signature")
+ errBearerInvalidContainerID = errors.New("bearer token was created for another container")
+ errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
+ errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
+)
+
+type CheckPrm struct {
+ // Request is an APE-request that is checked by policy engine.
+ Request aperequest.Request
+
+ Namespace string
+
+ Container cid.ID
+
+ // An encoded container's owner user ID.
+ ContainerOwner user.ID
+
+ // PublicKey is public key of the request sender.
+ PublicKey *keys.PublicKey
+
+ // The request's bearer token. It is used in order to check APE overrides with the token.
+ BearerToken *bearer.Token
+
+ // If SoftAPECheck is set to true, then NoRuleFound is interpreted as allow.
+ SoftAPECheck bool
+}
+
+// CheckCore provides methods to perform the common logic of APE check.
+type CheckCore interface {
+ // CheckAPE performs the common policy-engine check logic on a prepared request.
+ CheckAPE(prm CheckPrm) error
+}
+
+type checkerCoreImpl struct {
+ LocalOverrideStorage policyengine.LocalOverrideStorage
+ MorphChainStorage policyengine.MorphRuleChainStorageReader
+ FrostFSSubjectProvider frostfsidcore.SubjectProvider
+ State netmap.State
+}
+
+func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
+ frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State) CheckCore {
+ return &checkerCoreImpl{
+ LocalOverrideStorage: localOverrideStorage,
+ MorphChainStorage: morphChainStorage,
+ FrostFSSubjectProvider: frostFSSubjectProvider,
+ State: state,
+ }
+}
+
+// CheckAPE performs the common policy-engine check logic on a prepared request.
+func (c *checkerCoreImpl) CheckAPE(prm CheckPrm) error {
+ var cr policyengine.ChainRouter
+ if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
+ var err error
+ if err = isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, prm.PublicKey, c.State); err != nil {
+ return fmt.Errorf("bearer validation error: %w", err)
+ }
+ cr, err = router.BearerChainFeedRouter(c.LocalOverrideStorage, c.MorphChainStorage, prm.BearerToken.APEOverride())
+ if err != nil {
+ return fmt.Errorf("create chain router error: %w", err)
+ }
+ } else {
+ cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.MorphChainStorage, c.LocalOverrideStorage)
+ }
+
+ groups, err := aperequest.Groups(c.FrostFSSubjectProvider, prm.PublicKey)
+ if err != nil {
+ return fmt.Errorf("failed to get group ids: %w", err)
+ }
+
+ // Policy contract keeps group related chains as namespace-group pair.
+ for i := range groups {
+ groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
+ }
+
+ rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, prm.PublicKey.Address()), groups)
+ status, found, err := cr.IsAllowed(apechain.Ingress, rt, prm.Request)
+ if err != nil {
+ return err
+ }
+ if !found && prm.SoftAPECheck || status == apechain.Allow {
+ return nil
+ }
+ err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", prm.Request.Operation(), status.String())
+ return apeErr(err)
+}
+
+func apeErr(err error) error {
+ errAccessDenied := &apistatus.ObjectAccessDenied{}
+ errAccessDenied.WriteReason(err.Error())
+ return errAccessDenied
+}
+
+// isValidBearer checks whether bearer token was correctly signed by authorized
+// entity. This method might be defined on whole ACL service because it will
+// require fetching current epoch to check lifetime.
+func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
+ if token == nil {
+ return nil
+ }
+
+ // First check token lifetime. Simplest verification.
+ if token.InvalidAt(st.CurrentEpoch()) {
+ return errBearerExpired
+ }
+
+ // Then check if bearer token is signed correctly.
+ if !token.VerifySignature() {
+ return errBearerInvalidSignature
+ }
+
+ // Check for ape overrides defined in the bearer token.
+ apeOverride := token.APEOverride()
+ if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
+ return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
+ }
+
+ // Then check if container is either empty or equal to the container in the request.
+ var targetCnr cid.ID
+ err := targetCnr.DecodeString(apeOverride.Target.Name)
+ if err != nil {
+ return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
+ }
+ if !cntID.Equals(targetCnr) {
+ return errBearerInvalidContainerID
+ }
+
+ // Then check if container owner signed this token.
+ if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
+ return errBearerNotSignedByOwner
+ }
+
+ // Then check if request sender has rights to use this token.
+ var usrSender user.ID
+ user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
+
+ if !token.AssertUser(usrSender) {
+ return errBearerInvalidOwner
+ }
+
+ return nil
+}
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index a1972292e..3688638d0 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -2,49 +2,41 @@ package ape
import (
"context"
- "crypto/ecdsa"
"errors"
"fmt"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
- aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
policyengine "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type checkerImpl struct {
- localOverrideStorage policyengine.LocalOverrideStorage
- morphChainStorage policyengine.MorphRuleChainStorageReader
- headerProvider HeaderProvider
- frostFSIDClient frostfsidcore.SubjectProvider
- nm netmap.Source
- st netmap.State
- cnrSource container.Source
- nodePK []byte
+ checkerCore checkercore.CheckCore
+ frostFSIDClient frostfsidcore.SubjectProvider
+ headerProvider HeaderProvider
+ nm netmap.Source
+ cnrSource container.Source
+ nodePK []byte
}
func NewChecker(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader, headerProvider HeaderProvider, frostFSIDClient frostfsidcore.SubjectProvider, nm netmap.Source, st netmap.State, cnrSource container.Source, nodePK []byte) Checker {
return &checkerImpl{
- localOverrideStorage: localOverrideStorage,
- morphChainStorage: morphChainStorage,
- headerProvider: headerProvider,
- frostFSIDClient: frostFSIDClient,
- nm: nm,
- st: st,
- cnrSource: cnrSource,
- nodePK: nodePK,
+ checkerCore: checkercore.New(localOverrideStorage, morphChainStorage, frostFSIDClient, st),
+ frostFSIDClient: frostFSIDClient,
+ headerProvider: headerProvider,
+ nm: nm,
+ cnrSource: cnrSource,
+ nodePK: nodePK,
}
}
@@ -85,68 +77,9 @@ type Prm struct {
XHeaders []session.XHeader
}
-var (
- errMissingOID = errors.New("object ID is not set")
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
+var errMissingOID = errors.New("object ID is not set")
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, containerID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
- if token == nil {
- return nil
- }
-
- // First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // Check for ape overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !containerID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
-
- // Then check if container owner signed this token.
- if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
- return errBearerNotSignedByOwner
- }
-
- // Then check if request sender has rights to use this token.
- var usrSender user.ID
- user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
-
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
-// CheckAPE checks if a request or a response is permitted creating an ape request and passing
-// it to chain router.
+// CheckAPE prepares an APE-request and checks if it is permitted by policies.
func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
// APE check is ignored for some inter-node requests.
if prm.Role == nativeschema.PropertyValueContainerRoleContainer {
@@ -171,38 +104,14 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
if err != nil {
return err
}
- groups, err := aperequest.Groups(c.frostFSIDClient, pub)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", prm.Namespace, groups[i])
- }
-
- var cr policyengine.ChainRouter
- if prm.BearerToken != nil && !prm.BearerToken.Impersonate() {
- if err := isValidBearer(prm.BearerToken, prm.ContainerOwner, prm.Container, pub, c.st); err != nil {
- return fmt.Errorf("bearer token validation error: %w", err)
- }
- cr, err = router.BearerChainFeedRouter(c.localOverrideStorage, c.morphChainStorage, prm.BearerToken.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
- } else {
- cr = policyengine.NewDefaultChainRouterWithLocalOverrides(c.morphChainStorage, c.localOverrideStorage)
- }
-
- rt := policyengine.NewRequestTargetExtended(prm.Namespace, prm.Container.EncodeToString(), fmt.Sprintf("%s:%s", prm.Namespace, pub.Address()), groups)
- status, ruleFound, err := cr.IsAllowed(apechain.Ingress, rt, r)
- if err != nil {
- return err
- }
-
- if !ruleFound && prm.SoftAPECheck || status == apechain.Allow {
- return nil
- }
-
- return fmt.Errorf("method %s: %s", prm.Method, status)
+ return c.checkerCore.CheckAPE(checkercore.CheckPrm{
+ Request: r,
+ PublicKey: pub,
+ Namespace: prm.Method,
+ Container: prm.Container,
+ ContainerOwner: prm.ContainerOwner,
+ BearerToken: prm.BearerToken,
+ SoftAPECheck: prm.SoftAPECheck,
+ })
}
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index ee4687911..693b16e60 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -2,42 +2,25 @@ package tree
import (
"context"
- "crypto/ecdsa"
"encoding/hex"
- "errors"
"fmt"
"net"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/converter"
aperequest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/request"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/router"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
cnrSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
- apechain "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
- "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
commonschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/common"
nativeschema "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"google.golang.org/grpc/peer"
)
-var (
- errInvalidTargetType = errors.New("bearer token defines non-container target override")
- errBearerExpired = errors.New("bearer token has expired")
- errBearerInvalidSignature = errors.New("bearer token has invalid signature")
- errBearerInvalidContainerID = errors.New("bearer token was created for another container")
- errBearerNotSignedByOwner = errors.New("bearer token is not signed by the container owner")
- errBearerInvalidOwner = errors.New("bearer token owner differs from the request sender")
-)
-
func (s *Service) newAPERequest(ctx context.Context, namespace string,
cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) (aperequest.Request, error) {
@@ -77,56 +60,6 @@ func (s *Service) newAPERequest(ctx context.Context, namespace string,
), nil
}
-// isValidBearer checks whether bearer token was correctly signed by authorized
-// entity. This method might be defined on whole ACL service because it will
-// require fetching current epoch to check lifetime.
-func isValidBearer(token *bearer.Token, ownerCnr user.ID, cntID cid.ID, publicKey *keys.PublicKey, st netmap.State) error {
- if token == nil {
- return nil
- }
-
- // First check token lifetime. Simplest verification.
- if token.InvalidAt(st.CurrentEpoch()) {
- return errBearerExpired
- }
-
- // Then check if bearer token is signed correctly.
- if !token.VerifySignature() {
- return errBearerInvalidSignature
- }
-
- // Check for ape overrides defined in the bearer token.
- apeOverride := token.APEOverride()
- if len(apeOverride.Chains) > 0 && apeOverride.Target.TargetType != ape.TargetTypeContainer {
- return fmt.Errorf("%w: %s", errInvalidTargetType, apeOverride.Target.TargetType.ToV2().String())
- }
-
- // Then check if container is either empty or equal to the container in the request.
- var targetCnr cid.ID
- err := targetCnr.DecodeString(apeOverride.Target.Name)
- if err != nil {
- return fmt.Errorf("invalid cid format: %s", apeOverride.Target.Name)
- }
- if !cntID.Equals(targetCnr) {
- return errBearerInvalidContainerID
- }
-
- // Then check if container owner signed this token.
- if !bearer.ResolveIssuer(*token).Equals(ownerCnr) {
- return errBearerNotSignedByOwner
- }
-
- // Then check if request sender has rights to use this token.
- var usrSender user.ID
- user.IDFromKey(&usrSender, (ecdsa.PublicKey)(*publicKey))
-
- if !token.AssertUser(usrSender) {
- return errBearerInvalidOwner
- }
-
- return nil
-}
-
func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
container *core.Container, cid cid.ID, operation acl.Op, role acl.Role, publicKey *keys.PublicKey,
) error {
@@ -141,45 +74,14 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
return fmt.Errorf("failed to create ape request: %w", err)
}
- var cr engine.ChainRouter
- if bt != nil && !bt.Impersonate() {
- if err := isValidBearer(bt, container.Value.Owner(), cid, publicKey, s.state); err != nil {
- return fmt.Errorf("bearer validation error: %w", err)
- }
- cr, err = router.BearerChainFeedRouter(s.localOverrideStorage, s.morphChainStorage, bt.APEOverride())
- if err != nil {
- return fmt.Errorf("create chain router error: %w", err)
- }
- } else {
- cr = engine.NewDefaultChainRouterWithLocalOverrides(s.morphChainStorage, s.localOverrideStorage)
- }
-
- groups, err := aperequest.Groups(s.frostfsidSubjectProvider, publicKey)
- if err != nil {
- return fmt.Errorf("failed to get group ids: %w", err)
- }
-
- // Policy contract keeps group related chains as namespace-group pair.
- for i := range groups {
- groups[i] = fmt.Sprintf("%s:%s", namespace, groups[i])
- }
-
- rt := engine.NewRequestTargetExtended(namespace, cid.EncodeToString(), fmt.Sprintf("%s:%s", namespace, publicKey.Address()), groups)
- status, found, err := cr.IsAllowed(apechain.Ingress, rt, request)
- if err != nil {
- return err
- }
- if found && status == apechain.Allow {
- return nil
- }
- err = fmt.Errorf("access to operation %s is denied by access policy engine: %s", request.Operation(), status.String())
- return apeErr(err)
-}
-
-func apeErr(err error) error {
- errAccessDenied := &apistatus.ObjectAccessDenied{}
- errAccessDenied.WriteReason(err.Error())
- return errAccessDenied
+ return s.apeChecker.CheckAPE(checkercore.CheckPrm{
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ PublicKey: publicKey,
+ BearerToken: bt,
+ SoftAPECheck: false,
+ })
}
// fillWithUserClaimTags fills ape request properties with user claim tags getting them from frostfsid contract by actor public key.
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 4da61617f..875e47ecb 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -10,6 +10,7 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -37,6 +38,8 @@ type Service struct {
initialSyncDone atomic.Bool
+ apeChecker checkercore.CheckCore
+
// cnrMap contains existing (used) container IDs.
cnrMap map[cidSDK.ID]struct{}
// cnrMapMtx protects cnrMap
@@ -72,6 +75,8 @@ func New(opts ...Option) *Service {
s.syncChan = make(chan struct{})
s.syncPool, _ = ants.NewPool(defaultSyncWorkerCount)
+ s.apeChecker = checkercore.New(s.localOverrideStorage, s.morphChainStorage, s.frostfsidSubjectProvider, s.state)
+
return &s
}
From 2220f6a8091d9b861fd6a86b7afc90320591e9b1 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Tue, 10 Sep 2024 16:45:15 +0300
Subject: [PATCH 038/655] [#1365] Makefile: Fix HUB_IMAGE
Signed-off-by: Alexander Chuprov
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index 71492ef17..2f29ac19c 100755
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ SHELL = bash
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-HUB_IMAGE ?= truecloudlab/frostfs
+HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
GO_VERSION ?= 1.22
From 5fac4058e8cbc9ef8484b2ddaaf583574684dded Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Tue, 10 Sep 2024 12:14:46 +0300
Subject: [PATCH 039/655] [#1364] cmd/common: Add tests for CreateViper and
ReloadViper
Add tests for `CreateViper` and `ReloadViper` to ensure that no extra
files, except *.yaml, *.yml, and *.json, are loaded from the config directory.
Signed-off-by: Aleksey Savchuk
---
cmd/internal/common/config/viper_test.go | 107 +++++++++++++++++++++++
pkg/util/config/test/generate.go | 58 ++++++++++++
2 files changed, 165 insertions(+)
create mode 100644 cmd/internal/common/config/viper_test.go
create mode 100644 pkg/util/config/test/generate.go
diff --git a/cmd/internal/common/config/viper_test.go b/cmd/internal/common/config/viper_test.go
new file mode 100644
index 000000000..d533a15c2
--- /dev/null
+++ b/cmd/internal/common/config/viper_test.go
@@ -0,0 +1,107 @@
+package config_test
+
+import (
+ "encoding/json"
+ "os"
+ "path"
+ "testing"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/config/test"
+ "github.com/spf13/viper"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v3"
+)
+
+func TestCreateReloadViper(t *testing.T) {
+ type m = map[string]any
+
+ dummyFileSize := 1 << 10
+
+ configPath := t.TempDir()
+ configFile := "000_a.yaml"
+
+ configDirPath := path.Join(configPath, "conf.d")
+ require.NoError(t, os.Mkdir(configDirPath, 0o700))
+
+ configtest.PrepareConfigFiles(t, configPath, []configtest.ConfigFile{
+ configtest.NewConfigFile(configFile, m{"a": "000"}, yaml.Marshal),
+ })
+
+ // Not valid configs, dummy files those appear lexicographically first.
+ configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
+ configtest.NewDummyFile("000_file_1", dummyFileSize),
+ configtest.NewDummyFile("000_file_2", dummyFileSize),
+ configtest.NewDummyFile("000_file_3", dummyFileSize),
+ })
+
+ configtest.PrepareConfigFiles(t, configDirPath, []configtest.ConfigFile{
+ // Valid configs with invalid extensions those appear lexicographically first.
+ configtest.NewConfigFile("001_a.yaml.un~", m{"a": "101"}, yaml.Marshal),
+ configtest.NewConfigFile("001_b.yml~", m{"b": m{"a": "102", "b": "103"}}, yaml.Marshal),
+ configtest.NewConfigFile("001_c.yaml.swp", m{"c": m{"a": "104", "b": "105"}}, yaml.Marshal),
+ configtest.NewConfigFile("001_d.json.swp", m{"d": m{"a": "106", "b": "107"}}, json.Marshal),
+
+ // Valid configs with valid extensions those should be loaded.
+ configtest.NewConfigFile("010_a.yaml", m{"a": "1"}, yaml.Marshal),
+ configtest.NewConfigFile("020_b.yml", m{"b": m{"a": "2", "b": "3"}}, yaml.Marshal),
+ configtest.NewConfigFile("030_c.json", m{"c": m{"a": "4", "b": "5"}}, json.Marshal),
+
+ // Valid configs with invalid extensions those appear lexicographically last.
+ configtest.NewConfigFile("099_a.yaml.un~", m{"a": "201"}, yaml.Marshal),
+ configtest.NewConfigFile("099_b.yml~", m{"b": m{"a": "202", "b": "203"}}, yaml.Marshal),
+ configtest.NewConfigFile("099_c.yaml.swp", m{"c": m{"a": "204", "b": "205"}}, yaml.Marshal),
+ configtest.NewConfigFile("099_c.json.swp", m{"d": m{"a": "206", "b": "207"}}, json.Marshal),
+ })
+
+ // Not valid configs, dummy files those appear lexicographically last.
+ configtest.PrepareDummyFiles(t, configDirPath, []configtest.DummyFile{
+ configtest.NewDummyFile("999_file_1", dummyFileSize),
+ configtest.NewDummyFile("999_file_2", dummyFileSize),
+ configtest.NewDummyFile("999_file_3", dummyFileSize),
+ })
+
+ finalConfig := m{"a": "1", "b": m{"a": "2", "b": "3"}, "c": m{"a": "4", "b": "5"}}
+
+ var (
+ v *viper.Viper
+ err error
+ )
+
+ t.Run("create config with config dir only", func(t *testing.T) {
+ v, err = config.CreateViper(
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("reload config with config dir only", func(t *testing.T) {
+ err = config.ReloadViper(
+ config.WithViper(v),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("create config with both config and config dir", func(t *testing.T) {
+ v, err = config.CreateViper(
+ config.WithConfigFile(path.Join(configPath, configFile)),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+
+ t.Run("reload config with both config and config dir", func(t *testing.T) {
+ err = config.ReloadViper(
+ config.WithViper(v),
+ config.WithConfigFile(path.Join(configPath, configFile)),
+ config.WithConfigDir(configDirPath),
+ )
+ require.NoError(t, err)
+ assert.Equal(t, finalConfig, v.AllSettings())
+ })
+}
diff --git a/pkg/util/config/test/generate.go b/pkg/util/config/test/generate.go
new file mode 100644
index 000000000..63e286615
--- /dev/null
+++ b/pkg/util/config/test/generate.go
@@ -0,0 +1,58 @@
+package configtest
+
+import (
+ "crypto/rand"
+ "os"
+ "path"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type MarshalFunc = func(any) ([]byte, error)
+
+type ConfigFile struct {
+ filename string
+ content map[string]any
+ marshal func(any) ([]byte, error)
+}
+
+type DummyFile struct {
+ filename string
+ size int
+}
+
+func NewConfigFile(filename string, content map[string]any, marshal MarshalFunc) ConfigFile {
+ return ConfigFile{
+ filename: filename,
+ content: content,
+ marshal: marshal,
+ }
+}
+
+func NewDummyFile(filename string, size int) DummyFile {
+ return DummyFile{
+ filename: filename,
+ size: size,
+ }
+}
+
+func PrepareConfigFiles(t *testing.T, dir string, files []ConfigFile) {
+ for _, file := range files {
+ data, err := file.marshal(file.content)
+ require.NoError(t, err)
+
+ err = os.WriteFile(path.Join(dir, file.filename), data, 0o600)
+ require.NoError(t, err)
+ }
+}
+
+func PrepareDummyFiles(t *testing.T, dir string, files []DummyFile) {
+ for _, file := range files {
+ data := make([]byte, file.size)
+ _, _ = rand.Read(data)
+
+ err := os.WriteFile(path.Join(dir, file.filename), data, 0o600)
+ require.NoError(t, err)
+ }
+}
From dea6f031f97664259d407bff6a320b295dc4b3d0 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Mon, 9 Sep 2024 19:26:41 +0300
Subject: [PATCH 040/655] [#1331] cli/tree: Add order flag to `tree
get-subtree`
Added `--ordered` flag to sort output by ascending FileName.
Signed-off-by: Ekaterina Lebedeva
---
cmd/frostfs-cli/modules/tree/root.go | 1 +
cmd/frostfs-cli/modules/tree/subtree.go | 11 +++++++++++
2 files changed, 12 insertions(+)
diff --git a/cmd/frostfs-cli/modules/tree/root.go b/cmd/frostfs-cli/modules/tree/root.go
index efd1c08b5..5a53c50d6 100644
--- a/cmd/frostfs-cli/modules/tree/root.go
+++ b/cmd/frostfs-cli/modules/tree/root.go
@@ -49,6 +49,7 @@ const (
heightFlagKey = "height"
countFlagKey = "count"
depthFlagKey = "depth"
+ orderFlagKey = "ordered"
)
func initCTID(cmd *cobra.Command) {
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index e58a13fd6..e88ef79cb 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -30,6 +30,7 @@ func initGetSubtreeCmd() {
ff := getSubtreeCmd.Flags()
ff.Uint64(rootIDFlagKey, 0, "Root ID to traverse from.")
ff.Uint32(depthFlagKey, 10, "Traversal depth.")
+ ff.Bool(orderFlagKey, false, "Sort output by ascending FileName.")
_ = getSubtreeCmd.MarkFlagRequired(commonflags.CIDFlag)
_ = getSubtreeCmd.MarkFlagRequired(treeIDFlagKey)
@@ -59,6 +60,13 @@ func getSubTree(cmd *cobra.Command, _ []string) {
depth, _ := cmd.Flags().GetUint32(depthFlagKey)
+ order, _ := cmd.Flags().GetBool(orderFlagKey)
+
+ bodyOrder := tree.GetSubTreeRequest_Body_Order_None
+ if order {
+ bodyOrder = tree.GetSubTreeRequest_Body_Order_Asc
+ }
+
var bt []byte
if t := common.ReadBearerToken(cmd, bearerFlagKey); t != nil {
bt = t.Marshal()
@@ -71,6 +79,9 @@ func getSubTree(cmd *cobra.Command, _ []string) {
RootId: []uint64{rid},
Depth: depth,
BearerToken: bt,
+ OrderBy: &tree.GetSubTreeRequest_Body_Order{
+ Direction: bodyOrder,
+ },
},
}
From ec8da4056704d81107f514bcb998aa6f3dd7b07f Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Wed, 11 Sep 2024 12:40:04 +0300
Subject: [PATCH 041/655] [#1369] Update obsolete URLs
Signed-off-by: Vitaliy Potyarkin
---
README.md | 15 +++++++--------
cmd/frostfs-adm/docs/deploy.md | 4 ++--
config/testnet/README.md | 2 +-
docs/release-instruction.md | 6 +++---
4 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/README.md b/README.md
index 8225f56c5..47d812b18 100644
--- a/README.md
+++ b/README.md
@@ -7,9 +7,8 @@
---
-[](https://goreportcard.com/report/github.com/TrueCloudLab/frostfs-node)
-
-
+[](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-node)
+
# Overview
@@ -33,8 +32,8 @@ manipulate large amounts of data without paying a prohibitive price.
FrostFS has a native [gRPC API](https://git.frostfs.info/TrueCloudLab/frostfs-api) and has
protocol gateways for popular protocols such as [AWS
-S3](https://github.com/TrueCloudLab/frostfs-s3-gw),
-[HTTP](https://github.com/TrueCloudLab/frostfs-http-gw),
+S3](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw),
+[HTTP](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw),
[FUSE](https://wikipedia.org/wiki/Filesystem_in_Userspace) and
[sFTP](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol) allowing
developers to integrate applications without rewriting their code.
@@ -45,7 +44,7 @@ Now, we only support GNU/Linux on amd64 CPUs with AVX/AVX2 instructions. More
platforms will be officially supported after release `1.0`.
The latest version of frostfs-node works with frostfs-contract
-[v0.16.0](https://github.com/TrueCloudLab/frostfs-contract/releases/tag/v0.16.0).
+[v0.19.2](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/tag/v0.19.2).
# Building
@@ -71,7 +70,7 @@ make docker/bin/frostfs- # build a specific binary
## Docker images
-To make docker images suitable for use in [frostfs-dev-env](https://github.com/TrueCloudLab/frostfs-dev-env/) use:
+To make docker images suitable for use in [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env/) use:
```
make images
```
@@ -125,7 +124,7 @@ the feature/topic you are going to implement.
# Credits
-FrostFS is maintained by [True Cloud Lab](https://github.com/TrueCloudLab/) with the help and
+FrostFS is maintained by [True Cloud Lab](https://git.frostfs.info/TrueCloudLab/) with the help and
contributions from community members.
Please see [CREDITS](CREDITS.md) for details.
diff --git a/cmd/frostfs-adm/docs/deploy.md b/cmd/frostfs-adm/docs/deploy.md
index 87d2e47c1..b4b1ed8e4 100644
--- a/cmd/frostfs-adm/docs/deploy.md
+++ b/cmd/frostfs-adm/docs/deploy.md
@@ -9,8 +9,8 @@ related configuration details.
To follow this guide you need:
- latest released version of [neo-go](https://github.com/nspcc-dev/neo-go/releases) (v0.97.2 at the moment),
-- latest released version of [frostfs-adm](https://github.com/TrueCloudLab/frostfs-node/releases) utility (v0.25.1 at the moment),
-- latest released version of compiled [frostfs-contract](https://github.com/TrueCloudLab/frostfs-contract/releases) (v0.11.0 at the moment).
+- latest released version of [frostfs-adm](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases) utility (v0.42.9 at the moment),
+- latest released version of compiled [frostfs-contract](https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases) (v0.19.2 at the moment).
## Step 1: Prepare network configuration
diff --git a/config/testnet/README.md b/config/testnet/README.md
index b5faf2b27..e2cda33ec 100644
--- a/config/testnet/README.md
+++ b/config/testnet/README.md
@@ -67,7 +67,7 @@ NEOFS_NODE_ATTRIBUTE_2=UN-LOCODE:RU LED
```
You can validate UN/LOCODE attribute in
-[NeoFS LOCODE database](https://github.com/TrueCloudLab/frostfs-locode-db/releases/tag/v0.1.0)
+[NeoFS LOCODE database](https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/tag/v0.4.0)
with frostfs-cli.
```
diff --git a/docs/release-instruction.md b/docs/release-instruction.md
index ec7b8cdf3..3aebc8e66 100644
--- a/docs/release-instruction.md
+++ b/docs/release-instruction.md
@@ -9,7 +9,7 @@ These should run successfully:
* `make lint` (should not change any files);
* `make fmts` (should not change any files);
* `go mod tidy` (should not change any files);
-* integration tests in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv).
+* integration tests in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env).
## Make release commit
@@ -123,12 +123,12 @@ the release. Publish the release.
### Update FrostFS Developer Environment
-Prepare pull-request in [frostfs-devenv](https://github.com/TrueCloudLab/frostfs-devenv)
+Prepare pull-request in [frostfs-devenv](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
with new versions.
### Close GitHub milestone
-Look up GitHub [milestones](https://github.com/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
+Look up [milestones](https://git.frostfs.info/TrueCloudLab/frostfs-node/milestones) and close the release one if exists.
### Rebuild FrostFS LOCODE database
From 99be4c83a7c8ab6717ef5242c80f9ccc51d470d7 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 10:00:28 +0300
Subject: [PATCH 042/655] [#1368] *: Run gofumpt
Signed-off-by: Aleksey Savchuk
---
pkg/services/common/ape/checker.go | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/pkg/services/common/ape/checker.go b/pkg/services/common/ape/checker.go
index f24d22124..278f6da31 100644
--- a/pkg/services/common/ape/checker.go
+++ b/pkg/services/common/ape/checker.go
@@ -63,7 +63,8 @@ type checkerCoreImpl struct {
}
func New(localOverrideStorage policyengine.LocalOverrideStorage, morphChainStorage policyengine.MorphRuleChainStorageReader,
- frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State) CheckCore {
+ frostFSSubjectProvider frostfsidcore.SubjectProvider, state netmap.State,
+) CheckCore {
return &checkerCoreImpl{
LocalOverrideStorage: localOverrideStorage,
MorphChainStorage: morphChainStorage,
From 66e17f4b8e968e1d58924ee9a38b720d0df7989c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 10:01:27 +0300
Subject: [PATCH 043/655] [#1368] cli/container: Use dedicated method to list
user attributes
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/modules/container/list.go | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/list.go b/cmd/frostfs-cli/modules/container/list.go
index 6d0019ec4..f01e4db4d 100644
--- a/cmd/frostfs-cli/modules/container/list.go
+++ b/cmd/frostfs-cli/modules/container/list.go
@@ -1,9 +1,6 @@
package container
import (
- "strings"
-
- "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -84,12 +81,8 @@ var listContainersCmd = &cobra.Command{
cmd.Println(cnrID.String())
if flagVarListPrintAttr {
- cnr.IterateAttributes(func(key, val string) {
- if !strings.HasPrefix(key, container.SysAttributePrefix) && !strings.HasPrefix(key, container.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
- // Use dedicated method to skip system attributes.
- cmd.Printf(" %s: %s\n", key, val)
- }
+ cnr.IterateUserAttributes(func(key, val string) {
+ cmd.Printf(" %s: %s\n", key, val)
})
}
}
From 5f6c7cbdb102c51e4e994198f805a33084f3b9de Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 9 Sep 2024 18:37:06 +0300
Subject: [PATCH 044/655] [#1367] writecache: Drop bbolt DB
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 2 +-
.../writecache/{cachebbolt.go => cache.go} | 15 +-
pkg/local_object_storage/writecache/delete.go | 39 +---
pkg/local_object_storage/writecache/flush.go | 192 ++++--------------
.../writecache/flush_test.go | 28 +--
pkg/local_object_storage/writecache/get.go | 14 +-
pkg/local_object_storage/writecache/mode.go | 39 ++--
.../writecache/mode_test.go | 8 +-
.../writecache/options.go | 11 -
pkg/local_object_storage/writecache/put.go | 53 +----
pkg/local_object_storage/writecache/state.go | 35 +---
.../writecache/storage.go | 61 ------
12 files changed, 82 insertions(+), 415 deletions(-)
rename pkg/local_object_storage/writecache/{cachebbolt.go => cache.go} (94%)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 97b189529..87e4e0b43 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -542,6 +542,6 @@ const (
StartedWritecacheSealAsync = "started writecache seal async"
WritecacheSealCompletedAsync = "writecache seal completed successfully"
FailedToSealWritecacheAsync = "failed to seal writecache async"
- WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: database is not empty"
+ WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
)
diff --git a/pkg/local_object_storage/writecache/cachebbolt.go b/pkg/local_object_storage/writecache/cache.go
similarity index 94%
rename from pkg/local_object_storage/writecache/cachebbolt.go
rename to pkg/local_object_storage/writecache/cache.go
index f1e6a619a..ff38de407 100644
--- a/pkg/local_object_storage/writecache/cachebbolt.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -2,7 +2,7 @@ package writecache
import (
"context"
- "os"
+ "fmt"
"sync"
"sync/atomic"
@@ -27,8 +27,6 @@ type cache struct {
cancel atomic.Value
// wg is a wait group for flush workers.
wg sync.WaitGroup
- // store contains underlying database.
- store
// fsTree contains big files stored directly on file-system.
fsTree *fstree.FSTree
}
@@ -67,7 +65,6 @@ func New(opts ...Option) Cache {
maxCacheSize: defaultMaxCacheSize,
maxBatchSize: bbolt.DefaultMaxBatchSize,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- openFile: os.OpenFile,
metrics: DefaultMetrics(),
},
}
@@ -102,13 +99,15 @@ func (c *cache) Open(_ context.Context, mod mode.Mode) error {
if err != nil {
return metaerr.Wrap(err)
}
-
return metaerr.Wrap(c.initCounters())
}
// Init runs necessary services.
func (c *cache) Init() error {
c.metrics.SetMode(mode.ConvertToComponentModeDegraded(c.mode))
+ if err := c.flushAndDropBBoltDB(context.Background()); err != nil {
+ return fmt.Errorf("flush previous version write-cache database: %w", err)
+ }
ctx, cancel := context.WithCancel(context.Background())
c.cancel.Store(cancel)
c.runFlushLoop(ctx)
@@ -132,10 +131,10 @@ func (c *cache) Close() error {
defer c.modeMtx.Unlock()
var err error
- if c.db != nil {
- err = c.db.Close()
+ if c.fsTree != nil {
+ err = c.fsTree.Close()
if err != nil {
- c.db = nil
+ c.fsTree = nil
}
}
c.metrics.Close()
diff --git a/pkg/local_object_storage/writecache/delete.go b/pkg/local_object_storage/writecache/delete.go
index b1a0511ee..dda284439 100644
--- a/pkg/local_object_storage/writecache/delete.go
+++ b/pkg/local_object_storage/writecache/delete.go
@@ -2,7 +2,6 @@ package writecache
import (
"context"
- "math"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
@@ -10,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -45,46 +43,11 @@ func (c *cache) Delete(ctx context.Context, addr oid.Address) error {
return ErrDegraded
}
- saddr := addr.EncodeToString()
-
- var dataSize int
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- dataSize = len(b.Get([]byte(saddr)))
- return nil
- })
-
- if dataSize > 0 {
- storageType = StorageTypeDB
- var recordDeleted bool
- err := c.db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(saddr)
- recordDeleted = b.Get(key) != nil
- err := b.Delete(key)
- return err
- })
- if err != nil {
- return err
- }
- storagelog.Write(c.log,
- storagelog.AddressField(saddr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- if recordDeleted {
- c.objCounters.cDB.Add(math.MaxUint64)
- c.estimateCacheSize()
- }
- deleted = true
- return nil
- }
-
storageType = StorageTypeFSTree
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err == nil {
storagelog.Write(c.log,
- storagelog.AddressField(saddr),
+ storagelog.AddressField(addr.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree DELETE"),
)
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 930ac8431..074756e32 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -4,6 +4,9 @@ import (
"bytes"
"context"
"errors"
+ "fmt"
+ "os"
+ "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -16,7 +19,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "github.com/mr-tron/base58"
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
@@ -24,10 +26,6 @@ import (
)
const (
- // flushBatchSize is amount of keys which will be read from cache to be flushed
- // to the main storage. It is used to reduce contention between cache put
- // and cache persist.
- flushBatchSize = 512
// defaultFlushWorkersCount is number of workers for putting objects in main storage.
defaultFlushWorkersCount = 20
// defaultFlushInterval is default time interval between successive flushes.
@@ -41,112 +39,11 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
- for range c.workersCount {
- c.wg.Add(1)
- go c.workerFlushSmall(ctx)
- }
-
c.wg.Add(1)
go func() {
c.workerFlushBig(ctx)
c.wg.Done()
}()
-
- c.wg.Add(1)
- go func() {
- defer c.wg.Done()
-
- tt := time.NewTimer(defaultFlushInterval)
- defer tt.Stop()
-
- for {
- select {
- case <-tt.C:
- c.flushSmallObjects(ctx)
- tt.Reset(defaultFlushInterval)
- c.estimateCacheSize()
- case <-ctx.Done():
- return
- }
- }
- }()
-}
-
-func (c *cache) flushSmallObjects(ctx context.Context) {
- var lastKey []byte
- for {
- select {
- case <-ctx.Done():
- return
- default:
- }
-
- var m []objectInfo
-
- c.modeMtx.RLock()
- if c.readOnly() {
- c.modeMtx.RUnlock()
- time.Sleep(time.Second)
- continue
- }
-
- // We put objects in batches of fixed size to not interfere with main put cycle a lot.
- _ = c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
-
- var k, v []byte
-
- if len(lastKey) == 0 {
- k, v = cs.First()
- } else {
- k, v = cs.Seek(lastKey)
- if bytes.Equal(k, lastKey) {
- k, v = cs.Next()
- }
- }
-
- for ; k != nil && len(m) < flushBatchSize; k, v = cs.Next() {
- if len(lastKey) == len(k) {
- copy(lastKey, k)
- } else {
- lastKey = bytes.Clone(k)
- }
-
- m = append(m, objectInfo{
- addr: string(k),
- data: bytes.Clone(v),
- })
- }
- return nil
- })
-
- var count int
- for i := range m {
- obj := objectSDK.New()
- if err := obj.Unmarshal(m[i].data); err != nil {
- continue
- }
- m[i].obj = obj
-
- count++
- select {
- case c.flushCh <- m[i]:
- case <-ctx.Done():
- c.modeMtx.RUnlock()
- return
- }
- }
-
- c.modeMtx.RUnlock()
- if count == 0 {
- break
- }
-
- c.log.Debug(logs.WritecacheTriedToFlushItemsFromWritecache,
- zap.Int("count", count),
- zap.String("start", base58.Encode(lastKey)))
- }
}
func (c *cache) workerFlushBig(ctx context.Context) {
@@ -197,9 +94,6 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
err = c.flushObject(ctx, &obj, e.ObjectData, StorageTypeFSTree)
if err != nil {
- if ignoreErrors {
- return nil
- }
return err
}
@@ -211,29 +105,6 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
return err
}
-// workerFlushSmall writes small objects to the main storage.
-func (c *cache) workerFlushSmall(ctx context.Context) {
- defer c.wg.Done()
-
- var objInfo objectInfo
- for {
- // Give priority to direct put.
- select {
- case objInfo = <-c.flushCh:
- case <-ctx.Done():
- return
- }
-
- err := c.flushObject(ctx, objInfo.obj, objInfo.data, StorageTypeDB)
- if err != nil {
- // Error is handled in flushObject.
- continue
- }
-
- c.deleteFromDB(objInfo.addr, true)
- }
-}
-
// flushObject is used to write object directly to the main storage.
func (c *cache) flushObject(ctx context.Context, obj *objectSDK.Object, data []byte, st StorageType) error {
var err error
@@ -300,13 +171,33 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
}
func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
- if err := c.flushFSTree(ctx, ignoreErrors); err != nil {
- return err
+ return c.flushFSTree(ctx, ignoreErrors)
+}
+
+type batchItem struct {
+ data []byte
+ address string
+}
+
+func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
+ _, err := os.Stat(filepath.Join(c.path, dbName))
+ if err != nil && os.IsNotExist(err) {
+ return nil
}
+ if err != nil {
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
+ }
+ db, err := OpenDB(c.path, true, os.OpenFile, c.pageSize)
+ if err != nil {
+ return fmt.Errorf("could not open write-cache database: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
var last string
for {
- batch, err := c.readNextDBBatch(ignoreErrors, last)
+ batch, err := c.readNextDBBatch(db, last)
if err != nil {
return err
}
@@ -316,32 +207,27 @@ func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
for _, item := range batch {
var obj objectSDK.Object
if err := obj.Unmarshal(item.data); err != nil {
- c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, item.address, metaerr.Wrap(err))
- if ignoreErrors {
- continue
- }
- return err
+ return fmt.Errorf("unmarshal object from database: %w", err)
}
-
if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
- return err
+ return fmt.Errorf("flush object from database: %w", err)
}
- c.deleteFromDB(item.address, false)
}
last = batch[len(batch)-1].address
}
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close write-cache database: %w", err)
+ }
+ if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
+ return fmt.Errorf("remove write-cache database: %w", err)
+ }
return nil
}
-type batchItem struct {
- data []byte
- address string
-}
-
-func (c *cache) readNextDBBatch(ignoreErrors bool, last string) ([]batchItem, error) {
+func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
const batchSize = 100
var batch []batchItem
- err := c.db.View(func(tx *bbolt.Tx) error {
+ err := db.View(func(tx *bbolt.Tx) error {
var addr oid.Address
b := tx.Bucket(defaultBucket)
@@ -352,11 +238,7 @@ func (c *cache) readNextDBBatch(ignoreErrors bool, last string) ([]batchItem, er
continue
}
if err := addr.DecodeString(sa); err != nil {
- c.reportFlushError(logs.FSTreeCantDecodeDBObjectAddress, sa, metaerr.Wrap(err))
- if ignoreErrors {
- continue
- }
- return err
+ return fmt.Errorf("decode address from database: %w", err)
}
batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index a637da45d..9c7e240e0 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -19,7 +19,6 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -47,31 +46,6 @@ func TestFlush(t *testing.T) {
}
failures := []TestFailureInjector[Option]{
- {
- Desc: "db, invalid address",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- obj := testutil.GenerateObject()
- data, err := obj.Marshal()
- require.NoError(t, err)
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- return b.Put([]byte{1, 2, 3}, data)
- }))
- },
- },
- {
- Desc: "db, invalid object",
- InjectFn: func(t *testing.T, wc Cache) {
- c := wc.(*cache)
- require.NoError(t, c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- k := []byte(oidtest.Address().EncodeToString())
- v := []byte{1, 2, 3}
- return b.Put(k, v)
- }))
- },
- },
{
Desc: "fs, read error",
InjectFn: func(t *testing.T, wc Cache) {
@@ -263,7 +237,7 @@ func check(t *testing.T, mb *meta.DB, bs *blobstor.BlobStor, objects []objectPai
prm.StorageID = mRes.StorageID()
res, err := bs.Get(context.Background(), prm)
- require.NoError(t, err)
+ require.NoError(t, err, objects[i].addr)
require.Equal(t, objects[i].obj, res.Object)
}
}
diff --git a/pkg/local_object_storage/writecache/get.go b/pkg/local_object_storage/writecache/get.go
index bf26833bd..c0847a65f 100644
--- a/pkg/local_object_storage/writecache/get.go
+++ b/pkg/local_object_storage/writecache/get.go
@@ -37,11 +37,11 @@ func (c *cache) Get(ctx context.Context, addr oid.Address) (*objectSDK.Object, e
return nil, ErrDegraded
}
- obj, err := c.getInternal(ctx, saddr, addr)
+ obj, err := c.getInternal(ctx, addr)
return obj, metaerr.Wrap(err)
}
-func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address) (*objectSDK.Object, error) {
+func (c *cache) getInternal(ctx context.Context, addr oid.Address) (*objectSDK.Object, error) {
found := false
storageType := StorageTypeUndefined
startedAt := time.Now()
@@ -49,14 +49,6 @@ func (c *cache) getInternal(ctx context.Context, saddr string, addr oid.Address)
c.metrics.Get(time.Since(startedAt), found, storageType)
}()
- value, err := Get(c.db, []byte(saddr))
- if err == nil {
- obj := objectSDK.New()
- found = true
- storageType = StorageTypeDB
- return obj, obj.Unmarshal(value)
- }
-
res, err := c.fsTree.Get(ctx, common.GetPrm{Address: addr})
if err != nil {
return nil, logicerr.Wrap(new(apistatus.ObjectNotFound))
@@ -87,7 +79,7 @@ func (c *cache) Head(ctx context.Context, addr oid.Address) (*objectSDK.Object,
return nil, ErrDegraded
}
- obj, err := c.getInternal(ctx, saddr, addr)
+ obj, err := c.getInternal(ctx, addr)
if err != nil {
return nil, metaerr.Wrap(err)
}
diff --git a/pkg/local_object_storage/writecache/mode.go b/pkg/local_object_storage/writecache/mode.go
index 44da9b36e..d12dd603b 100644
--- a/pkg/local_object_storage/writecache/mode.go
+++ b/pkg/local_object_storage/writecache/mode.go
@@ -5,13 +5,12 @@ import (
"errors"
"fmt"
"os"
- "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -53,7 +52,7 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
}
}
- if err := c.closeDB(prm.shrink); err != nil {
+ if err := c.closeStorage(ctx, prm.shrink); err != nil {
return err
}
@@ -78,33 +77,37 @@ func (c *cache) setMode(ctx context.Context, m mode.Mode, prm setModePrm) error
return nil
}
-func (c *cache) closeDB(shrink bool) error {
- if c.db == nil {
+func (c *cache) closeStorage(ctx context.Context, shrink bool) error {
+ if c.fsTree == nil {
return nil
}
if !shrink {
- if err := c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
return nil
}
- var empty bool
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- empty = b == nil || b.Stats().KeyN == 0
- return nil
+ empty := true
+ _, err := c.fsTree.Iterate(ctx, common.IteratePrm{
+ Handler: func(common.IterationElement) error {
+ return errIterationCompleted
+ },
})
- if err != nil && !errors.Is(err, bbolt.ErrDatabaseNotOpen) {
- return fmt.Errorf("failed to check DB items: %w", err)
+ if err != nil {
+ if errors.Is(err, errIterationCompleted) {
+ empty = false
+ } else {
+ return fmt.Errorf("failed to check write-cache items: %w", err)
+ }
}
- if err := c.db.Close(); err != nil {
- return fmt.Errorf("can't close write-cache database: %w", err)
+ if err := c.fsTree.Close(); err != nil {
+ return fmt.Errorf("can't close write-cache storage: %w", err)
}
if empty {
- err := os.Remove(filepath.Join(c.path, dbName))
+ err := os.RemoveAll(c.path)
if err != nil && !os.IsNotExist(err) {
- return fmt.Errorf("failed to remove DB file: %w", err)
+ return fmt.Errorf("failed to remove write-cache files: %w", err)
}
} else {
c.log.Info(logs.WritecacheShrinkSkippedNotEmpty)
diff --git a/pkg/local_object_storage/writecache/mode_test.go b/pkg/local_object_storage/writecache/mode_test.go
index f684c15bc..70cfe8382 100644
--- a/pkg/local_object_storage/writecache/mode_test.go
+++ b/pkg/local_object_storage/writecache/mode_test.go
@@ -17,14 +17,14 @@ func TestMode(t *testing.T) {
WithPath(t.TempDir()))
require.NoError(t, wc.Open(context.Background(), mode.DegradedReadOnly))
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Init())
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Close())
require.NoError(t, wc.Open(context.Background(), mode.Degraded))
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Init())
- require.Nil(t, wc.(*cache).db)
+ require.Nil(t, wc.(*cache).fsTree)
require.NoError(t, wc.Close())
}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 980cf9303..7845c5da9 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,6 @@
package writecache
import (
- "io/fs"
- "os"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
@@ -42,8 +40,6 @@ type options struct {
noSync bool
// reportError is the function called when encountering disk errors in background workers.
reportError func(string, error)
- // openFile is the function called internally by bbolt to open database files. Useful for hermetic testing.
- openFile func(string, int, fs.FileMode) (*os.File, error)
// metrics is metrics implementation
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
@@ -155,13 +151,6 @@ func WithReportErrorFunc(f func(string, error)) Option {
}
}
-// WithOpenFile sets the OpenFile function to use internally by bolt. Useful for hermetic testing.
-func WithOpenFile(f func(string, int, fs.FileMode) (*os.File, error)) Option {
- return func(o *options) {
- o.openFile = f
- }
-}
-
// WithMetrics sets metrics implementation.
func WithMetrics(metrics Metrics) Option {
return func(o *options) {
diff --git a/pkg/local_object_storage/writecache/put.go b/pkg/local_object_storage/writecache/put.go
index ae0e8b77a..c53067bea 100644
--- a/pkg/local_object_storage/writecache/put.go
+++ b/pkg/local_object_storage/writecache/put.go
@@ -8,7 +8,6 @@ import (
storagelog "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/log"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
@@ -50,62 +49,16 @@ func (c *cache) Put(ctx context.Context, prm common.PutPrm) (common.PutRes, erro
return common.PutRes{}, ErrBigObject
}
- oi := objectInfo{
- addr: prm.Address.EncodeToString(),
- obj: prm.Object,
- data: prm.RawData,
- }
-
- if sz <= c.smallObjectSize {
- storageType = StorageTypeDB
- err := c.putSmall(oi)
- if err == nil {
- added = true
- }
- return common.PutRes{}, err
- }
-
storageType = StorageTypeFSTree
- err := c.putBig(ctx, oi.addr, prm)
+ err := c.putBig(ctx, prm)
if err == nil {
added = true
}
return common.PutRes{}, metaerr.Wrap(err)
}
-// putSmall persists small objects to the write-cache database and
-// pushes the to the flush workers queue.
-func (c *cache) putSmall(obj objectInfo) error {
- if !c.hasEnoughSpaceDB() {
- return ErrOutOfSpace
- }
-
- var newRecord bool
- err := c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(obj.addr)
- newRecord = b.Get(key) == nil
- if newRecord {
- return b.Put(key, obj.data)
- }
- return nil
- })
- if err == nil {
- storagelog.Write(c.log,
- storagelog.AddressField(obj.addr),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db PUT"),
- )
- if newRecord {
- c.objCounters.cDB.Add(1)
- c.estimateCacheSize()
- }
- }
- return err
-}
-
// putBig writes object to FSTree and pushes it to the flush workers queue.
-func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) error {
+func (c *cache) putBig(ctx context.Context, prm common.PutPrm) error {
if !c.hasEnoughSpaceFS() {
return ErrOutOfSpace
}
@@ -116,7 +69,7 @@ func (c *cache) putBig(ctx context.Context, addr string, prm common.PutPrm) erro
}
storagelog.Write(c.log,
- storagelog.AddressField(addr),
+ storagelog.AddressField(prm.Address.EncodeToString()),
storagelog.StorageTypeField(wcStorageType),
storagelog.OpField("fstree PUT"),
)
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index d03f4a63e..e4e22f404 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -1,29 +1,18 @@
package writecache
import (
- "fmt"
"math"
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
- "go.etcd.io/bbolt"
)
func (c *cache) estimateCacheSize() (uint64, uint64) {
- dbCount := c.objCounters.DB()
fsCount := c.objCounters.FS()
- if fsCount > 0 {
- fsCount-- // db file
- }
- dbSize := dbCount * c.smallObjectSize
fsSize := fsCount * c.maxObjectSize
- c.metrics.SetEstimateSize(dbSize, fsSize)
- c.metrics.SetActualCounters(dbCount, fsCount)
- return dbCount + fsCount, dbSize + fsSize
-}
-
-func (c *cache) hasEnoughSpaceDB() bool {
- return c.hasEnoughSpace(c.smallObjectSize)
+ c.metrics.SetEstimateSize(0, fsSize)
+ c.metrics.SetActualCounters(0, fsCount)
+ return fsCount, fsSize
}
func (c *cache) hasEnoughSpaceFS() bool {
@@ -41,11 +30,7 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
var _ fstree.FileCounter = &counters{}
type counters struct {
- cDB, cFS atomic.Uint64
-}
-
-func (x *counters) DB() uint64 {
- return x.cDB.Load()
+ cFS atomic.Uint64
}
func (x *counters) FS() uint64 {
@@ -68,18 +53,6 @@ func (x *counters) Dec() {
}
func (c *cache) initCounters() error {
- var inDB uint64
- err := c.db.View(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- if b != nil {
- inDB = uint64(b.Stats().KeyN)
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("could not read write-cache DB counter: %w", err)
- }
- c.objCounters.cDB.Store(inDB)
c.estimateCacheSize()
return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 57021cc17..309bd2a66 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -3,7 +3,6 @@ package writecache
import (
"context"
"fmt"
- "math"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -14,16 +13,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
-// store represents persistent storage with in-memory LRU cache
-// for flushed items on top of it.
-type store struct {
- db *bbolt.DB
-}
-
const dbName = "small.bolt"
func (c *cache) openStore(mod mode.ComponentMode) error {
@@ -32,24 +24,6 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return err
}
- c.db, err = OpenDB(c.path, mod.ReadOnly(), c.openFile, c.pageSize)
- if err != nil {
- return fmt.Errorf("could not open database: %w", err)
- }
-
- c.db.MaxBatchSize = c.maxBatchSize
- c.db.MaxBatchDelay = c.maxBatchDelay
-
- if !mod.ReadOnly() {
- err = c.db.Update(func(tx *bbolt.Tx) error {
- _, err := tx.CreateBucketIfNotExists(defaultBucket)
- return err
- })
- if err != nil {
- return fmt.Errorf("could not create default bucket: %w", err)
- }
- }
-
c.fsTree = fstree.New(
fstree.WithPath(c.path),
fstree.WithPerm(os.ModePerm),
@@ -68,41 +42,6 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return nil
}
-func (c *cache) deleteFromDB(key string, batched bool) {
- var recordDeleted bool
- var err error
- if batched {
- err = c.db.Batch(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(key)
- recordDeleted = b.Get(key) != nil
- return b.Delete(key)
- })
- } else {
- err = c.db.Update(func(tx *bbolt.Tx) error {
- b := tx.Bucket(defaultBucket)
- key := []byte(key)
- recordDeleted = b.Get(key) != nil
- return b.Delete(key)
- })
- }
-
- if err == nil {
- c.metrics.Evict(StorageTypeDB)
- storagelog.Write(c.log,
- storagelog.AddressField(key),
- storagelog.StorageTypeField(wcStorageType),
- storagelog.OpField("db DELETE"),
- )
- if recordDeleted {
- c.objCounters.cDB.Add(math.MaxUint64)
- c.estimateCacheSize()
- }
- } else {
- c.log.Error(logs.WritecacheCantRemoveObjectsFromTheDatabase, zap.Error(err))
- }
-}
-
func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address) {
_, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
if err != nil && !client.IsErrObjectNotFound(err) {
From b142b6f48e46210235a56840c79da358120cf0cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:01:30 +0300
Subject: [PATCH 045/655] [#1367] fstree: Add size to file counter
The FSTree file counter is used by writecache. Since writecache now has only
one storage, the real object size is needed to estimate the writecache
size more accurately than `count * max_object_size`.
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/fstree/counter.go | 61 +++++++++++++++----
.../blobstor/fstree/fstree.go | 22 ++++---
.../blobstor/fstree/fstree_test.go | 15 +++--
.../blobstor/fstree/fstree_write_generic.go | 27 +++++---
.../blobstor/fstree/fstree_write_linux.go | 42 ++++++++++---
pkg/local_object_storage/writecache/cache.go | 3 +
.../writecache/options.go | 2 -
pkg/local_object_storage/writecache/state.go | 41 ++-----------
.../writecache/storage.go | 2 +-
9 files changed, 130 insertions(+), 85 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/fstree/counter.go b/pkg/local_object_storage/blobstor/fstree/counter.go
index 718104e2e..b5dbc9e40 100644
--- a/pkg/local_object_storage/blobstor/fstree/counter.go
+++ b/pkg/local_object_storage/blobstor/fstree/counter.go
@@ -1,22 +1,21 @@
package fstree
import (
- "math"
- "sync/atomic"
+ "sync"
)
// FileCounter used to count files in FSTree. The implementation must be thread-safe.
type FileCounter interface {
- Set(v uint64)
- Inc()
- Dec()
+ Set(count, size uint64)
+ Inc(size uint64)
+ Dec(size uint64)
}
type noopCounter struct{}
-func (c *noopCounter) Set(uint64) {}
-func (c *noopCounter) Inc() {}
-func (c *noopCounter) Dec() {}
+func (c *noopCounter) Set(uint64, uint64) {}
+func (c *noopCounter) Inc(uint64) {}
+func (c *noopCounter) Dec(uint64) {}
func counterEnabled(c FileCounter) bool {
_, noop := c.(*noopCounter)
@@ -24,14 +23,50 @@ func counterEnabled(c FileCounter) bool {
}
type SimpleCounter struct {
- v atomic.Uint64
+ mtx sync.RWMutex
+ count uint64
+ size uint64
}
func NewSimpleCounter() *SimpleCounter {
return &SimpleCounter{}
}
-func (c *SimpleCounter) Set(v uint64) { c.v.Store(v) }
-func (c *SimpleCounter) Inc() { c.v.Add(1) }
-func (c *SimpleCounter) Dec() { c.v.Add(math.MaxUint64) }
-func (c *SimpleCounter) Value() uint64 { return c.v.Load() }
+func (c *SimpleCounter) Set(count, size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count = count
+ c.size = size
+}
+
+func (c *SimpleCounter) Inc(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ c.count++
+ c.size += size
+}
+
+func (c *SimpleCounter) Dec(size uint64) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if c.count > 0 {
+ c.count--
+ } else {
+ panic("fstree.SimpleCounter: invalid count")
+ }
+ if c.size >= size {
+ c.size -= size
+ } else {
+ panic("fstree.SimpleCounter: invalid size")
+ }
+}
+
+func (c *SimpleCounter) CountSize() (uint64, uint64) {
+ c.mtx.RLock()
+ defer c.mtx.RUnlock()
+
+ return c.count, c.size
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 02580dbfa..bf6ba51e5 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -435,32 +435,38 @@ func (t *FSTree) initFileCounter() error {
return nil
}
- counter, err := t.countFiles()
+ count, size, err := t.countFiles()
if err != nil {
return err
}
- t.fileCounter.Set(counter)
+ t.fileCounter.Set(count, size)
return nil
}
-func (t *FSTree) countFiles() (uint64, error) {
- var counter uint64
+func (t *FSTree) countFiles() (uint64, uint64, error) {
+ var count, size uint64
// it is simpler to just consider every file
// that is not directory as an object
err := filepath.WalkDir(t.RootPath,
func(_ string, d fs.DirEntry, _ error) error {
- if !d.IsDir() {
- counter++
+ if d.IsDir() {
+ return nil
}
+ count++
+ info, err := d.Info()
+ if err != nil {
+ return err
+ }
+ size += uint64(info.Size())
return nil
},
)
if err != nil {
- return 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
+ return 0, 0, fmt.Errorf("could not walk through %s directory: %w", t.RootPath, err)
}
- return counter, nil
+ return count, size, nil
}
func (t *FSTree) ObjectsCount(ctx context.Context) (uint64, error) {
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index 5786dfd3b..f39c7296e 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -47,8 +47,9 @@ func TestObjectCounter(t *testing.T) {
require.NoError(t, fst.Open(mode.ComponentReadWrite))
require.NoError(t, fst.Init())
- counterValue := counter.Value()
- require.Equal(t, uint64(0), counterValue)
+ count, size := counter.CountSize()
+ require.Equal(t, uint64(0), count)
+ require.Equal(t, uint64(0), size)
defer func() {
require.NoError(t, fst.Close())
@@ -64,9 +65,6 @@ func TestObjectCounter(t *testing.T) {
putPrm.Address = addr
putPrm.RawData, _ = obj.Marshal()
- var getPrm common.GetPrm
- getPrm.Address = putPrm.Address
-
var delPrm common.DeletePrm
delPrm.Address = addr
@@ -95,8 +93,9 @@ func TestObjectCounter(t *testing.T) {
require.NoError(t, eg.Wait())
- counterValue = counter.Value()
- realCount, err := fst.countFiles()
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
require.NoError(t, err)
- require.Equal(t, realCount, counterValue)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 8b2622885..801fc4a22 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -78,14 +78,14 @@ func (w *genericWriter) writeAndRename(tmpPath, p string, data []byte) error {
}
if w.fileCounterEnabled {
- w.fileCounter.Inc()
+ w.fileCounter.Inc(uint64(len(data)))
var targetFileExists bool
if _, e := os.Stat(p); e == nil {
targetFileExists = true
}
err = os.Rename(tmpPath, p)
if err == nil && targetFileExists {
- w.fileCounter.Dec()
+ w.fileCounter.Dec(uint64(len(data)))
}
} else {
err = os.Rename(tmpPath, p)
@@ -110,12 +110,7 @@ func (w *genericWriter) writeFile(p string, data []byte) error {
func (w *genericWriter) removeFile(p string) error {
var err error
if w.fileCounterEnabled {
- w.fileGuard.Lock(p)
- err = os.Remove(p)
- w.fileGuard.Unlock(p)
- if err == nil {
- w.fileCounter.Dec()
- }
+ err = w.removeWithCounter(p)
} else {
err = os.Remove(p)
}
@@ -125,3 +120,19 @@ func (w *genericWriter) removeFile(p string) error {
}
return err
}
+
+func (w *genericWriter) removeWithCounter(p string) error {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+
+ stat, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+
+ if err := os.Remove(p); err != nil {
+ return err
+ }
+ w.fileCounter.Dec(uint64(stat.Size()))
+ return nil
+}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index efc5a3d3d..3127579ac 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -9,6 +9,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"golang.org/x/sys/unix"
)
@@ -18,7 +19,9 @@ type linuxWriter struct {
perm uint32
flags int
- counter FileCounter
+ fileGuard keyLock
+ fileCounter FileCounter
+ fileCounterEnabled bool
}
func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync bool) writer {
@@ -33,11 +36,18 @@ func newSpecificWriteData(c FileCounter, root string, perm fs.FileMode, noSync b
return nil
}
_ = unix.Close(fd) // Don't care about error.
+ var fileGuard keyLock = &noopKeyLock{}
+ fileCounterEnabled := counterEnabled(c)
+ if fileCounterEnabled {
+ fileGuard = utilSync.NewKeyLocker[string]()
+ }
w := &linuxWriter{
- root: root,
- perm: uint32(perm),
- flags: flags,
- counter: c,
+ root: root,
+ perm: uint32(perm),
+ flags: flags,
+ fileGuard: fileGuard,
+ fileCounter: c,
+ fileCounterEnabled: fileCounterEnabled,
}
return w
}
@@ -51,6 +61,10 @@ func (w *linuxWriter) writeData(p string, data []byte) error {
}
func (w *linuxWriter) writeFile(p string, data []byte) error {
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
fd, err := unix.Open(w.root, w.flags, w.perm)
if err != nil {
return err
@@ -61,7 +75,7 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
if n == len(data) {
err = unix.Linkat(unix.AT_FDCWD, tmpPath, unix.AT_FDCWD, p, unix.AT_SYMLINK_FOLLOW)
if err == nil {
- w.counter.Inc()
+ w.fileCounter.Inc(uint64(len(data)))
}
if errors.Is(err, unix.EEXIST) {
err = nil
@@ -78,12 +92,24 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
}
func (w *linuxWriter) removeFile(p string) error {
- err := unix.Unlink(p)
+ if w.fileCounterEnabled {
+ w.fileGuard.Lock(p)
+ defer w.fileGuard.Unlock(p)
+ }
+ var stat unix.Stat_t
+ err := unix.Stat(p, &stat)
+ if err != nil {
+ if err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ err = unix.Unlink(p)
if err != nil && err == unix.ENOENT {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.counter.Dec()
+ w.fileCounter.Dec(uint64(stat.Size))
}
return err
}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index ff38de407..f2280f2f4 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -29,6 +29,8 @@ type cache struct {
wg sync.WaitGroup
// fsTree contains big files stored directly on file-system.
fsTree *fstree.FSTree
+ // counter tracks the number and total size of objects stored in cache.
+ counter *fstree.SimpleCounter
}
// wcStorageType is used for write-cache operations logging.
@@ -56,6 +58,7 @@ func New(opts ...Option) Cache {
c := &cache{
flushCh: make(chan objectInfo),
mode: mode.Disabled,
+ counter: fstree.NewSimpleCounter(),
options: options{
log: &logger.Logger{Logger: zap.NewNop()},
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 7845c5da9..0643faac0 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -30,8 +30,6 @@ type options struct {
// maxCacheCount is the maximum total count of all object saved in cache.
// 0 (no limit) by default.
maxCacheCount uint64
- // objCounters contains atomic counters for the number of objects stored in cache.
- objCounters counters
// maxBatchSize is the maximum batch size for the small object database.
maxBatchSize int
// maxBatchDelay is the maximum batch wait time for the small object database.
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index e4e22f404..748c78bcb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -1,18 +1,10 @@
package writecache
-import (
- "math"
- "sync/atomic"
-
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
-)
-
func (c *cache) estimateCacheSize() (uint64, uint64) {
- fsCount := c.objCounters.FS()
- fsSize := fsCount * c.maxObjectSize
- c.metrics.SetEstimateSize(0, fsSize)
- c.metrics.SetActualCounters(0, fsCount)
- return fsCount, fsSize
+ count, size := c.counter.CountSize()
+ c.metrics.SetEstimateSize(0, size)
+ c.metrics.SetActualCounters(0, count)
+ return count, size
}
func (c *cache) hasEnoughSpaceFS() bool {
@@ -27,31 +19,6 @@ func (c *cache) hasEnoughSpace(objectSize uint64) bool {
return c.maxCacheSize >= size+objectSize
}
-var _ fstree.FileCounter = &counters{}
-
-type counters struct {
- cFS atomic.Uint64
-}
-
-func (x *counters) FS() uint64 {
- return x.cFS.Load()
-}
-
-// Set implements fstree.ObjectCounter.
-func (x *counters) Set(v uint64) {
- x.cFS.Store(v)
-}
-
-// Inc implements fstree.ObjectCounter.
-func (x *counters) Inc() {
- x.cFS.Add(1)
-}
-
-// Dec implements fstree.ObjectCounter.
-func (x *counters) Dec() {
- x.cFS.Add(math.MaxUint64)
-}
-
func (c *cache) initCounters() error {
c.estimateCacheSize()
return nil
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 309bd2a66..e708a529e 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -30,7 +30,7 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
fstree.WithDepth(1),
fstree.WithDirNameLen(1),
fstree.WithNoSync(c.noSync),
- fstree.WithFileCounter(&c.objCounters),
+ fstree.WithFileCounter(c.counter),
)
if err := c.fsTree.Open(mod); err != nil {
return fmt.Errorf("could not open FSTree: %w", err)
From 2dd3a6f7a85800452f38ddb3127dea455b6366dc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:47:42 +0300
Subject: [PATCH 046/655] [#1367] fstree: Add IterateInfo method
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/fstree/fstree.go | 75 +++++++++++++++++++
.../blobstor/fstree/metrics.go | 2 +
pkg/local_object_storage/metrics/fstree.go | 4 +
3 files changed, 81 insertions(+)
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index bf6ba51e5..1c60ec340 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -222,6 +222,81 @@ func (t *FSTree) iterate(ctx context.Context, depth uint64, curPath []string, pr
return nil
}
+type ObjectInfo struct {
+ Address oid.Address
+ DataSize uint64
+}
+type IterateInfoHandler func(ObjectInfo) error
+
+func (t *FSTree) IterateInfo(ctx context.Context, handler IterateInfoHandler) error {
+ var (
+ err error
+ startedAt = time.Now()
+ )
+ defer func() {
+ t.metrics.IterateInfo(time.Since(startedAt), err == nil)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "FSTree.IterateInfo")
+ defer span.End()
+
+ return t.iterateInfo(ctx, 0, []string{t.RootPath}, handler)
+}
+
+func (t *FSTree) iterateInfo(ctx context.Context, depth uint64, curPath []string, handler IterateInfoHandler) error {
+ curName := strings.Join(curPath[1:], "")
+ dirPath := filepath.Join(curPath...)
+ entries, err := os.ReadDir(dirPath)
+ if err != nil {
+ return fmt.Errorf("read fstree dir '%s': %w", dirPath, err)
+ }
+
+ isLast := depth >= t.Depth
+ l := len(curPath)
+ curPath = append(curPath, "")
+
+ for i := range entries {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ curPath[l] = entries[i].Name()
+
+ if !isLast && entries[i].IsDir() {
+ err := t.iterateInfo(ctx, depth+1, curPath, handler)
+ if err != nil {
+ return err
+ }
+ }
+
+ if depth != t.Depth {
+ continue
+ }
+
+ addr, err := addressFromString(curName + entries[i].Name())
+ if err != nil {
+ continue
+ }
+ info, err := entries[i].Info()
+ if err != nil {
+ if os.IsNotExist(err) {
+ continue
+ }
+ return err
+ }
+
+ err = handler(ObjectInfo{
+ Address: addr,
+ DataSize: uint64(info.Size()),
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func (t *FSTree) treePath(addr oid.Address) string {
sAddr := stringifyAddress(addr)
diff --git a/pkg/local_object_storage/blobstor/fstree/metrics.go b/pkg/local_object_storage/blobstor/fstree/metrics.go
index 10de935eb..4241beec9 100644
--- a/pkg/local_object_storage/blobstor/fstree/metrics.go
+++ b/pkg/local_object_storage/blobstor/fstree/metrics.go
@@ -13,6 +13,7 @@ type Metrics interface {
Close()
Iterate(d time.Duration, success bool)
+ IterateInfo(d time.Duration, success bool)
Delete(d time.Duration, success bool)
Exists(d time.Duration, success bool)
Put(d time.Duration, size int, success bool)
@@ -27,6 +28,7 @@ func (m *noopMetrics) SetParentID(string) {}
func (m *noopMetrics) SetMode(mode.ComponentMode) {}
func (m *noopMetrics) Close() {}
func (m *noopMetrics) Iterate(time.Duration, bool) {}
+func (m *noopMetrics) IterateInfo(time.Duration, bool) {}
func (m *noopMetrics) Delete(time.Duration, bool) {}
func (m *noopMetrics) Exists(time.Duration, bool) {}
func (m *noopMetrics) Put(time.Duration, int, bool) {}
diff --git a/pkg/local_object_storage/metrics/fstree.go b/pkg/local_object_storage/metrics/fstree.go
index 76822ac2c..d93363fa3 100644
--- a/pkg/local_object_storage/metrics/fstree.go
+++ b/pkg/local_object_storage/metrics/fstree.go
@@ -38,6 +38,10 @@ func (m *fstreeMetrics) Iterate(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Iterate", d, success)
}
+func (m *fstreeMetrics) IterateInfo(d time.Duration, success bool) {
+ m.m.MethodDuration(m.shardID, m.path, "IterateInfo", d, success)
+}
+
func (m *fstreeMetrics) Delete(d time.Duration, success bool) {
m.m.MethodDuration(m.shardID, m.path, "Delete", d, success)
}
From 8a6e3025a07d9c4d80a6252f4ee8bb0e0aa2021d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 11:49:17 +0300
Subject: [PATCH 047/655] [#1367] writecache: Flush from FSTree concurrently
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 1 +
pkg/local_object_storage/writecache/cache.go | 7 +--
pkg/local_object_storage/writecache/flush.go | 65 +++++++++++++++++---
3 files changed, 62 insertions(+), 11 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 87e4e0b43..7aef6873e 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -544,4 +544,5 @@ const (
FailedToSealWritecacheAsync = "failed to seal writecache async"
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
+ WritecacheCantGetObject = "can't get an object from fstree"
)
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index f2280f2f4..b298f812a 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -10,7 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -37,9 +37,8 @@ type cache struct {
const wcStorageType = "write-cache"
type objectInfo struct {
- addr string
- data []byte
- obj *objectSDK.Object
+ addr oid.Address
+ size uint64
}
const (
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 074756e32..d06896ed5 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -13,10 +13,12 @@ import (
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -29,7 +31,7 @@ const (
// defaultFlushWorkersCount is number of workers for putting objects in main storage.
defaultFlushWorkersCount = 20
// defaultFlushInterval is default time interval between successive flushes.
- defaultFlushInterval = time.Second
+ defaultFlushInterval = 10 * time.Second
)
var errIterationCompleted = errors.New("iteration completed")
@@ -41,23 +43,41 @@ func (c *cache) runFlushLoop(ctx context.Context) {
}
c.wg.Add(1)
go func() {
- c.workerFlushBig(ctx)
- c.wg.Done()
+ defer c.wg.Done()
+ c.pushToFlushQueue(ctx)
}()
+
+ for range c.workersCount {
+ c.wg.Add(1)
+ go c.workerFlush(ctx)
+ }
}
-func (c *cache) workerFlushBig(ctx context.Context) {
- tick := time.NewTicker(defaultFlushInterval * 10)
+func (c *cache) pushToFlushQueue(ctx context.Context) {
+ tick := time.NewTicker(defaultFlushInterval)
for {
select {
case <-tick.C:
c.modeMtx.RLock()
if c.readOnly() || c.noMetabase() {
c.modeMtx.RUnlock()
- break
+ continue
}
- _ = c.flushFSTree(ctx, true)
+ err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ select {
+ case c.flushCh <- objectInfo{
+ addr: oi.Address,
+ size: oi.DataSize,
+ }:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ })
+ if err != nil {
+ c.log.Warn(logs.BlobstorErrorOccurredDuringTheIteration, zap.Error(err))
+ }
c.modeMtx.RUnlock()
case <-ctx.Done():
@@ -66,6 +86,37 @@ func (c *cache) workerFlushBig(ctx context.Context) {
}
}
+func (c *cache) workerFlush(ctx context.Context) {
+ defer c.wg.Done()
+
+ var objInfo objectInfo
+ for {
+ select {
+ case objInfo = <-c.flushCh:
+ case <-ctx.Done():
+ return
+ }
+
+ res, err := c.fsTree.Get(ctx, common.GetPrm{
+ Address: objInfo.addr,
+ })
+ if err != nil {
+ if !errors.As(err, new(*apistatus.ObjectNotFound)) {
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ }
+ continue
+ }
+
+ err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
+ if err != nil {
+ // Error is handled in flushObject.
+ continue
+ }
+
+ c.deleteFromDisk(ctx, objInfo.addr)
+ }
+}
+
func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
c.reportError(msg, err)
From e39378b1c36d0d00864c4f5e7fcab44975ce506d Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 12:56:29 +0300
Subject: [PATCH 048/655] [#1367] writecache: Add background flushing objects
limiter
To limit memory usage by background flush.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 3 +
cmd/frostfs-node/config/engine/config_test.go | 2 +
.../config/engine/shard/writecache/config.go | 18 +++++
config/example/node.env | 1 +
config/example/node.json | 3 +-
config/example/node.yaml | 1 +
docs/storage-node-configuration.md | 23 +++---
pkg/local_object_storage/writecache/cache.go | 1 +
pkg/local_object_storage/writecache/flush.go | 61 ++++++++++------
.../writecache/limiter.go | 70 +++++++++++++++++++
.../writecache/limiter_test.go | 27 +++++++
.../writecache/options.go | 9 +++
12 files changed, 184 insertions(+), 35 deletions(-)
create mode 100644 pkg/local_object_storage/writecache/limiter.go
create mode 100644 pkg/local_object_storage/writecache/limiter_test.go
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 16f49a082..ef2752872 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -154,6 +154,7 @@ type shardCfg struct {
countLimit uint64
noSync bool
pageSize int
+ flushSizeLimit uint64
}
piloramaCfg struct {
@@ -278,6 +279,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.sizeLimit = writeCacheCfg.SizeLimit()
wc.countLimit = writeCacheCfg.CountLimit()
wc.noSync = writeCacheCfg.NoSync()
+ wc.flushSizeLimit = writeCacheCfg.MaxFlushingObjectsSize()
}
}
@@ -865,6 +867,7 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
writecache.WithMaxBatchSize(wcRead.maxBatchSize),
writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
writecache.WithPageSize(wcRead.pageSize),
+ writecache.WithFlushSizeLimit(wcRead.flushSizeLimit),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index ef6bf7f74..b952aca4c 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -79,6 +79,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 3221225472, wc.SizeLimit())
require.EqualValues(t, 4096, wc.BoltDB().PageSize())
require.EqualValues(t, 49, wc.CountLimit())
+ require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/0/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
@@ -136,6 +137,7 @@ func TestEngineSection(t *testing.T) {
require.EqualValues(t, 4294967296, wc.SizeLimit())
require.EqualValues(t, 0, wc.BoltDB().PageSize())
require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
+ require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize())
require.Equal(t, "tmp/1/meta", meta.Path())
require.Equal(t, fs.FileMode(0o644), meta.BoltDB().Perm())
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index bfe8144df..5a069e99f 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -24,6 +24,8 @@ const (
// CountLimitDefault is a default write-cache count limit.
CountLimitDefault = 0
+
+ MaxFlushingObjectsSizeDefault = 128 << 20
)
// From wraps config section into Config.
@@ -145,3 +147,19 @@ func (x *Config) NoSync() bool {
func (x *Config) BoltDB() *boltdbconfig.Config {
return (*boltdbconfig.Config)(x)
}
+
+// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter.
+//
+// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number.
+func (x *Config) MaxFlushingObjectsSize() uint64 {
+ s := config.SizeInBytesSafe(
+ (*config.Config)(x),
+ "max_flushing_objects_size",
+ )
+
+ if s > 0 {
+ return s
+ }
+
+ return MaxFlushingObjectsSizeDefault
+}
diff --git a/config/example/node.env b/config/example/node.env
index 82553745e..c3fa85c13 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -106,6 +106,7 @@ FROSTFS_STORAGE_SHARD_0_WRITECACHE_FLUSH_WORKER_COUNT=30
FROSTFS_STORAGE_SHARD_0_WRITECACHE_CAPACITY=3221225472
FROSTFS_STORAGE_SHARD_0_WRITECACHE_PAGE_SIZE=4096
FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_OBJECT_COUNT=49
+FROSTFS_STORAGE_SHARD_0_WRITECACHE_MAX_FLUSHING_OBJECTS_SIZE=100
### Metabase config
FROSTFS_STORAGE_SHARD_0_METABASE_PATH=tmp/0/meta
FROSTFS_STORAGE_SHARD_0_METABASE_PERM=0644
diff --git a/config/example/node.json b/config/example/node.json
index da108c692..d7187250b 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -149,7 +149,8 @@
"flush_worker_count": 30,
"capacity": 3221225472,
"page_size": 4096,
- "max_object_count": 49
+ "max_object_count": 49,
+ "max_flushing_objects_size": 100
},
"metabase": {
"path": "tmp/0/meta",
diff --git a/config/example/node.yaml b/config/example/node.yaml
index a79f48226..776b22bd0 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -172,6 +172,7 @@ storage:
capacity: 3221225472 # approximate write-cache total size, bytes
max_object_count: 49
page_size: 4k
+ max_flushing_objects_size: 100b
metabase:
path: tmp/0/meta # metabase path
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index 5bf35cd65..c83828978 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -293,17 +293,18 @@ writecache:
page_size: '4k'
```
-| Parameter | Type | Default value | Description |
-|----------------------|------------|---------------|-------------------------------------------------------------------------------------------------------------------------------|
-| `path` | `string` | | Path to the metabase file. |
-| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
-| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
-| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
+| Parameter | Type | Default value | Description |
+| --------------------------- | ---------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| `path` | `string` | | Path to the metabase file. |
+| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
+| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
+| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
+| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
+| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
+| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
+| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
+| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index b298f812a..f0f10d8b5 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -68,6 +68,7 @@ func New(opts ...Option) Cache {
maxBatchSize: bbolt.DefaultMaxBatchSize,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
metrics: DefaultMetrics(),
+ flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
},
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index d06896ed5..5d5fc13ab 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -18,7 +18,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
- apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -41,19 +41,25 @@ func (c *cache) runFlushLoop(ctx context.Context) {
if c.disableBackgroundFlush {
return
}
+ fl := newFlushLimiter(c.flushSizeLimit)
c.wg.Add(1)
go func() {
defer c.wg.Done()
- c.pushToFlushQueue(ctx)
+ c.pushToFlushQueue(ctx, fl)
}()
for range c.workersCount {
c.wg.Add(1)
- go c.workerFlush(ctx)
+ go c.workerFlush(ctx, fl)
}
}
-func (c *cache) pushToFlushQueue(ctx context.Context) {
+func (c *cache) pushToFlushQueue(ctx context.Context, fl *flushLimiter) {
+ stopf := context.AfterFunc(ctx, func() {
+ fl.close()
+ })
+ defer stopf()
+
tick := time.NewTicker(defaultFlushInterval)
for {
select {
@@ -65,6 +71,9 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}
err := c.fsTree.IterateInfo(ctx, func(oi fstree.ObjectInfo) error {
+ if err := fl.acquire(oi.DataSize); err != nil {
+ return err
+ }
select {
case c.flushCh <- objectInfo{
addr: oi.Address,
@@ -72,6 +81,7 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}:
return nil
case <-ctx.Done():
+ fl.release(oi.DataSize)
return ctx.Err()
}
})
@@ -86,37 +96,42 @@ func (c *cache) pushToFlushQueue(ctx context.Context) {
}
}
-func (c *cache) workerFlush(ctx context.Context) {
+func (c *cache) workerFlush(ctx context.Context, fl *flushLimiter) {
defer c.wg.Done()
var objInfo objectInfo
for {
select {
case objInfo = <-c.flushCh:
+ c.flushIfAnObjectExistsWorker(ctx, objInfo, fl)
case <-ctx.Done():
return
}
-
- res, err := c.fsTree.Get(ctx, common.GetPrm{
- Address: objInfo.addr,
- })
- if err != nil {
- if !errors.As(err, new(*apistatus.ObjectNotFound)) {
- c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
- }
- continue
- }
-
- err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
- if err != nil {
- // Error is handled in flushObject.
- continue
- }
-
- c.deleteFromDisk(ctx, objInfo.addr)
}
}
+func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectInfo, fl *flushLimiter) {
+ defer fl.release(objInfo.size)
+
+ res, err := c.fsTree.Get(ctx, common.GetPrm{
+ Address: objInfo.addr,
+ })
+ if err != nil {
+ if !client.IsErrObjectNotFound(err) {
+ c.reportFlushError(logs.WritecacheCantGetObject, objInfo.addr.EncodeToString(), metaerr.Wrap(err))
+ }
+ return
+ }
+
+ err = c.flushObject(ctx, res.Object, res.RawData, StorageTypeFSTree)
+ if err != nil {
+ // Error is handled in flushObject.
+ return
+ }
+
+ c.deleteFromDisk(ctx, objInfo.addr)
+}
+
func (c *cache) reportFlushError(msg string, addr string, err error) {
if c.reportError != nil {
c.reportError(msg, err)
diff --git a/pkg/local_object_storage/writecache/limiter.go b/pkg/local_object_storage/writecache/limiter.go
new file mode 100644
index 000000000..ddc4101be
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter.go
@@ -0,0 +1,70 @@
+package writecache
+
+import (
+ "errors"
+ "sync"
+)
+
+var errLimiterClosed = errors.New("acquire failed: limiter closed")
+
+// flushLimiter limits the total size of objects
+// being flushed to the main storage at the same time. This limitation
+// is necessary so that the background flushing process does not
+// noticeably degrade the performance of user requests.
+type flushLimiter struct {
+ count, size uint64
+ maxSize uint64
+ cond *sync.Cond
+ closed bool
+}
+
+func newFlushLimiter(maxSize uint64) *flushLimiter {
+ return &flushLimiter{
+ maxSize: maxSize,
+ cond: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (l *flushLimiter) acquire(size uint64) error {
+	l.cond.L.Lock()
+	defer l.cond.L.Unlock()
+
+	// it is allowed to overflow maxSize to allow flushing objects with size > maxSize
+	for l.count > 0 && l.size+size > l.maxSize && !l.closed {
+		l.cond.Wait()
+	}
+	if l.closed { // also covers close() happening before the first wait
+		return errLimiterClosed
+	}
+	l.count++
+	l.size += size
+	return nil
+}
+
+func (l *flushLimiter) release(size uint64) {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ if l.size >= size {
+ l.size -= size
+ } else {
+ panic("flushLimiter: invalid size")
+ }
+
+ if l.count > 0 {
+ l.count--
+ } else {
+ panic("flushLimiter: invalid count")
+ }
+
+ l.cond.Broadcast()
+}
+
+func (l *flushLimiter) close() {
+ l.cond.L.Lock()
+ defer l.cond.L.Unlock()
+
+ l.closed = true
+
+ l.cond.Broadcast()
+}
diff --git a/pkg/local_object_storage/writecache/limiter_test.go b/pkg/local_object_storage/writecache/limiter_test.go
new file mode 100644
index 000000000..db99b203a
--- /dev/null
+++ b/pkg/local_object_storage/writecache/limiter_test.go
@@ -0,0 +1,27 @@
+package writecache
+
+import (
+ "sync/atomic"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "golang.org/x/sync/errgroup"
+)
+
+func TestLimiter(t *testing.T) {
+	var maxSize uint64 = 10
+	var single uint64 = 3
+	l := newFlushLimiter(maxSize)
+	var currSize atomic.Int64
+	var eg errgroup.Group
+	for i := 0; i < 10_000; i++ {
+		eg.Go(func() error {
+			defer l.release(single)
+			defer currSize.Add(-1)
+			require.NoError(t, l.acquire(single))
+			require.True(t, currSize.Add(1) <= int64(maxSize/single))
+			return nil
+		})
+	}
+	require.NoError(t, eg.Wait())
+}
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index 0643faac0..edbb3d422 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -44,6 +44,8 @@ type options struct {
disableBackgroundFlush bool
// pageSize is bbolt's page size config value
pageSize int
+ // flushSizeLimit is total size of flushing objects.
+ flushSizeLimit uint64
}
// WithLogger sets logger.
@@ -169,3 +171,10 @@ func WithPageSize(s int) Option {
o.pageSize = s
}
}
+
+// WithFlushSizeLimit sets flush size limit.
+func WithFlushSizeLimit(v uint64) Option {
+ return func(o *options) {
+ o.flushSizeLimit = v
+ }
+}
From 25d2ae8aaf22c12e9e625b0433f84a49f5f22b39 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 10 Sep 2024 13:12:17 +0300
Subject: [PATCH 049/655] [#1367] writecache: Drop BBolt related config
variables
Signed-off-by: Dmitrii Stepanov
---
.../internal/writecache/inspect.go | 2 +-
cmd/frostfs-lens/internal/writecache/list.go | 2 +-
cmd/frostfs-node/config.go | 12 -----
cmd/frostfs-node/config/engine/config_test.go | 4 --
.../config/engine/shard/writecache/config.go | 25 -----------
docs/storage-node-configuration.md | 6 ---
.../writecache/benchmark/writecache_test.go | 1 -
pkg/local_object_storage/writecache/cache.go | 21 ++++-----
pkg/local_object_storage/writecache/flush.go | 2 +-
.../writecache/flush_test.go | 13 +++---
.../writecache/options.go | 44 -------------------
pkg/local_object_storage/writecache/util.go | 3 +-
12 files changed, 17 insertions(+), 118 deletions(-)
diff --git a/cmd/frostfs-lens/internal/writecache/inspect.go b/cmd/frostfs-lens/internal/writecache/inspect.go
index 63c669a35..afc986c8b 100644
--- a/cmd/frostfs-lens/internal/writecache/inspect.go
+++ b/cmd/frostfs-lens/internal/writecache/inspect.go
@@ -25,7 +25,7 @@ func init() {
func inspectFunc(cmd *cobra.Command, _ []string) {
var data []byte
- db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-lens/internal/writecache/list.go b/cmd/frostfs-lens/internal/writecache/list.go
index 9c8fa6138..bcbae0ec9 100644
--- a/cmd/frostfs-lens/internal/writecache/list.go
+++ b/cmd/frostfs-lens/internal/writecache/list.go
@@ -31,7 +31,7 @@ func listFunc(cmd *cobra.Command, _ []string) {
return err
}
- db, err := writecache.OpenDB(vPath, true, os.OpenFile, 0)
+ db, err := writecache.OpenDB(vPath, true, os.OpenFile)
common.ExitOnErr(cmd, common.Errf("could not open write-cache db: %w", err))
defer db.Close()
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ef2752872..cdfabdebd 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -145,15 +145,11 @@ type shardCfg struct {
writecacheCfg struct {
enabled bool
path string
- maxBatchSize int
- maxBatchDelay time.Duration
- smallObjectSize uint64
maxObjSize uint64
flushWorkerCount int
sizeLimit uint64
countLimit uint64
noSync bool
- pageSize int
flushSizeLimit uint64
}
@@ -270,11 +266,7 @@ func (a *applicationConfiguration) setShardWriteCacheConfig(newConfig *shardCfg,
wc.enabled = true
wc.path = writeCacheCfg.Path()
- wc.maxBatchSize = writeCacheCfg.BoltDB().MaxBatchSize()
- wc.maxBatchDelay = writeCacheCfg.BoltDB().MaxBatchDelay()
- wc.pageSize = writeCacheCfg.BoltDB().PageSize()
wc.maxObjSize = writeCacheCfg.MaxObjectSize()
- wc.smallObjectSize = writeCacheCfg.SmallObjectSize()
wc.flushWorkerCount = writeCacheCfg.WorkerCount()
wc.sizeLimit = writeCacheCfg.SizeLimit()
wc.countLimit = writeCacheCfg.CountLimit()
@@ -864,12 +856,8 @@ func (c *cfg) getWriteCacheOpts(shCfg shardCfg) []writecache.Option {
if wcRead := shCfg.writecacheCfg; wcRead.enabled {
writeCacheOpts = append(writeCacheOpts,
writecache.WithPath(wcRead.path),
- writecache.WithMaxBatchSize(wcRead.maxBatchSize),
- writecache.WithMaxBatchDelay(wcRead.maxBatchDelay),
- writecache.WithPageSize(wcRead.pageSize),
writecache.WithFlushSizeLimit(wcRead.flushSizeLimit),
writecache.WithMaxObjectSize(wcRead.maxObjSize),
- writecache.WithSmallObjectSize(wcRead.smallObjectSize),
writecache.WithFlushWorkersCount(wcRead.flushWorkerCount),
writecache.WithMaxCacheSize(wcRead.sizeLimit),
writecache.WithMaxCacheCount(wcRead.countLimit),
diff --git a/cmd/frostfs-node/config/engine/config_test.go b/cmd/frostfs-node/config/engine/config_test.go
index b952aca4c..19ad0e7ac 100644
--- a/cmd/frostfs-node/config/engine/config_test.go
+++ b/cmd/frostfs-node/config/engine/config_test.go
@@ -73,11 +73,9 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, true, wc.NoSync())
require.Equal(t, "tmp/0/cache", wc.Path())
- require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 3221225472, wc.SizeLimit())
- require.EqualValues(t, 4096, wc.BoltDB().PageSize())
require.EqualValues(t, 49, wc.CountLimit())
require.EqualValues(t, uint64(100), wc.MaxFlushingObjectsSize())
@@ -131,11 +129,9 @@ func TestEngineSection(t *testing.T) {
require.Equal(t, false, wc.NoSync())
require.Equal(t, "tmp/1/cache", wc.Path())
- require.EqualValues(t, 16384, wc.SmallObjectSize())
require.EqualValues(t, 134217728, wc.MaxObjectSize())
require.EqualValues(t, 30, wc.WorkerCount())
require.EqualValues(t, 4294967296, wc.SizeLimit())
- require.EqualValues(t, 0, wc.BoltDB().PageSize())
require.EqualValues(t, writecacheconfig.CountLimitDefault, wc.CountLimit())
require.EqualValues(t, writecacheconfig.MaxFlushingObjectsSizeDefault, wc.MaxFlushingObjectsSize())
diff --git a/cmd/frostfs-node/config/engine/shard/writecache/config.go b/cmd/frostfs-node/config/engine/shard/writecache/config.go
index 5a069e99f..6fff0308b 100644
--- a/cmd/frostfs-node/config/engine/shard/writecache/config.go
+++ b/cmd/frostfs-node/config/engine/shard/writecache/config.go
@@ -2,7 +2,6 @@ package writecacheconfig
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
- boltdbconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/boltdb"
)
// Config is a wrapper over the config section
@@ -10,9 +9,6 @@ import (
type Config config.Config
const (
- // SmallSizeDefault is a default size of small objects.
- SmallSizeDefault = 32 << 10
-
// MaxSizeDefault is a default value of the object payload size limit.
MaxSizeDefault = 64 << 20
@@ -56,22 +52,6 @@ func (x *Config) Path() string {
return p
}
-// SmallObjectSize returns the value of "small_object_size" config parameter.
-//
-// Returns SmallSizeDefault if the value is not a positive number.
-func (x *Config) SmallObjectSize() uint64 {
- s := config.SizeInBytesSafe(
- (*config.Config)(x),
- "small_object_size",
- )
-
- if s > 0 {
- return s
- }
-
- return SmallSizeDefault
-}
-
// MaxObjectSize returns the value of "max_object_size" config parameter.
//
// Returns MaxSizeDefault if the value is not a positive number.
@@ -143,11 +123,6 @@ func (x *Config) NoSync() bool {
return config.BoolSafe((*config.Config)(x), "no_sync")
}
-// BoltDB returns config instance for querying bolt db specific parameters.
-func (x *Config) BoltDB() *boltdbconfig.Config {
- return (*boltdbconfig.Config)(x)
-}
-
// MaxFlushingObjectsSize returns the value of "max_flushing_objects_size" config parameter.
//
// Returns MaxFlushingObjectsSizeDefault if the value is not a positive number.
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index c83828978..c74695e2b 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -287,10 +287,8 @@ writecache:
enabled: true
path: /path/to/writecache
capacity: 4294967296
- small_object_size: 16384
max_object_size: 134217728
flush_worker_count: 30
- page_size: '4k'
```
| Parameter | Type | Default value | Description |
@@ -298,13 +296,9 @@ writecache:
| `path` | `string` | | Path to the metabase file. |
| `capacity` | `size` | `1G` | Approximate maximum size of the writecache. If the writecache is full, objects are written to the blobstor directly. |
| `max_object_count` | `int` | unrestricted | Approximate maximum objects count in the writecache. If the writecache is full, objects are written to the blobstor directly. |
-| `small_object_size` | `size` | `32K` | Maximum object size for "small" objects. This objects are stored in a key-value database instead of a file-system. |
| `max_object_size` | `size` | `64M` | Maximum object size allowed to be stored in the writecache. |
| `flush_worker_count` | `int` | `20` | Amount of background workers that move data from the writecache to the blobstor. |
| `max_flushing_objects_size` | `size` | `512M` | Max total size of background flushing objects. |
-| `max_batch_size` | `int` | `1000` | Maximum amount of small object `PUT` operations to perform in a single transaction. |
-| `max_batch_delay` | `duration` | `10ms` | Maximum delay before a batch starts. |
-| `page_size` | `size` | `0` | Page size overrides the default OS page size for small objects storage. Does not affect the existing storage. |
# `node` section
diff --git a/pkg/local_object_storage/writecache/benchmark/writecache_test.go b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
index 4da9a26d7..79ab7d9c6 100644
--- a/pkg/local_object_storage/writecache/benchmark/writecache_test.go
+++ b/pkg/local_object_storage/writecache/benchmark/writecache_test.go
@@ -118,6 +118,5 @@ func newCache(b *testing.B) writecache.Cache {
writecache.WithBlobstor(bs),
writecache.WithMetabase(testMetabase{}),
writecache.WithMaxCacheSize(256<<30),
- writecache.WithSmallObjectSize(128<<10),
)
}
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index f0f10d8b5..341071dc6 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -11,7 +11,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.uber.org/zap"
)
@@ -42,9 +41,8 @@ type objectInfo struct {
}
const (
- defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
- defaultSmallObjectSize = 32 * 1024 // 32 KiB
- defaultMaxCacheSize = 1 << 30 // 1 GiB
+ defaultMaxObjectSize = 64 * 1024 * 1024 // 64 MiB
+ defaultMaxCacheSize = 1 << 30 // 1 GiB
)
var (
@@ -60,15 +58,12 @@ func New(opts ...Option) Cache {
counter: fstree.NewSimpleCounter(),
options: options{
- log: &logger.Logger{Logger: zap.NewNop()},
- maxObjectSize: defaultMaxObjectSize,
- smallObjectSize: defaultSmallObjectSize,
- workersCount: defaultFlushWorkersCount,
- maxCacheSize: defaultMaxCacheSize,
- maxBatchSize: bbolt.DefaultMaxBatchSize,
- maxBatchDelay: bbolt.DefaultMaxBatchDelay,
- metrics: DefaultMetrics(),
- flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
+ log: &logger.Logger{Logger: zap.NewNop()},
+ maxObjectSize: defaultMaxObjectSize,
+ workersCount: defaultFlushWorkersCount,
+ maxCacheSize: defaultMaxCacheSize,
+ metrics: DefaultMetrics(),
+ flushSizeLimit: defaultFlushWorkersCount * defaultMaxObjectSize,
},
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 5d5fc13ab..10e4d68f0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -253,7 +253,7 @@ func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
if err != nil {
return fmt.Errorf("could not check write-cache database existence: %w", err)
}
- db, err := OpenDB(c.path, true, os.OpenFile, c.pageSize)
+ db, err := OpenDB(c.path, true, os.OpenFile)
if err != nil {
return fmt.Errorf("could not open write-cache database: %w", err)
}
diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go
index 9c7e240e0..59a4e4895 100644
--- a/pkg/local_object_storage/writecache/flush_test.go
+++ b/pkg/local_object_storage/writecache/flush_test.go
@@ -25,12 +25,11 @@ import (
func TestFlush(t *testing.T) {
testlogger := test.NewLogger(t)
- createCacheFn := func(t *testing.T, smallSize uint64, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
+ createCacheFn := func(t *testing.T, mb *meta.DB, bs MainStorage, opts ...Option) Cache {
return New(
append([]Option{
WithLogger(testlogger),
WithPath(filepath.Join(t.TempDir(), "writecache")),
- WithSmallObjectSize(smallSize),
WithMetabase(mb),
WithBlobstor(bs),
WithDisableBackgroundFlush(),
@@ -92,7 +91,6 @@ const (
type CreateCacheFunc[Option any] func(
t *testing.T,
- smallSize uint64,
meta *meta.DB,
bs MainStorage,
opts ...Option,
@@ -115,7 +113,7 @@ func runFlushTest[Option any](
failures ...TestFailureInjector[Option],
) {
t.Run("no errors", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ wc, bs, mb := newCache(t, createCacheFn)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
@@ -128,7 +126,7 @@ func runFlushTest[Option any](
})
t.Run("flush on moving to degraded mode", func(t *testing.T) {
- wc, bs, mb := newCache(t, createCacheFn, smallSize)
+ wc, bs, mb := newCache(t, createCacheFn)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
@@ -146,7 +144,7 @@ func runFlushTest[Option any](
for _, f := range failures {
t.Run(f.Desc, func(t *testing.T) {
errCountOpt, errCount := errCountOption()
- wc, bs, mb := newCache(t, createCacheFn, smallSize, errCountOpt)
+ wc, bs, mb := newCache(t, createCacheFn, errCountOpt)
defer func() { require.NoError(t, wc.Close()) }()
objects := putObjects(t, wc)
f.InjectFn(t, wc)
@@ -168,7 +166,6 @@ func runFlushTest[Option any](
func newCache[Option any](
t *testing.T,
createCacheFn CreateCacheFunc[Option],
- smallSize uint64,
opts ...Option,
) (Cache, *blobstor.BlobStor, *meta.DB) {
dir := t.TempDir()
@@ -189,7 +186,7 @@ func newCache[Option any](
require.NoError(t, bs.Open(context.Background(), mode.ReadWrite))
require.NoError(t, bs.Init())
- wc := createCacheFn(t, smallSize, mb, bs, opts...)
+ wc := createCacheFn(t, mb, bs, opts...)
require.NoError(t, wc.Open(context.Background(), mode.ReadWrite))
require.NoError(t, wc.Init())
diff --git a/pkg/local_object_storage/writecache/options.go b/pkg/local_object_storage/writecache/options.go
index edbb3d422..66ac7805c 100644
--- a/pkg/local_object_storage/writecache/options.go
+++ b/pkg/local_object_storage/writecache/options.go
@@ -1,8 +1,6 @@
package writecache
import (
- "time"
-
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.uber.org/zap"
)
@@ -20,8 +18,6 @@ type options struct {
metabase Metabase
// maxObjectSize is the maximum size of the object stored in the write-cache.
maxObjectSize uint64
- // smallObjectSize is the maximum size of the object stored in the database.
- smallObjectSize uint64
// workersCount is the number of workers flushing objects in parallel.
workersCount int
// maxCacheSize is the maximum total size of all objects saved in cache (DB + FS).
@@ -30,10 +26,6 @@ type options struct {
// maxCacheCount is the maximum total count of all object saved in cache.
// 0 (no limit) by default.
maxCacheCount uint64
- // maxBatchSize is the maximum batch size for the small object database.
- maxBatchSize int
- // maxBatchDelay is the maximum batch wait time for the small object database.
- maxBatchDelay time.Duration
// noSync is true iff FSTree allows unsynchronized writes.
noSync bool
// reportError is the function called when encountering disk errors in background workers.
@@ -42,8 +34,6 @@ type options struct {
metrics Metrics
// disableBackgroundFlush is for testing purposes only.
disableBackgroundFlush bool
- // pageSize is bbolt's page size config value
- pageSize int
// flushSizeLimit is total size of flushing objects.
flushSizeLimit uint64
}
@@ -85,15 +75,6 @@ func WithMaxObjectSize(sz uint64) Option {
}
}
-// WithSmallObjectSize sets maximum object size to be stored in write-cache.
-func WithSmallObjectSize(sz uint64) Option {
- return func(o *options) {
- if sz > 0 {
- o.smallObjectSize = sz
- }
- }
-}
-
func WithFlushWorkersCount(c int) Option {
return func(o *options) {
if c > 0 {
@@ -116,24 +97,6 @@ func WithMaxCacheCount(v uint64) Option {
}
}
-// WithMaxBatchSize sets max batch size for the small object database.
-func WithMaxBatchSize(sz int) Option {
- return func(o *options) {
- if sz > 0 {
- o.maxBatchSize = sz
- }
- }
-}
-
-// WithMaxBatchDelay sets max batch delay for the small object database.
-func WithMaxBatchDelay(d time.Duration) Option {
- return func(o *options) {
- if d > 0 {
- o.maxBatchDelay = d
- }
- }
-}
-
// WithNoSync sets an option to allow returning to caller on PUT before write is persisted.
// Note, that we use this flag for FSTree only and DO NOT use it for a bolt DB because
// we cannot yet properly handle the corrupted database during the startup. This SHOULD NOT
@@ -165,13 +128,6 @@ func WithDisableBackgroundFlush() Option {
}
}
-// WithPageSize sets bbolt's page size.
-func WithPageSize(s int) Option {
- return func(o *options) {
- o.pageSize = s
- }
-}
-
// WithFlushSizeLimit sets flush size limit.
func WithFlushSizeLimit(v uint64) Option {
return func(o *options) {
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
index ad3b443f3..0ed4a954e 100644
--- a/pkg/local_object_storage/writecache/util.go
+++ b/pkg/local_object_storage/writecache/util.go
@@ -10,12 +10,11 @@ import (
)
// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error), pageSize int) (*bbolt.DB, error) {
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
NoFreelistSync: true,
ReadOnly: ro,
Timeout: 100 * time.Millisecond,
OpenFile: openFile,
- PageSize: pageSize,
})
}
From 3b236160a61c8257aae5bd7078035838606650b7 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 11 Sep 2024 09:55:08 +0300
Subject: [PATCH 050/655] [#1367] writecache: Drop DB label from metrics
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/writecache.go | 10 ++++------
pkg/local_object_storage/writecache/metrics.go | 8 ++++----
pkg/local_object_storage/writecache/state.go | 4 ++--
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go
index 7710bc7f4..e9ba3410f 100644
--- a/pkg/local_object_storage/engine/writecache.go
+++ b/pkg/local_object_storage/engine/writecache.go
@@ -169,18 +169,16 @@ func (m *writeCacheMetrics) Put(d time.Duration, success bool, st writecache.Sto
m.metrics.AddMethodDuration(m.shardID, m.path, st.String(), "Put", success, d)
}
-func (m *writeCacheMetrics) SetEstimateSize(db, fstree uint64) {
- m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeDB.String(), db)
- m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), fstree)
+func (m *writeCacheMetrics) SetEstimateSize(size uint64) {
+ m.metrics.SetEstimateSize(m.shardID, m.path, writecache.StorageTypeFSTree.String(), size)
}
func (m *writeCacheMetrics) SetMode(mod mode.ComponentMode) {
m.metrics.SetMode(m.shardID, mod.String())
}
-func (m *writeCacheMetrics) SetActualCounters(db, fstree uint64) {
- m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeDB.String(), db)
- m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), fstree)
+func (m *writeCacheMetrics) SetActualCounters(count uint64) {
+ m.metrics.SetActualCount(m.shardID, m.path, writecache.StorageTypeFSTree.String(), count)
}
func (m *writeCacheMetrics) Flush(success bool, st writecache.StorageType) {
diff --git a/pkg/local_object_storage/writecache/metrics.go b/pkg/local_object_storage/writecache/metrics.go
index e68b6d8be..e3641f85e 100644
--- a/pkg/local_object_storage/writecache/metrics.go
+++ b/pkg/local_object_storage/writecache/metrics.go
@@ -26,9 +26,9 @@ type Metrics interface {
Flush(success bool, st StorageType)
Evict(st StorageType)
- SetEstimateSize(db, fstree uint64)
+ SetEstimateSize(uint64)
SetMode(m mode.ComponentMode)
- SetActualCounters(db, fstree uint64)
+ SetActualCounters(uint64)
SetPath(path string)
Close()
}
@@ -47,11 +47,11 @@ func (metricsStub) Delete(time.Duration, bool, StorageType) {}
func (metricsStub) Put(time.Duration, bool, StorageType) {}
-func (metricsStub) SetEstimateSize(uint64, uint64) {}
+func (metricsStub) SetEstimateSize(uint64) {}
func (metricsStub) SetMode(mode.ComponentMode) {}
-func (metricsStub) SetActualCounters(uint64, uint64) {}
+func (metricsStub) SetActualCounters(uint64) {}
func (metricsStub) Flush(bool, StorageType) {}
diff --git a/pkg/local_object_storage/writecache/state.go b/pkg/local_object_storage/writecache/state.go
index 748c78bcb..835686fbb 100644
--- a/pkg/local_object_storage/writecache/state.go
+++ b/pkg/local_object_storage/writecache/state.go
@@ -2,8 +2,8 @@ package writecache
func (c *cache) estimateCacheSize() (uint64, uint64) {
count, size := c.counter.CountSize()
- c.metrics.SetEstimateSize(0, size)
- c.metrics.SetActualCounters(0, count)
+ c.metrics.SetEstimateSize(size)
+ c.metrics.SetActualCounters(count)
return count, size
}
From f345fe9a581762fcd5827719d1781e16ce133056 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 11 Sep 2024 17:08:52 +0300
Subject: [PATCH 051/655] [#1367] writecache: Move DB related code to
upgrade.go
This is done so that this file can be dropped in the future.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/writecache/cache.go | 5 +-
pkg/local_object_storage/writecache/flush.go | 86 --------------
.../writecache/storage.go | 2 -
.../writecache/upgrade.go | 110 ++++++++++++++++++
pkg/local_object_storage/writecache/util.go | 20 ----
5 files changed, 111 insertions(+), 112 deletions(-)
create mode 100644 pkg/local_object_storage/writecache/upgrade.go
delete mode 100644 pkg/local_object_storage/writecache/util.go
diff --git a/pkg/local_object_storage/writecache/cache.go b/pkg/local_object_storage/writecache/cache.go
index 341071dc6..b97fc5856 100644
--- a/pkg/local_object_storage/writecache/cache.go
+++ b/pkg/local_object_storage/writecache/cache.go
@@ -45,10 +45,7 @@ const (
defaultMaxCacheSize = 1 << 30 // 1 GiB
)
-var (
- defaultBucket = []byte{0}
- dummyCanceler context.CancelFunc = func() {}
-)
+var dummyCanceler context.CancelFunc = func() {}
// New creates new writecache instance.
func New(opts ...Option) Cache {
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 10e4d68f0..83933375b 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -1,12 +1,8 @@
package writecache
import (
- "bytes"
"context"
"errors"
- "fmt"
- "os"
- "path/filepath"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -20,8 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
- oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
- "go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
@@ -239,83 +233,3 @@ func (c *cache) Flush(ctx context.Context, ignoreErrors, seal bool) error {
func (c *cache) flush(ctx context.Context, ignoreErrors bool) error {
return c.flushFSTree(ctx, ignoreErrors)
}
-
-type batchItem struct {
- data []byte
- address string
-}
-
-func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
- _, err := os.Stat(filepath.Join(c.path, dbName))
- if err != nil && os.IsNotExist(err) {
- return nil
- }
- if err != nil {
- return fmt.Errorf("could not check write-cache database existence: %w", err)
- }
- db, err := OpenDB(c.path, true, os.OpenFile)
- if err != nil {
- return fmt.Errorf("could not open write-cache database: %w", err)
- }
- defer func() {
- _ = db.Close()
- }()
-
- var last string
- for {
- batch, err := c.readNextDBBatch(db, last)
- if err != nil {
- return err
- }
- if len(batch) == 0 {
- break
- }
- for _, item := range batch {
- var obj objectSDK.Object
- if err := obj.Unmarshal(item.data); err != nil {
- return fmt.Errorf("unmarshal object from database: %w", err)
- }
- if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
- return fmt.Errorf("flush object from database: %w", err)
- }
- }
- last = batch[len(batch)-1].address
- }
- if err := db.Close(); err != nil {
- return fmt.Errorf("close write-cache database: %w", err)
- }
- if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
- return fmt.Errorf("remove write-cache database: %w", err)
- }
- return nil
-}
-
-func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
- const batchSize = 100
- var batch []batchItem
- err := db.View(func(tx *bbolt.Tx) error {
- var addr oid.Address
-
- b := tx.Bucket(defaultBucket)
- cs := b.Cursor()
- for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
- sa := string(k)
- if sa == last {
- continue
- }
- if err := addr.DecodeString(sa); err != nil {
- return fmt.Errorf("decode address from database: %w", err)
- }
-
- batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
- if len(batch) == batchSize {
- return errIterationCompleted
- }
- }
- return nil
- })
- if err == nil || errors.Is(err, errIterationCompleted) {
- return batch, nil
- }
- return nil, err
-}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index e708a529e..6aface7a5 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -16,8 +16,6 @@ import (
"go.uber.org/zap"
)
-const dbName = "small.bolt"
-
func (c *cache) openStore(mod mode.ComponentMode) error {
err := util.MkdirAllX(c.path, os.ModePerm)
if err != nil {
diff --git a/pkg/local_object_storage/writecache/upgrade.go b/pkg/local_object_storage/writecache/upgrade.go
new file mode 100644
index 000000000..3a100f1a3
--- /dev/null
+++ b/pkg/local_object_storage/writecache/upgrade.go
@@ -0,0 +1,110 @@
+package writecache
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "time"
+
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ "go.etcd.io/bbolt"
+)
+
+const dbName = "small.bolt"
+
+var defaultBucket = []byte{0}
+
+func (c *cache) flushAndDropBBoltDB(ctx context.Context) error {
+ _, err := os.Stat(filepath.Join(c.path, dbName))
+ if err != nil && os.IsNotExist(err) {
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("could not check write-cache database existence: %w", err)
+ }
+ db, err := OpenDB(c.path, true, os.OpenFile)
+ if err != nil {
+ return fmt.Errorf("could not open write-cache database: %w", err)
+ }
+ defer func() {
+ _ = db.Close()
+ }()
+
+ var last string
+ for {
+ batch, err := c.readNextDBBatch(db, last)
+ if err != nil {
+ return err
+ }
+ if len(batch) == 0 {
+ break
+ }
+ for _, item := range batch {
+ var obj objectSDK.Object
+ if err := obj.Unmarshal(item.data); err != nil {
+ return fmt.Errorf("unmarshal object from database: %w", err)
+ }
+ if err := c.flushObject(ctx, &obj, item.data, StorageTypeDB); err != nil {
+ return fmt.Errorf("flush object from database: %w", err)
+ }
+ }
+ last = batch[len(batch)-1].address
+ }
+ if err := db.Close(); err != nil {
+ return fmt.Errorf("close write-cache database: %w", err)
+ }
+ if err := os.Remove(filepath.Join(c.path, dbName)); err != nil {
+ return fmt.Errorf("remove write-cache database: %w", err)
+ }
+ return nil
+}
+
+type batchItem struct {
+ data []byte
+ address string
+}
+
+func (c *cache) readNextDBBatch(db *bbolt.DB, last string) ([]batchItem, error) {
+ const batchSize = 100
+ var batch []batchItem
+ err := db.View(func(tx *bbolt.Tx) error {
+ var addr oid.Address
+
+ b := tx.Bucket(defaultBucket)
+ cs := b.Cursor()
+ for k, data := cs.Seek([]byte(last)); k != nil; k, data = cs.Next() {
+ sa := string(k)
+ if sa == last {
+ continue
+ }
+ if err := addr.DecodeString(sa); err != nil {
+ return fmt.Errorf("decode address from database: %w", err)
+ }
+
+ batch = append(batch, batchItem{data: bytes.Clone(data), address: sa})
+ if len(batch) == batchSize {
+ return errIterationCompleted
+ }
+ }
+ return nil
+ })
+ if err == nil || errors.Is(err, errIterationCompleted) {
+ return batch, nil
+ }
+ return nil, err
+}
+
+// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
+func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
+ return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
+ NoFreelistSync: true,
+ ReadOnly: ro,
+ Timeout: 100 * time.Millisecond,
+ OpenFile: openFile,
+ })
+}
diff --git a/pkg/local_object_storage/writecache/util.go b/pkg/local_object_storage/writecache/util.go
deleted file mode 100644
index 0ed4a954e..000000000
--- a/pkg/local_object_storage/writecache/util.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package writecache
-
-import (
- "io/fs"
- "os"
- "path/filepath"
- "time"
-
- "go.etcd.io/bbolt"
-)
-
-// OpenDB opens BoltDB instance for write-cache. Opens in read-only mode if ro is true.
-func OpenDB(p string, ro bool, openFile func(string, int, fs.FileMode) (*os.File, error)) (*bbolt.DB, error) {
- return bbolt.Open(filepath.Join(p, dbName), os.ModePerm, &bbolt.Options{
- NoFreelistSync: true,
- ReadOnly: ro,
- Timeout: 100 * time.Millisecond,
- OpenFile: openFile,
- })
-}
From b33559754df994c4e2e37f1b5b6c8f29ac8f97f1 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 12 Sep 2024 12:33:12 +0300
Subject: [PATCH 052/655] [#1367] fstree: Add size hint for Delete
This allows skipping the `os.Stat` call if the caller already knows the data size.
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/common/delete.go | 1 +
.../blobstor/fstree/fstree.go | 2 +-
.../blobstor/fstree/fstree_test.go | 84 +++++++++++++------
.../blobstor/fstree/fstree_write_generic.go | 19 +++--
.../blobstor/fstree/fstree_write_linux.go | 25 +++---
pkg/local_object_storage/writecache/flush.go | 4 +-
.../writecache/storage.go | 4 +-
7 files changed, 92 insertions(+), 47 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/common/delete.go b/pkg/local_object_storage/blobstor/common/delete.go
index 1b04eab1a..c19e099cb 100644
--- a/pkg/local_object_storage/blobstor/common/delete.go
+++ b/pkg/local_object_storage/blobstor/common/delete.go
@@ -8,6 +8,7 @@ import (
type DeletePrm struct {
Address oid.Address
StorageID []byte
+ Size uint64
}
// DeleteRes groups the resulting values of Delete operation.
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree.go b/pkg/local_object_storage/blobstor/fstree/fstree.go
index 1c60ec340..057796db2 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree.go
@@ -338,7 +338,7 @@ func (t *FSTree) Delete(ctx context.Context, prm common.DeletePrm) (common.Delet
}
p := t.treePath(prm.Address)
- err = t.writer.removeFile(p)
+ err = t.writer.removeFile(p, prm.Size)
return common.DeleteRes{}, err
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_test.go b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
index f39c7296e..eb2126b6c 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_test.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_test.go
@@ -68,34 +68,70 @@ func TestObjectCounter(t *testing.T) {
var delPrm common.DeletePrm
delPrm.Address = addr
- eg, egCtx := errgroup.WithContext(context.Background())
+ t.Run("without size hint", func(t *testing.T) {
+ eg, egCtx := errgroup.WithContext(context.Background())
- eg.Go(func() error {
- for range 1_000 {
- _, err := fst.Put(egCtx, putPrm)
- if err != nil {
- return err
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
}
- }
- return nil
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
})
- eg.Go(func() error {
- var le logicerr.Logical
- for range 1_000 {
- _, err := fst.Delete(egCtx, delPrm)
- if err != nil && !errors.As(err, &le) {
- return err
+ t.Run("with size hint", func(t *testing.T) {
+ delPrm.Size = uint64(len(putPrm.RawData))
+ eg, egCtx := errgroup.WithContext(context.Background())
+
+ eg.Go(func() error {
+ for range 1_000 {
+ _, err := fst.Put(egCtx, putPrm)
+ if err != nil {
+ return err
+ }
}
- }
- return nil
+ return nil
+ })
+
+ eg.Go(func() error {
+ var le logicerr.Logical
+ for range 1_000 {
+ _, err := fst.Delete(egCtx, delPrm)
+ if err != nil && !errors.As(err, &le) {
+ return err
+ }
+ }
+ return nil
+ })
+
+ require.NoError(t, eg.Wait())
+
+ count, size = counter.CountSize()
+ realCount, realSize, err := fst.countFiles()
+ require.NoError(t, err)
+ require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
+ require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
})
-
- require.NoError(t, eg.Wait())
-
- count, size = counter.CountSize()
- realCount, realSize, err := fst.countFiles()
- require.NoError(t, err)
- require.Equal(t, realCount, count, "real %d, actual %d", realCount, count)
- require.Equal(t, realSize, size, "real %d, actual %d", realSize, size)
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
index 801fc4a22..4110ba7d7 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_generic.go
@@ -16,7 +16,7 @@ import (
type writer interface {
writeData(string, []byte) error
- removeFile(string) error
+ removeFile(string, uint64) error
}
type genericWriter struct {
@@ -107,10 +107,10 @@ func (w *genericWriter) writeFile(p string, data []byte) error {
return err
}
-func (w *genericWriter) removeFile(p string) error {
+func (w *genericWriter) removeFile(p string, size uint64) error {
var err error
if w.fileCounterEnabled {
- err = w.removeWithCounter(p)
+ err = w.removeWithCounter(p, size)
} else {
err = os.Remove(p)
}
@@ -121,18 +121,21 @@ func (w *genericWriter) removeFile(p string) error {
return err
}
-func (w *genericWriter) removeWithCounter(p string) error {
+func (w *genericWriter) removeWithCounter(p string, size uint64) error {
w.fileGuard.Lock(p)
defer w.fileGuard.Unlock(p)
- stat, err := os.Stat(p)
- if err != nil {
- return err
+ if size == 0 {
+ stat, err := os.Stat(p)
+ if err != nil {
+ return err
+ }
+ size = uint64(stat.Size())
}
if err := os.Remove(p); err != nil {
return err
}
- w.fileCounter.Dec(uint64(stat.Size()))
+ w.fileCounter.Dec(uint64(size))
return nil
}
diff --git a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
index 3127579ac..3561c616b 100644
--- a/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
+++ b/pkg/local_object_storage/blobstor/fstree/fstree_write_linux.go
@@ -91,25 +91,30 @@ func (w *linuxWriter) writeFile(p string, data []byte) error {
return errClose
}
-func (w *linuxWriter) removeFile(p string) error {
+func (w *linuxWriter) removeFile(p string, size uint64) error {
if w.fileCounterEnabled {
w.fileGuard.Lock(p)
defer w.fileGuard.Unlock(p)
- }
- var stat unix.Stat_t
- err := unix.Stat(p, &stat)
- if err != nil {
- if err == unix.ENOENT {
- return logicerr.Wrap(new(apistatus.ObjectNotFound))
+
+ if size == 0 {
+ var stat unix.Stat_t
+ err := unix.Stat(p, &stat)
+ if err != nil {
+ if err == unix.ENOENT {
+ return logicerr.Wrap(new(apistatus.ObjectNotFound))
+ }
+ return err
+ }
+ size = uint64(stat.Size)
}
- return err
}
- err = unix.Unlink(p)
+
+ err := unix.Unlink(p)
if err != nil && err == unix.ENOENT {
return logicerr.Wrap(new(apistatus.ObjectNotFound))
}
if err == nil {
- w.fileCounter.Dec(uint64(stat.Size))
+ w.fileCounter.Dec(uint64(size))
}
return err
}
diff --git a/pkg/local_object_storage/writecache/flush.go b/pkg/local_object_storage/writecache/flush.go
index 83933375b..bfa6aacb0 100644
--- a/pkg/local_object_storage/writecache/flush.go
+++ b/pkg/local_object_storage/writecache/flush.go
@@ -123,7 +123,7 @@ func (c *cache) flushIfAnObjectExistsWorker(ctx context.Context, objInfo objectI
return
}
- c.deleteFromDisk(ctx, objInfo.addr)
+ c.deleteFromDisk(ctx, objInfo.addr, uint64(len(res.RawData)))
}
func (c *cache) reportFlushError(msg string, addr string, err error) {
@@ -157,7 +157,7 @@ func (c *cache) flushFSTree(ctx context.Context, ignoreErrors bool) error {
return err
}
- c.deleteFromDisk(ctx, e.Address)
+ c.deleteFromDisk(ctx, e.Address, uint64(len(e.ObjectData)))
return nil
}
diff --git a/pkg/local_object_storage/writecache/storage.go b/pkg/local_object_storage/writecache/storage.go
index 6aface7a5..2e52e5b20 100644
--- a/pkg/local_object_storage/writecache/storage.go
+++ b/pkg/local_object_storage/writecache/storage.go
@@ -40,8 +40,8 @@ func (c *cache) openStore(mod mode.ComponentMode) error {
return nil
}
-func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address) {
- _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr})
+func (c *cache) deleteFromDisk(ctx context.Context, addr oid.Address, size uint64) {
+ _, err := c.fsTree.Delete(ctx, common.DeletePrm{Address: addr, Size: size})
if err != nil && !client.IsErrObjectNotFound(err) {
c.log.Error(logs.WritecacheCantRemoveObjectFromWritecache, zap.Error(err))
} else if err == nil {
From e3764c51df237289431eacd0051bb3754e7682cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 12 Sep 2024 09:53:21 +0300
Subject: [PATCH 053/655] [#1347] metabase: Fix EC search
For EC chunks, we need to return the EC parent object ID, as
EC chunks don't have their own attributes but inherit the parent's.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/select.go | 21 +++++++++------
.../metabase/select_test.go | 27 +++++++++++++++++++
2 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 1863fc25e..ed43fc41f 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -150,7 +150,8 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters
continue // ignore removed objects
}
- if !db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch) {
+ addr, match := db.matchSlowFilters(tx, addr, group.slowFilters, currEpoch)
+ if !match {
continue // ignore objects with unmatched slow filters
}
@@ -382,15 +383,16 @@ func (db *DB) selectObjectID(
}
// matchSlowFilters return true if object header is matched by all slow filters.
-func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) bool {
+func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.SearchFilters, currEpoch uint64) (oid.Address, bool) {
+ result := addr
if len(f) == 0 {
- return true
+ return result, true
}
buf := make([]byte, addressKeySize)
obj, err := db.get(tx, addr, buf, true, false, currEpoch)
if err != nil {
- return false
+ return result, false
}
for i := range f {
@@ -415,23 +417,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
default: // user attribute
v, ok := attributeValue(obj, f[i].Header())
if ok {
+ if ech := obj.ECHeader(); ech != nil {
+ result.SetObject(ech.Parent())
+ }
data = []byte(v)
} else {
- return f[i].Operation() == objectSDK.MatchNotPresent
+ return result, f[i].Operation() == objectSDK.MatchNotPresent
}
}
matchFunc, ok := db.matchers[f[i].Operation()]
if !ok {
- return false
+ return result, false
}
if !matchFunc.matchSlow(f[i].Header(), data, f[i].Value()) {
- return false
+ return result, false
}
}
- return true
+ return result, true
}
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 0fab3a108..6469bbdbc 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -70,6 +70,22 @@ func TestDB_SelectUserAttributes(t *testing.T) {
err = putBig(db, raw6)
require.NoError(t, err)
+ raw7 := testutil.GenerateObjectWithCID(cnr)
+ var attr objectSDK.Attribute
+ attr.SetKey("path")
+ attr.SetValue("test/3/4")
+ attrs := raw7.Attributes()
+ attrs = append(attrs, attr)
+ ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
+ ID: oidtest.ID(),
+ Attributes: attrs,
+ }, 0, 3, []byte{}, 0)
+ raw7.SetECHeader(ech)
+ require.NoError(t, putBig(db, raw7))
+ var raw7Parent oid.Address
+ raw7Parent.SetContainer(cnr)
+ raw7Parent.SetObject(ech.Parent())
+
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
testSelect(t, db, cnr, fs,
@@ -100,6 +116,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -110,6 +127,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -120,6 +138,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -131,6 +150,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ object.AddressOf(raw7),
)
fs = objectSDK.SearchFilters{}
@@ -139,6 +159,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
+ raw7Parent,
)
fs = objectSDK.SearchFilters{}
@@ -147,6 +168,12 @@ func TestDB_SelectUserAttributes(t *testing.T) {
object.AddressOf(raw4),
object.AddressOf(raw5),
)
+
+ fs = objectSDK.SearchFilters{}
+ fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
+ testSelect(t, db, cnr, fs,
+ raw7Parent,
+ )
}
func TestDB_SelectRootPhyParent(t *testing.T) {
From 546d09660f52d262c3c053b72457170f5c4b68e7 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Thu, 12 Sep 2024 17:05:14 +0300
Subject: [PATCH 054/655] [#1283] Clear systemd-notify status on exit
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-ir/main.go | 5 +++++
cmd/frostfs-node/config.go | 4 ++++
pkg/util/sdnotify/sdnotify.go | 5 +++++
3 files changed, 14 insertions(+)
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 5b852c37c..02936ae78 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -13,6 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/innerring"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sdnotify"
"github.com/spf13/viper"
"go.uber.org/zap"
)
@@ -124,4 +125,8 @@ func shutdown() {
zap.String("error", err.Error()),
)
}
+
+ if err := sdnotify.ClearStatus(); err != nil {
+ log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ }
}
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index cdfabdebd..f98f1c1a3 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1398,4 +1398,8 @@ func (c *cfg) shutdown() {
for i := range c.closers {
c.closers[len(c.closers)-1-i].fn()
}
+
+ if err := sdnotify.ClearStatus(); err != nil {
+ c.log.Error(logs.FailedToReportStatusToSystemd, zap.Error(err))
+ }
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index 5235315cc..e94ff77ad 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -69,6 +69,11 @@ func Status(status string) error {
return Send("STATUS=" + status)
}
+// ClearStatus resets the current service status previously set by Status.
+func ClearStatus() error {
+ return Status("")
+}
+
// Send state through the notify socket if any.
// If the notify socket was not detected, it returns an error.
func Send(state string) error {
From bb448674918330ddd15df6462fbde860eca5bc64 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:31:33 +0300
Subject: [PATCH 055/655] [#1374] go.mod: Upgrade grpc to v1.66.2
Signed-off-by: Dmitrii Stepanov
---
go.mod | 14 +++++++-------
go.sum | 28 ++++++++++++++--------------
2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/go.mod b/go.mod
index 93eef5b8c..78fefc9ae 100644
--- a/go.mod
+++ b/go.mod
@@ -46,9 +46,9 @@ require (
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
- golang.org/x/term v0.18.0
- google.golang.org/grpc v1.63.2
- google.golang.org/protobuf v1.33.0
+ golang.org/x/term v0.21.0
+ google.golang.org/grpc v1.66.2
+ google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v3 v3.0.1
)
@@ -122,11 +122,11 @@ require (
go.opentelemetry.io/otel/sdk v1.22.0 // indirect
go.opentelemetry.io/proto/otlp v1.1.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.21.0 // indirect
- golang.org/x/net v0.23.0 // indirect
+ golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 102501484..2147f8988 100644
--- a/go.sum
+++ b/go.sum
@@ -306,8 +306,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
-golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -327,8 +327,8 @@ golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
-golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -370,8 +370,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -393,12 +393,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
-google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
+google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -407,8 +407,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From 944160427bad682df038f75b94cfad1e3e23aa41 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:32:03 +0300
Subject: [PATCH 056/655] [#1374] cli: Drop deprecated grpc connection method
For `frostfs-cli` it is ok to use a grpc client without blocking,
as `frostfs-cli` will perform an RPC call anyway.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/tree/add.go | 5 +++--
cmd/frostfs-cli/modules/tree/add_by_path.go | 5 +++--
cmd/frostfs-cli/modules/tree/client.go | 22 +++++++++++----------
cmd/frostfs-cli/modules/tree/get_by_path.go | 5 +++--
cmd/frostfs-cli/modules/tree/get_op_log.go | 5 +++--
cmd/frostfs-cli/modules/tree/healthcheck.go | 5 +++--
cmd/frostfs-cli/modules/tree/list.go | 5 +++--
cmd/frostfs-cli/modules/tree/move.go | 5 +++--
cmd/frostfs-cli/modules/tree/remove.go | 5 +++--
cmd/frostfs-cli/modules/tree/subtree.go | 5 +++--
10 files changed, 39 insertions(+), 28 deletions(-)
diff --git a/cmd/frostfs-cli/modules/tree/add.go b/cmd/frostfs-cli/modules/tree/add.go
index 0b8dc292f..019feb0ec 100644
--- a/cmd/frostfs-cli/modules/tree/add.go
+++ b/cmd/frostfs-cli/modules/tree/add.go
@@ -47,9 +47,10 @@ func add(cmd *cobra.Command, _ []string) {
meta, err := parseMeta(cmd)
commonCmd.ExitOnErr(cmd, "meta data parsing: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/add_by_path.go b/cmd/frostfs-cli/modules/tree/add_by_path.go
index ea815dbfe..5d5b00b7d 100644
--- a/cmd/frostfs-cli/modules/tree/add_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/add_by_path.go
@@ -50,9 +50,10 @@ func addByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/client.go b/cmd/frostfs-cli/modules/tree/client.go
index 4f4f54657..4e0099f02 100644
--- a/cmd/frostfs-cli/modules/tree/client.go
+++ b/cmd/frostfs-cli/modules/tree/client.go
@@ -3,13 +3,14 @@ package tree
import (
"context"
"strings"
- "time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/tree"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
+ "github.com/spf13/cobra"
"github.com/spf13/viper"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -17,7 +18,7 @@ import (
// _client returns grpc Tree service client. Should be removed
// after making Tree API public.
-func _client(ctx context.Context) (tree.TreeServiceClient, error) {
+func _client() (tree.TreeServiceClient, error) {
var netAddr network.Address
err := netAddr.FromString(viper.GetString(commonflags.RPC))
if err != nil {
@@ -25,7 +26,6 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
}
opts := []grpc.DialOption{
- grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
@@ -40,12 +40,14 @@ func _client(ctx context.Context) (tree.TreeServiceClient, error) {
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
- // a default connection establishing timeout
- const defaultClientConnectTimeout = time.Second * 2
-
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
- cancel()
-
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
return tree.NewTreeServiceClient(cc), err
}
+
+func contextWithTimeout(cmd *cobra.Command) (context.Context, context.CancelFunc) {
+ if timeout := viper.GetDuration(commonflags.Timeout); timeout > 0 {
+ common.PrintVerbose(cmd, "Set request timeout to %s.", timeout)
+ return context.WithTimeout(cmd.Context(), timeout)
+ }
+ return context.WithTimeout(cmd.Context(), commonflags.TimeoutDefault)
+}
diff --git a/cmd/frostfs-cli/modules/tree/get_by_path.go b/cmd/frostfs-cli/modules/tree/get_by_path.go
index f239066cd..7061723fd 100644
--- a/cmd/frostfs-cli/modules/tree/get_by_path.go
+++ b/cmd/frostfs-cli/modules/tree/get_by_path.go
@@ -50,9 +50,10 @@ func getByPath(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/get_op_log.go b/cmd/frostfs-cli/modules/tree/get_op_log.go
index b1b307f62..376aa8e8d 100644
--- a/cmd/frostfs-cli/modules/tree/get_op_log.go
+++ b/cmd/frostfs-cli/modules/tree/get_op_log.go
@@ -44,9 +44,10 @@ func getOpLog(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
tid, _ := cmd.Flags().GetString(treeIDFlagKey)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/healthcheck.go b/cmd/frostfs-cli/modules/tree/healthcheck.go
index f0506467e..b01bb2e77 100644
--- a/cmd/frostfs-cli/modules/tree/healthcheck.go
+++ b/cmd/frostfs-cli/modules/tree/healthcheck.go
@@ -26,9 +26,10 @@ func initHealthcheckCmd() {
func healthcheck(cmd *cobra.Command, _ []string) {
pk := key.GetOrGenerate(cmd)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
req := &tree.HealthcheckRequest{
diff --git a/cmd/frostfs-cli/modules/tree/list.go b/cmd/frostfs-cli/modules/tree/list.go
index a25d066d5..f8c0e490f 100644
--- a/cmd/frostfs-cli/modules/tree/list.go
+++ b/cmd/frostfs-cli/modules/tree/list.go
@@ -38,9 +38,10 @@ func list(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/move.go b/cmd/frostfs-cli/modules/tree/move.go
index 24abbd650..dc807d752 100644
--- a/cmd/frostfs-cli/modules/tree/move.go
+++ b/cmd/frostfs-cli/modules/tree/move.go
@@ -45,9 +45,10 @@ func move(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/remove.go b/cmd/frostfs-cli/modules/tree/remove.go
index 74e9d9749..d0b6fab2f 100644
--- a/cmd/frostfs-cli/modules/tree/remove.go
+++ b/cmd/frostfs-cli/modules/tree/remove.go
@@ -41,9 +41,10 @@ func remove(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
diff --git a/cmd/frostfs-cli/modules/tree/subtree.go b/cmd/frostfs-cli/modules/tree/subtree.go
index e88ef79cb..83a8909b6 100644
--- a/cmd/frostfs-cli/modules/tree/subtree.go
+++ b/cmd/frostfs-cli/modules/tree/subtree.go
@@ -46,9 +46,10 @@ func getSubTree(cmd *cobra.Command, _ []string) {
err := cnr.DecodeString(cidString)
commonCmd.ExitOnErr(cmd, "decode container ID string: %w", err)
- ctx := cmd.Context()
+ ctx, cancel := contextWithTimeout(cmd)
+ defer cancel()
- cli, err := _client(ctx)
+ cli, err := _client()
commonCmd.ExitOnErr(cmd, "failed to create client: %w", err)
rawCID := make([]byte, sha256.Size)
From 54fe8383a4f274da8f1fd553097f3268c9f2d13a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:39:25 +0300
Subject: [PATCH 057/655] [#1374] tree: Use NewClient to create grpc connection
for sync
The created connection will be used to sync trees, so it is ok to defer
dialing until the first RPC call.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/tree/sync.go | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/services/tree/sync.go b/pkg/services/tree/sync.go
index be22074a5..5bbc93978 100644
--- a/pkg/services/tree/sync.go
+++ b/pkg/services/tree/sync.go
@@ -294,7 +294,7 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return false
}
- cc, err := s.dialCtx(egCtx, a)
+ cc, err := s.createConnection(a)
if err != nil {
s.log.Warn(logs.TreeFailedToConnectForTreeSynchronization, zap.Error(err), zap.String("address", addr))
return false
@@ -332,8 +332,8 @@ func (s *Service) synchronizeTree(ctx context.Context, cid cid.ID, from uint64,
return from
}
-func (*Service) dialCtx(egCtx context.Context, a network.Address) (*grpc.ClientConn, error) {
- return grpc.DialContext(egCtx, a.URIAddr(),
+func (*Service) createConnection(a network.Address) (*grpc.ClientConn, error) {
+ return grpc.NewClient(a.URIAddr(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing_grpc.NewUnaryClientInteceptor(),
From 89d0435b1d59257d5bf15c926465193e53c11922 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 13 Sep 2024 11:59:17 +0300
Subject: [PATCH 058/655] [#1374] tree: Use NewClient to create grpc connection
in cache
The created grpc connection should be verified as established, so a Healthcheck
request is performed to check that the connection is ok.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/tree/cache.go | 33 +++++++++++++++++++++++++--------
pkg/services/tree/service.go | 2 +-
2 files changed, 26 insertions(+), 9 deletions(-)
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 1be1c2f83..38501b852 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -2,6 +2,7 @@ package tree
import (
"context"
+ "crypto/ecdsa"
"errors"
"fmt"
"sync"
@@ -19,6 +20,7 @@ import (
type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
+ key *ecdsa.PrivateKey
}
type cacheItem struct {
@@ -34,13 +36,14 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init() {
+func (c *clientCache) init(pk *ecdsa.PrivateKey) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
}
})
c.LRU = *l
+ c.key = pk
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
@@ -63,7 +66,7 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
}
}
- cc, err := dialTreeService(ctx, netmapAddr)
+ cc, err := c.dialTreeService(ctx, netmapAddr)
lastTry := time.Now()
c.Lock()
@@ -81,14 +84,13 @@ func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceCl
return NewTreeServiceClient(cc), nil
}
-func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
+func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn, error) {
var netAddr network.Address
if err := netAddr.FromString(netmapAddr); err != nil {
return nil, err
}
opts := []grpc.DialOption{
- grpc.WithBlock(),
grpc.WithChainUnaryInterceptor(
metrics.NewUnaryClientInterceptor(),
tracing.NewUnaryClientInteceptor(),
@@ -103,9 +105,24 @@ func dialTreeService(ctx context.Context, netmapAddr string) (*grpc.ClientConn,
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
- ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
- cc, err := grpc.DialContext(ctx, netAddr.URIAddr(), opts...)
- cancel()
+ req := &HealthcheckRequest{
+ Body: &HealthcheckRequest_Body{},
+ }
+ if err := SignMessage(req, c.key); err != nil {
+ return nil, err
+ }
- return cc, err
+ cc, err := grpc.NewClient(netAddr.URIAddr(), opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, defaultClientConnectTimeout)
+ defer cancel()
+ // perform some request to check connection
+ if _, err := NewTreeServiceClient(cc).Healthcheck(ctx, req); err != nil {
+ _ = cc.Close()
+ return nil, err
+ }
+ return cc, nil
}
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 875e47ecb..60bb1a6ad 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -65,7 +65,7 @@ func New(opts ...Option) *Service {
s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init()
+ s.cache.init(s.key)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
From 2be1aa781dbbc2462ca717f3b763053fb8cd2810 Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Fri, 13 Sep 2024 15:44:23 +0300
Subject: [PATCH 059/655] [#1266] .forgejo: Make 'fumpt' job fail on changed
files
`gofumpt` always returns an exit code of 0, even when it finds
misformatted files. To make the `fumpt` action behave as expected,
we need to check whether `gofumpt` changed any files.
Signed-off-by: Ekaterina Lebedeva
---
.forgejo/workflows/tests.yml | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml
index a908c6278..07ba5c268 100644
--- a/.forgejo/workflows/tests.yml
+++ b/.forgejo/workflows/tests.yml
@@ -106,4 +106,6 @@ jobs:
run: make fumpt-install
- name: Run gofumpt
- run: make fumpt
+ run: |
+ make fumpt
+ git diff --exit-code --quiet
From 74a6a1da7fb6a895583d72f188f31b8f067ce513 Mon Sep 17 00:00:00 2001
From: Aleksey Savaitan
Date: Thu, 12 Sep 2024 17:40:10 +0300
Subject: [PATCH 060/655] [#1361] add root ca cert for telemetry configuration
Signed-off-by: Aleksey Savaitan
---
cmd/frostfs-node/config.go | 6 ++-
cmd/frostfs-node/config/tracing/config.go | 23 +++++++-
cmd/frostfs-node/tracing.go | 10 ++--
config/example/node.env | 1 +
config/example/node.json | 3 +-
config/example/node.yaml | 1 +
go.mod | 30 +++++------
go.sum | 64 +++++++++++------------
8 files changed, 84 insertions(+), 54 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index f98f1c1a3..bb6580a40 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1299,7 +1299,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}})
components = append(components, dCmp{"pools", c.reloadPools})
components = append(components, dCmp{"tracing", func() error {
- updated, err := tracing.Setup(ctx, *tracingconfig.ToTracingConfig(c.appCfg))
+ traceConfig, err := tracingconfig.ToTracingConfig(c.appCfg)
+ if err != nil {
+ return err
+ }
+ updated, err := tracing.Setup(ctx, *traceConfig)
if updated {
c.log.Info(logs.FrostFSNodeTracingConfigationUpdated)
}
diff --git a/cmd/frostfs-node/config/tracing/config.go b/cmd/frostfs-node/config/tracing/config.go
index e846be158..8544c672c 100644
--- a/cmd/frostfs-node/config/tracing/config.go
+++ b/cmd/frostfs-node/config/tracing/config.go
@@ -1,6 +1,11 @@
package tracing
import (
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "os"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
"git.frostfs.info/TrueCloudLab/frostfs-node/misc"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -11,8 +16,8 @@ const (
)
// ToTracingConfig extracts tracing config.
-func ToTracingConfig(c *config.Config) *tracing.Config {
- return &tracing.Config{
+func ToTracingConfig(c *config.Config) (*tracing.Config, error) {
+ conf := &tracing.Config{
Enabled: config.BoolSafe(c.Sub(subsection), "enabled"),
Exporter: tracing.Exporter(config.StringSafe(c.Sub(subsection), "exporter")),
Endpoint: config.StringSafe(c.Sub(subsection), "endpoint"),
@@ -20,6 +25,20 @@ func ToTracingConfig(c *config.Config) *tracing.Config {
InstanceID: getInstanceIDOrDefault(c),
Version: misc.Version,
}
+
+ if trustedCa := config.StringSafe(c.Sub(subsection), "trusted_ca"); trustedCa != "" {
+ caBytes, err := os.ReadFile(trustedCa)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read trusted ca cert by path: %w", err)
+ }
+ certPool := x509.NewCertPool()
+ ok := certPool.AppendCertsFromPEM(caBytes)
+ if !ok {
+ return nil, errors.New("can't fill cert pool by ca cert")
+ }
+ conf.ServerCaCertPool = certPool
+ }
+ return conf, nil
}
func getInstanceIDOrDefault(c *config.Config) string {
diff --git a/cmd/frostfs-node/tracing.go b/cmd/frostfs-node/tracing.go
index 675c31374..f550dd882 100644
--- a/cmd/frostfs-node/tracing.go
+++ b/cmd/frostfs-node/tracing.go
@@ -11,11 +11,15 @@ import (
)
func initTracing(ctx context.Context, c *cfg) {
- conf := tracingconfig.ToTracingConfig(c.appCfg)
-
- _, err := tracing.Setup(ctx, *conf)
+ conf, err := tracingconfig.ToTracingConfig(c.appCfg)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ return
+ }
+ _, err = tracing.Setup(ctx, *conf)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeFailedInitTracing, zap.Error(err))
+ return
}
c.closers = append(c.closers, closer{
diff --git a/config/example/node.env b/config/example/node.env
index c3fa85c13..030a79934 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -199,6 +199,7 @@ FROSTFS_STORAGE_SHARD_1_GC_REMOVER_SLEEP_INTERVAL=5m
FROSTFS_TRACING_ENABLED=true
FROSTFS_TRACING_ENDPOINT="localhost"
FROSTFS_TRACING_EXPORTER="otlp_grpc"
+FROSTFS_TRACING_TRUSTED_CA=""
FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
diff --git a/config/example/node.json b/config/example/node.json
index d7187250b..4e6d239fe 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -255,7 +255,8 @@
"tracing": {
"enabled": true,
"endpoint": "localhost:9090",
- "exporter": "otlp_grpc"
+ "exporter": "otlp_grpc",
+ "trusted_ca": "/etc/ssl/tracing.pem"
},
"runtime": {
"soft_memory_limit": 1073741824
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 776b22bd0..5a8e6a2a4 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -231,6 +231,7 @@ tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost"
+ trusted_ca: ""
runtime:
soft_memory_limit: 1gb
diff --git a/go.mod b/go.mod
index 78fefc9ae..621d2e85d 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
- git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
+ git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
@@ -40,15 +40,15 @@ require (
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
go.etcd.io/bbolt v1.3.10
- go.opentelemetry.io/otel v1.24.0
- go.opentelemetry.io/otel/trace v1.24.0
+ go.opentelemetry.io/otel v1.28.0
+ go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
golang.org/x/sync v0.7.0
golang.org/x/sys v0.22.0
golang.org/x/term v0.21.0
google.golang.org/grpc v1.66.2
- google.golang.org/protobuf v1.34.1
+ google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v3 v3.0.1
)
@@ -63,7 +63,7 @@ require (
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
- github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.2-0.20231222162921-eb75782795d2 // indirect
@@ -73,13 +73,13 @@ require (
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-fed/httpsig v1.1.0 // indirect
- github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
@@ -115,18 +115,18 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.14 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 // indirect
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 // indirect
- go.opentelemetry.io/otel/metric v1.24.0 // indirect
- go.opentelemetry.io/otel/sdk v1.22.0 // indirect
- go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
+ go.opentelemetry.io/otel/metric v1.28.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
lukechampine.com/blake3 v1.2.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
diff --git a/go.sum b/go.sum
index 2147f8988..4d21d9bca 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
-git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
+git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
@@ -33,8 +33,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
@@ -82,8 +82,8 @@ github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-pkgz/expirable-cache/v3 v3.0.0 h1:u3/gcu3sabLYiTCevoRKv+WzjIn5oo7P8XtiXBeRDLw=
@@ -116,8 +116,8 @@ github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0 h1:f4tg
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0/go.mod h1:hKAkSgNkL0FII46ZkJcpVEAai4KV+swlIWCKfekd1pA=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1 h1:HcUWd006luQPljE73d5sk+/VgYPGUReEVz2y1/qylwY=
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.1/go.mod h1:w9Y7gY31krpLmrVU5ZPG9H7l9fZuRu5/3R3S3FMtVQ4=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -229,8 +229,8 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
-github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
@@ -278,22 +278,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
-go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0 h1:H2JFgRcGiyHg7H7bwcwaQJYrNFqCqrbTQ8K4p1OvDu8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.22.0/go.mod h1:WfCWp1bGoYK8MeULtI15MmQVczfR+bFkk0DF3h06QmQ=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0 h1:zr8ymM5OWWjjiWRzwTfZ67c905+2TMHYp2lMJ52QTyM=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.22.0/go.mod h1:sQs7FT2iLVJ+67vYngGJkPe1qr39IzaBzaj9IDNNY8k=
-go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
-go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
-go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
-go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
-go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
-go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
+go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -393,10 +393,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -407,8 +407,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
From 96308a26c6ab73c416464e3da2f0e859915abc93 Mon Sep 17 00:00:00 2001
From: Aleksey Savaitan
Date: Thu, 12 Sep 2024 17:42:20 +0300
Subject: [PATCH 061/655] [#1361] linter: fix funlen
Signed-off-by: Aleksey Savaitan
---
cmd/frostfs-node/config.go | 55 +++++++++++++++++++++-----------------
1 file changed, 31 insertions(+), 24 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index bb6580a40..c0019d36a 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1278,7 +1278,6 @@ func (c *cfg) reloadConfig(ctx context.Context) {
// all the components are expected to support
// Logger's dynamic reconfiguration approach
- var components []dCmp
// Logger
@@ -1288,6 +1287,36 @@ func (c *cfg) reloadConfig(ctx context.Context) {
return
}
+ components := c.getComponents(ctx, logPrm)
+
+ // Storage Engine
+
+ var rcfg engine.ReConfiguration
+ for _, optsWithID := range c.shardOpts(ctx) {
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
+ }
+
+ err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
+ return
+ }
+
+ for _, component := range components {
+ err = component.reloadFunc()
+ if err != nil {
+ c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
+ zap.String("component", component.name),
+ zap.Error(err))
+ }
+ }
+
+ c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+}
+
+func (c *cfg) getComponents(ctx context.Context, logPrm *logger.Prm) []dCmp {
+ var components []dCmp
+
components = append(components, dCmp{"logger", logPrm.Reload})
components = append(components, dCmp{"runtime", func() error {
setRuntimeParameters(c)
@@ -1321,29 +1350,7 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components = append(components, dCmp{cmp.name, func() error { return cmp.reload(ctx) }})
}
- // Storage Engine
-
- var rcfg engine.ReConfiguration
- for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
- }
-
- err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
- if err != nil {
- c.log.Error(logs.FrostFSNodeStorageEngineConfigurationUpdate, zap.Error(err))
- return
- }
-
- for _, component := range components {
- err = component.reloadFunc()
- if err != nil {
- c.log.Error(logs.FrostFSNodeUpdatedConfigurationApplying,
- zap.String("component", component.name),
- zap.Error(err))
- }
- }
-
- c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
+ return components
}
func (c *cfg) reloadPools() error {
From ea48e928c8d6c54662c72bd650b57a8b897e7879 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 16 Sep 2024 10:45:26 +0300
Subject: [PATCH 062/655] [#1366] logger: Make timestamp prepending optional
Signed-off-by: Aleksey Savchuk
---
pkg/util/logger/logger.go | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/pkg/util/logger/logger.go b/pkg/util/logger/logger.go
index e67afb36b..4b60f02de 100644
--- a/pkg/util/logger/logger.go
+++ b/pkg/util/logger/logger.go
@@ -40,6 +40,9 @@ type Prm struct {
// do not support runtime rereading
dest string
+
+ // PrependTimestamp specifies whether to prepend a timestamp in the log
+ PrependTimestamp bool
}
const (
@@ -116,11 +119,16 @@ func newConsoleLogger(prm *Prm) (*Logger, error) {
c := zap.NewProductionConfig()
c.Level = lvl
c.Encoding = "console"
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
+
lZap, err := c.Build(
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
)
@@ -140,11 +148,16 @@ func newJournaldLogger(prm *Prm) (*Logger, error) {
c := zap.NewProductionConfig()
c.Level = lvl
c.Encoding = "console"
- c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
if prm.SamplingHook != nil {
c.Sampling.Hook = prm.SamplingHook
}
+ if prm.PrependTimestamp {
+ c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+ } else {
+ c.EncoderConfig.TimeKey = ""
+ }
+
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
From d4bec24c9fa7ede0fcc4d5b879316f0a54c61bb9 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 16 Sep 2024 10:46:26 +0300
Subject: [PATCH 063/655] [#1366] node, ir: Support `timestamp` config option,
update tests
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-ir/config.go | 2 ++
cmd/frostfs-ir/defaults.go | 1 +
cmd/frostfs-ir/main.go | 2 ++
cmd/frostfs-node/config.go | 3 +++
cmd/frostfs-node/config/logger/config.go | 8 ++++++++
cmd/frostfs-node/config/logger/config_test.go | 2 ++
config/example/ir.env | 1 +
config/example/ir.yaml | 1 +
config/example/node.env | 1 +
config/example/node.json | 3 ++-
config/example/node.yaml | 1 +
11 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/cmd/frostfs-ir/config.go b/cmd/frostfs-ir/config.go
index 4eaac845c..137e764ed 100644
--- a/cmd/frostfs-ir/config.go
+++ b/cmd/frostfs-ir/config.go
@@ -41,6 +41,8 @@ func reloadConfig() error {
if err != nil {
return err
}
+ logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+
return logPrm.Reload()
}
diff --git a/cmd/frostfs-ir/defaults.go b/cmd/frostfs-ir/defaults.go
index e703301ae..899918d22 100644
--- a/cmd/frostfs-ir/defaults.go
+++ b/cmd/frostfs-ir/defaults.go
@@ -9,6 +9,7 @@ import (
func defaultConfiguration(cfg *viper.Viper) {
cfg.SetDefault("logger.level", "info")
cfg.SetDefault("logger.destination", "stdout")
+ cfg.SetDefault("logger.timestamp", false)
setPprofDefaults(cfg)
diff --git a/cmd/frostfs-ir/main.go b/cmd/frostfs-ir/main.go
index 02936ae78..4bc5923a0 100644
--- a/cmd/frostfs-ir/main.go
+++ b/cmd/frostfs-ir/main.go
@@ -79,6 +79,8 @@ func main() {
)
exitErr(err)
logPrm.SamplingHook = metrics.LogMetrics().GetSamplingHook()
+ logPrm.PrependTimestamp = cfg.GetBool("logger.timestamp")
+
log, err = logger.NewLogger(logPrm)
exitErr(err)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index c0019d36a..ed3a65c25 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -102,6 +102,7 @@ type applicationConfiguration struct {
LoggerCfg struct {
level string
destination string
+ timestamp bool
}
EngineCfg struct {
@@ -220,6 +221,7 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.level = loggerconfig.Level(c)
a.LoggerCfg.destination = loggerconfig.Destination(c)
+ a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
// Storage Engine
@@ -1023,6 +1025,7 @@ func (c *cfg) loggerPrm() (*logger.Prm, error) {
// not expected since validation should be performed before
panic("incorrect log destination format: " + c.LoggerCfg.destination)
}
+ c.dynamicConfiguration.logger.PrependTimestamp = c.LoggerCfg.timestamp
return c.dynamicConfiguration.logger, nil
}
diff --git a/cmd/frostfs-node/config/logger/config.go b/cmd/frostfs-node/config/logger/config.go
index 378b9d793..ba9eeea2b 100644
--- a/cmd/frostfs-node/config/logger/config.go
+++ b/cmd/frostfs-node/config/logger/config.go
@@ -52,6 +52,14 @@ func Destination(c *config.Config) string {
return DestinationDefault
}
+// Timestamp returns the value of "timestamp" config parameter
+// from "logger" section.
+//
+// Returns false if the value isn't specified.
+func Timestamp(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "timestamp")
+}
+
// ToLokiConfig extracts loki config.
func ToLokiConfig(c *config.Config) loki.Config {
hostname, _ := os.Hostname()
diff --git a/cmd/frostfs-node/config/logger/config_test.go b/cmd/frostfs-node/config/logger/config_test.go
index 3587a0ddb..ffe8ac693 100644
--- a/cmd/frostfs-node/config/logger/config_test.go
+++ b/cmd/frostfs-node/config/logger/config_test.go
@@ -13,6 +13,7 @@ func TestLoggerSection_Level(t *testing.T) {
t.Run("defaults", func(t *testing.T) {
require.Equal(t, loggerconfig.LevelDefault, loggerconfig.Level(configtest.EmptyConfig()))
require.Equal(t, loggerconfig.DestinationDefault, loggerconfig.Destination(configtest.EmptyConfig()))
+ require.Equal(t, false, loggerconfig.Timestamp(configtest.EmptyConfig()))
})
const path = "../../../../config/example/node"
@@ -20,6 +21,7 @@ func TestLoggerSection_Level(t *testing.T) {
fileConfigTest := func(c *config.Config) {
require.Equal(t, "debug", loggerconfig.Level(c))
require.Equal(t, "journald", loggerconfig.Destination(c))
+ require.Equal(t, true, loggerconfig.Timestamp(c))
}
configtest.ForEachFileType(path, fileConfigTest)
diff --git a/config/example/ir.env b/config/example/ir.env
index 3f9530ab6..7234a4b32 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -1,4 +1,5 @@
FROSTFS_IR_LOGGER_LEVEL=info
+FROSTFS_IR_LOGGER_TIMESTAMP=true
FROSTFS_IR_WALLET_PATH=/path/to/wallet.json
FROSTFS_IR_WALLET_ADDRESS=NUHtW3eM6a4mmFCgyyr4rj4wygsTKB88XX
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 401328e72..4c64f088b 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -2,6 +2,7 @@
logger:
level: info # Logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
+ timestamp: true
wallet:
path: /path/to/wallet.json # Path to NEP-6 NEO wallet file
diff --git a/config/example/node.env b/config/example/node.env
index 030a79934..6618a981a 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -1,5 +1,6 @@
FROSTFS_LOGGER_LEVEL=debug
FROSTFS_LOGGER_DESTINATION=journald
+FROSTFS_LOGGER_TIMESTAMP=true
FROSTFS_PPROF_ENABLED=true
FROSTFS_PPROF_ADDRESS=localhost:6060
diff --git a/config/example/node.json b/config/example/node.json
index 4e6d239fe..0d100ed80 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -1,7 +1,8 @@
{
"logger": {
"level": "debug",
- "destination": "journald"
+ "destination": "journald",
+ "timestamp": true
},
"pprof": {
"enabled": true,
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 5a8e6a2a4..86be35ba8 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -1,6 +1,7 @@
logger:
level: debug # logger level: one of "debug", "info" (default), "warn", "error", "dpanic", "panic", "fatal"
destination: journald # logger destination: one of "stdout" (default), "journald"
+ timestamp: true
systemdnotify:
enabled: true
From b807d8c40066a90accf7dbdb5d8f31f6069f551c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:20:55 +0300
Subject: [PATCH 064/655] [#1382] go.mod: Upgrade sdk-go and api-go versions
Signed-off-by: Dmitrii Stepanov
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index 621d2e85d..78dce0131 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,12 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 4d21d9bca..dd0e31088 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb h1:p9ByDsw+H6p6LyYSx8LKFtAG/oPKQpDVMNfjPqdevTw=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240902111049-c11f50efeccb/go.mod h1:BDnEpkKMykCS8u1nLzR6SgNzCv6885RWlo5TnravQuI=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad h1:cJGK/bXSF/0KMr6zkIy06qekQhQRU7eYzh+lWdQ0U8w=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240903093628-8f751d9dd0ad/go.mod h1:I4GzeEnftO++N2JHQn9QR88eV0rjQkGqlUCNMnCz1CY=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From 1e7f9909dade3ed905c07930c9a9f1bd9a8323b4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:21:53 +0300
Subject: [PATCH 065/655] [#1382] policer: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/services/policer/check.go | 2 +-
pkg/services/policer/ec.go | 2 +-
pkg/services/policer/policer_test.go | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index bf67ec4d4..06282bd8d 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -110,7 +110,7 @@ func (p *Policer) processRepNodes(ctx context.Context, requirements *placementRe
requirements.needLocalCopy = true
shortage--
- } else if nodes[i].IsMaintenance() {
+ } else if nodes[i].Status().IsMaintenance() {
shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
} else {
if status := checkedNodes.processStatus(nodes[i]); status.Processed() {
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index 61a65fc21..e822d1c09 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -106,7 +106,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
validPlacement: true,
}
}
- if requiredNode.IsMaintenance() {
+ if requiredNode.Status().IsMaintenance() {
// consider maintenance mode has object, but do not drop local copy
p.log.Debug(logs.PolicerConsiderNodeUnderMaintenanceAsOK, zap.String("node", netmap.StringifyPublicKey(requiredNode)))
return ecChunkProcessResult{}
diff --git a/pkg/services/policer/policer_test.go b/pkg/services/policer/policer_test.go
index e353ea428..4e17e98a8 100644
--- a/pkg/services/policer/policer_test.go
+++ b/pkg/services/policer/policer_test.go
@@ -174,7 +174,7 @@ func TestProcessObject(t *testing.T) {
nodes[i].SetPublicKey([]byte{byte(i)})
}
for _, i := range ti.maintenanceNodes {
- nodes[i].SetMaintenance()
+ nodes[i].SetStatus(netmap.Maintenance)
}
var policy netmap.PlacementPolicy
From e5c8f7ff9f49b9e8d0f0a7ac4290aadeba356d6c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:22:38 +0300
Subject: [PATCH 066/655] [#1382] controlSvc: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/services/control/ir/server/calls.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/control/ir/server/calls.go b/pkg/services/control/ir/server/calls.go
index 2447a8a74..642932c91 100644
--- a/pkg/services/control/ir/server/calls.go
+++ b/pkg/services/control/ir/server/calls.go
@@ -91,7 +91,7 @@ func (s *Server) RemoveNode(_ context.Context, req *control.RemoveNodeRequest) (
if len(nodeInfo.PublicKey()) == 0 {
return nil, status.Error(codes.NotFound, "no such node")
}
- if nodeInfo.IsOffline() {
+ if nodeInfo.Status().IsOffline() {
return nil, status.Error(codes.FailedPrecondition, "node is already offline")
}
From d4be2f20d4a240dd5a46f09b07b432667ca52f24 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:23:46 +0300
Subject: [PATCH 067/655] [#1382] morph: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/morph/client/netmap/netmap.go | 6 +++---
pkg/morph/client/netmap/netmap_test.go | 14 +++++++-------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/pkg/morph/client/netmap/netmap.go b/pkg/morph/client/netmap/netmap.go
index 61bbf5f17..f7b5c3ba4 100644
--- a/pkg/morph/client/netmap/netmap.go
+++ b/pkg/morph/client/netmap/netmap.go
@@ -136,11 +136,11 @@ func decodeNodeInfo(dst *netmap.NodeInfo, itemNode stackitem.Item) error {
default:
return fmt.Errorf("unsupported state %v", node.State)
case netmapcontract.NodeStateOnline:
- dst.SetOnline()
+ dst.SetStatus(netmap.Online)
case netmapcontract.NodeStateOffline:
- dst.SetOffline()
+ dst.SetStatus(netmap.Offline)
case netmapcontract.NodeStateMaintenance:
- dst.SetMaintenance()
+ dst.SetStatus(netmap.Maintenance)
}
return nil
diff --git a/pkg/morph/client/netmap/netmap_test.go b/pkg/morph/client/netmap/netmap_test.go
index a8a306197..e686e271e 100644
--- a/pkg/morph/client/netmap/netmap_test.go
+++ b/pkg/morph/client/netmap/netmap_test.go
@@ -20,11 +20,11 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
switch i % 3 {
default:
- expected[i].SetOffline()
+ expected[i].SetStatus(netmap.Offline)
case int(netmapcontract.NodeStateOnline):
- expected[i].SetOnline()
+ expected[i].SetStatus(netmap.Online)
case int(netmapcontract.NodeStateMaintenance):
- expected[i].SetMaintenance()
+ expected[i].SetStatus(netmap.Maintenance)
}
expected[i].SetPublicKey(pub)
@@ -38,12 +38,12 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
var state int64
- switch {
- case expected[i].IsOnline():
+ switch expected[i].Status() {
+ case netmap.Online:
state = int64(netmapcontract.NodeStateOnline)
- case expected[i].IsOffline():
+ case netmap.Offline:
state = int64(netmapcontract.NodeStateOffline)
- case expected[i].IsMaintenance():
+ case netmap.Maintenance:
state = int64(netmapcontract.NodeStateMaintenance)
}
From a603d14d080e2485fdedee4b92306b1ce4aee2b0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:24:53 +0300
Subject: [PATCH 068/655] [#1382] ir: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/processors/netmap/cleanup_table.go | 2 +-
pkg/innerring/processors/netmap/cleanup_table_test.go | 2 +-
pkg/innerring/processors/netmap/handlers_test.go | 2 +-
.../processors/netmap/nodevalidation/state/validator.go | 4 ++--
.../netmap/nodevalidation/state/validator_test.go | 8 ++++----
pkg/innerring/processors/netmap/process_peers.go | 2 +-
6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/pkg/innerring/processors/netmap/cleanup_table.go b/pkg/innerring/processors/netmap/cleanup_table.go
index c18611569..abd5b089a 100644
--- a/pkg/innerring/processors/netmap/cleanup_table.go
+++ b/pkg/innerring/processors/netmap/cleanup_table.go
@@ -60,7 +60,7 @@ func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
}
access.binNodeInfo = binNodeInfo
- access.maintenance = nmNodes[i].IsMaintenance()
+ access.maintenance = nmNodes[i].Status().IsMaintenance()
newMap[keyString] = access
}
diff --git a/pkg/innerring/processors/netmap/cleanup_table_test.go b/pkg/innerring/processors/netmap/cleanup_table_test.go
index ae5620733..208bd5496 100644
--- a/pkg/innerring/processors/netmap/cleanup_table_test.go
+++ b/pkg/innerring/processors/netmap/cleanup_table_test.go
@@ -127,7 +127,7 @@ func TestCleanupTable(t *testing.T) {
t.Run("skip maintenance nodes", func(t *testing.T) {
cnt := 0
- infos[1].SetMaintenance()
+ infos[1].SetStatus(netmap.Maintenance)
key := netmap.StringifyPublicKey(infos[1])
c.update(networkMap, 5)
diff --git a/pkg/innerring/processors/netmap/handlers_test.go b/pkg/innerring/processors/netmap/handlers_test.go
index b34abb78c..8875880bf 100644
--- a/pkg/innerring/processors/netmap/handlers_test.go
+++ b/pkg/innerring/processors/netmap/handlers_test.go
@@ -146,7 +146,7 @@ func TestAddPeer(t *testing.T) {
require.Nil(t, nc.notaryInvokes, "invalid notary invokes")
- node.SetOnline()
+ node.SetStatus(netmap.Online)
ev = netmapEvent.AddPeer{
NodeBytes: node.Marshal(),
Request: &payload.P2PNotaryRequest{
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
index 4094e50a5..e5165f618 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator.go
@@ -56,11 +56,11 @@ func (x *NetMapCandidateValidator) SetNetworkSettings(netSettings NetworkSetting
//
// See also netmap.NodeInfo.IsOnline/SetOnline and other similar methods.
func (x *NetMapCandidateValidator) VerifyAndUpdate(node *netmap.NodeInfo) error {
- if node.IsOnline() {
+ if node.Status().IsOnline() {
return nil
}
- if node.IsMaintenance() {
+ if node.Status().IsMaintenance() {
return x.netSettings.MaintenanceModeAllowed()
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
index a557628f0..b81d7243b 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/state/validator_test.go
@@ -41,22 +41,22 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {
},
{
name: "ONLINE",
- preparer: (*netmap.NodeInfo).SetOnline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Online) },
valid: true,
},
{
name: "OFFLINE",
- preparer: (*netmap.NodeInfo).SetOffline,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Offline) },
valid: false,
},
{
name: "MAINTENANCE/allowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: true,
},
{
name: "MAINTENANCE/disallowed",
- preparer: (*netmap.NodeInfo).SetMaintenance,
+ preparer: func(ni *netmap.NodeInfo) { ni.SetStatus(netmap.Maintenance) },
valid: false,
validatorPreparer: func(v *state.NetMapCandidateValidator) {
var s testNetworkSettings
diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go
index 9e6e8c283..c8c7928a3 100644
--- a/pkg/innerring/processors/netmap/process_peers.go
+++ b/pkg/innerring/processors/netmap/process_peers.go
@@ -62,7 +62,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) bool {
// But there is no guarantee that code will be executed in the same order.
// That is why we need to perform `addPeerIR` only in case when node is online,
// because in scope of this method, contract set state `ONLINE` for the node.
- if updated && nodeInfo.IsOnline() {
+ if updated && nodeInfo.Status().IsOnline() {
np.log.Info(logs.NetmapApprovingNetworkMapCandidate,
zap.String("key", keyString))
From ac1eee091dfbb9193c407ac237cd53a26f4f83d9 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:27:10 +0300
Subject: [PATCH 069/655] [#1382] node: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 8 ++++++--
cmd/frostfs-node/netmap.go | 26 +++++++++++++++-----------
cmd/internal/common/netmap.go | 8 ++++----
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index ed3a65c25..63f410b89 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1182,7 +1182,9 @@ func (c *cfg) bootstrapWithState(stateSetter func(*netmap.NodeInfo)) error {
// bootstrapOnline calls cfg.bootstrapWithState with "online" state.
func bootstrapOnline(c *cfg) error {
- return c.bootstrapWithState((*netmap.NodeInfo).SetOnline)
+ return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+ ni.SetStatus(netmap.Online)
+ })
}
// bootstrap calls bootstrapWithState with:
@@ -1193,7 +1195,9 @@ func (c *cfg) bootstrap() error {
st := c.cfgNetmap.state.controlNetmapStatus()
if st == control.NetmapStatus_MAINTENANCE {
c.log.Info(logs.FrostFSNodeBootstrappingWithTheMaintenanceState)
- return c.bootstrapWithState((*netmap.NodeInfo).SetMaintenance)
+ return c.bootstrapWithState(func(ni *netmap.NodeInfo) {
+ ni.SetStatus(netmap.Maintenance)
+ })
}
c.log.Info(logs.FrostFSNodeBootstrappingWithOnlineState,
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index 8104b1dc1..c0b87492c 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -61,13 +61,15 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
if ni != nil {
s.nodeInfo.Store(*ni)
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
ctrlNetSt = control.NetmapStatus_ONLINE
- case ni.IsOffline():
+ case netmapSDK.Offline:
ctrlNetSt = control.NetmapStatus_OFFLINE
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
ctrlNetSt = control.NetmapStatus_MAINTENANCE
+ case netmapSDK.UnspecifiedState:
+ ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
}
} else {
ctrlNetSt = control.NetmapStatus_OFFLINE
@@ -78,7 +80,7 @@ func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
// nil ni means that the node is not included
// in the netmap
- niOld.SetOffline()
+ niOld.SetStatus(netmapSDK.Offline)
s.nodeInfo.Store(niOld)
}
@@ -139,7 +141,7 @@ func initNetmapService(ctx context.Context, c *cfg) {
network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
parseAttributes(c)
- c.cfgNodeInfo.localInfo.SetOffline()
+ c.cfgNodeInfo.localInfo.SetStatus(netmapSDK.Offline)
if c.cfgMorph.client == nil {
initMorphComponents(ctx, c)
@@ -252,7 +254,7 @@ func initNetmapState(c *cfg) {
zap.String("state", stateWord),
)
- if ni != nil && ni.IsMaintenance() {
+ if ni != nil && ni.Status().IsMaintenance() {
c.isMaintenance.Store(true)
}
@@ -263,13 +265,15 @@ func initNetmapState(c *cfg) {
func nodeState(ni *netmapSDK.NodeInfo) string {
if ni != nil {
- switch {
- case ni.IsOnline():
+ switch ni.Status() {
+ case netmapSDK.Online:
return "online"
- case ni.IsOffline():
+ case netmapSDK.Offline:
return "offline"
- case ni.IsMaintenance():
+ case netmapSDK.Maintenance:
return "maintenance"
+ case netmapSDK.UnspecifiedState:
+ return "undefined"
}
}
return "undefined"
diff --git a/cmd/internal/common/netmap.go b/cmd/internal/common/netmap.go
index 79b03a726..f550552d2 100644
--- a/cmd/internal/common/netmap.go
+++ b/cmd/internal/common/netmap.go
@@ -14,14 +14,14 @@ func PrettyPrintNodeInfo(cmd *cobra.Command, node netmap.NodeInfo,
) {
var strState string
- switch {
+ switch node.Status() {
default:
strState = "STATE_UNSUPPORTED"
- case node.IsOnline():
+ case netmap.Online:
strState = "ONLINE"
- case node.IsOffline():
+ case netmap.Offline:
strState = "OFFLINE"
- case node.IsMaintenance():
+ case netmap.Maintenance:
strState = "MAINTENANCE"
}
From 3441fff05dd61647e7bd069db34f320e7e9efe9a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 18 Sep 2024 12:27:39 +0300
Subject: [PATCH 070/655] [#1382] cli: Replace deprecated methods
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/modules/netmap/nodeinfo.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-cli/modules/netmap/nodeinfo.go b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
index b6ec48f35..ae4bb329a 100644
--- a/cmd/frostfs-cli/modules/netmap/nodeinfo.go
+++ b/cmd/frostfs-cli/modules/netmap/nodeinfo.go
@@ -49,14 +49,14 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
var stateWord string
- switch {
+ switch i.Status() {
default:
stateWord = ""
- case i.IsOnline():
+ case netmap.Online:
stateWord = "online"
- case i.IsOffline():
+ case netmap.Offline:
stateWord = "offline"
- case i.IsMaintenance():
+ case netmap.Maintenance:
stateWord = "maintenance"
}
From 61d5e140e051f92222fa9152d6bd807d505ca1e8 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 18 Sep 2024 12:13:15 +0300
Subject: [PATCH 071/655] [#1383] object: Add restrictions for `Patch` method
* `Patch` can't be applied for non-regular type object (tombstones,
locks etc.)
* Complex object parts can't be patched. So, if an object has EC/Split
header, it won't be patched.
Signed-off-by: Airat Arifullin
---
pkg/services/object/patch/streamer.go | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 85c28cda0..73def8c7c 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -57,12 +57,31 @@ func toFullObjectHeader(hdr *objectSDK.Object) objectV2.GetHeaderPart {
return hs
}
+func isLinkObject(hdr *objectV2.HeaderWithSignature) bool {
+ split := hdr.GetHeader().GetSplit()
+ return len(split.GetChildren()) > 0 && split.GetParent() != nil
+}
+
+func isComplexObjectPart(hdr *objectV2.HeaderWithSignature) bool {
+ return hdr.GetHeader().GetEC() != nil || hdr.GetHeader().GetSplit() != nil
+}
+
func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
hdrWithSig, addr, err := s.readHeader(ctx, req)
if err != nil {
return err
}
+ if hdrWithSig.GetHeader().GetObjectType() != objectV2.TypeRegular {
+ return errors.New("non-regular object can't be patched")
+ }
+ if isLinkObject(hdrWithSig) {
+ return errors.New("linking object can't be patched")
+ }
+ if isComplexObjectPart(hdrWithSig) {
+ return errors.New("complex object parts can't be patched")
+ }
+
commonPrm, err := util.CommonPrmFromV2(req)
if err != nil {
return err
From 945b7c740b0deb4a2f16bb85f20efd8820762f53 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Wed, 18 Sep 2024 18:14:54 +0300
Subject: [PATCH 072/655] [#1372] adm/morph: Add delta flag to
'force-new-epoch'
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-adm/internal/commonflags/flags.go | 1 +
cmd/frostfs-adm/internal/modules/morph/helper/netmap.go | 8 ++++++--
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++-
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 ++
cmd/frostfs-adm/internal/modules/morph/node/remove.go | 2 +-
5 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index 81395edb0..b51d2e115 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -39,4 +39,5 @@ const (
CustomZoneFlag = "domain"
AlphabetSizeFlag = "size"
AllFlag = "all"
+ DeltaFlag = "delta"
)
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
index 7a778f8c3..fb8f03783 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/netmap.go
@@ -72,13 +72,17 @@ func InvalidConfigValueErr(key string) error {
return fmt.Errorf("invalid %s config value from netmap contract", key)
}
-func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160) error {
+func EmitNewEpochCall(bw *io.BufBinWriter, wCtx *InitializeContext, nmHash util.Uint160, countEpoch int64) error {
+ if countEpoch <= 0 {
+ return errors.New("number of epochs cannot be less than 1")
+ }
+
curr, err := unwrap.Int64(wCtx.ReadOnlyInvoker.Call(nmHash, "epoch"))
if err != nil {
return errors.New("can't fetch current epoch from the netmap contract")
}
- newEpoch := curr + 1
+ newEpoch := curr + countEpoch
wCtx.Command.Printf("Current epoch: %d, increase to %d.\n", curr, newEpoch)
// In NeoFS this is done via Notary contract. Here, however, we can form the
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index df9a03fd1..5e4e9c725 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -4,6 +4,7 @@ import (
"fmt"
"strings"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -30,7 +31,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 31fda860e..0288bcdc5 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -22,6 +22,7 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag))
},
RunE: ForceNewEpochCmd,
}
@@ -35,6 +36,7 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
+ ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch")
}
func init() {
diff --git a/cmd/frostfs-adm/internal/modules/morph/node/remove.go b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
index 0a19102ba..e47451e0c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/node/remove.go
+++ b/cmd/frostfs-adm/internal/modules/morph/node/remove.go
@@ -53,7 +53,7 @@ func RemoveNodesCmd(cmd *cobra.Command, args []string) error {
int64(netmapcontract.NodeStateOffline), nodeKeys[i].Bytes())
}
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash); err != nil {
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, 1); err != nil {
return err
}
From 1361db91ee37d3da938dc5146cc3f15f9ee33517 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 16 Sep 2024 11:09:51 +0300
Subject: [PATCH 073/655] [#1301] adm/morph: Add flag -v to 'Tokens'
Signed-off-by: Alexander Chuprov
---
.../internal/modules/morph/nns/tokens.go | 44 ++++++++++++++++++-
1 file changed, 43 insertions(+), 1 deletion(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
index 6e8ffb40a..3c7136e9d 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/tokens.go
@@ -1,15 +1,25 @@
package nns
import (
+ "math/big"
+ "strings"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/nns"
+ client "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/nns"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
commonCmd "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/internal/common"
"github.com/spf13/cobra"
)
+const (
+ verboseDesc = "Include additional information about CNAME record."
+)
+
func initTokensCmd() {
Cmd.AddCommand(tokensCmd)
tokensCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
tokensCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ tokensCmd.Flags().BoolP(commonflags.Verbose, commonflags.VerboseShorthand, false, verboseDesc)
}
func listTokens(cmd *cobra.Command, _ []string) {
@@ -18,7 +28,39 @@ func listTokens(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "unable to get tokens: %w", err)
for toks, err := it.Next(10); err == nil && len(toks) > 0; toks, err = it.Next(10) {
for _, token := range toks {
- cmd.Println(string(token))
+ output := string(token)
+ if verbose, _ := cmd.Flags().GetBool(commonflags.Verbose); verbose {
+ cname, err := getCnameRecord(c, token)
+ commonCmd.ExitOnErr(cmd, "", err)
+ if cname != "" {
+ output += " (CNAME: " + cname + ")"
+ }
+ }
+ cmd.Println(output)
}
}
}
+
+func getCnameRecord(c *client.Contract, token []byte) (string, error) {
+ items, err := c.GetRecords(string(token), big.NewInt(int64(nns.CNAME)))
+
+ // GetRecords returns the error "not an array" if the domain does not contain records.
+ if err != nil && strings.Contains(err.Error(), "not an array") {
+ return "", nil
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ if len(items) == 0 {
+ return "", nil
+ }
+
+ record, err := items[0].TryBytes()
+ if err != nil {
+ return "", err
+ }
+
+ return string(record), nil
+}
From 5a53f9c4fd52243dd36c69e62d79f344342d4349 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Thu, 19 Sep 2024 14:19:16 +0300
Subject: [PATCH 074/655] [#1301] go.mod: Bump frostfs-contract
Signed-off-by: Alexander Chuprov
---
go.mod | 6 +++---
go.sum | 8 ++++----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/go.mod b/go.mod
index 78dce0131..502761866 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
@@ -28,7 +28,7 @@ require (
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.2.0
github.com/multiformats/go-multiaddr v0.12.1
- github.com/nspcc-dev/neo-go v0.106.2
+ github.com/nspcc-dev/neo-go v0.106.3
github.com/olekukonko/tablewriter v0.0.5
github.com/panjf2000/ants/v2 v2.9.0
github.com/prometheus/client_golang v1.19.0
@@ -100,7 +100,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
- github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
+ github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
diff --git a/go.sum b/go.sum
index dd0e31088..85d9df443 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
@@ -188,8 +188,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
-github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec h1:vDrbVXF2+2uP0RlkZmem3QYATcXCu9BzzGGCNsNcK7Q=
+github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240727093519-1a48f1ce43ec/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
From 53a90634fc0a55be636a220b461be731f8a91b3f Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Thu, 19 Sep 2024 14:19:41 +0300
Subject: [PATCH 075/655] [#1301] adm/morph: Add 'delete' domains
Signed-off-by: Alexander Chuprov
---
.../morph/nns/{register.go => domains.go} | 20 +++++++++++++++++++
.../internal/modules/morph/nns/root.go | 10 ++++++++++
2 files changed, 30 insertions(+)
rename cmd/frostfs-adm/internal/modules/morph/nns/{register.go => domains.go} (73%)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/register.go b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
similarity index 73%
rename from cmd/frostfs-adm/internal/modules/morph/nns/register.go
rename to cmd/frostfs-adm/internal/modules/morph/nns/domains.go
index d05d9f171..3684db94a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/register.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/domains.go
@@ -42,3 +42,23 @@ func registerDomain(cmd *cobra.Command, _ []string) {
commonCmd.ExitOnErr(cmd, "register domain error: %w", err)
cmd.Println("Domain registered successfully")
}
+
+func initDeleteCmd() {
+ Cmd.AddCommand(deleteCmd)
+ deleteCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ deleteCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ deleteCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+
+ _ = cobra.MarkFlagRequired(deleteCmd.Flags(), nnsNameFlag)
+}
+
+func deleteDomain(cmd *cobra.Command, _ []string) {
+ c, actor, _ := getRPCClient(cmd)
+
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ h, vub, err := c.DeleteDomain(name)
+
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete domain error: %w", err)
+ cmd.Println("Domain deleted successfully")
+}
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index e528e4b7b..56774c292 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -42,6 +42,15 @@ var (
},
Run: registerDomain,
}
+ deleteCmd = &cobra.Command{
+ Use: "delete",
+ Short: "Delete a domain by name",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ },
+ Run: deleteDomain,
+ }
renewCmd = &cobra.Command{
Use: "renew",
Short: "Increases domain expiration date",
@@ -91,6 +100,7 @@ var (
func init() {
initTokensCmd()
initRegisterCmd()
+ initDeleteCmd()
initRenewCmd()
initUpdateCmd()
initAddRecordCmd()
From c290d079fd71ddc851cce8d06f496d27ceedc168 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 20 Sep 2024 10:53:02 +0300
Subject: [PATCH 076/655] [#1312] go.mod: Update sdk-go
Signed-off-by: Aleksey Savchuk
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 502761866..9817f8527 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 85d9df443..3c6dd9a99 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69 h1:DJExzndXf6hztcQ8zHlBOJV/+FA6k2FpRGUcTDWqq2M=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240916123151-6009d089fc69/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From c34b8acedde282bbe81efccea772a923ee570a8f Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Fri, 20 Sep 2024 10:58:22 +0300
Subject: [PATCH 077/655] [#1312] Drop handling of system attributes with NeoFS
prefix
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-cli/modules/container/list_objects.go | 13 ++-----------
pkg/core/object/fmt.go | 2 +-
pkg/local_object_storage/metabase/put.go | 4 ----
pkg/local_object_storage/metabase/upgrade.go | 2 +-
pkg/services/object_manager/tombstone/checker.go | 2 +-
5 files changed, 5 insertions(+), 18 deletions(-)
diff --git a/cmd/frostfs-cli/modules/container/list_objects.go b/cmd/frostfs-cli/modules/container/list_objects.go
index ff2f8cf45..d5850359d 100644
--- a/cmd/frostfs-cli/modules/container/list_objects.go
+++ b/cmd/frostfs-cli/modules/container/list_objects.go
@@ -1,9 +1,6 @@
package container
import (
- "strings"
-
- v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-cli/internal/key"
@@ -67,14 +64,8 @@ var listContainerObjectsCmd = &cobra.Command{
resHead, err := internalclient.HeadObject(cmd.Context(), prmHead)
if err == nil {
- attrs := resHead.Header().Attributes()
- for i := range attrs {
- attrKey := attrs[i].Key()
- if !strings.HasPrefix(attrKey, v2object.SysAttributePrefix) && !strings.HasPrefix(attrKey, v2object.SysAttributePrefixNeoFS) {
- // FIXME(@cthulhu-rider): https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go/issues/97
- // Use dedicated method to skip system attributes.
- cmd.Printf(" %s: %s\n", attrKey, attrs[i].Value())
- }
+ for _, attr := range resHead.Header().UserAttributes() {
+ cmd.Printf(" %s: %s\n", attr.Key(), attr.Value())
}
} else {
cmd.Printf(" failed to read attributes: %v\n", err)
diff --git a/pkg/core/object/fmt.go b/pkg/core/object/fmt.go
index 96f721806..317d62cb0 100644
--- a/pkg/core/object/fmt.go
+++ b/pkg/core/object/fmt.go
@@ -361,7 +361,7 @@ func (v *FormatValidator) checkIfExpired(ctx context.Context, obj *objectSDK.Obj
func expirationEpochAttribute(obj *objectSDK.Object) (uint64, error) {
for _, a := range obj.Attributes() {
- if a.Key() != objectV2.SysAttributeExpEpoch && a.Key() != objectV2.SysAttributeExpEpochNeoFS {
+ if a.Key() != objectV2.SysAttributeExpEpoch {
continue
}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index ff79a0387..087529895 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -387,10 +387,6 @@ func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes = ech.ParentAttributes()
}
for _, attr := range attributes {
- if attr.Key() == objectV2.SysAttributeExpEpochNeoFS {
- expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
- return expEpoch, err == nil
- }
if attr.Key() == objectV2.SysAttributeExpEpoch {
expEpoch, err := strconv.ParseUint(attr.Value(), 10, 64)
return expEpoch, err == nil
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index f677dcf8e..b5de430dc 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -252,7 +252,7 @@ func getNextObjectsWithExpirationEpochBatch(ctx context.Context, db *bbolt.DB, i
continue
}
attributeKey := string(attrKey[1+cidSize:])
- if attributeKey != objectV2.SysAttributeExpEpochNeoFS && attributeKey != objectV2.SysAttributeExpEpoch {
+ if attributeKey != objectV2.SysAttributeExpEpoch {
continue
}
var containerID cid.ID
diff --git a/pkg/services/object_manager/tombstone/checker.go b/pkg/services/object_manager/tombstone/checker.go
index c3c810001..48a08b693 100644
--- a/pkg/services/object_manager/tombstone/checker.go
+++ b/pkg/services/object_manager/tombstone/checker.go
@@ -74,7 +74,7 @@ func (g *ExpirationChecker) IsTombstoneAvailable(ctx context.Context, a oid.Addr
func (g *ExpirationChecker) handleTS(addr string, ts *objectSDK.Object, reqEpoch uint64) bool {
for _, atr := range ts.Attributes() {
- if atr.Key() == objectV2.SysAttributeExpEpoch || atr.Key() == objectV2.SysAttributeExpEpochNeoFS {
+ if atr.Key() == objectV2.SysAttributeExpEpoch {
epoch, err := strconv.ParseUint(atr.Value(), 10, 64)
if err != nil {
g.log.Warn(
From f71418b73cfb49306ec1a191621b954a75105b18 Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Fri, 20 Sep 2024 10:24:40 +0000
Subject: [PATCH 078/655] [#1386] frostfs-adm: Add info to error messages
These error messages bubble up to human users — adding more context helps
find the cause of an issue faster.
Signed-off-by: Vitaliy Potyarkin
---
.../modules/morph/initialize/initialize_roles.go | 10 ++++++++--
.../morph/initialize/initialize_transfer.go | 15 ++++++++++++++-
2 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
index a6815ee13..05bc83a8b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_roles.go
@@ -1,6 +1,8 @@
package initialize
import (
+ "fmt"
+
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -29,10 +31,14 @@ func setNotaryAndAlphabetNodes(c *helper.InitializeContext) error {
callflag.States|callflag.AllowNotify, int64(noderoles.NeoFSAlphabet), pubs)
if err := c.SendCommitteeTx(w.Bytes(), false); err != nil {
- return err
+ return fmt.Errorf("send committee transaction: %w", err)
}
- return c.AwaitTx()
+ err := c.AwaitTx()
+ if err != nil {
+ err = fmt.Errorf("await committee transaction: %w", err)
+ }
+ return err
}
func setRolesFinished(c *helper.InitializeContext) (bool, error) {
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index b7102fa13..d7b0ec86c 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -3,6 +3,7 @@ package initialize
import (
"fmt"
"math/big"
+ "strings"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
@@ -144,5 +145,17 @@ func createNEP17MultiTransferTx(c helper.Client, acc *wallet.Account, recipients
if err != nil {
return nil, fmt.Errorf("can't create actor: %w", err)
}
- return act.MakeRun(w.Bytes())
+ tx, err := act.MakeRun(w.Bytes())
+ if err != nil {
+ sum := make(map[util.Uint160]int64)
+ for _, recipient := range recipients {
+ sum[recipient.Token] += recipient.Amount
+ }
+ detail := make([]string, 0, len(sum))
+ for _, value := range sum {
+ detail = append(detail, fmt.Sprintf("amount=%v", value))
+ }
+ err = fmt.Errorf("transfer failed: from=%s(%s) %s: %w", acc.Label, acc.Address, strings.Join(detail, " "), err)
+ }
+ return tx, err
}
From 0b87be804a63760fc7e43a51cc1b00c5aeedbb34 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 17 Sep 2024 11:24:48 +0300
Subject: [PATCH 079/655] [#1381] engine: Fix tests
Drop unnecessary `Eventually` calls.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/shard/delete_test.go | 14 ++++-------
pkg/local_object_storage/shard/get_test.go | 23 ++++---------------
pkg/local_object_storage/shard/head_test.go | 19 ++-------------
pkg/local_object_storage/shard/inhume_test.go | 2 +-
4 files changed, 11 insertions(+), 47 deletions(-)
diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go
index 9f205fa5d..574250a93 100644
--- a/pkg/local_object_storage/shard/delete_test.go
+++ b/pkg/local_object_storage/shard/delete_test.go
@@ -3,7 +3,6 @@ package shard
import (
"context"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
@@ -58,19 +57,14 @@ func testShard(t *testing.T, hasWriteCache bool, payloadSize int) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
if hasWriteCache {
- sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false})
- require.Eventually(t, func() bool {
- _, err = sh.Delete(context.Background(), delPrm)
- return err == nil
- }, 30*time.Second, 10*time.Millisecond)
- } else {
- _, err = sh.Delete(context.Background(), delPrm)
- require.NoError(t, err)
+ require.NoError(t, sh.FlushWriteCache(context.Background(), FlushWriteCachePrm{ignoreErrors: false}))
}
+ _, err = sh.Delete(context.Background(), delPrm)
+ require.NoError(t, err)
_, err = sh.Get(context.Background(), getPrm)
require.True(t, client.IsErrObjectNotFound(err))
diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go
index 8a7c6972d..d0eecf74e 100644
--- a/pkg/local_object_storage/shard/get_test.go
+++ b/pkg/local_object_storage/shard/get_test.go
@@ -5,11 +5,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -49,7 +47,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -67,7 +65,7 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(obj))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.Equal(t, obj, res.Object())
})
@@ -95,13 +93,13 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
getPrm.SetAddress(object.AddressOf(child))
- res, err := testGet(t, sh, getPrm, hasWriteCache)
+ res, err := sh.Get(context.Background(), getPrm)
require.NoError(t, err)
require.True(t, binaryEqual(child, res.Object()))
getPrm.SetAddress(object.AddressOf(parent))
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
var si *objectSDK.SplitInfoError
require.True(t, errors.As(err, &si))
@@ -115,19 +113,6 @@ func testShardGet(t *testing.T, hasWriteCache bool) {
})
}
-func testGet(t *testing.T, sh *Shard, getPrm GetPrm, hasWriteCache bool) (GetRes, error) {
- res, err := sh.Get(context.Background(), getPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Get(context.Background(), getPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
-
// binary equal is used when object contains empty lists in the structure and
// requre.Equal fails on comparing and []{} lists.
func binaryEqual(a, b *objectSDK.Object) bool {
diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go
index 1f4631993..c65bbb1e3 100644
--- a/pkg/local_object_storage/shard/head_test.go
+++ b/pkg/local_object_storage/shard/head_test.go
@@ -4,11 +4,9 @@ import (
"context"
"errors"
"testing"
- "time"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
@@ -46,7 +44,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
headPrm.SetAddress(object.AddressOf(obj))
- res, err := testHead(t, sh, headPrm, hasWriteCache)
+ res, err := sh.Head(context.Background(), headPrm)
require.NoError(t, err)
require.Equal(t, obj.CutPayload(), res.Object())
})
@@ -74,7 +72,7 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
var siErr *objectSDK.SplitInfoError
- _, err = testHead(t, sh, headPrm, hasWriteCache)
+ _, err = sh.Head(context.Background(), headPrm)
require.True(t, errors.As(err, &siErr))
headPrm.SetAddress(object.AddressOf(parent))
@@ -85,16 +83,3 @@ func testShardHead(t *testing.T, hasWriteCache bool) {
require.Equal(t, parent.CutPayload(), head.Object())
})
}
-
-func testHead(t *testing.T, sh *Shard, headPrm HeadPrm, hasWriteCache bool) (HeadRes, error) {
- res, err := sh.Head(context.Background(), headPrm)
- if hasWriteCache {
- require.Eventually(t, func() bool {
- if client.IsErrObjectNotFound(err) {
- res, err = sh.Head(context.Background(), headPrm)
- }
- return !client.IsErrObjectNotFound(err)
- }, time.Second, time.Millisecond*100)
- }
- return res, err
-}
diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go
index 82754568f..1353d5d94 100644
--- a/pkg/local_object_storage/shard/inhume_test.go
+++ b/pkg/local_object_storage/shard/inhume_test.go
@@ -48,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
_, err := sh.Put(context.Background(), putPrm)
require.NoError(t, err)
- _, err = testGet(t, sh, getPrm, hasWriteCache)
+ _, err = sh.Get(context.Background(), getPrm)
require.NoError(t, err)
_, err = sh.Inhume(context.Background(), inhPrm)
From d4493a6d082011cfa24df68b41d92c7b905fda27 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 15:07:10 +0300
Subject: [PATCH 080/655] [#1390] getSvc: Fix Head EC1.1
If a local EC chunk is found but the remote node is offline, then a `HEAD --raw` request
returns "object not found".
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/container.go | 12 +++++++++---
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index d22b14192..034768c81 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -26,8 +26,10 @@ func (r *request) executeOnContainer(ctx context.Context) {
return
}
+ localStatus := r.status
+
for {
- if r.processCurrentEpoch(ctx) {
+ if r.processCurrentEpoch(ctx, localStatus) {
break
}
@@ -43,7 +45,7 @@ func (r *request) executeOnContainer(ctx context.Context) {
}
}
-func (r *request) processCurrentEpoch(ctx context.Context) bool {
+func (r *request) processCurrentEpoch(ctx context.Context, localStatus int) bool {
r.log.Debug(logs.ProcessEpoch,
zap.Uint64("number", r.curProcEpoch),
)
@@ -56,7 +58,11 @@ func (r *request) processCurrentEpoch(ctx context.Context) bool {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
- r.status = statusUndefined
+ if localStatus == statusEC { // possible only for raw == true and local == false
+ r.status = statusEC
+ } else {
+ r.status = statusUndefined
+ }
for {
addrs := traverser.Next()
From 3bb65ba820274a2014b3abfe6e11a98047b2059f Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Tue, 24 Sep 2024 11:46:15 +0300
Subject: [PATCH 081/655] [#1392] object: Fix target initialization within put
streamer
* Remove `relay` field from put streamer as it's no longer used;
* Fix initialization of `Relay` object writer parameter.
Signed-off-by: Airat Arifullin
---
pkg/services/object/put/streamer.go | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index f3803d433..d08e7fafa 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/target"
objectwriter "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/common/writer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
@@ -15,8 +14,6 @@ type Streamer struct {
*objectwriter.Config
target transformer.ChunkedObjectWriter
-
- relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
}
var errNotInit = errors.New("stream not initialized")
@@ -35,7 +32,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
Header: prm.hdr,
Container: prm.cnr,
TraverseOpts: prm.traverseOpts,
- Relay: p.relay,
+ Relay: prm.relay,
}
var err error
From 839dead226534887ffbb21b07327724e42cd2135 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 16 Sep 2024 12:38:45 +0300
Subject: [PATCH 082/655] [#1297] getSvc: Return AccessDenied instead of
ObjectNotFound
Do not replace the access denied error if it was received earlier.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/remote.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 163767c43..f2639f8e6 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -41,7 +41,7 @@ func (r *request) processNode(ctx context.Context, info client.NodeInfo) bool {
r.status = statusUndefined
if errors.As(err, &errAccessDenied) {
r.err = err
- } else {
+ } else if r.err == nil || !errors.As(r.err, &errAccessDenied) {
r.err = new(apistatus.ObjectNotFound)
}
}
From bdf386366c4e268d9f151c38c4eb5c837a49ab25 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 16 Sep 2024 12:40:12 +0300
Subject: [PATCH 083/655] [#1297] dev: Bump neo-go version
Signed-off-by: Dmitrii Stepanov
---
dev/docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/dev/docker-compose.yml b/dev/docker-compose.yml
index 9d026797c..be1956e65 100644
--- a/dev/docker-compose.yml
+++ b/dev/docker-compose.yml
@@ -3,7 +3,7 @@
version: "2.4"
services:
neo-go:
- image: nspccdev/neo-go:0.105.0
+ image: nspccdev/neo-go:0.106.0
container_name: neo-go
command: ["node", "--config-path", "/config", "--privnet", "--debug"]
stop_signal: SIGKILL
From 34e6a309c6b1cdd4e277f76b63a6b5d01b094115 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Wed, 18 Sep 2024 12:15:32 +0300
Subject: [PATCH 084/655] [#1356] engine: Evacuate objects from shards
concurrently
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-cli/modules/control/evacuation.go | 15 +-
pkg/local_object_storage/engine/evacuate.go | 270 +++++++++++-------
.../engine/evacuate_test.go | 41 ++-
pkg/local_object_storage/metabase/list.go | 167 +++++++++++
pkg/local_object_storage/shard/list.go | 65 +++++
pkg/services/control/server/evacuate_async.go | 14 +-
pkg/services/control/service.proto | 4 +
pkg/services/control/service_frostfs.pb.go | 68 ++++-
8 files changed, 533 insertions(+), 111 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index 6fa5ed75c..04a67e5b5 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -21,6 +21,9 @@ const (
noProgressFlag = "no-progress"
scopeFlag = "scope"
+ containerWorkerCountFlag = "container-worker-count"
+ objectWorkerCountFlag = "object-worker-count"
+
scopeAll = "all"
scopeObjects = "objects"
scopeTrees = "trees"
@@ -64,12 +67,16 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
pk := key.Get(cmd)
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
+ containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
+ objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
- Shard_ID: getShardIDList(cmd),
- IgnoreErrors: ignoreErrors,
- Scope: getEvacuationScope(cmd),
+ Shard_ID: getShardIDList(cmd),
+ IgnoreErrors: ignoreErrors,
+ Scope: getEvacuationScope(cmd),
+ ContainerWorkerCount: containerWorkerCount,
+ ObjectWorkerCount: objectWorkerCount,
},
}
@@ -371,6 +378,8 @@ func initControlStartEvacuationShardCmd() {
flags.String(scopeFlag, scopeAll, fmt.Sprintf("Evacuation scope; possible values: %s, %s, %s", scopeTrees, scopeObjects, scopeAll))
flags.Bool(awaitFlag, false, "Block execution until evacuation is completed")
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
+ flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
+ flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 7bef6edfb..3db556a8f 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -10,7 +10,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
- meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/util/logicerr"
@@ -24,6 +23,16 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap"
+ "golang.org/x/sync/errgroup"
+)
+
+const (
+ // containerWorkerCountDefault is a default value of the count of
+ // concurrent container evacuation workers.
+ containerWorkerCountDefault = 10
+ // objectWorkerCountDefault is a default value of the count of
+ // concurrent object evacuation workers.
+ objectWorkerCountDefault = 10
)
var (
@@ -79,6 +88,9 @@ type EvacuateShardPrm struct {
IgnoreErrors bool
Async bool
Scope EvacuateScope
+
+ ContainerWorkerCount uint32
+ ObjectWorkerCount uint32
}
// EvacuateShardRes represents result of the EvacuateShard operation.
@@ -189,8 +201,6 @@ func (p *EvacuateShardRes) DeepCopy() *EvacuateShardRes {
return res
}
-const defaultEvacuateBatchSize = 100
-
type pooledShard struct {
hashedShard
pool util.WorkerPool
@@ -242,8 +252,16 @@ func (e *StorageEngine) Evacuate(ctx context.Context, prm EvacuateShardPrm) (*Ev
return nil, err
}
+ var mtx sync.RWMutex
+ copyShards := func() []pooledShard {
+ mtx.RLock()
+ defer mtx.RUnlock()
+ t := make([]pooledShard, len(shards))
+ copy(t, shards)
+ return t
+ }
eg.Go(func() error {
- return e.evacuateShards(egCtx, shardIDs, prm, res, shards, shardsToEvacuate)
+ return e.evacuateShards(egCtx, shardIDs, prm, res, copyShards, shardsToEvacuate)
})
if prm.Async {
@@ -261,7 +279,7 @@ func ctxOrBackground(ctx context.Context, background bool) context.Context {
}
func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
var err error
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShards",
@@ -287,13 +305,39 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return err
}
- for _, shardID := range shardIDs {
- if err = e.evacuateShard(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
- e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
- return err
+ ctx, cancel, egShard, egContainer, egObject := e.createErrorGroupsForEvacuation(ctx, prm)
+ continueLoop := true
+ for i := 0; continueLoop && i < len(shardIDs); i++ {
+ select {
+ case <-ctx.Done():
+ continueLoop = false
+ default:
+ egShard.Go(func() error {
+ err := e.evacuateShard(ctx, cancel, shardIDs[i], prm, res, shards, shardsToEvacuate, egContainer, egObject)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
}
}
+ err = egShard.Wait()
+ if err != nil {
+ err = fmt.Errorf("shard error: %w", err)
+ }
+ errContainer := egContainer.Wait()
+ errObject := egObject.Wait()
+ if errContainer != nil {
+ err = errors.Join(err, fmt.Errorf("container error: %w", errContainer))
+ }
+ if errObject != nil {
+ err = errors.Join(err, fmt.Errorf("object error: %w", errObject))
+ }
+ if err != nil {
+ e.log.Error(logs.EngineFinishedWithErrorShardsEvacuation, zap.Error(err), zap.Strings("shard_ids", shardIDs), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)), zap.Stringer("scope", prm.Scope))
+ return err
+ }
e.log.Info(logs.EngineFinishedSuccessfullyShardsEvacuation,
zap.Strings("shard_ids", shardIDs),
@@ -309,6 +353,27 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
return nil
}
+func (e *StorageEngine) createErrorGroupsForEvacuation(ctx context.Context, prm EvacuateShardPrm) (
+ context.Context, context.CancelCauseFunc, *errgroup.Group, *errgroup.Group, *errgroup.Group,
+) {
+ operationCtx, cancel := context.WithCancelCause(ctx)
+ egObject, _ := errgroup.WithContext(operationCtx)
+ objectWorkerCount := prm.ObjectWorkerCount
+ if objectWorkerCount == 0 {
+ objectWorkerCount = objectWorkerCountDefault
+ }
+ egObject.SetLimit(int(objectWorkerCount))
+ egContainer, _ := errgroup.WithContext(operationCtx)
+ containerWorkerCount := prm.ContainerWorkerCount
+ if containerWorkerCount == 0 {
+ containerWorkerCount = containerWorkerCountDefault
+ }
+ egContainer.SetLimit(int(containerWorkerCount))
+ egShard, _ := errgroup.WithContext(operationCtx)
+
+ return operationCtx, cancel, egShard, egContainer, egObject
+}
+
func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.getTotals")
defer span.End()
@@ -335,8 +400,9 @@ func (e *StorageEngine) getTotals(ctx context.Context, prm EvacuateShardPrm, sha
return nil
}
-func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShard(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateShard",
trace.WithAttributes(
@@ -345,11 +411,10 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
defer span.End()
if prm.Scope.WithObjects() {
- if err := e.evacuateShardObjects(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
+ if err := e.evacuateShardObjects(ctx, cancel, shardID, prm, res, shards, shardsToEvacuate, egContainer, egObject); err != nil {
return err
}
}
-
if prm.Scope.WithTrees() && shardsToEvacuate[shardID].PiloramaEnabled() {
if err := e.evacuateShardTrees(ctx, shardID, prm, res, shards, shardsToEvacuate); err != nil {
return err
@@ -359,44 +424,60 @@ func (e *StorageEngine) evacuateShard(ctx context.Context, shardID string, prm E
return nil
}
-func (e *StorageEngine) evacuateShardObjects(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context.CancelCauseFunc, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
+ shards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ egContainer *errgroup.Group, egObject *errgroup.Group,
) error {
- var listPrm shard.ListWithCursorPrm
- listPrm.WithCount(defaultEvacuateBatchSize)
-
sh := shardsToEvacuate[shardID]
- sh.SetEvacuationInProgress(true)
-
- var c *meta.Cursor
- for {
- listPrm.WithCursor(c)
-
- // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes
- // because ListWithCursor works only with the metabase.
- listRes, err := sh.ListWithCursor(ctx, listPrm)
- if err != nil {
- if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) {
- break
+ var cntPrm shard.IterateOverContainersPrm
+ cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egContainer.Go(func() error {
+ var objPrm shard.IterateOverObjectsInContainerPrm
+ objPrm.BucketName = name
+ objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
+ egObject.Go(func() error {
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate)
+ if err != nil {
+ cancel(err)
+ }
+ return err
+ })
+ return nil
+ }
+ err := sh.IterateOverObjectsInContainer(ctx, objPrm)
+ if err != nil {
+ cancel(err)
}
- e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
return err
- }
-
- if err = e.evacuateObjects(ctx, sh, listRes.AddressList(), prm, res, shards, shardsToEvacuate); err != nil {
- return err
- }
-
- c = listRes.Cursor()
+ })
+ return nil
}
- return nil
+
+ sh.SetEvacuationInProgress(true)
+ err := sh.IterateOverContainers(ctx, cntPrm)
+ if err != nil {
+ cancel(err)
+ e.log.Error(logs.EngineShardsEvacuationFailedToListObjects, zap.String("shard_id", shardID), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ }
+ return err
}
func (e *StorageEngine) evacuateShardTrees(ctx context.Context, shardID string, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
sh := shardsToEvacuate[shardID]
+ shards := getShards()
var listPrm pilorama.TreeListTreesPrm
first := true
@@ -637,68 +718,65 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
return shards, nil
}
-func (e *StorageEngine) evacuateObjects(ctx context.Context, sh *shard.Shard, toEvacuate []object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
) error {
- ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects",
- trace.WithAttributes(
- attribute.Int("objects_count", len(toEvacuate)),
- ))
+ ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
- for i := range toEvacuate {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- addr := toEvacuate[i].Address
+ select {
+ case <-ctx.Done():
+ return context.Cause(ctx)
+ default:
+ }
- var getPrm shard.GetPrm
- getPrm.SetAddress(addr)
- getPrm.SkipEvacCheck(true)
+ shards := getShards()
+ addr := objInfo.Address
- getRes, err := sh.Get(ctx, getPrm)
- if err != nil {
- if prm.IgnoreErrors {
- res.objFailed.Add(1)
- continue
- }
- e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
+ var getPrm shard.GetPrm
+ getPrm.SetAddress(addr)
+ getPrm.SkipEvacCheck(true)
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), sh, shards, shardsToEvacuate, res)
- if err != nil {
- return err
- }
-
- if evacuatedLocal {
- continue
- }
-
- if prm.ObjectsHandler == nil {
- // Do not check ignoreErrors flag here because
- // ignoring errors on put make this command kinda useless.
- return fmt.Errorf("%w: %s", errPutShard, toEvacuate[i])
- }
-
- moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
- if err != nil {
- e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- return err
- }
- if moved {
- res.objEvacuated.Add(1)
- } else if prm.IgnoreErrors {
+ getRes, err := shardsToEvacuate[shardID].Get(ctx, getPrm)
+ if err != nil {
+ if prm.IgnoreErrors {
res.objFailed.Add(1)
- e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
- zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
- } else {
- return fmt.Errorf("object %s was not replicated", addr)
+ return nil
}
+ e.log.Error(logs.EngineShardsEvacuationFailedToReadObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res)
+ if err != nil {
+ return err
+ }
+
+ if evacuatedLocal {
+ return nil
+ }
+
+ if prm.ObjectsHandler == nil {
+ // Do not check ignoreErrors flag here because
+ // ignoring errors on put make this command kinda useless.
+ return fmt.Errorf("%w: %s", errPutShard, objInfo)
+ }
+
+ moved, err := prm.ObjectsHandler(ctx, addr, getRes.Object())
+ if err != nil {
+ e.log.Error(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ return err
+ }
+ if moved {
+ res.objEvacuated.Add(1)
+ } else if prm.IgnoreErrors {
+ res.objFailed.Add(1)
+ e.log.Warn(logs.EngineShardsEvacuationFailedToMoveObject, zap.String("address", addr.EncodeToString()), zap.Error(err), evacuationOperationLogField,
+ zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
+ } else {
+ return fmt.Errorf("object %s was not replicated", addr)
}
return nil
}
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index 28529fab9..f72333399 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -6,6 +6,8 @@ import (
"fmt"
"path/filepath"
"strconv"
+ "sync"
+ "sync/atomic"
"testing"
"time"
@@ -174,13 +176,13 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
errReplication := errors.New("handler error")
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
- var n uint64
+ var n atomic.Uint64
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
- if n == max {
+ if n.Load() == max {
return false, errReplication
}
- n++
+ n.Add(1)
for i := range objects {
if addr == objectCore.AddressOf(objects[i]) {
require.Equal(t, objects[i], obj)
@@ -314,6 +316,36 @@ func TestEvacuateCancellation(t *testing.T) {
require.Equal(t, uint64(0), res.ObjectsEvacuated())
}
+func TestEvacuateCancellationByError(t *testing.T) {
+ t.Parallel()
+ e, ids, _ := newEngineEvacuate(t, 2, 10)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+ require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[1:2]
+ var once atomic.Bool
+ prm.ObjectsHandler = func(ctx context.Context, a oid.Address, o *objectSDK.Object) (bool, error) {
+ var err error
+ flag := true
+ if once.CompareAndSwap(false, true) {
+ err = errors.New("test error")
+ flag = false
+ }
+ return flag, err
+ }
+ prm.Scope = EvacuateScopeObjects
+ prm.ObjectWorkerCount = 2
+ prm.ContainerWorkerCount = 2
+
+ _, err := e.Evacuate(context.Background(), prm)
+ require.ErrorContains(t, err, "test error")
+}
+
func TestEvacuateSingleProcess(t *testing.T) {
e, ids, _ := newEngineEvacuate(t, 2, 3)
defer func() {
@@ -531,6 +563,7 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
require.NoError(t, e.shards[ids[1].String()].SetMode(mode.ReadOnly))
+ mutex := sync.Mutex{}
evacuatedTreeOps := make(map[string][]*pilorama.Move)
var prm EvacuateShardPrm
prm.ShardID = ids
@@ -545,7 +578,9 @@ func TestEvacuateTreesRemote(t *testing.T) {
if op.Time == 0 {
return true, "", nil
}
+ mutex.Lock()
evacuatedTreeOps[key] = append(evacuatedTreeOps[key], &op)
+ mutex.Unlock()
height = op.Time + 1
}
}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index b4326a92c..5943be7f4 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -1,6 +1,7 @@
package meta
import (
+ "bytes"
"context"
"time"
@@ -61,6 +62,20 @@ func (l ListRes) Cursor() *Cursor {
return l.cursor
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, []byte, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
// ListWithCursor lists physical objects available in metabase starting from
// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
@@ -259,3 +274,155 @@ func parseContainerIDWithPrefix(containerID *cid.ID, name []byte) ([]byte, byte)
return rawID, name[0]
}
+
+// IterateOverContainers lists physical containers available in metabase starting from first.
+func (db *DB) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverContainers", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverContainers(ctx, tx, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm IterateOverContainersPrm) error {
+ var containerID cid.ID
+ for _, prefix := range [][]byte{{byte(primaryPrefix)}, {byte(lockersPrefix)}, {byte(tombstonePrefix)}} {
+ c := tx.Cursor()
+ for name, _ := c.Seek(prefix); name != nil && bytes.HasPrefix(name, prefix); name, _ = c.Next() {
+ cidRaw, _ := parseContainerIDWithPrefix(&containerID, name)
+ if cidRaw == nil {
+ continue
+ }
+
+ bktName := make([]byte, len(name))
+ copy(bktName, name)
+ var cnt cid.ID
+ copy(cnt[:], containerID[:])
+ err := prm.Handler(ctx, bktName, cnt)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer iterates over physical objects available in metabase starting from first.
+func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("IterateOverObjectsInContainer", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var containerID cid.ID
+ cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName)
+ if cidRaw == nil {
+ return nil
+ }
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm)
+ })
+ success = err == nil
+ return metaerr.Wrap(err)
+}
+
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte,
+ containerID cid.ID, prm IterateOverObjectsInContainerPrm,
+) error {
+ bkt := tx.Bucket(prm.BucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, v := c.First()
+
+ var objType objectSDK.Type
+
+ switch prefix {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ return nil
+ }
+
+ for ; k != nil; k, v = c.Next() {
+ var obj oid.ID
+ if err := obj.Decode(k); err != nil {
+ break
+ }
+
+ if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+
+ var isLinkingObj bool
+ var ecInfo *objectcore.ECInfo
+ if objType == objectSDK.TypeRegular {
+ var o objectSDK.Object
+ if err := o.Unmarshal(v); err != nil {
+ return err
+ }
+ isLinkingObj = isLinkObject(&o)
+ ecHeader := o.ECHeader()
+ if ecHeader != nil {
+ ecInfo = &objectcore.ECInfo{
+ ParentID: ecHeader.Parent(),
+ Index: ecHeader.Index(),
+ Total: ecHeader.Total(),
+ }
+ }
+ }
+
+ var a oid.Address
+ a.SetContainer(containerID)
+ a.SetObject(obj)
+ objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ err := prm.Handler(ctx, &objInfo)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 08ea81a0c..9f56ec750 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -34,6 +34,20 @@ func (r ListContainersRes) Containers() []cid.ID {
return r.containers
}
+// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
+type IterateOverContainersPrm struct {
+ // Handler function executed upon containers in db.
+ Handler func(context.Context, []byte, cid.ID) error
+}
+
+// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
+type IterateOverObjectsInContainerPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+ // Handler function executed upon objects in db.
+ Handler func(context.Context, *objectcore.Info) error
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -164,3 +178,54 @@ func (s *Shard) ListWithCursor(ctx context.Context, prm ListWithCursorPrm) (List
cursor: res.Cursor(),
}, nil
}
+
+// IterateOverContainers lists physical containers presented in shard.
+func (s *Shard) IterateOverContainers(ctx context.Context, prm IterateOverContainersPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverContainers",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = prm.Handler
+ err := s.metaBase.IterateOverContainers(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("could not iterate over containers: %w", err)
+ }
+
+ return nil
+}
+
+// IterateOverObjectsInContainer lists physical objects presented in shard for provided container's bucket name.
+func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOverObjectsInContainerPrm) error {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.IterateOverObjectsInContainer",
+ trace.WithAttributes(
+ attribute.Bool("has_handler", prm.Handler != nil),
+ ))
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return ErrDegradedMode
+ }
+
+ var metaPrm meta.IterateOverObjectsInContainerPrm
+ metaPrm.BucketName = prm.BucketName
+ metaPrm.Handler = prm.Handler
+ err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
+ if err != nil {
+ return fmt.Errorf("could not iterate over objects: %w", err)
+ }
+
+ return nil
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index aacebe9e3..bdc6f7c38 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -23,12 +23,14 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
}
prm := engine.EvacuateShardPrm{
- ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
- IgnoreErrors: req.GetBody().GetIgnoreErrors(),
- ObjectsHandler: s.replicateObject,
- TreeHandler: s.replicateTree,
- Async: true,
- Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ShardID: s.getShardIDList(req.GetBody().GetShard_ID()),
+ IgnoreErrors: req.GetBody().GetIgnoreErrors(),
+ ObjectsHandler: s.replicateObject,
+ TreeHandler: s.replicateTree,
+ Async: true,
+ Scope: engine.EvacuateScope(req.GetBody().GetScope()),
+ ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
+ ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
}
_, err = s.s.Evacuate(ctx, prm)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 04994328a..88a06de22 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -394,6 +394,10 @@ message StartShardEvacuationRequest {
bool ignore_errors = 2;
// Evacuation scope.
uint32 scope = 3;
+ // Count of concurrent container evacuation workers.
+ uint32 container_worker_count = 4;
+ // Count of concurrent object evacuation workers.
+ uint32 object_worker_count = 5;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index 019cac290..e92a8acd1 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -6511,9 +6511,11 @@ func (x *StartShardEvacuationRequest_Body_Scope) FromString(s string) bool {
}
type StartShardEvacuationRequest_Body struct {
- Shard_ID [][]byte `json:"shardID"`
- IgnoreErrors bool `json:"ignoreErrors"`
- Scope uint32 `json:"scope"`
+ Shard_ID [][]byte `json:"shardID"`
+ IgnoreErrors bool `json:"ignoreErrors"`
+ Scope uint32 `json:"scope"`
+ ContainerWorkerCount uint32 `json:"containerWorkerCount"`
+ ObjectWorkerCount uint32 `json:"objectWorkerCount"`
}
var (
@@ -6533,6 +6535,8 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.RepeatedBytesSize(1, x.Shard_ID)
size += proto.BoolSize(2, x.IgnoreErrors)
size += proto.UInt32Size(3, x.Scope)
+ size += proto.UInt32Size(4, x.ContainerWorkerCount)
+ size += proto.UInt32Size(5, x.ObjectWorkerCount)
return size
}
@@ -6558,6 +6562,12 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.Scope != 0 {
mm.AppendUint32(3, x.Scope)
}
+ if x.ContainerWorkerCount != 0 {
+ mm.AppendUint32(4, x.ContainerWorkerCount)
+ }
+ if x.ObjectWorkerCount != 0 {
+ mm.AppendUint32(5, x.ObjectWorkerCount)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -6587,6 +6597,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "Scope")
}
x.Scope = data
+ case 4: // ContainerWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ContainerWorkerCount")
+ }
+ x.ContainerWorkerCount = data
+ case 5: // ObjectWorkerCount
+ data, ok := fc.Uint32()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
+ }
+ x.ObjectWorkerCount = data
}
}
return nil
@@ -6618,6 +6640,24 @@ func (x *StartShardEvacuationRequest_Body) GetScope() uint32 {
func (x *StartShardEvacuationRequest_Body) SetScope(v uint32) {
x.Scope = v
}
+func (x *StartShardEvacuationRequest_Body) GetContainerWorkerCount() uint32 {
+ if x != nil {
+ return x.ContainerWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetContainerWorkerCount(v uint32) {
+ x.ContainerWorkerCount = v
+}
+func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
+ if x != nil {
+ return x.ObjectWorkerCount
+ }
+ return 0
+}
+func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
+ x.ObjectWorkerCount = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -6653,6 +6693,16 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString(prefix)
out.Uint32(x.Scope)
}
+ {
+ const prefix string = ",\"containerWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ContainerWorkerCount)
+ }
+ {
+ const prefix string = ",\"objectWorkerCount\":"
+ out.RawString(prefix)
+ out.Uint32(x.ObjectWorkerCount)
+ }
out.RawByte('}')
}
@@ -6706,6 +6756,18 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.Uint32()
x.Scope = f
}
+ case "containerWorkerCount":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ContainerWorkerCount = f
+ }
+ case "objectWorkerCount":
+ {
+ var f uint32
+ f = in.Uint32()
+ x.ObjectWorkerCount = f
+ }
}
in.WantComma()
}
From 8434f3dbfc850839c759430cea9640f3c87e5f95 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 19 Sep 2024 17:00:58 +0300
Subject: [PATCH 085/655] [#1385] metabase: Use `Batch` for delete-related
operations
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/counter.go | 4 ++--
pkg/local_object_storage/metabase/delete.go | 2 +-
pkg/local_object_storage/metabase/graveyard.go | 2 +-
pkg/local_object_storage/metabase/inhume.go | 2 +-
pkg/local_object_storage/metabase/lock.go | 4 ++--
5 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/pkg/local_object_storage/metabase/counter.go b/pkg/local_object_storage/metabase/counter.go
index 275099ff2..3ead0d9a0 100644
--- a/pkg/local_object_storage/metabase/counter.go
+++ b/pkg/local_object_storage/metabase/counter.go
@@ -654,7 +654,7 @@ func (db *DB) DeleteContainerSize(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerVolumeBucketName)
key := make([]byte, cidSize)
@@ -737,7 +737,7 @@ func (db *DB) DeleteContainerCount(ctx context.Context, id cid.ID) error {
return ErrReadOnlyMode
}
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(containerCounterBucketName)
key := make([]byte, cidSize)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 00c8d06e0..e5e9840a0 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -112,7 +112,7 @@ func (db *DB) Delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
var err error
var res DeleteRes
- err = db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
res, err = db.deleteGroup(tx, prm.addrs)
return err
})
diff --git a/pkg/local_object_storage/metabase/graveyard.go b/pkg/local_object_storage/metabase/graveyard.go
index 80d40fb78..31f95d6ed 100644
--- a/pkg/local_object_storage/metabase/graveyard.go
+++ b/pkg/local_object_storage/metabase/graveyard.go
@@ -282,7 +282,7 @@ func (db *DB) DropGraves(ctx context.Context, tss []TombstonedObject) error {
buf := make([]byte, addressKeySize)
- return db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return db.boltDB.Batch(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(graveyardBucketName)
if bkt == nil {
return nil
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index b62accc43..3aae15061 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -181,7 +181,7 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
inhumedByCnrID: make(map[cid.ID]ObjectCounters),
}
currEpoch := db.epochState.CurrentEpoch()
- err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
return db.inhumeTx(tx, currEpoch, prm, &res)
})
success = err == nil
diff --git a/pkg/local_object_storage/metabase/lock.go b/pkg/local_object_storage/metabase/lock.go
index 732ba426d..6b78ef392 100644
--- a/pkg/local_object_storage/metabase/lock.go
+++ b/pkg/local_object_storage/metabase/lock.go
@@ -78,7 +78,7 @@ func (db *DB) lockInternal(locked []oid.ID, cnr cid.ID, locker oid.ID) error {
}
key := make([]byte, cidSize)
- return metaerr.Wrap(db.boltDB.Update(func(tx *bbolt.Tx) error {
+ return metaerr.Wrap(db.boltDB.Batch(func(tx *bbolt.Tx) error {
if firstIrregularObjectType(tx, cnr, bucketKeysLocked...) != objectSDK.TypeRegular {
return logicerr.Wrap(new(apistatus.LockNonRegularObject))
}
@@ -143,7 +143,7 @@ func (db *DB) FreeLockedBy(lockers []oid.Address) ([]oid.Address, error) {
var unlockedObjects []oid.Address
- if err := db.boltDB.Update(func(tx *bbolt.Tx) error {
+ if err := db.boltDB.Batch(func(tx *bbolt.Tx) error {
for i := range lockers {
unlocked, err := freePotentialLocks(tx, lockers[i].Container(), lockers[i].Object())
if err != nil {
From 76268e3ea2a73072119ea1963f914646c029e08a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 20 Sep 2024 13:28:21 +0300
Subject: [PATCH 086/655] [#1385] metabase: Validate that tombstone and target
have the same container ID
Target container ID is taken from tombstone: cmd/frostfs-node/object.go:507
Also object of type `TOMBSTONE` contains objectID, so tombstone and
tombstoned object must have the same containerID.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/lock_test.go | 8 +++--
.../metabase/containers_test.go | 4 +--
.../metabase/control_test.go | 2 +-
.../metabase/counter_test.go | 30 ++++++++++++-------
.../metabase/delete_test.go | 6 ++--
.../metabase/exists_test.go | 2 +-
pkg/local_object_storage/metabase/get_test.go | 3 +-
.../metabase/graveyard_test.go | 27 ++++++++++-------
pkg/local_object_storage/metabase/inhume.go | 18 +++++++++++
.../metabase/inhume_test.go | 21 +++++++++----
.../metabase/iterators_test.go | 6 ++++
.../metabase/list_test.go | 2 +-
.../metabase/lock_test.go | 10 +++++--
.../metabase/select_test.go | 6 +---
.../metabase/storage_id_test.go | 2 +-
.../shard/metrics_test.go | 13 ++++----
16 files changed, 108 insertions(+), 52 deletions(-)
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 7fa7c27ef..9e6758fb4 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -199,7 +199,9 @@ func TestLockExpiration(t *testing.T) {
require.NoError(t, err)
var inhumePrm InhumePrm
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
var objLockedErr *apistatus.ObjectLocked
_, err = e.Inhume(context.Background(), inhumePrm)
@@ -209,7 +211,9 @@ func TestLockExpiration(t *testing.T) {
e.HandleNewEpoch(context.Background(), lockerExpiresAfter+1)
// 4.
- inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(cnr)
+ inhumePrm.WithTarget(tombAddr, objectcore.AddressOf(obj))
require.Eventually(t, func() bool {
_, err = e.Inhume(context.Background(), inhumePrm)
diff --git a/pkg/local_object_storage/metabase/containers_test.go b/pkg/local_object_storage/metabase/containers_test.go
index 8b1874458..110be68ad 100644
--- a/pkg/local_object_storage/metabase/containers_test.go
+++ b/pkg/local_object_storage/metabase/containers_test.go
@@ -67,7 +67,7 @@ func TestDB_Containers(t *testing.T) {
assertContains(cnrs, cnr)
- require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.Address()))
+ require.NoError(t, metaInhume(db, object.AddressOf(obj), oidtest.ID()))
cnrs, err = db.Containers(context.Background())
require.NoError(t, err)
@@ -164,7 +164,7 @@ func TestDB_ContainerSize(t *testing.T) {
require.NoError(t, metaInhume(
db,
object.AddressOf(obj),
- oidtest.Address(),
+ oidtest.ID(),
))
volume -= int(obj.PayloadSize())
diff --git a/pkg/local_object_storage/metabase/control_test.go b/pkg/local_object_storage/metabase/control_test.go
index 0354a5eb6..2a64881cb 100644
--- a/pkg/local_object_storage/metabase/control_test.go
+++ b/pkg/local_object_storage/metabase/control_test.go
@@ -41,7 +41,7 @@ func TestReset(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
- err = metaInhume(db, addrToInhume, oidtest.Address())
+ err = metaInhume(db, addrToInhume, oidtest.ID())
require.NoError(t, err)
assertExists(addr, true, nil)
diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go
index d1f808a63..dccccd456 100644
--- a/pkg/local_object_storage/metabase/counter_test.go
+++ b/pkg/local_object_storage/metabase/counter_test.go
@@ -156,13 +156,18 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- res, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
- require.Equal(t, uint64(len(inhumedObjs)), res.LogicInhumed())
- require.Equal(t, uint64(len(inhumedObjs)), res.UserInhumed())
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ res, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), res.LogicInhumed())
+ require.Equal(t, uint64(1), res.UserInhumed())
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
@@ -296,11 +301,16 @@ func TestCounters(t *testing.T) {
}
var prm meta.InhumePrm
- prm.SetTombstoneAddress(oidtest.Address())
- prm.SetAddresses(inhumedObjs...)
+ for _, o := range inhumedObjs {
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(o.Container())
- _, err := db.Inhume(context.Background(), prm)
- require.NoError(t, err)
+ prm.SetTombstoneAddress(tombAddr)
+ prm.SetAddresses(o)
+
+ _, err := db.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
c, err := db.ObjectCounters()
require.NoError(t, err)
diff --git a/pkg/local_object_storage/metabase/delete_test.go b/pkg/local_object_storage/metabase/delete_test.go
index cb85157e7..fe5f7833b 100644
--- a/pkg/local_object_storage/metabase/delete_test.go
+++ b/pkg/local_object_storage/metabase/delete_test.go
@@ -40,12 +40,12 @@ func TestDB_Delete(t *testing.T) {
// inhume parent and child so they will be on graveyard
ts := testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(child), object.AddressOf(ts).Object())
require.NoError(t, err)
ts = testutil.GenerateObjectWithCID(cnr)
- err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(parent), object.AddressOf(ts).Object())
require.NoError(t, err)
// delete object
@@ -108,7 +108,7 @@ func TestGraveOnlyDelete(t *testing.T) {
addr := oidtest.Address()
// inhume non-existent object by address
- require.NoError(t, metaInhume(db, addr, oidtest.Address()))
+ require.NoError(t, metaInhume(db, addr, oidtest.ID()))
// delete the object data
require.NoError(t, metaDelete(db, addr))
diff --git a/pkg/local_object_storage/metabase/exists_test.go b/pkg/local_object_storage/metabase/exists_test.go
index 0087c1e31..1e4148eba 100644
--- a/pkg/local_object_storage/metabase/exists_test.go
+++ b/pkg/local_object_storage/metabase/exists_test.go
@@ -37,7 +37,7 @@ func TestDB_Exists(t *testing.T) {
require.True(t, exists)
t.Run("removed object", func(t *testing.T) {
- err := metaInhume(db, object.AddressOf(regular), oidtest.Address())
+ err := metaInhume(db, object.AddressOf(regular), oidtest.ID())
require.NoError(t, err)
exists, err := metaExists(db, object.AddressOf(regular))
diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go
index 7654d2cd8..f0caaea70 100644
--- a/pkg/local_object_storage/metabase/get_test.go
+++ b/pkg/local_object_storage/metabase/get_test.go
@@ -150,9 +150,8 @@ func TestDB_Get(t *testing.T) {
t.Run("get removed object", func(t *testing.T) {
obj := oidtest.Address()
- ts := oidtest.Address()
- require.NoError(t, metaInhume(db, obj, ts))
+ require.NoError(t, metaInhume(db, obj, oidtest.ID()))
_, err := metaGet(db, obj, false)
require.True(t, client.IsErrObjectAlreadyRemoved(err))
diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go
index 75c7e2852..b9c6ce28c 100644
--- a/pkg/local_object_storage/metabase/graveyard_test.go
+++ b/pkg/local_object_storage/metabase/graveyard_test.go
@@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -114,11 +115,12 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -138,6 +140,7 @@ func TestDB_IterateDeletedObjects(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
inhumePrm.SetTombstoneAddress(addrTombstone)
@@ -201,11 +204,12 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 4 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
- obj3 := testutil.GenerateObject()
- obj4 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ obj3 := testutil.GenerateObjectWithCID(cnr)
+ obj4 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -223,6 +227,7 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(
@@ -392,9 +397,10 @@ func TestDB_DropGraves(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
// generate and put 2 objects
- obj1 := testutil.GenerateObject()
- obj2 := testutil.GenerateObject()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ obj2 := testutil.GenerateObjectWithCID(cnr)
var err error
@@ -406,6 +412,7 @@ func TestDB_DropGraves(t *testing.T) {
// inhume with tombstone
addrTombstone := oidtest.Address()
+ addrTombstone.SetContainer(cnr)
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2))
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 3aae15061..77bb84af1 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -143,6 +143,20 @@ func (p *InhumePrm) SetForceGCMark() {
p.forceRemoval = true
}
+func (p *InhumePrm) validate() error {
+ if p == nil {
+ return nil
+ }
+ if p.tomb != nil {
+ for _, addr := range p.target {
+ if addr.Container() != p.tomb.Container() {
+ return fmt.Errorf("object %s and tombstone %s have different container ID", addr, p.tomb)
+ }
+ }
+ }
+ return nil
+}
+
var errBreakBucketForEach = errors.New("bucket ForEach break")
// ErrLockObjectRemoval is returned when inhume operation is being
@@ -171,6 +185,10 @@ func (db *DB) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
db.modeMtx.RLock()
defer db.modeMtx.RUnlock()
+ if err := prm.validate(); err != nil {
+ return InhumeRes{}, err
+ }
+
if db.mode.NoMetabase() {
return InhumeRes{}, ErrDegradedMode
} else if db.mode.ReadOnly() {
diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go
index 163fbec2a..277316f7b 100644
--- a/pkg/local_object_storage/metabase/inhume_test.go
+++ b/pkg/local_object_storage/metabase/inhume_test.go
@@ -9,6 +9,7 @@ import (
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
@@ -21,12 +22,10 @@ func TestDB_Inhume(t *testing.T) {
raw := testutil.GenerateObject()
testutil.AddAttribute(raw, "foo", "bar")
- tombstoneID := oidtest.Address()
-
err := putBig(db, raw)
require.NoError(t, err)
- err = metaInhume(db, object.AddressOf(raw), tombstoneID)
+ err = metaInhume(db, object.AddressOf(raw), oidtest.ID())
require.NoError(t, err)
_, err = metaExists(db, object.AddressOf(raw))
@@ -43,13 +42,20 @@ func TestInhumeTombOnTomb(t *testing.T) {
var (
err error
+ cnr = cidtest.ID()
addr1 = oidtest.Address()
addr2 = oidtest.Address()
addr3 = oidtest.Address()
+ addr4 = oidtest.Address()
inhumePrm meta.InhumePrm
existsPrm meta.ExistsPrm
)
+ addr1.SetContainer(cnr)
+ addr2.SetContainer(cnr)
+ addr3.SetContainer(cnr)
+ addr4.SetContainer(cnr)
+
inhumePrm.SetAddresses(addr1)
inhumePrm.SetTombstoneAddress(addr2)
@@ -84,7 +90,7 @@ func TestInhumeTombOnTomb(t *testing.T) {
require.True(t, client.IsErrObjectAlreadyRemoved(err))
inhumePrm.SetAddresses(addr1)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ inhumePrm.SetTombstoneAddress(addr4)
// try to inhume addr1 (which is already a tombstone in graveyard)
_, err = db.Inhume(context.Background(), inhumePrm)
@@ -117,10 +123,13 @@ func TestInhumeLocked(t *testing.T) {
require.ErrorAs(t, err, &e)
}
-func metaInhume(db *meta.DB, target, tomb oid.Address) error {
+func metaInhume(db *meta.DB, target oid.Address, tomb oid.ID) error {
var inhumePrm meta.InhumePrm
inhumePrm.SetAddresses(target)
- inhumePrm.SetTombstoneAddress(tomb)
+ var tombAddr oid.Address
+ tombAddr.SetContainer(target.Container())
+ tombAddr.SetObject(tomb)
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err := db.Inhume(context.Background(), inhumePrm)
return err
diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go
index 54d56d923..777a94a6f 100644
--- a/pkg/local_object_storage/metabase/iterators_test.go
+++ b/pkg/local_object_storage/metabase/iterators_test.go
@@ -9,6 +9,7 @@ import (
object2 "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -71,11 +72,16 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) {
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
+ cnr := cidtest.ID()
ts := oidtest.Address()
protected1 := oidtest.Address()
protected2 := oidtest.Address()
protectedLocked := oidtest.Address()
garbage := oidtest.Address()
+ ts.SetContainer(cnr)
+ protected1.SetContainer(cnr)
+ protected2.SetContainer(cnr)
+ protectedLocked.SetContainer(cnr)
var prm meta.InhumePrm
var err error
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index 6207497b1..bc1726bd6 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -110,7 +110,7 @@ func TestLisObjectsWithCursor(t *testing.T) {
err = putBig(db, obj)
require.NoError(t, err)
ts := testutil.GenerateObjectWithCID(containerID)
- err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts))
+ err = metaInhume(db, object.AddressOf(obj), object.AddressOf(ts).Object())
require.NoError(t, err)
// add one child object (do not include parent into expected)
diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go
index 62a109b02..9601cb2be 100644
--- a/pkg/local_object_storage/metabase/lock_test.go
+++ b/pkg/local_object_storage/metabase/lock_test.go
@@ -73,7 +73,9 @@ func TestDB_Lock(t *testing.T) {
_, err := db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr := oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
@@ -89,7 +91,9 @@ func TestDB_Lock(t *testing.T) {
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
- inhumePrm.SetTombstoneAddress(oidtest.Address())
+ tombAddr = oidtest.Address()
+ tombAddr.SetContainer(objAddr.Container())
+ inhumePrm.SetTombstoneAddress(tombAddr)
_, err = db.Inhume(context.Background(), inhumePrm)
require.ErrorAs(t, err, &objLockedErr)
})
@@ -103,7 +107,7 @@ func TestDB_Lock(t *testing.T) {
var objLockedErr *apistatus.ObjectLocked
// try to inhume locked object using tombstone
- err := metaInhume(db, objAddr, lockAddr)
+ err := metaInhume(db, objAddr, lockAddr.Object())
require.ErrorAs(t, err, &objLockedErr)
// free locked object
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 6469bbdbc..fcd5d3a90 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -352,11 +352,7 @@ func TestDB_SelectInhume(t *testing.T) {
object.AddressOf(raw2),
)
- var tombstone oid.Address
- tombstone.SetContainer(cnr)
- tombstone.SetObject(oidtest.ID())
-
- err = metaInhume(db, object.AddressOf(raw2), tombstone)
+ err = metaInhume(db, object.AddressOf(raw2), oidtest.ID())
require.NoError(t, err)
fs = objectSDK.SearchFilters{}
diff --git a/pkg/local_object_storage/metabase/storage_id_test.go b/pkg/local_object_storage/metabase/storage_id_test.go
index aaf6480ab..a86e42bd2 100644
--- a/pkg/local_object_storage/metabase/storage_id_test.go
+++ b/pkg/local_object_storage/metabase/storage_id_test.go
@@ -43,7 +43,7 @@ func TestDB_StorageID(t *testing.T) {
cnrID, ok := deleted.ContainerID()
require.True(t, ok)
ts := testutil.GenerateObjectWithCID(cnrID)
- require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts)))
+ require.NoError(t, metaInhume(db, object.AddressOf(deleted), object.AddressOf(ts).Object()))
// check StorageID for object without storageID
fetchedStorageID, err = metaStorageID(db, object.AddressOf(raw2))
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 01a85da97..56622326a 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -17,6 +17,7 @@ import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
@@ -308,17 +309,19 @@ func TestCounters(t *testing.T) {
t.Run("inhume_TS", func(t *testing.T) {
var prm InhumePrm
- ts := objectcore.AddressOf(testutil.GenerateObject())
phy := mm.getObjectCounter(physical)
logic := mm.getObjectCounter(logical)
custom := mm.getObjectCounter(user)
inhumedNumber := int(phy / 4)
- prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
-
- _, err := sh.Inhume(context.Background(), prm)
- require.NoError(t, err)
+ for _, o := range addrFromObjs(oo[:inhumedNumber]) {
+ ts := oidtest.Address()
+ ts.SetContainer(o.Container())
+ prm.SetTarget(ts, o)
+ _, err := sh.Inhume(context.Background(), prm)
+ require.NoError(t, err)
+ }
for i := range inhumedNumber {
cid, ok := oo[i].ContainerID()
From fd18aa363b7b33f8b662f9b4bffaf9f3099216a6 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 20 Sep 2024 13:32:05 +0300
Subject: [PATCH 087/655] [#1385] metabase: Optimize `isTomb` check
As the tombstone and the target must have the same container ID, do not
iterate over other containers.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/inhume.go | 29 ++++++++-------------
1 file changed, 11 insertions(+), 18 deletions(-)
diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go
index 77bb84af1..12f27d330 100644
--- a/pkg/local_object_storage/metabase/inhume.go
+++ b/pkg/local_object_storage/metabase/inhume.go
@@ -377,11 +377,8 @@ func (db *DB) getInhumeTargetBucketAndValue(garbageBKT, graveyardBKT *bbolt.Buck
return targetBucket, value, nil
}
-func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool, error) {
- targetIsTomb, err := isTomb(graveyardBKT, key)
- if err != nil {
- return false, err
- }
+func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, addressKey []byte) (bool, error) {
+ targetIsTomb := isTomb(graveyardBKT, addressKey)
// do not add grave if target is a tombstone
if targetIsTomb {
@@ -390,7 +387,7 @@ func (db *DB) markAsGC(graveyardBKT, garbageBKT *bbolt.Bucket, key []byte) (bool
// if tombstone appears object must be
// additionally marked with GC
- return false, garbageBKT.Put(key, zeroValue)
+ return false, garbageBKT.Put(addressKey, zeroValue)
}
func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Bucket, targetKey []byte, cnr cid.ID, obj *objectSDK.Object, res *InhumeRes) error {
@@ -410,25 +407,21 @@ func (db *DB) updateDeleteInfo(tx *bbolt.Tx, garbageBKT, graveyardBKT *bbolt.Buc
return nil
}
-func isTomb(graveyardBucket *bbolt.Bucket, key []byte) (bool, error) {
+func isTomb(graveyardBucket *bbolt.Bucket, addressKey []byte) bool {
targetIsTomb := false
// iterate over graveyard and check if target address
// is the address of tombstone in graveyard.
- err := graveyardBucket.ForEach(func(_, v []byte) error {
+ // tombstone must have the same container ID as key.
+ c := graveyardBucket.Cursor()
+ containerPrefix := addressKey[:cidSize]
+ for k, v := c.Seek(containerPrefix); k != nil && bytes.HasPrefix(k, containerPrefix); k, v = c.Next() {
// check if graveyard has record with key corresponding
// to tombstone address (at least one)
- targetIsTomb = bytes.Equal(v, key)
-
+ targetIsTomb = bytes.Equal(v, addressKey)
if targetIsTomb {
- // break bucket iterator
- return errBreakBucketForEach
+ break
}
-
- return nil
- })
- if err != nil && !errors.Is(err, errBreakBucketForEach) {
- return false, err
}
- return targetIsTomb, nil
+ return targetIsTomb
}
From 95597d34371db6555739c4e92640cd8f8862ee7e Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 08:56:02 +0300
Subject: [PATCH 088/655] [#1388] golangci: Make `unused` linter stricter
Add additional checks. The most important false positive is structs used as
map keys.
Signed-off-by: Dmitrii Stepanov
---
.golangci.yml | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/.golangci.yml b/.golangci.yml
index 971f0d0e7..33cf88d8a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -38,6 +38,10 @@ linters-settings:
alias:
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object
alias: objectSDK
+ unused:
+ field-writes-are-uses: false
+ exported-fields-are-used: false
+ local-variables-are-used: false
custom:
truecloudlab-linters:
path: bin/linters/external_linters.so
From 2bd560e52846b77d2902370cfaa80d54fcd77c46 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 08:57:18 +0300
Subject: [PATCH 089/655] [#1388] cli: Drop unused flag/parameter
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-cli/internal/client/client.go | 7 -------
cmd/frostfs-cli/modules/object/head.go | 3 ---
2 files changed, 10 deletions(-)
diff --git a/cmd/frostfs-cli/internal/client/client.go b/cmd/frostfs-cli/internal/client/client.go
index 03a987a57..dcd67f0d9 100644
--- a/cmd/frostfs-cli/internal/client/client.go
+++ b/cmd/frostfs-cli/internal/client/client.go
@@ -565,13 +565,6 @@ type HeadObjectPrm struct {
commonObjectPrm
objectAddressPrm
rawPrm
-
- mainOnly bool
-}
-
-// SetMainOnlyFlag sets flag to get only main fields of an object header in terms of FrostFS API.
-func (x *HeadObjectPrm) SetMainOnlyFlag(v bool) {
- x.mainOnly = v
}
// HeadObjectRes groups the resulting values of HeadObject operation.
diff --git a/cmd/frostfs-cli/modules/object/head.go b/cmd/frostfs-cli/modules/object/head.go
index 14797dc41..cf2e2d5e6 100644
--- a/cmd/frostfs-cli/modules/object/head.go
+++ b/cmd/frostfs-cli/modules/object/head.go
@@ -38,7 +38,6 @@ func initObjectHeadCmd() {
_ = objectHeadCmd.MarkFlagRequired(commonflags.OIDFlag)
flags.String(fileFlag, "", "File to write header to. Default: stdout.")
- flags.Bool("main-only", false, "Return only main fields")
flags.Bool(commonflags.JSON, false, "Marshal output in JSON")
flags.Bool("proto", false, "Marshal output in Protobuf")
flags.Bool(rawFlag, false, rawFlagDesc)
@@ -49,7 +48,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
var obj oid.ID
objAddr := readObjectAddress(cmd, &cnr, &obj)
- mainOnly, _ := cmd.Flags().GetBool("main-only")
pk := key.GetOrGenerate(cmd)
cli := internalclient.GetSDKClientByFlag(cmd, pk, commonflags.RPC)
@@ -62,7 +60,6 @@ func getObjectHeader(cmd *cobra.Command, _ []string) {
raw, _ := cmd.Flags().GetBool(rawFlag)
prm.SetRawFlag(raw)
prm.SetAddress(objAddr)
- prm.SetMainOnlyFlag(mainOnly)
res, err := internalclient.HeadObject(cmd.Context(), prm)
if err != nil {
From b69e07da7af2c8167e02585a723008fa2753f848 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:02:00 +0300
Subject: [PATCH 090/655] [#1388] metrics: Mark nolint:unused metrics
Although these fields could be deleted, I annotated them so that all the
metrics used would be defined in one place.
Signed-off-by: Dmitrii Stepanov
---
internal/metrics/innerring.go | 3 ++-
internal/metrics/node.go | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/internal/metrics/innerring.go b/internal/metrics/innerring.go
index f6b14a632..f3f529d05 100644
--- a/internal/metrics/innerring.go
+++ b/internal/metrics/innerring.go
@@ -17,7 +17,8 @@ type InnerRingServiceMetrics struct {
eventDuration *prometheus.HistogramVec
morphCacheMetrics *morphCacheMetrics
logMetrics logger.LogMetrics
- appInfo *ApplicationInfo
+ // nolint: unused
+ appInfo *ApplicationInfo
}
// NewInnerRingMetrics returns new instance of metrics collectors for inner ring.
diff --git a/internal/metrics/node.go b/internal/metrics/node.go
index d9e401446..711387875 100644
--- a/internal/metrics/node.go
+++ b/internal/metrics/node.go
@@ -25,7 +25,8 @@ type NodeMetrics struct {
morphClient *morphClientMetrics
morphCache *morphCacheMetrics
log logger.LogMetrics
- appInfo *ApplicationInfo
+ // nolint: unused
+ appInfo *ApplicationInfo
}
func NewNodeMetrics() *NodeMetrics {
From aedb55f913d151669885aa6bc8ea5e83269a60b0 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:06:19 +0300
Subject: [PATCH 091/655] [#1388] governance: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/initialization.go | 1 -
.../processors/governance/handlers_test.go | 21 -------------------
.../processors/governance/processor.go | 3 ---
3 files changed, 25 deletions(-)
diff --git a/pkg/innerring/initialization.go b/pkg/innerring/initialization.go
index 7da0a9794..c4aaeda56 100644
--- a/pkg/innerring/initialization.go
+++ b/pkg/innerring/initialization.go
@@ -163,7 +163,6 @@ func (s *Server) createAlphaSync(cfg *viper.Viper, frostfsCli *frostfsClient.Cli
Log: s.log,
Metrics: s.irMetrics,
FrostFSClient: frostfsCli,
- NetmapClient: s.netmapClient,
AlphabetState: s,
EpochState: s,
Voter: s,
diff --git a/pkg/innerring/processors/governance/handlers_test.go b/pkg/innerring/processors/governance/handlers_test.go
index b73e24318..87040bdef 100644
--- a/pkg/innerring/processors/governance/handlers_test.go
+++ b/pkg/innerring/processors/governance/handlers_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
frostfscontract "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/frostfs"
- nmClient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event/rolemanagement"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
@@ -38,7 +37,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -50,7 +48,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -73,10 +70,6 @@ func TestHandleAlphabetSyncEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdateExp []nmClient.UpdateIRPrm
-
- require.EqualValues(t, irUpdateExp, nm.updates, "invalid IR updates")
-
var expAlphabetUpdate client.UpdateAlphabetListPrm
expAlphabetUpdate.SetHash(ev.txHash)
expAlphabetUpdate.SetList(testKeys.newInnerRingExp)
@@ -119,7 +112,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
alphabetKeys: testKeys.mainnetKeys,
}
f := &testFrostFSClient{}
- nm := &testNetmapClient{}
proc, err := New(
&Params{
@@ -131,7 +123,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
MorphClient: m,
MainnetClient: mn,
FrostFSClient: f,
- NetmapClient: nm,
},
)
@@ -155,9 +146,6 @@ func TestHandleAlphabetDesignateEvent(t *testing.T) {
},
}, v.votes, "invalid vote calls")
- var irUpdatesExp []nmClient.UpdateIRPrm
- require.EqualValues(t, irUpdatesExp, nm.updates, "invalid IR updates")
-
var alpabetUpdExp client.UpdateAlphabetListPrm
alpabetUpdExp.SetList(testKeys.newInnerRingExp)
alpabetUpdExp.SetHash(ev.TxHash)
@@ -293,12 +281,3 @@ func (c *testFrostFSClient) AlphabetUpdate(p frostfscontract.AlphabetUpdatePrm)
c.updates = append(c.updates, p)
return nil
}
-
-type testNetmapClient struct {
- updates []nmClient.UpdateIRPrm
-}
-
-func (c *testNetmapClient) UpdateInnerRing(p nmClient.UpdateIRPrm) error {
- c.updates = append(c.updates, p)
- return nil
-}
diff --git a/pkg/innerring/processors/governance/processor.go b/pkg/innerring/processors/governance/processor.go
index fa267eade..6daea417e 100644
--- a/pkg/innerring/processors/governance/processor.go
+++ b/pkg/innerring/processors/governance/processor.go
@@ -79,7 +79,6 @@ type (
metrics metrics.Register
pool *ants.Pool
frostfsClient FrostFSClient
- netmapClient NetmapClient
alphabetState AlphabetState
epochState EpochState
@@ -105,7 +104,6 @@ type (
MorphClient MorphClient
MainnetClient MainnetClient
FrostFSClient FrostFSClient
- NetmapClient NetmapClient
}
)
@@ -146,7 +144,6 @@ func New(p *Params) (*Processor, error) {
metrics: metricsRegister,
pool: pool,
frostfsClient: p.FrostFSClient,
- netmapClient: p.NetmapClient,
alphabetState: p.AlphabetState,
epochState: p.EpochState,
voter: p.Voter,
From e319bf403e7ddd24d9527829a9d5863643635ff8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:08:11 +0300
Subject: [PATCH 092/655] [#1388] apeSvc: Drop unused and make annotations
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 -
cmd/frostfs-node/object.go | 1 -
cmd/frostfs-node/policy_engine.go | 4 +++-
pkg/ape/chainbase/option.go | 10 ----------
pkg/services/object/ape/service.go | 6 +-----
5 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 63f410b89..0ffa8c45b 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1082,7 +1082,6 @@ func initAccessPolicyEngine(_ context.Context, c *cfg) {
localOverrideDB = chainbase.NewInmemoryLocalOverrideDatabase()
} else {
localOverrideDB = chainbase.NewBoltLocalOverrideDatabase(
- chainbase.WithLogger(c.log),
chainbase.WithPath(nodeconfig.PersistentPolicyRules(c.appCfg).Path()),
chainbase.WithPerm(nodeconfig.PersistentPolicyRules(c.appCfg).Perm()),
chainbase.WithNoSync(nodeconfig.PersistentPolicyRules(c.appCfg).NoSync()),
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 610e2c363..9d4e35ca8 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -473,7 +473,6 @@ func createACLServiceV2(c *cfg, apeSvc *objectAPE.Service, irFetcher *cachedIRFe
func createAPEService(c *cfg, splitSvc *objectService.TransportSplitter) *objectAPE.Service {
return objectAPE.NewService(
- c.log,
objectAPE.NewChecker(
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage(),
c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage(),
diff --git a/cmd/frostfs-node/policy_engine.go b/cmd/frostfs-node/policy_engine.go
index 22fda2b4c..55f76cc76 100644
--- a/cmd/frostfs-node/policy_engine.go
+++ b/cmd/frostfs-node/policy_engine.go
@@ -21,7 +21,9 @@ type accessPolicyEngine struct {
var _ engine.MorphRuleChainStorageReader = (*morphAPEChainCache)(nil)
type morphAPEChainCacheKey struct {
- name chain.Name
+ // nolint:unused
+ name chain.Name
+ // nolint:unused
target engine.Target
}
diff --git a/pkg/ape/chainbase/option.go b/pkg/ape/chainbase/option.go
index e547701fb..590b7a885 100644
--- a/pkg/ape/chainbase/option.go
+++ b/pkg/ape/chainbase/option.go
@@ -5,9 +5,7 @@ import (
"os"
"time"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"go.etcd.io/bbolt"
- "go.uber.org/zap"
)
type Option func(*cfg)
@@ -18,7 +16,6 @@ type cfg struct {
noSync bool
maxBatchDelay time.Duration
maxBatchSize int
- log *logger.Logger
}
func defaultCfg() *cfg {
@@ -26,7 +23,6 @@ func defaultCfg() *cfg {
perm: os.ModePerm,
maxBatchDelay: bbolt.DefaultMaxBatchDelay,
maxBatchSize: bbolt.DefaultMaxBatchSize,
- log: &logger.Logger{Logger: zap.L()},
}
}
@@ -59,9 +55,3 @@ func WithMaxBatchSize(maxBatchSize int) Option {
c.maxBatchSize = maxBatchSize
}
}
-
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
diff --git a/pkg/services/object/ape/service.go b/pkg/services/object/ape/service.go
index a1634e7c5..6eedaf99e 100644
--- a/pkg/services/object/ape/service.go
+++ b/pkg/services/object/ape/service.go
@@ -12,7 +12,6 @@ import (
objectSvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -24,8 +23,6 @@ import (
var errFailedToCastToRequestContext = errors.New("failed cast to RequestContext")
type Service struct {
- log *logger.Logger
-
apeChecker Checker
next objectSvc.ServiceServer
@@ -67,9 +64,8 @@ func NewStorageEngineHeaderProvider(e *engine.StorageEngine, s *getsvc.Service)
}
}
-func NewService(log *logger.Logger, apeChecker Checker, next objectSvc.ServiceServer) *Service {
+func NewService(apeChecker Checker, next objectSvc.ServiceServer) *Service {
return &Service{
- log: log,
apeChecker: apeChecker,
next: next,
}
From 580cd551807cea0ad2b9dfe9fbd21da0b55d6282 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:10:03 +0300
Subject: [PATCH 093/655] [#1388] getSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/get/assembleec.go | 2 +-
pkg/services/object/get/assemblerec.go | 3 ---
2 files changed, 1 insertion(+), 4 deletions(-)
diff --git a/pkg/services/object/get/assembleec.go b/pkg/services/object/get/assembleec.go
index a58602bf7..03f913bbf 100644
--- a/pkg/services/object/get/assembleec.go
+++ b/pkg/services/object/get/assembleec.go
@@ -43,7 +43,7 @@ func (r *request) assembleEC(ctx context.Context) {
}
r.prm.common = r.prm.common.WithLocalOnly(false)
- assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.containerSource, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
+ assembler := newAssemblerEC(r.address(), r.infoEC, r.ctxRange(), r, r.localStorage, r.log, r.headOnly(), r.traverserGenerator, r.curProcEpoch)
r.log.Debug(logs.GetAssemblingECObject,
zap.Uint64("range_offset", r.ctxRange().GetOffset()),
diff --git a/pkg/services/object/get/assemblerec.go b/pkg/services/object/get/assemblerec.go
index dde0d7dad..44d9af3a2 100644
--- a/pkg/services/object/get/assemblerec.go
+++ b/pkg/services/object/get/assemblerec.go
@@ -34,7 +34,6 @@ type assemblerec struct {
rng *objectSDK.Range
remoteStorage ecRemoteStorage
localStorage localStorage
- cs container.Source
log *logger.Logger
head bool
traverserGenerator traverserGenerator
@@ -47,7 +46,6 @@ func newAssemblerEC(
rng *objectSDK.Range,
remoteStorage ecRemoteStorage,
localStorage localStorage,
- cs container.Source,
log *logger.Logger,
head bool,
tg traverserGenerator,
@@ -59,7 +57,6 @@ func newAssemblerEC(
ecInfo: ecInfo,
remoteStorage: remoteStorage,
localStorage: localStorage,
- cs: cs,
log: log,
head: head,
traverserGenerator: tg,
From 63a567a1de8d40b87c5e0cfcb99235eb4079f059 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:10:38 +0300
Subject: [PATCH 094/655] [#1388] engine: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/control.go | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go
index 4778cf539..80fb3f9ed 100644
--- a/pkg/local_object_storage/engine/control.go
+++ b/pkg/local_object_storage/engine/control.go
@@ -249,23 +249,9 @@ func (e *StorageEngine) ResumeExecution() error {
}
type ReConfiguration struct {
- errorsThreshold uint32
- shardPoolSize uint32
-
shards map[string][]shard.Option // meta path -> shard opts
}
-// SetErrorsThreshold sets a size amount of errors after which
-// shard is moved to read-only mode.
-func (rCfg *ReConfiguration) SetErrorsThreshold(errorsThreshold uint32) {
- rCfg.errorsThreshold = errorsThreshold
-}
-
-// SetShardPoolSize sets a size of worker pool for each shard.
-func (rCfg *ReConfiguration) SetShardPoolSize(shardPoolSize uint32) {
- rCfg.shardPoolSize = shardPoolSize
-}
-
// AddShard adds a shard for the reconfiguration.
// Shard identifier is calculated from paths used in blobstor.
func (rCfg *ReConfiguration) AddShard(id string, opts []shard.Option) {
From 004ff9e9bf68174fbb64df6cbc81f98ced8755d3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:11:15 +0300
Subject: [PATCH 095/655] [#1388] blobstor: Drop unused
Signed-off-by: Dmitrii Stepanov
---
.../blobstor/memstore/control.go | 16 ++++++++--------
.../blobstor/memstore/memstore_test.go | 2 --
.../blobstor/memstore/option.go | 15 +--------------
3 files changed, 9 insertions(+), 24 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/memstore/control.go b/pkg/local_object_storage/blobstor/memstore/control.go
index 449d4352a..83da52eb7 100644
--- a/pkg/local_object_storage/blobstor/memstore/control.go
+++ b/pkg/local_object_storage/blobstor/memstore/control.go
@@ -10,11 +10,11 @@ func (s *memstoreImpl) Open(mod mode.ComponentMode) error {
return nil
}
-func (s *memstoreImpl) Init() error { return nil }
-func (s *memstoreImpl) Close() error { return nil }
-func (s *memstoreImpl) Type() string { return Type }
-func (s *memstoreImpl) Path() string { return s.rootPath }
-func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
-func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
-func (s *memstoreImpl) SetReportErrorFunc(f func(string, error)) { s.reportError = f }
-func (s *memstoreImpl) SetParentID(string) {}
+func (s *memstoreImpl) Init() error { return nil }
+func (s *memstoreImpl) Close() error { return nil }
+func (s *memstoreImpl) Type() string { return Type }
+func (s *memstoreImpl) Path() string { return s.rootPath }
+func (s *memstoreImpl) SetCompressor(cc *compression.Config) { s.compression = cc }
+func (s *memstoreImpl) Compressor() *compression.Config { return s.compression }
+func (s *memstoreImpl) SetReportErrorFunc(func(string, error)) {}
+func (s *memstoreImpl) SetParentID(string) {}
diff --git a/pkg/local_object_storage/blobstor/memstore/memstore_test.go b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
index 8d1480dff..dd130e5f9 100644
--- a/pkg/local_object_storage/blobstor/memstore/memstore_test.go
+++ b/pkg/local_object_storage/blobstor/memstore/memstore_test.go
@@ -8,7 +8,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/internal/blobstortest"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"github.com/stretchr/testify/require"
)
@@ -16,7 +15,6 @@ import (
func TestSimpleLifecycle(t *testing.T) {
s := New(
WithRootPath("memstore"),
- WithLogger(test.NewLogger(t)),
)
defer func() { require.NoError(t, s.Close()) }()
require.NoError(t, s.Open(mode.ComponentReadWrite))
diff --git a/pkg/local_object_storage/blobstor/memstore/option.go b/pkg/local_object_storage/blobstor/memstore/option.go
index 3d67b1e9c..97a03993d 100644
--- a/pkg/local_object_storage/blobstor/memstore/option.go
+++ b/pkg/local_object_storage/blobstor/memstore/option.go
@@ -2,33 +2,20 @@ package memstore
import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/compression"
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
- "go.uber.org/zap"
)
type cfg struct {
- log *logger.Logger
rootPath string
readOnly bool
compression *compression.Config
- reportError func(string, error)
}
func defaultConfig() *cfg {
- return &cfg{
- log: &logger.Logger{Logger: zap.L()},
- reportError: func(string, error) {},
- }
+ return &cfg{}
}
type Option func(*cfg)
-func WithLogger(l *logger.Logger) Option {
- return func(c *cfg) {
- c.log = l
- }
-}
-
func WithRootPath(p string) Option {
return func(c *cfg) {
c.rootPath = p
From 401c398704f15c1d516fbcc04f842d9d3fb8c2d3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:01 +0300
Subject: [PATCH 096/655] [#1388] metabase: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete.go | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index e5e9840a0..4ad11164f 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -77,8 +77,6 @@ func (p *DeletePrm) SetAddresses(addrs ...oid.Address) {
type referenceNumber struct {
all, cur int
- addr oid.Address
-
obj *objectSDK.Object
}
@@ -295,9 +293,8 @@ func (db *DB) delete(tx *bbolt.Tx, addr oid.Address, refCounter referenceCounter
nRef, ok := refCounter[k]
if !ok {
nRef = &referenceNumber{
- all: parentLength(tx, parAddr),
- addr: parAddr,
- obj: parent,
+ all: parentLength(tx, parAddr),
+ obj: parent,
}
refCounter[k] = nRef
From d1d6e3471c2e902c29480a091545f09c4daaf335 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:32 +0300
Subject: [PATCH 097/655] [#1388] signSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/sign.go | 3 ---
1 file changed, 3 deletions(-)
diff --git a/pkg/services/object/sign.go b/pkg/services/object/sign.go
index 35367aafe..f5ae97b62 100644
--- a/pkg/services/object/sign.go
+++ b/pkg/services/object/sign.go
@@ -10,8 +10,6 @@ import (
)
type SignService struct {
- key *ecdsa.PrivateKey
-
sigSvc *util.SignService
svc ServiceServer
@@ -48,7 +46,6 @@ type getRangeStreamSigner struct {
func NewSignService(key *ecdsa.PrivateKey, svc ServiceServer) *SignService {
return &SignService{
- key: key,
sigSvc: util.NewUnarySignService(key),
svc: svc,
}
From bdd57c8b6b03f78ed74c31db41f5bbd0f3c84beb Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:12:54 +0300
Subject: [PATCH 098/655] [#1388] sessionSvc: Add nolint annotations
Used as map key.
Signed-off-by: Dmitrii Stepanov
---
pkg/services/session/storage/temporary/storage.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pkg/services/session/storage/temporary/storage.go b/pkg/services/session/storage/temporary/storage.go
index ee93dee71..9ae9db9dc 100644
--- a/pkg/services/session/storage/temporary/storage.go
+++ b/pkg/services/session/storage/temporary/storage.go
@@ -9,7 +9,9 @@ import (
)
type key struct {
+ // nolint:unused
tokenID string
+ // nolint:unused
ownerID string
}
From a2ab6d4942046c3bca59addd2b73ce3b58251b84 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 09:13:27 +0300
Subject: [PATCH 099/655] [#1388] node: Drop unused
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 -
cmd/frostfs-node/container.go | 6 ------
cmd/frostfs-node/netmap.go | 1 -
3 files changed, 8 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 0ffa8c45b..c625b575f 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -602,7 +602,6 @@ type cfgNetmap struct {
needBootstrap bool
reBoostrapTurnedOff *atomic.Bool // managed by control service in runtime
- startEpoch uint64 // epoch number when application is started
}
type cfgNodeInfo struct {
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 5a29aac76..6733140d2 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -128,9 +128,6 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = client
cnrRdr.eacl = c.cfgObject.eaclSource
cnrRdr.src = c.cfgObject.cnrSource
-
- cnrWrt.cacheEnabled = true
- cnrWrt.eacls = cachedEACLStorage
}
return cnrRdr, cnrWrt
@@ -247,9 +244,6 @@ func (x *morphContainerReader) ContainersOf(id *user.ID) ([]cid.ID, error) {
type morphContainerWriter struct {
neoClient *cntClient.Client
-
- cacheEnabled bool
- eacls ttlEACLStorage
}
func (m morphContainerWriter) Put(cnr containerCore.Container) (*cid.ID, error) {
diff --git a/cmd/frostfs-node/netmap.go b/cmd/frostfs-node/netmap.go
index c0b87492c..5e4585f85 100644
--- a/cmd/frostfs-node/netmap.go
+++ b/cmd/frostfs-node/netmap.go
@@ -259,7 +259,6 @@ func initNetmapState(c *cfg) {
}
c.cfgNetmap.state.setCurrentEpoch(epoch)
- c.cfgNetmap.startEpoch = epoch
c.setContractNodeInfo(ni)
}
From 29e4cf7ba1c88552172bdbb19dade34ea9ff5ba2 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 23 Sep 2024 10:51:05 +0300
Subject: [PATCH 100/655] [#1388] ir: Annotate cmode as nolint
Signed-off-by: Dmitrii Stepanov
---
pkg/innerring/innerring.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go
index 50a37845b..53a07e36c 100644
--- a/pkg/innerring/innerring.go
+++ b/pkg/innerring/innerring.go
@@ -103,6 +103,8 @@ type (
// to the application.
runners []func(chan<- error) error
+ // cmode used for upgrade scenario.
+ // nolint:unused
cmode *atomic.Bool
}
From 4fbfffd44c4e0f4aa7bc88052eff8400a0421f7c Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 24 Sep 2024 12:13:11 +0300
Subject: [PATCH 101/655] [#1388] putSvc: Drop unused
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/put/prm.go | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/pkg/services/object/put/prm.go b/pkg/services/object/put/prm.go
index 0c8f12b45..52a7c102c 100644
--- a/pkg/services/object/put/prm.go
+++ b/pkg/services/object/put/prm.go
@@ -2,7 +2,6 @@ package putsvc
import (
"context"
- "crypto/ecdsa"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -21,8 +20,6 @@ type PutInitPrm struct {
traverseOpts []placement.Option
relay func(context.Context, client.NodeInfo, client.MultiAddressClient) error
-
- privateKey *ecdsa.PrivateKey
}
type PutChunkPrm struct {
@@ -68,11 +65,3 @@ func (p *PutChunkPrm) WithChunk(v []byte) *PutChunkPrm {
return p
}
-
-func (p *PutInitPrm) WithPrivateKey(v *ecdsa.PrivateKey) *PutInitPrm {
- if p != nil {
- p.privateKey = v
- }
-
- return p
-}
From 772b471aab53774e1d2cf11ae7db28166a47ec45 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 24 Sep 2024 15:58:52 +0300
Subject: [PATCH 102/655] [#1388] lens: Add nolint annotations
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-lens/internal/schema/common/raw.go | 2 ++
cmd/frostfs-lens/internal/schema/writecache/types.go | 2 ++
2 files changed, 4 insertions(+)
diff --git a/cmd/frostfs-lens/internal/schema/common/raw.go b/cmd/frostfs-lens/internal/schema/common/raw.go
index 0990e24c3..55051554c 100644
--- a/cmd/frostfs-lens/internal/schema/common/raw.go
+++ b/cmd/frostfs-lens/internal/schema/common/raw.go
@@ -7,6 +7,8 @@ import (
)
type RawEntry struct {
+ // key and value used for record dump.
+ // nolint:unused
key, value []byte
}
diff --git a/cmd/frostfs-lens/internal/schema/writecache/types.go b/cmd/frostfs-lens/internal/schema/writecache/types.go
index 3f71c5366..11e6f3fcd 100644
--- a/cmd/frostfs-lens/internal/schema/writecache/types.go
+++ b/cmd/frostfs-lens/internal/schema/writecache/types.go
@@ -16,6 +16,8 @@ type (
DefaultRecord struct {
addr oid.Address
+ // data used for record dump.
+ // nolint:unused
data []byte
}
)
From a5e1aa22c963fe612d6d2d3316ee7ca0482f0d09 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 25 Sep 2024 17:15:03 +0300
Subject: [PATCH 103/655] [#1394] putSvc: Fix relay
Signed-off-by: Dmitrii Stepanov
---
pkg/services/object/common/target/target.go | 9 +++++----
pkg/services/object/patch/streamer.go | 2 +-
pkg/services/object/put/streamer.go | 2 +-
3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index 980c4c6bd..a2d6b4d39 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -13,16 +13,16 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)
-func New(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+func New(prm objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
// prepare needed put parameters
- if err := preparePrm(prm); err != nil {
+ if err := preparePrm(&prm); err != nil {
return nil, fmt.Errorf("could not prepare put parameters: %w", err)
}
if prm.Header.Signature() != nil {
- return newUntrustedTarget(prm)
+ return newUntrustedTarget(&prm)
}
- return newTrustedTarget(prm)
+ return newTrustedTarget(&prm)
}
func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
@@ -49,6 +49,7 @@ func newUntrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWrit
}
func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter, error) {
+ prm.Relay = nil // do not relay request without signature
maxPayloadSz := prm.Config.MaxSizeSrc.MaxObjectSize()
if maxPayloadSz == 0 {
return nil, errors.New("could not obtain max object size parameter")
diff --git a/pkg/services/object/patch/streamer.go b/pkg/services/object/patch/streamer.go
index 73def8c7c..c8ed6fdbf 100644
--- a/pkg/services/object/patch/streamer.go
+++ b/pkg/services/object/patch/streamer.go
@@ -112,7 +112,7 @@ func (s *Streamer) init(ctx context.Context, req *objectV2.PatchRequest) error {
}
oV2.GetHeader().SetOwnerID(ownerID)
- target, err := target.New(&objectwriter.Params{
+ target, err := target.New(objectwriter.Params{
Config: s.Config,
Common: commonPrm,
Header: objectSDK.NewFromV2(oV2),
diff --git a/pkg/services/object/put/streamer.go b/pkg/services/object/put/streamer.go
index d08e7fafa..f71309d31 100644
--- a/pkg/services/object/put/streamer.go
+++ b/pkg/services/object/put/streamer.go
@@ -26,7 +26,7 @@ func (p *Streamer) Init(ctx context.Context, prm *PutInitPrm) error {
}
// initialize destination target
- prmTarget := &objectwriter.Params{
+ prmTarget := objectwriter.Params{
Config: p.Config,
Common: prm.common,
Header: prm.hdr,
From 5f22ba6f380fd9d41be070f000b10cc4432981b9 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Fri, 27 Sep 2024 13:45:57 +0300
Subject: [PATCH 104/655] [#1397] object: Correctly set namespace before APE
check
Signed-off-by: Airat Arifullin
---
pkg/services/object/ape/checker.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/ape/checker.go b/pkg/services/object/ape/checker.go
index 3688638d0..3f6cc7c20 100644
--- a/pkg/services/object/ape/checker.go
+++ b/pkg/services/object/ape/checker.go
@@ -108,7 +108,7 @@ func (c *checkerImpl) CheckAPE(ctx context.Context, prm Prm) error {
return c.checkerCore.CheckAPE(checkercore.CheckPrm{
Request: r,
PublicKey: pub,
- Namespace: prm.Method,
+ Namespace: prm.Namespace,
Container: prm.Container,
ContainerOwner: prm.ContainerOwner,
BearerToken: prm.BearerToken,
From d0ed29b3c73626f6bf881090f86bdc834d81acc1 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 3 Sep 2024 15:42:38 +0300
Subject: [PATCH 105/655] [#1350] node: Add ability to evacuate objects from
`REP 1` only
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-cli/modules/control/evacuation.go | 4 +
docs/evacuation.md | 7 +-
pkg/local_object_storage/engine/evacuate.go | 39 +++-
.../engine/evacuate_test.go | 181 +++++++++++++++++-
pkg/local_object_storage/metabase/list.go | 51 +++++
pkg/local_object_storage/shard/list.go | 30 ++-
pkg/services/control/server/evacuate_async.go | 1 +
pkg/services/control/service.proto | 2 +
pkg/services/control/service_frostfs.pb.go | 31 +++
9 files changed, 340 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-cli/modules/control/evacuation.go b/cmd/frostfs-cli/modules/control/evacuation.go
index 04a67e5b5..fffc5e33e 100644
--- a/cmd/frostfs-cli/modules/control/evacuation.go
+++ b/cmd/frostfs-cli/modules/control/evacuation.go
@@ -20,6 +20,7 @@ const (
awaitFlag = "await"
noProgressFlag = "no-progress"
scopeFlag = "scope"
+ repOneOnlyFlag = "rep-one-only"
containerWorkerCountFlag = "container-worker-count"
objectWorkerCountFlag = "object-worker-count"
@@ -69,6 +70,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
ignoreErrors, _ := cmd.Flags().GetBool(ignoreErrorsFlag)
containerWorkerCount, _ := cmd.Flags().GetUint32(containerWorkerCountFlag)
objectWorkerCount, _ := cmd.Flags().GetUint32(objectWorkerCountFlag)
+ repOneOnly, _ := cmd.Flags().GetBool(repOneOnlyFlag)
req := &control.StartShardEvacuationRequest{
Body: &control.StartShardEvacuationRequest_Body{
@@ -77,6 +79,7 @@ func startEvacuateShard(cmd *cobra.Command, _ []string) {
Scope: getEvacuationScope(cmd),
ContainerWorkerCount: containerWorkerCount,
ObjectWorkerCount: objectWorkerCount,
+ RepOneOnly: repOneOnly,
},
}
@@ -380,6 +383,7 @@ func initControlStartEvacuationShardCmd() {
flags.Bool(noProgressFlag, false, fmt.Sprintf("Print progress if %s provided", awaitFlag))
flags.Uint32(containerWorkerCountFlag, 0, "Count of concurrent container evacuation workers")
flags.Uint32(objectWorkerCountFlag, 0, "Count of concurrent object evacuation workers")
+ flags.Bool(repOneOnlyFlag, false, "Evacuate objects only from containers with policy 'REP 1 ...'")
startEvacuationShardCmd.MarkFlagsMutuallyExclusive(shardIDFlag, shardAllFlag)
}
diff --git a/docs/evacuation.md b/docs/evacuation.md
index 885ce169a..d47d56d15 100644
--- a/docs/evacuation.md
+++ b/docs/evacuation.md
@@ -20,7 +20,12 @@ Because it is necessary to prevent removing by policer objects with policy `REP
## Commands
-`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag. By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
+`frostfs-cli control shards evacuation start` starts evacuation process for shards specified. To start evacuating all node shards, use the `--all` flag.
+By default, objects and trees are evacuated. To limit the evacuation scope, use `--scope` flag (possible values are `all`, `trees`, `objects`).
+To evacuate objects only from containers with policy `REP 1` use option `--rep-one-only`.
+To adjust resource consumption required for evacuation use options:
+ - `--container-worker-count` count of concurrent container evacuation workers
+ - `--object-worker-count` count of concurrent object evacuation workers
`frostfs-cli control shards evacuation stop` stops running evacuation process.
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 3db556a8f..a618ff274 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -9,6 +9,7 @@ import (
"sync/atomic"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
@@ -16,6 +17,7 @@ import (
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -88,6 +90,7 @@ type EvacuateShardPrm struct {
IgnoreErrors bool
Async bool
Scope EvacuateScope
+ RepOneOnly bool
ContainerWorkerCount uint32
ObjectWorkerCount uint32
@@ -288,6 +291,7 @@ func (e *StorageEngine) evacuateShards(ctx context.Context, shardIDs []string, p
attribute.Bool("async", prm.Async),
attribute.Bool("ignoreErrors", prm.IgnoreErrors),
attribute.Stringer("scope", prm.Scope),
+ attribute.Bool("repOneOnly", prm.RepOneOnly),
))
defer func() {
@@ -430,13 +434,34 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
) error {
sh := shardsToEvacuate[shardID]
var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, name []byte, _ cid.ID) error {
+ cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error {
select {
case <-ctx.Done():
return context.Cause(ctx)
default:
}
egContainer.Go(func() error {
+ var skip bool
+ c, err := e.containerSource.Load().cs.Get(cnt)
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ skip = true
+ } else {
+ return err
+ }
+ }
+ if !skip && prm.RepOneOnly {
+ skip = e.isNotRepOne(c)
+ }
+ if skip {
+ countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name}
+ count, err := sh.CountAliveObjectsInBucket(ctx, countPrm)
+ if err != nil {
+ return err
+ }
+ res.objSkipped.Add(count)
+ return nil
+ }
var objPrm shard.IterateOverObjectsInContainerPrm
objPrm.BucketName = name
objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
@@ -454,7 +479,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
})
return nil
}
- err := sh.IterateOverObjectsInContainer(ctx, objPrm)
+ err = sh.IterateOverObjectsInContainer(ctx, objPrm)
if err != nil {
cancel(err)
}
@@ -781,6 +806,16 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
return nil
}
+func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
+ p := c.Value.PlacementPolicy()
+ for i := range p.NumberOfReplicas() {
+ if p.ReplicaDescriptor(i).NumberOfObjects() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
) (bool, error) {
diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go
index f72333399..8498c9245 100644
--- a/pkg/local_object_storage/engine/evacuate_test.go
+++ b/pkg/local_object_storage/engine/evacuate_test.go
@@ -11,6 +11,7 @@ import (
"testing"
"time"
+ coreContainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/fstree"
@@ -20,14 +21,38 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
)
+type containerStorage struct {
+ cntmap map[cid.ID]*container.Container
+ latency time.Duration
+}
+
+func (cs *containerStorage) Get(id cid.ID) (*coreContainer.Container, error) {
+ time.Sleep(cs.latency)
+ v, ok := cs.cntmap[id]
+ if !ok {
+ return nil, new(apistatus.ContainerNotFound)
+ }
+ coreCnt := coreContainer.Container{
+ Value: *v,
+ }
+ return &coreCnt, nil
+}
+
+func (cs *containerStorage) DeletionInfo(cid.ID) (*coreContainer.DelInfo, error) {
+ return nil, nil
+}
+
func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEngine, []*shard.ID, []*objectSDK.Object) {
dir := t.TempDir()
@@ -61,10 +86,15 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
{Key: pilorama.AttributeVersion, Value: []byte("XXX")},
{Key: pilorama.AttributeFilename, Value: []byte("file.txt")},
}
-
+ cnrMap := make(map[cid.ID]*container.Container)
for _, sh := range ids {
- for range objPerShard {
+ for i := range objPerShard {
+ // Create dummy container
+ cnr1 := container.Container{}
+ cnr1.SetAttribute("cnr", "cnr"+strconv.Itoa(i))
contID := cidtest.ID()
+ cnrMap[contID] = &cnr1
+
obj := testutil.GenerateObjectWithCID(contID)
objects = append(objects, obj)
@@ -78,6 +108,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng
require.NoError(t, err)
}
}
+ e.SetContainerSource(&containerStorage{cntmap: cnrMap})
return e, ids, objects
}
@@ -177,7 +208,10 @@ func TestEvacuateObjectsNetwork(t *testing.T) {
acceptOneOf := func(objects []*objectSDK.Object, max uint64) func(context.Context, oid.Address, *objectSDK.Object) (bool, error) {
var n atomic.Uint64
+ var mtx sync.Mutex
return func(_ context.Context, addr oid.Address, obj *objectSDK.Object) (bool, error) {
+ mtx.Lock()
+ defer mtx.Unlock()
if n.Load() == max {
return false, errReplication
}
@@ -640,3 +674,146 @@ func TestEvacuateTreesRemote(t *testing.T) {
require.Equal(t, expectedTreeOps, evacuatedTreeOps)
}
+
+func TestEvacuateShardObjectsRepOneOnly(t *testing.T) {
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ // Create container with policy REP 2
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ x1 = netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(1)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("cnr", "cnr1")
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+
+ // Create container with policy REP 1
+ cnr2 := container.Container{}
+ p2 := netmap.PlacementPolicy{}
+ p2.SetContainerBackupFactor(1)
+ x2 := netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ x2 = netmap.ReplicaDescriptor{}
+ x2.SetNumberOfObjects(1)
+ p2.AddReplicas(x2)
+ cnr2.SetPlacementPolicy(p2)
+ cnr2.SetAttribute("cnr", "cnr2")
+
+ var idCnr2 cid.ID
+ container.CalculateID(&idCnr2, cnr2)
+ cnrmap[idCnr2] = &cnr2
+ cids = append(cids, idCnr2)
+
+ // Create container for simulate removing
+ cnr3 := container.Container{}
+ p3 := netmap.PlacementPolicy{}
+ p3.SetContainerBackupFactor(1)
+ x3 := netmap.ReplicaDescriptor{}
+ x3.SetNumberOfObjects(1)
+ p3.AddReplicas(x3)
+ cnr3.SetPlacementPolicy(p3)
+ cnr3.SetAttribute("cnr", "cnr3")
+
+ var idCnr3 cid.ID
+ container.CalculateID(&idCnr3, cnr3)
+ cids = append(cids, idCnr3)
+
+ e.SetContainerSource(&containerStorage{cntmap: cnrmap})
+
+ for _, sh := range ids {
+ for j := range 3 {
+ for range 4 {
+ obj := testutil.GenerateObjectWithCID(cids[j])
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[sh.String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+
+ res, err := e.Evacuate(context.Background(), prm)
+ require.NoError(t, err)
+ require.Equal(t, uint64(4), res.ObjectsEvacuated())
+ require.Equal(t, uint64(8), res.ObjectsSkipped())
+ require.Equal(t, uint64(0), res.ObjectsFailed())
+}
+
+func TestEvacuateShardObjectsRepOneOnlyBench(t *testing.T) {
+ t.Skip()
+ e, ids, _ := newEngineEvacuate(t, 2, 0)
+ defer func() {
+ require.NoError(t, e.Close(context.Background()))
+ }()
+
+ cnrmap := make(map[cid.ID]*container.Container)
+ var cids []cid.ID
+ // Create containers with policy REP 1
+ for i := range 10_000 {
+ cnr1 := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetNumberOfObjects(2)
+ p1.AddReplicas(x1)
+ cnr1.SetPlacementPolicy(p1)
+ cnr1.SetAttribute("i", strconv.Itoa(i))
+
+ var idCnr1 cid.ID
+ container.CalculateID(&idCnr1, cnr1)
+
+ cnrmap[idCnr1] = &cnr1
+ cids = append(cids, idCnr1)
+ }
+
+ e.SetContainerSource(&containerStorage{
+ cntmap: cnrmap,
+ latency: time.Millisecond * 100,
+ })
+
+ for _, cnt := range cids {
+ for range 1 {
+ obj := testutil.GenerateObjectWithCID(cnt)
+ var putPrm shard.PutPrm
+ putPrm.SetObject(obj)
+ _, err := e.shards[ids[0].String()].Put(context.Background(), putPrm)
+ require.NoError(t, err)
+ }
+ }
+
+ var prm EvacuateShardPrm
+ prm.ShardID = ids[0:1]
+ prm.Scope = EvacuateScopeObjects
+ prm.RepOneOnly = true
+ prm.ContainerWorkerCount = 10
+
+ require.NoError(t, e.shards[ids[0].String()].SetMode(mode.ReadOnly))
+
+ start := time.Now()
+ _, err := e.Evacuate(context.Background(), prm)
+ t.Logf("evacuate took %v\n", time.Since(start))
+ require.NoError(t, err)
+}
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 5943be7f4..44f25246e 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -76,6 +76,12 @@ type IterateOverObjectsInContainerPrm struct {
Handler func(context.Context, *objectcore.Info) error
}
+// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
+type CountAliveObjectsInBucketPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+}
+
// ListWithCursor lists physical objects available in metabase starting from
// cursor. Includes objects of all types. Does not include inhumed objects.
// Use cursor value from response for consecutive requests.
@@ -426,3 +432,48 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
}
return nil
}
+
+// CountAliveObjectsInBucket counts objects in the bucket which aren't in the graveyard or garbage.
+func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+ var (
+ startedAt = time.Now()
+ success = false
+ )
+ defer func() {
+ db.metrics.AddMethodDuration("CountAliveObjectsInBucket", time.Since(startedAt), success)
+ }()
+ _, span := tracing.StartSpanFromContext(ctx, "metabase.CountAliveObjectsInBucket")
+ defer span.End()
+
+ db.modeMtx.RLock()
+ defer db.modeMtx.RUnlock()
+
+ if db.mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ cidRaw := prm.BucketName[1:bucketKeySize]
+ if cidRaw == nil {
+ return 0, nil
+ }
+ var count uint64
+ err := db.boltDB.View(func(tx *bbolt.Tx) error {
+ bkt := tx.Bucket(prm.BucketName)
+ if bkt == nil {
+ return nil
+ }
+ graveyardBkt := tx.Bucket(graveyardBucketName)
+ garbageBkt := tx.Bucket(garbageBucketName)
+ c := bkt.Cursor()
+ k, _ := c.First()
+ for ; k != nil; k, _ = c.Next() {
+ if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ continue
+ }
+ count++
+ }
+ return nil
+ })
+ success = err == nil
+ return count, metaerr.Wrap(err)
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index 9f56ec750..f5d633b77 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -44,10 +44,16 @@ type IterateOverContainersPrm struct {
type IterateOverObjectsInContainerPrm struct {
// BucketName container's bucket name.
BucketName []byte
- // Handler function executed upon containers in db.
+ // Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
+// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
+type CountAliveObjectsInBucketPrm struct {
+ // BucketName container's bucket name.
+ BucketName []byte
+}
+
// ListWithCursorPrm contains parameters for ListWithCursor operation.
type ListWithCursorPrm struct {
count uint32
@@ -229,3 +235,25 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return nil
}
+
+// CountAliveObjectsInBucket counts objects in the bucket which aren't in the graveyard or garbage.
+func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+ _, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
+ defer span.End()
+
+ s.m.RLock()
+ defer s.m.RUnlock()
+
+ if s.info.Mode.NoMetabase() {
+ return 0, ErrDegradedMode
+ }
+
+ var metaPrm meta.CountAliveObjectsInBucketPrm
+ metaPrm.BucketName = prm.BucketName
+ count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm)
+ if err != nil {
+ return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
+ }
+
+ return count, nil
+}
diff --git a/pkg/services/control/server/evacuate_async.go b/pkg/services/control/server/evacuate_async.go
index bdc6f7c38..146ac7e16 100644
--- a/pkg/services/control/server/evacuate_async.go
+++ b/pkg/services/control/server/evacuate_async.go
@@ -31,6 +31,7 @@ func (s *Server) StartShardEvacuation(ctx context.Context, req *control.StartSha
Scope: engine.EvacuateScope(req.GetBody().GetScope()),
ContainerWorkerCount: req.GetBody().GetContainerWorkerCount(),
ObjectWorkerCount: req.GetBody().GetObjectWorkerCount(),
+ RepOneOnly: req.GetBody().GetRepOneOnly(),
}
_, err = s.s.Evacuate(ctx, prm)
diff --git a/pkg/services/control/service.proto b/pkg/services/control/service.proto
index 88a06de22..ae1939e13 100644
--- a/pkg/services/control/service.proto
+++ b/pkg/services/control/service.proto
@@ -398,6 +398,8 @@ message StartShardEvacuationRequest {
uint32 container_worker_count = 4;
// Count of concurrent object evacuation workers.
uint32 object_worker_count = 5;
+ // Choose for evacuation objects in `REP 1` containers only.
+ bool rep_one_only = 6;
}
Body body = 1;
diff --git a/pkg/services/control/service_frostfs.pb.go b/pkg/services/control/service_frostfs.pb.go
index e92a8acd1..e16f082b1 100644
--- a/pkg/services/control/service_frostfs.pb.go
+++ b/pkg/services/control/service_frostfs.pb.go
@@ -6516,6 +6516,7 @@ type StartShardEvacuationRequest_Body struct {
Scope uint32 `json:"scope"`
ContainerWorkerCount uint32 `json:"containerWorkerCount"`
ObjectWorkerCount uint32 `json:"objectWorkerCount"`
+ RepOneOnly bool `json:"repOneOnly"`
}
var (
@@ -6537,6 +6538,7 @@ func (x *StartShardEvacuationRequest_Body) StableSize() (size int) {
size += proto.UInt32Size(3, x.Scope)
size += proto.UInt32Size(4, x.ContainerWorkerCount)
size += proto.UInt32Size(5, x.ObjectWorkerCount)
+ size += proto.BoolSize(6, x.RepOneOnly)
return size
}
@@ -6568,6 +6570,9 @@ func (x *StartShardEvacuationRequest_Body) EmitProtobuf(mm *easyproto.MessageMar
if x.ObjectWorkerCount != 0 {
mm.AppendUint32(5, x.ObjectWorkerCount)
}
+ if x.RepOneOnly {
+ mm.AppendBool(6, x.RepOneOnly)
+ }
}
// UnmarshalProtobuf implements the encoding.ProtoUnmarshaler interface.
@@ -6609,6 +6614,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalProtobuf(src []byte) (err er
return fmt.Errorf("cannot unmarshal field %s", "ObjectWorkerCount")
}
x.ObjectWorkerCount = data
+ case 6: // RepOneOnly
+ data, ok := fc.Bool()
+ if !ok {
+ return fmt.Errorf("cannot unmarshal field %s", "RepOneOnly")
+ }
+ x.RepOneOnly = data
}
}
return nil
@@ -6658,6 +6669,15 @@ func (x *StartShardEvacuationRequest_Body) GetObjectWorkerCount() uint32 {
func (x *StartShardEvacuationRequest_Body) SetObjectWorkerCount(v uint32) {
x.ObjectWorkerCount = v
}
+func (x *StartShardEvacuationRequest_Body) GetRepOneOnly() bool {
+ if x != nil {
+ return x.RepOneOnly
+ }
+ return false
+}
+func (x *StartShardEvacuationRequest_Body) SetRepOneOnly(v bool) {
+ x.RepOneOnly = v
+}
// MarshalJSON implements the json.Marshaler interface.
func (x *StartShardEvacuationRequest_Body) MarshalJSON() ([]byte, error) {
@@ -6703,6 +6723,11 @@ func (x *StartShardEvacuationRequest_Body) MarshalEasyJSON(out *jwriter.Writer)
out.RawString(prefix)
out.Uint32(x.ObjectWorkerCount)
}
+ {
+ const prefix string = ",\"repOneOnly\":"
+ out.RawString(prefix)
+ out.Bool(x.RepOneOnly)
+ }
out.RawByte('}')
}
@@ -6768,6 +6793,12 @@ func (x *StartShardEvacuationRequest_Body) UnmarshalEasyJSON(in *jlexer.Lexer) {
f = in.Uint32()
x.ObjectWorkerCount = f
}
+ case "repOneOnly":
+ {
+ var f bool
+ f = in.Bool()
+ x.RepOneOnly = f
+ }
}
in.WantComma()
}
From 7f8a1dcf8e238a08af84a1ef9e180541f783b71f Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Mon, 30 Sep 2024 14:15:13 +0300
Subject: [PATCH 106/655] [#1400] adm: Support flag `alphabet-wallets` for
commands `proxy-add/remove-account`
Signed-off-by: Anton Nikiforov
---
cmd/frostfs-adm/internal/modules/morph/proxy/root.go | 2 ++
1 file changed, 2 insertions(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
index 082bc57d1..1854c8d2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/proxy/root.go
@@ -30,11 +30,13 @@ var (
func initProxyAddAccount() {
AddAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
AddAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ AddAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func initProxyRemoveAccount() {
RemoveAccountCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
RemoveAccountCmd.Flags().String(accountAddressFlag, "", "Wallet address string")
+ RemoveAccountCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
}
func init() {
From a13219808a42f30839fe87ba3ea88a8fdd54f0ac Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 27 Sep 2024 12:39:43 +0300
Subject: [PATCH 107/655] [#1375] node: Configure of the container cache size
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-node/cache.go | 8 ++--
cmd/frostfs-node/config.go | 2 +
cmd/frostfs-node/config/morph/config.go | 15 ++++++
cmd/frostfs-node/container.go | 63 +++++++++++++------------
cmd/frostfs-node/morph.go | 1 +
config/example/node.yaml | 1 +
6 files changed, 55 insertions(+), 35 deletions(-)
diff --git a/cmd/frostfs-node/cache.go b/cmd/frostfs-node/cache.go
index 57f65d873..06142a46c 100644
--- a/cmd/frostfs-node/cache.go
+++ b/cmd/frostfs-node/cache.go
@@ -165,13 +165,11 @@ type ttlContainerStorage struct {
delInfoCache *ttlNetCache[cid.ID, *container.DelInfo]
}
-func newCachedContainerStorage(v container.Source, ttl time.Duration) ttlContainerStorage {
- const containerCacheSize = 100
-
- lruCnrCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.Container, error) {
+func newCachedContainerStorage(v container.Source, ttl time.Duration, containerCacheSize uint32) ttlContainerStorage {
+ lruCnrCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.Container, error) {
return v.Get(id)
}, metrics.NewCacheMetrics("container"))
- lruDelInfoCache := newNetworkTTLCache(containerCacheSize, ttl, func(id cid.ID) (*container.DelInfo, error) {
+ lruDelInfoCache := newNetworkTTLCache(int(containerCacheSize), ttl, func(id cid.ID) (*container.DelInfo, error) {
return v.DeletionInfo(id)
}, metrics.NewCacheMetrics("container_deletion_info"))
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index c625b575f..58a96879f 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -570,6 +570,8 @@ type cfgMorph struct {
// TTL of Sidechain cached values. Non-positive value disables caching.
cacheTTL time.Duration
+ containerCacheSize uint32
+
proxyScriptHash neogoutil.Uint160
}
diff --git a/cmd/frostfs-node/config/morph/config.go b/cmd/frostfs-node/config/morph/config.go
index 1c536a0e2..d089870ea 100644
--- a/cmd/frostfs-node/config/morph/config.go
+++ b/cmd/frostfs-node/config/morph/config.go
@@ -30,6 +30,9 @@ const (
// FrostfsIDCacheSizeDefault is a default value of APE chain cache.
FrostfsIDCacheSizeDefault = 10_000
+
+ // ContainerCacheSizeDefault represents the default size for the container cache.
+ ContainerCacheSizeDefault = 100
)
var errNoMorphEndpoints = errors.New("no morph chain RPC endpoints, see `morph.rpc_endpoint` section")
@@ -103,6 +106,18 @@ func CacheTTL(c *config.Config) time.Duration {
return CacheTTLDefault
}
+// ContainerCacheSize returns the value of "container_cache_size" config parameter
+// from "morph" section.
+//
+// Returns 0 if the value is not positive integer.
+// Returns ContainerCacheSizeDefault if the value is missing.
+func ContainerCacheSize(c *config.Config) uint32 {
+ if c.Sub(subsection).Value("container_cache_size") == nil {
+ return ContainerCacheSizeDefault
+ }
+ return config.Uint32Safe(c.Sub(subsection), "container_cache_size")
+}
+
// SwitchInterval returns the value of "switch_interval" config parameter
// from "morph" section.
//
diff --git a/cmd/frostfs-node/container.go b/cmd/frostfs-node/container.go
index 6733140d2..729fcb8af 100644
--- a/cmd/frostfs-node/container.go
+++ b/cmd/frostfs-node/container.go
@@ -87,43 +87,46 @@ func configureEACLAndContainerSources(c *cfg, client *cntClient.Client, cnrSrc c
cnrRdr.lister = client
} else {
// use RPC node as source of Container contract items (with caching)
- cachedContainerStorage := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL)
- cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
+ c.cfgObject.cnrSource = cnrSrc
+ if c.cfgMorph.containerCacheSize > 0 {
+ containerCache := newCachedContainerStorage(cnrSrc, c.cfgMorph.cacheTTL, c.cfgMorph.containerCacheSize)
- subscribeToContainerCreation(c, func(e event.Event) {
- ev := e.(containerEvent.PutSuccess)
+ subscribeToContainerCreation(c, func(e event.Event) {
+ ev := e.(containerEvent.PutSuccess)
- // read owner of the created container in order to update the reading cache.
- // TODO: use owner directly from the event after neofs-contract#256 will become resolved
- // but don't forget about the profit of reading the new container and caching it:
- // creation success are most commonly tracked by polling GET op.
- cnr, err := cnrSrc.Get(ev.ID)
- if err == nil {
- cachedContainerStorage.containerCache.set(ev.ID, cnr, nil)
- } else {
- // unlike removal, we expect successful receive of the container
- // after successful creation, so logging can be useful
- c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ // read owner of the created container in order to update the reading cache.
+ // TODO: use owner directly from the event after neofs-contract#256 will become resolved
+ // but don't forget about the profit of reading the new container and caching it:
+ // creation success are most commonly tracked by polling GET op.
+ cnr, err := cnrSrc.Get(ev.ID)
+ if err == nil {
+ containerCache.containerCache.set(ev.ID, cnr, nil)
+ } else {
+ // unlike removal, we expect successful receive of the container
+ // after successful creation, so logging can be useful
+ c.log.Error(logs.FrostFSNodeReadNewlyCreatedContainerAfterTheNotification,
+ zap.Stringer("id", ev.ID),
+ zap.Error(err),
+ )
+ }
+
+ c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
zap.Stringer("id", ev.ID),
- zap.Error(err),
)
- }
+ })
- c.log.Debug(logs.FrostFSNodeContainerCreationEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
-
- subscribeToContainerRemoval(c, func(e event.Event) {
- ev := e.(containerEvent.DeleteSuccess)
- cachedContainerStorage.handleRemoval(ev.ID)
- c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
- zap.Stringer("id", ev.ID),
- )
- })
+ subscribeToContainerRemoval(c, func(e event.Event) {
+ ev := e.(containerEvent.DeleteSuccess)
+ containerCache.handleRemoval(ev.ID)
+ c.log.Debug(logs.FrostFSNodeContainerRemovalEventsReceipt,
+ zap.Stringer("id", ev.ID),
+ )
+ })
+ c.cfgObject.cnrSource = containerCache
+ }
+ cachedEACLStorage := newCachedEACLStorage(eACLFetcher, c.cfgMorph.cacheTTL)
c.cfgObject.eaclSource = cachedEACLStorage
- c.cfgObject.cnrSource = cachedContainerStorage
cnrRdr.lister = client
cnrRdr.eacl = c.cfgObject.eaclSource
diff --git a/cmd/frostfs-node/morph.go b/cmd/frostfs-node/morph.go
index 7178cd97d..1bfcb8ac9 100644
--- a/cmd/frostfs-node/morph.go
+++ b/cmd/frostfs-node/morph.go
@@ -90,6 +90,7 @@ func initMorphComponents(ctx context.Context, c *cfg) {
var netmapSource netmap.Source
+ c.cfgMorph.containerCacheSize = morphconfig.ContainerCacheSize(c.appCfg)
c.cfgMorph.cacheTTL = morphconfig.CacheTTL(c.appCfg)
if c.cfgMorph.cacheTTL == 0 {
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 86be35ba8..2a80fba18 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -81,6 +81,7 @@ morph:
cache_ttl: 15s # Sidechain cache TTL value (min interval between similar calls). Negative value disables caching.
# Default value: block time. It is recommended to have this value less or equal to block time.
# Cached entities: containers, container lists, eACL tables.
+ container_cache_size: 100 # container_cache_size is is the maximum number of containers in the cache.
switch_interval: 3m # interval b/w RPC switch attempts if the node is connected not to the highest priority node
rpc_endpoint: # side chain NEO RPC endpoints; are shuffled and used one by one until the first success
- address: wss://rpc1.morph.frostfs.info:40341/ws
From 54eb0058229965b7ddd704fe4da2e24f41c20f3f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 1 Oct 2024 14:39:36 +0300
Subject: [PATCH 108/655] [#1404] go.mod: Update api-go
Fix #1398
Fix #1399
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 9817f8527..1023948bc 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 3c6dd9a99..5d719a027 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e h1:740ABnOBYx4o6jxULHdSSnVW2fYIO35ohg+Uz59sxd0=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 9c5ddc4dfeb6447ae7d9cc0d74db551271ac6eb1 Mon Sep 17 00:00:00 2001
From: Airat Arifullin
Date: Wed, 2 Oct 2024 10:09:10 +0300
Subject: [PATCH 109/655] [#1407] tree: Set `ContainerOwner` in parameter for
`CheckAPE`
Signed-off-by: Airat Arifullin
---
pkg/services/tree/ape.go | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/pkg/services/tree/ape.go b/pkg/services/tree/ape.go
index 693b16e60..69cf59405 100644
--- a/pkg/services/tree/ape.go
+++ b/pkg/services/tree/ape.go
@@ -75,12 +75,13 @@ func (s *Service) checkAPE(ctx context.Context, bt *bearer.Token,
}
return s.apeChecker.CheckAPE(checkercore.CheckPrm{
- Request: request,
- Namespace: namespace,
- Container: cid,
- PublicKey: publicKey,
- BearerToken: bt,
- SoftAPECheck: false,
+ Request: request,
+ Namespace: namespace,
+ Container: cid,
+ ContainerOwner: container.Value.Owner(),
+ PublicKey: publicKey,
+ BearerToken: bt,
+ SoftAPECheck: false,
})
}
From 57c31e9802ad19b8d64388315cd53a05515e353e Mon Sep 17 00:00:00 2001
From: Ekaterina Lebedeva
Date: Tue, 1 Oct 2024 16:09:05 +0300
Subject: [PATCH 110/655] [#1306] node: Allow tombstone_lifetime config to be
loaded on the fly
Signed-off-by: Ekaterina Lebedeva
---
cmd/frostfs-node/config.go | 17 +++++++++++++++--
cmd/frostfs-node/object.go | 6 ++----
2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 58a96879f..4ad9ec6c6 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -105,6 +105,10 @@ type applicationConfiguration struct {
timestamp bool
}
+ ObjectCfg struct {
+ tombstoneLifetime uint64
+ }
+
EngineCfg struct {
errorThreshold uint32
shardPoolSize uint32
@@ -223,6 +227,10 @@ func (a *applicationConfiguration) readConfig(c *config.Config) error {
a.LoggerCfg.destination = loggerconfig.Destination(c)
a.LoggerCfg.timestamp = loggerconfig.Timestamp(c)
+ // Object
+
+ a.ObjectCfg.tombstoneLifetime = objectconfig.TombstoneLifetime(c)
+
// Storage Engine
a.EngineCfg.errorThreshold = engineconfig.ShardErrorThreshold(c)
@@ -624,7 +632,7 @@ type cfgObject struct {
cfgLocalStorage cfgLocalStorage
- tombstoneLifetime uint64
+ tombstoneLifetime *atomic.Uint64
skipSessionTokenIssuerVerification bool
}
@@ -815,9 +823,11 @@ func initCfgGRPC() cfgGRPC {
}
func initCfgObject(appCfg *config.Config) cfgObject {
+ var tsLifetime atomic.Uint64
+ tsLifetime.Store(objectconfig.TombstoneLifetime(appCfg))
return cfgObject{
pool: initObjectPool(appCfg),
- tombstoneLifetime: objectconfig.TombstoneLifetime(appCfg),
+ tombstoneLifetime: &tsLifetime,
skipSessionTokenIssuerVerification: objectconfig.Put(appCfg).SkipSessionTokenIssuerVerification(),
}
}
@@ -1296,6 +1306,9 @@ func (c *cfg) reloadConfig(ctx context.Context) {
components := c.getComponents(ctx, logPrm)
+ // Object
+ c.cfgObject.tombstoneLifetime.Store(c.ObjectCfg.tombstoneLifetime)
+
// Storage Engine
var rcfg engine.ReConfiguration
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 9d4e35ca8..47649c88b 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -109,13 +109,12 @@ func (s *objectSvc) GetRangeHash(ctx context.Context, req *object.GetRangeHashRe
type delNetInfo struct {
netmap.State
- tsLifetime uint64
cfg *cfg
}
func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
- return i.tsLifetime, nil
+ return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
}
// returns node owner ID calculated from configured private key.
@@ -424,8 +423,7 @@ func createDeleteService(c *cfg, keyStorage *util.KeyStorage, sGet *getsvc.Servi
sSearch,
sPut,
&delNetInfo{
- State: c.cfgNetmap.state,
- tsLifetime: c.cfgObject.tombstoneLifetime,
+ State: c.cfgNetmap.state,
cfg: c,
},
From f45e75e3eb781662abbbc4f6820076c492fc1f0d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:18:35 +0300
Subject: [PATCH 111/655] [#1409] adm: Do not bind DeltaFlag to viper
We bind flag that could be specified in config.
This is not a config flag, just a command option.
Also fix TestInitialize failures:
```
Error: Received unexpected error:
number of epochs cannot be less than 1
Test: TestInitialize/16_nodes/force-new-epoch
```
Refs #1372 (945b7c740b0deb4)
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 3 ++-
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 1 -
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index 5e4e9c725..5c5fa9988 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -31,7 +31,8 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, viper.GetInt64(commonflags.DeltaFlag)); err != nil {
+ delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag)
+ if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 0288bcdc5..3300db36a 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -22,7 +22,6 @@ var (
PreRun: func(cmd *cobra.Command, _ []string) {
_ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
_ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
- _ = viper.BindPFlag(commonflags.DeltaFlag, cmd.Flags().Lookup(commonflags.DeltaFlag))
},
RunE: ForceNewEpochCmd,
}
From 62028cd7ee0b5d825b71cfa11d1d87369b1da23d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:20:09 +0300
Subject: [PATCH 112/655] [#1409] adm: Uncommonize DeltaFlag
It is used only in `force-new-epoch`, it is not _common_ between
multiple commands.
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-adm/internal/commonflags/flags.go | 1 -
cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go | 5 +++--
cmd/frostfs-adm/internal/modules/morph/netmap/root.go | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/cmd/frostfs-adm/internal/commonflags/flags.go b/cmd/frostfs-adm/internal/commonflags/flags.go
index b51d2e115..81395edb0 100644
--- a/cmd/frostfs-adm/internal/commonflags/flags.go
+++ b/cmd/frostfs-adm/internal/commonflags/flags.go
@@ -39,5 +39,4 @@ const (
CustomZoneFlag = "domain"
AlphabetSizeFlag = "size"
AllFlag = "all"
- DeltaFlag = "delta"
)
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
index 5c5fa9988..94223dbd0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/epoch.go
@@ -4,7 +4,6 @@ import (
"fmt"
"strings"
- "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/commonflags"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/constants"
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-adm/internal/modules/morph/helper"
"github.com/nspcc-dev/neo-go/pkg/io"
@@ -13,6 +12,8 @@ import (
"github.com/spf13/viper"
)
+const deltaFlag = "delta"
+
func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
wCtx, err := helper.NewInitializeContext(cmd, viper.GetViper())
if err != nil {
@@ -31,7 +32,7 @@ func ForceNewEpochCmd(cmd *cobra.Command, _ []string) error {
}
bw := io.NewBufBinWriter()
- delta, _ := cmd.Flags().GetInt64(commonflags.DeltaFlag)
+ delta, _ := cmd.Flags().GetInt64(deltaFlag)
if err := helper.EmitNewEpochCall(bw, wCtx, nmHash, delta); err != nil {
return err
}
diff --git a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
index 3300db36a..55b7e64f0 100644
--- a/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/netmap/root.go
@@ -35,7 +35,7 @@ func initForceNewEpochCmd() {
ForceNewEpoch.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
ForceNewEpoch.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
ForceNewEpoch.Flags().String(commonflags.LocalDumpFlag, "", "Path to the blocks dump file")
- ForceNewEpoch.Flags().Int64(commonflags.DeltaFlag, 1, "Number of epochs to increase the current epoch")
+ ForceNewEpoch.Flags().Int64(deltaFlag, 1, "Number of epochs to increase the current epoch")
}
func init() {
From f83f7feb8caa0ef5ab9a952a6a6d3e2f12a63486 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 2 Oct 2024 11:01:22 +0300
Subject: [PATCH 113/655] [#1391] adm: Properly check whether transfers were
made
Signed-off-by: Evgenii Stratonikov
---
.../morph/initialize/initialize_transfer.go | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
index d7b0ec86c..7f1bfee2b 100644
--- a/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
+++ b/cmd/frostfs-adm/internal/modules/morph/initialize/initialize_transfer.go
@@ -27,12 +27,12 @@ const (
initialAlphabetGASAmount = 10_000 * native.GASFactor
// initialProxyGASAmount represents the amount of GAS given to a proxy contract.
initialProxyGASAmount = 50_000 * native.GASFactor
- // alphabetGasRatio is a coefficient that defines the threshold below which
- // the balance of the alphabet node is considered not replenished. The value
- // of this coefficient is determined empirically.
- alphabetGasRatio = 5
)
+func initialCommitteeGASAmount(c *helper.InitializeContext) int64 {
+ return (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2
+}
+
func transferFunds(c *helper.InitializeContext) error {
ok, err := transferFundsFinished(c)
if ok || err != nil {
@@ -59,7 +59,7 @@ func transferFunds(c *helper.InitializeContext) error {
transferTarget{
Token: gas.Hash,
Address: c.CommitteeAcc.Contract.ScriptHash(),
- Amount: (gasInitialTotalSupply - initialAlphabetGASAmount*int64(len(c.Wallets))) / 2,
+ Amount: initialCommitteeGASAmount(c),
},
transferTarget{
Token: neo.Hash,
@@ -80,12 +80,19 @@ func transferFunds(c *helper.InitializeContext) error {
return c.AwaitTx()
}
+// transferFundsFinished checks balances of accounts we transfer GAS to.
+// The stage is considered finished if the balance is greater than the half of what we need to transfer.
func transferFundsFinished(c *helper.InitializeContext) (bool, error) {
acc := c.Accounts[0]
r := nep17.NewReader(c.ReadOnlyInvoker, gas.Hash)
res, err := r.BalanceOf(acc.Contract.ScriptHash())
- return res.Cmp(big.NewInt(alphabetGasRatio*native.GASFactor)) == 1, err
+ if err != nil || res.Cmp(big.NewInt(initialAlphabetGASAmount/2)) != 1 {
+ return false, err
+ }
+
+ res, err = r.BalanceOf(c.CommitteeAcc.ScriptHash())
+ return res != nil && res.Cmp(big.NewInt(initialCommitteeGASAmount(c)/2)) == 1, err
}
func transferGASToProxy(c *helper.InitializeContext) error {
From 434048e8d959b29375c0d63a112b8eb8df8792d8 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 11:28:00 +0300
Subject: [PATCH 114/655] [#1408] metabase: Fix EC search with slow and fast
filters
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/select.go | 35 ++++++++++++-
.../metabase/select_test.go | 50 +++++++++++++++++++
2 files changed, 83 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index ed43fc41f..85d1b08ba 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -389,8 +389,7 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
- buf := make([]byte, addressKeySize)
- obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ obj, isECChunk, err := db.getObjectForSlowFilters(tx, addr, currEpoch)
if err != nil {
return result, false
}
@@ -401,17 +400,26 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
case v2object.FilterHeaderVersion:
data = []byte(obj.Version().String())
case v2object.FilterHeaderHomomorphicHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent hashes are incomparable
+ }
cs, _ := obj.PayloadHomomorphicHash()
data = cs.Value()
case v2object.FilterHeaderCreationEpoch:
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.CreationEpoch())
case v2object.FilterHeaderPayloadLength:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload lengths are incomparable
+ }
data = make([]byte, 8)
binary.LittleEndian.PutUint64(data, obj.PayloadSize())
case v2object.FilterHeaderOwnerID:
data = []byte(obj.OwnerID().EncodeToString())
case v2object.FilterHeaderPayloadHash:
+ if isECChunk {
+ return result, false // EC chunk and EC parent payload hashes are incomparable
+ }
cs, _ := obj.PayloadChecksum()
data = cs.Value()
default: // user attribute
@@ -439,6 +447,29 @@ func (db *DB) matchSlowFilters(tx *bbolt.Tx, addr oid.Address, f objectSDK.Searc
return result, true
}
+func (db *DB) getObjectForSlowFilters(tx *bbolt.Tx, addr oid.Address, currEpoch uint64) (*objectSDK.Object, bool, error) {
+ buf := make([]byte, addressKeySize)
+ obj, err := db.get(tx, addr, buf, true, false, currEpoch)
+ if err != nil {
+ var ecInfoError *objectSDK.ECInfoError
+ if errors.As(err, &ecInfoError) {
+ for _, chunk := range ecInfoError.ECInfo().Chunks {
+ var objID oid.ID
+ if err = objID.ReadFromV2(chunk.ID); err != nil {
+ continue
+ }
+ addr.SetObject(objID)
+ obj, err = db.get(tx, addr, buf, true, false, currEpoch)
+ if err == nil {
+ return obj, true, nil
+ }
+ }
+ }
+ return nil, false, err
+ }
+ return obj, false, nil
+}
+
func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
objectAttributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index fcd5d3a90..0c6ebc863 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -762,6 +762,56 @@ func TestDB_SelectOwnerID(t *testing.T) {
})
}
+func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ ecChunk1 := oidtest.ID()
+ ecChunk2 := oidtest.ID()
+ ecParent := oidtest.ID()
+ var ecParentAddr oid.Address
+ ecParentAddr.SetContainer(cnr)
+ ecParentAddr.SetObject(ecParent)
+ var ecParentAttr []objectSDK.Attribute
+ var attr objectSDK.Attribute
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/1/2/3")
+ ecParentAttr = append(ecParentAttr, attr)
+
+ chunkObj := testutil.GenerateObjectWithCID(cnr)
+ chunkObj.SetContainerID(cnr)
+ chunkObj.SetID(ecChunk1)
+ chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
+ chunkObj.SetPayloadSize(uint64(5))
+ chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
+
+ chunkObj2 := testutil.GenerateObjectWithCID(cnr)
+ chunkObj2.SetContainerID(cnr)
+ chunkObj2.SetID(ecChunk2)
+ chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
+ chunkObj2.SetPayloadSize(uint64(10))
+ chunkObj2.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 1, 3, []byte{}, 0))
+
+ // put object with EC
+
+ var prm meta.PutPrm
+ prm.SetObject(chunkObj)
+ _, err := db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ prm.SetObject(chunkObj2)
+ _, err = db.Put(context.Background(), prm)
+ require.NoError(t, err)
+
+ fs := objectSDK.SearchFilters{}
+ fs.AddRootFilter()
+ fs.AddFilter(objectSDK.AttributeFilePath, "/1/2/3", objectSDK.MatchCommonPrefix)
+ testSelect(t, db, cnr, fs, ecParentAddr)
+}
+
type testTarget struct {
objects []*objectSDK.Object
}
From 01e3944b31e7daed8ca855244b833302daabe9cc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 14:36:10 +0300
Subject: [PATCH 115/655] [#1408] metabase: Fix tests
No need to specify container ID for objects created with `testutil.GenerateObjectWithCID`.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/delete_ec_test.go | 1 -
pkg/local_object_storage/metabase/inhume_ec_test.go | 2 --
pkg/local_object_storage/metabase/select_test.go | 2 --
3 files changed, 5 deletions(-)
diff --git a/pkg/local_object_storage/metabase/delete_ec_test.go b/pkg/local_object_storage/metabase/delete_ec_test.go
index 66c79ecd7..a25627990 100644
--- a/pkg/local_object_storage/metabase/delete_ec_test.go
+++ b/pkg/local_object_storage/metabase/delete_ec_test.go
@@ -39,7 +39,6 @@ func TestDeleteECObject_WithoutSplit(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/inhume_ec_test.go b/pkg/local_object_storage/metabase/inhume_ec_test.go
index c3b1e72da..32e412c79 100644
--- a/pkg/local_object_storage/metabase/inhume_ec_test.go
+++ b/pkg/local_object_storage/metabase/inhume_ec_test.go
@@ -35,14 +35,12 @@ func TestInhumeECObject(t *testing.T) {
tombstoneID := oidtest.ID()
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index 0c6ebc863..bee778e2b 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -782,14 +782,12 @@ func TestDB_SelectECWithFastAndSlowFilters(t *testing.T) {
ecParentAttr = append(ecParentAttr, attr)
chunkObj := testutil.GenerateObjectWithCID(cnr)
- chunkObj.SetContainerID(cnr)
chunkObj.SetID(ecChunk1)
chunkObj.SetPayload([]byte{0, 1, 2, 3, 4})
chunkObj.SetPayloadSize(uint64(5))
chunkObj.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: ecParent, Attributes: ecParentAttr}, 0, 3, []byte{}, 0))
chunkObj2 := testutil.GenerateObjectWithCID(cnr)
- chunkObj2.SetContainerID(cnr)
chunkObj2.SetID(ecChunk2)
chunkObj2.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
chunkObj2.SetPayloadSize(uint64(10))
From 6c46044c9cba5f2e20e105b3efa7abe166fbf577 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:19:26 +0300
Subject: [PATCH 116/655] [#1410] shard: Move MetricsWriter interface to a
separate file
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/metrics.go | 47 +++++++++++++++++++++++
pkg/local_object_storage/shard/shard.go | 44 ---------------------
2 files changed, 47 insertions(+), 44 deletions(-)
create mode 100644 pkg/local_object_storage/shard/metrics.go
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
new file mode 100644
index 000000000..568c0de5e
--- /dev/null
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -0,0 +1,47 @@
+package shard
+
+import "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+
+// MetricsWriter is an interface that must store shard's metrics.
+type MetricsWriter interface {
+ // SetObjectCounter must set object counter taking into account object type.
+ SetObjectCounter(objectType string, v uint64)
+ // AddToObjectCounter must update object counter taking into account object
+ // type.
+ // Negative parameter must decrease the counter.
+ AddToObjectCounter(objectType string, delta int)
+ // AddToContainerSize must add a value to the container size.
+ // Value can be negative.
+ AddToContainerSize(cnr string, value int64)
+ // AddToPayloadSize must add a value to the payload size.
+ // Value can be negative.
+ AddToPayloadSize(value int64)
+ // IncObjectCounter must increment shard's object counter taking into account
+ // object type.
+ IncObjectCounter(objectType string)
+ // SetShardID must set (update) the shard identifier that will be used in
+ // metrics.
+ SetShardID(id string)
+ // SetReadonly must set shard mode.
+ SetMode(mode mode.Mode)
+ // IncErrorCounter increment error counter.
+ IncErrorCounter()
+ // ClearErrorCounter clear error counter.
+ ClearErrorCounter()
+ // DeleteShardMetrics deletes shard metrics from registry.
+ DeleteShardMetrics()
+ // SetContainerObjectsCount sets container object count.
+ SetContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncContainerObjectsCount increments container object count.
+ IncContainerObjectsCount(cnrID string, objectType string)
+ // SubContainerObjectsCount subtracts container object count.
+ SubContainerObjectsCount(cnrID string, objectType string, value uint64)
+ // IncRefillObjectsCount increments refill objects count.
+ IncRefillObjectsCount(path string, size int, success bool)
+ // SetRefillPercent sets refill percent.
+ SetRefillPercent(path string, percent uint32)
+ // SetRefillStatus sets refill status.
+ SetRefillStatus(path string, status string)
+ // SetEvacuationInProgress sets evacuation status
+ SetEvacuationInProgress(value bool)
+}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index 7496fc352..f5317b16c 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -57,50 +57,6 @@ type DeletedLockCallback func(context.Context, []oid.Address)
// EmptyContainersCallback is a callback hanfling list of zero-size and zero-count containers.
type EmptyContainersCallback func(context.Context, []cid.ID)
-// MetricsWriter is an interface that must store shard's metrics.
-type MetricsWriter interface {
- // SetObjectCounter must set object counter taking into account object type.
- SetObjectCounter(objectType string, v uint64)
- // AddToObjectCounter must update object counter taking into account object
- // type.
- // Negative parameter must decrease the counter.
- AddToObjectCounter(objectType string, delta int)
- // AddToContainerSize must add a value to the container size.
- // Value can be negative.
- AddToContainerSize(cnr string, value int64)
- // AddToPayloadSize must add a value to the payload size.
- // Value can be negative.
- AddToPayloadSize(value int64)
- // IncObjectCounter must increment shard's object counter taking into account
- // object type.
- IncObjectCounter(objectType string)
- // SetShardID must set (update) the shard identifier that will be used in
- // metrics.
- SetShardID(id string)
- // SetReadonly must set shard mode.
- SetMode(mode mode.Mode)
- // IncErrorCounter increment error counter.
- IncErrorCounter()
- // ClearErrorCounter clear error counter.
- ClearErrorCounter()
- // DeleteShardMetrics deletes shard metrics from registry.
- DeleteShardMetrics()
- // SetContainerObjectsCount sets container object count.
- SetContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncContainerObjectsCount increments container object count.
- IncContainerObjectsCount(cnrID string, objectType string)
- // SubContainerObjectsCount subtracts container object count.
- SubContainerObjectsCount(cnrID string, objectType string, value uint64)
- // IncRefillObjectsCount increments refill objects count.
- IncRefillObjectsCount(path string, size int, success bool)
- // SetRefillPercent sets refill percent.
- SetRefillPercent(path string, percent uint32)
- // SetRefillStatus sets refill status.
- SetRefillStatus(path string, status string)
- // SetEvacuationInProgress sets evacuation status
- SetEvacuationInProgress(value bool)
-}
-
type cfg struct {
m sync.RWMutex
From 9206ce5cd2ea973feef6a53ae0453736efacbe11 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:23:59 +0300
Subject: [PATCH 117/655] [#1410] shard: Provide the default implementation for
MetricsWriter
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/id.go | 4 +-
pkg/local_object_storage/shard/metrics.go | 22 ++++++++++
pkg/local_object_storage/shard/mode.go | 4 +-
pkg/local_object_storage/shard/shard.go | 49 +++++++++--------------
4 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/pkg/local_object_storage/shard/id.go b/pkg/local_object_storage/shard/id.go
index 2fe68d270..a72313498 100644
--- a/pkg/local_object_storage/shard/id.go
+++ b/pkg/local_object_storage/shard/id.go
@@ -45,9 +45,7 @@ func (s *Shard) UpdateID() (err error) {
}
shardID := s.info.ID.String()
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.SetShardID(shardID)
- }
+ s.cfg.metricsWriter.SetShardID(shardID)
if s.writeCache != nil && s.writeCache.GetMetrics() != nil {
s.writeCache.GetMetrics().SetShardID(shardID)
}
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 568c0de5e..6bf198048 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -45,3 +45,25 @@ type MetricsWriter interface {
// SetEvacuationInProgress sets evacuation status
SetEvacuationInProgress(value bool)
}
+
+type noopMetrics struct{}
+
+var _ MetricsWriter = noopMetrics{}
+
+func (noopMetrics) SetObjectCounter(string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, int) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) AddToPayloadSize(int64) {}
+func (noopMetrics) IncObjectCounter(string) {}
+func (noopMetrics) SetShardID(string) {}
+func (noopMetrics) SetMode(mode.Mode) {}
+func (noopMetrics) IncErrorCounter() {}
+func (noopMetrics) ClearErrorCounter() {}
+func (noopMetrics) DeleteShardMetrics() {}
+func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncContainerObjectsCount(string, string) {}
+func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string) {}
+func (noopMetrics) SetEvacuationInProgress(bool) {}
diff --git a/pkg/local_object_storage/shard/mode.go b/pkg/local_object_storage/shard/mode.go
index 1bab57448..d90a5f4b6 100644
--- a/pkg/local_object_storage/shard/mode.go
+++ b/pkg/local_object_storage/shard/mode.go
@@ -65,9 +65,7 @@ func (s *Shard) setMode(m mode.Mode) error {
}
s.info.Mode = m
- if s.metricsWriter != nil {
- s.metricsWriter.SetMode(s.info.Mode)
- }
+ s.metricsWriter.SetMode(s.info.Mode)
s.log.Info(logs.ShardShardModeSetSuccessfully,
zap.Stringer("mode", s.info.Mode))
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index f5317b16c..a57b548be 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -105,6 +105,7 @@ func defaultCfg() *cfg {
reportErrorFunc: func(string, string, error) {},
zeroSizeContainersCallback: func(context.Context, []cid.ID) {},
zeroCountContainersCallback: func(context.Context, []cid.ID) {},
+ metricsWriter: noopMetrics{},
}
}
@@ -384,7 +385,7 @@ const (
)
func (s *Shard) updateMetrics(ctx context.Context) {
- if s.cfg.metricsWriter == nil || s.GetMode().NoMetabase() {
+ if s.GetMode().NoMetabase() {
return
}
@@ -439,35 +440,29 @@ func (s *Shard) updateMetrics(ctx context.Context) {
// incObjectCounter increment both physical and logical object
// counters.
func (s *Shard) incObjectCounter(cnrID cid.ID, isUser bool) {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncObjectCounter(physical)
- s.cfg.metricsWriter.IncObjectCounter(logical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
- if isUser {
- s.cfg.metricsWriter.IncObjectCounter(user)
- s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
- }
+ s.cfg.metricsWriter.IncObjectCounter(physical)
+ s.cfg.metricsWriter.IncObjectCounter(logical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), physical)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), logical)
+ if isUser {
+ s.cfg.metricsWriter.IncObjectCounter(user)
+ s.cfg.metricsWriter.IncContainerObjectsCount(cnrID.EncodeToString(), user)
}
}
func (s *Shard) decObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.cfg.metricsWriter.AddToObjectCounter(typ, -int(v))
}
}
func (s *Shard) setObjectCounterBy(typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.cfg.metricsWriter.SetObjectCounter(typ, v)
}
}
func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters) {
- if s.cfg.metricsWriter == nil {
- return
- }
-
for cnrID, count := range byCnr {
if count.Phy > 0 {
s.cfg.metricsWriter.SubContainerObjectsCount(cnrID.EncodeToString(), physical, count.Phy)
@@ -482,46 +477,38 @@ func (s *Shard) decContainerObjectCounter(byCnr map[cid.ID]meta.ObjectCounters)
}
func (s *Shard) addToContainerSize(cnr string, size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
+ if size != 0 {
s.cfg.metricsWriter.AddToContainerSize(cnr, size)
}
}
func (s *Shard) addToPayloadSize(size int64) {
- if s.cfg.metricsWriter != nil && size != 0 {
+ if size != 0 {
s.cfg.metricsWriter.AddToPayloadSize(size)
}
}
func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
- if s.cfg.metricsWriter != nil && v > 0 {
+ if v > 0 {
s.metricsWriter.SetContainerObjectsCount(cnr, typ, v)
}
}
func (s *Shard) IncErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.IncErrorCounter()
- }
+ s.cfg.metricsWriter.IncErrorCounter()
}
func (s *Shard) ClearErrorCounter() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.ClearErrorCounter()
- }
+ s.cfg.metricsWriter.ClearErrorCounter()
}
func (s *Shard) DeleteShardMetrics() {
- if s.cfg.metricsWriter != nil {
- s.cfg.metricsWriter.DeleteShardMetrics()
- }
+ s.cfg.metricsWriter.DeleteShardMetrics()
}
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
s.info.EvacuationInProgress = val
- if s.metricsWriter != nil {
- s.metricsWriter.SetEvacuationInProgress(val)
- }
+ s.metricsWriter.SetEvacuationInProgress(val)
}
From 9a87acb87ad243fcdd932e764a3f5f8d9c5c6657 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 3 Oct 2024 10:40:56 +0300
Subject: [PATCH 118/655] [#1410] engine: Provide the default implementation to
MetricsRegister
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/container.go | 8 +---
pkg/local_object_storage/engine/delete.go | 4 +-
pkg/local_object_storage/engine/engine.go | 1 +
pkg/local_object_storage/engine/get.go | 4 +-
pkg/local_object_storage/engine/head.go | 4 +-
pkg/local_object_storage/engine/inhume.go | 4 +-
pkg/local_object_storage/engine/metrics.go | 45 +++++++++++++++++++
pkg/local_object_storage/engine/put.go | 4 +-
pkg/local_object_storage/engine/range.go | 4 +-
pkg/local_object_storage/engine/select.go | 8 +---
pkg/local_object_storage/engine/shards.go | 46 +++++++++-----------
11 files changed, 77 insertions(+), 55 deletions(-)
diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go
index e45f502ac..6def02f12 100644
--- a/pkg/local_object_storage/engine/container.go
+++ b/pkg/local_object_storage/engine/container.go
@@ -68,9 +68,7 @@ func ContainerSize(e *StorageEngine, id cid.ID) (uint64, error) {
}
func (e *StorageEngine) containerSize(prm ContainerSizePrm) (res ContainerSizeRes, err error) {
- if e.metrics != nil {
- defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("EstimateContainerSize", e.metrics.AddMethodDuration)()
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
var csPrm shard.ContainerSizePrm
@@ -116,9 +114,7 @@ func ListContainers(ctx context.Context, e *StorageEngine) ([]cid.ID, error) {
}
func (e *StorageEngine) listContainers(ctx context.Context) (ListContainersRes, error) {
- if e.metrics != nil {
- defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("ListContainers", e.metrics.AddMethodDuration)()
uniqueIDs := make(map[string]cid.ID)
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 318f938fb..61cb6832d 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -68,9 +68,7 @@ func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRe
}
func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
- if e.metrics != nil {
- defer elapsed("Delete", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Delete", e.metrics.AddMethodDuration)()
var locked struct {
is bool
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 5e883a641..13efdcb84 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -219,6 +219,7 @@ func defaultCfg() *cfg {
res := &cfg{
log: &logger.Logger{Logger: zap.L()},
shardPoolSize: 20,
+ metrics: noopMetrics{},
}
res.containerSource.Store(&containerSource{})
return res
diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go
index 253256c34..4a9199be7 100644
--- a/pkg/local_object_storage/engine/get.go
+++ b/pkg/local_object_storage/engine/get.go
@@ -66,9 +66,7 @@ func (e *StorageEngine) Get(ctx context.Context, prm GetPrm) (res GetRes, err er
}
func (e *StorageEngine) get(ctx context.Context, prm GetPrm) (GetRes, error) {
- if e.metrics != nil {
- defer elapsed("Get", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Get", e.metrics.AddMethodDuration)()
errNotFound := new(apistatus.ObjectNotFound)
diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go
index 6857a3631..d2e3cfd99 100644
--- a/pkg/local_object_storage/engine/head.go
+++ b/pkg/local_object_storage/engine/head.go
@@ -68,9 +68,7 @@ func (e *StorageEngine) Head(ctx context.Context, prm HeadPrm) (res HeadRes, err
func (e *StorageEngine) head(ctx context.Context, prm HeadPrm) (HeadRes, error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.head")
defer span.End()
- if e.metrics != nil {
- defer elapsed("Head", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Head", e.metrics.AddMethodDuration)()
var (
head *objectSDK.Object
diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go
index 683713f94..35ce50f65 100644
--- a/pkg/local_object_storage/engine/inhume.go
+++ b/pkg/local_object_storage/engine/inhume.go
@@ -80,9 +80,7 @@ func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRe
}
func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
- if e.metrics != nil {
- defer elapsed("Inhume", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Inhume", e.metrics.AddMethodDuration)()
var shPrm shard.InhumePrm
if prm.forceRemoval {
diff --git a/pkg/local_object_storage/engine/metrics.go b/pkg/local_object_storage/engine/metrics.go
index 1c088c754..75936206d 100644
--- a/pkg/local_object_storage/engine/metrics.go
+++ b/pkg/local_object_storage/engine/metrics.go
@@ -68,3 +68,48 @@ func (m *gcMetrics) AddExpiredObjectCollectionDuration(d time.Duration, success
func (m *gcMetrics) AddInhumedObjectCount(count uint64, objectType string) {
m.storage.AddInhumedObjectCount(m.shardID, count, objectType)
}
+
+type (
+ noopMetrics struct{}
+ noopWriteCacheMetrics struct{}
+ noopGCMetrics struct{}
+)
+
+var (
+ _ MetricRegister = noopMetrics{}
+ _ metrics.WriteCacheMetrics = noopWriteCacheMetrics{}
+ _ metrics.GCMetrics = noopGCMetrics{}
+)
+
+func (noopMetrics) AddMethodDuration(string, time.Duration) {}
+func (noopMetrics) SetObjectCounter(string, string, uint64) {}
+func (noopMetrics) AddToObjectCounter(string, string, int) {}
+func (noopMetrics) SetMode(string, mode.Mode) {}
+func (noopMetrics) AddToContainerSize(string, int64) {}
+func (noopMetrics) DeleteContainerSize(string) {}
+func (noopMetrics) DeleteContainerCount(string) {}
+func (noopMetrics) AddToPayloadCounter(string, int64) {}
+func (noopMetrics) IncErrorCounter(string) {}
+func (noopMetrics) ClearErrorCounter(string) {}
+func (noopMetrics) DeleteShardMetrics(string) {}
+func (noopMetrics) SetContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncContainerObjectCounter(string, string, string) {}
+func (noopMetrics) SubContainerObjectCounter(string, string, string, uint64) {}
+func (noopMetrics) IncRefillObjectsCount(string, string, int, bool) {}
+func (noopMetrics) SetRefillPercent(string, string, uint32) {}
+func (noopMetrics) SetRefillStatus(string, string, string) {}
+func (noopMetrics) SetEvacuationInProgress(string, bool) {}
+func (noopMetrics) WriteCache() metrics.WriteCacheMetrics { return noopWriteCacheMetrics{} }
+func (noopMetrics) GC() metrics.GCMetrics { return noopGCMetrics{} }
+
+func (noopWriteCacheMetrics) AddMethodDuration(string, string, string, string, bool, time.Duration) {}
+func (noopWriteCacheMetrics) SetActualCount(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetEstimateSize(string, string, string, uint64) {}
+func (noopWriteCacheMetrics) SetMode(string, string) {}
+func (noopWriteCacheMetrics) IncOperationCounter(string, string, string, string, metrics.NullBool) {}
+func (noopWriteCacheMetrics) Close(string, string) {}
+
+func (noopGCMetrics) AddRunDuration(string, time.Duration, bool) {}
+func (noopGCMetrics) AddDeletedCount(string, uint64, uint64) {}
+func (noopGCMetrics) AddExpiredObjectCollectionDuration(string, time.Duration, bool, string) {}
+func (noopGCMetrics) AddInhumedObjectCount(string, uint64, string) {}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index f92d83745..bf86402a7 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -72,9 +72,7 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
}
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
- if e.metrics != nil {
- defer elapsed("Put", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Put", e.metrics.AddMethodDuration)()
addr := object.AddressOf(prm.obj)
diff --git a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go
index cbf26ff4e..498674fd2 100644
--- a/pkg/local_object_storage/engine/range.go
+++ b/pkg/local_object_storage/engine/range.go
@@ -82,9 +82,7 @@ func (e *StorageEngine) getRange(ctx context.Context, prm RngPrm) (RngRes, error
))
defer span.End()
- if e.metrics != nil {
- defer elapsed("GetRange", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("GetRange", e.metrics.AddMethodDuration)()
var shPrm shard.RngPrm
shPrm.SetAddress(prm.addr)
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 6a8c9fab9..972a4f52a 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -59,9 +59,7 @@ func (e *StorageEngine) Select(ctx context.Context, prm SelectPrm) (res SelectRe
}
func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("Search", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("Search", e.metrics.AddMethodDuration)()
addrList := make([]oid.Address, 0)
uniqueMap := make(map[string]struct{})
@@ -108,9 +106,7 @@ func (e *StorageEngine) List(ctx context.Context, limit uint64) (res SelectRes,
}
func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, error) {
- if e.metrics != nil {
- defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
- }
+ defer elapsed("ListObjects", e.metrics.AddMethodDuration)()
addrList := make([]oid.Address, 0, limit)
uniqueMap := make(map[string]struct{})
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 2ad6859e4..96f54369b 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -116,9 +116,7 @@ func (e *StorageEngine) AddShard(ctx context.Context, opts ...shard.Option) (*sh
return nil, fmt.Errorf("could not add %s shard: %w", sh.ID().String(), err)
}
- if e.cfg.metrics != nil {
- e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
- }
+ e.cfg.metrics.SetMode(sh.ID().String(), sh.GetMode())
return sh.ID(), nil
}
@@ -152,28 +150,26 @@ func (e *StorageEngine) appendMetrics(id *shard.ID, opts []shard.Option) []shard
e.mtx.RLock()
defer e.mtx.RUnlock()
- if e.metrics != nil {
- opts = append(opts,
- shard.WithMetricsWriter(
- &metricsWithID{
- id: id.String(),
- mw: e.metrics,
- },
- ),
- shard.WithWriteCacheMetrics(
- &writeCacheMetrics{
- shardID: id.String(),
- metrics: e.metrics.WriteCache(),
- },
- ),
- shard.WithGCMetrics(
- &gcMetrics{
- storage: e.metrics.GC(),
- shardID: id.String(),
- },
- ),
- )
- }
+ opts = append(opts,
+ shard.WithMetricsWriter(
+ &metricsWithID{
+ id: id.String(),
+ mw: e.metrics,
+ },
+ ),
+ shard.WithWriteCacheMetrics(
+ &writeCacheMetrics{
+ shardID: id.String(),
+ metrics: e.metrics.WriteCache(),
+ },
+ ),
+ shard.WithGCMetrics(
+ &gcMetrics{
+ storage: e.metrics.GC(),
+ shardID: id.String(),
+ },
+ ),
+ )
return opts
}
From 963faa615ab0a70964821b3a3725c27ed5d7f60e Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 4 Oct 2024 14:58:45 +0300
Subject: [PATCH 119/655] [#1413] engine: Cleanup shard error reporting
- `reportShardErrorBackground()` no longer differs from
`reportShardError()`, reflect this in its name;
- reuse common pieces of code to make it simpler.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/engine.go | 28 ++++-------------------
pkg/local_object_storage/engine/shards.go | 2 +-
2 files changed, 5 insertions(+), 25 deletions(-)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index 13efdcb84..f40c9cc04 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -115,10 +115,8 @@ func (e *StorageEngine) moveToDegraded(sh *shard.Shard, errCount uint32, isMeta
log.Info(logs.EngineShardIsMovedInReadonlyModeDueToErrorThreshold)
}
-// reportShardErrorBackground increases shard error counter and logs an error.
-// It is intended to be used from background workers and
-// doesn't change shard mode because of possible deadlocks.
-func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err error) {
+// reportShardErrorByID increases shard error counter and logs an error.
+func (e *StorageEngine) reportShardErrorByID(id string, msg string, err error) {
e.mtx.RLock()
sh, ok := e.shards[id]
e.mtx.RUnlock()
@@ -127,16 +125,7 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er
return
}
- if isLogical(err) {
- e.log.Warn(msg,
- zap.Stringer("shard_id", sh.ID()),
- zap.String("error", err.Error()))
- return
- }
-
- errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err)
+ e.reportShardError(sh, msg, err)
}
// reportShardError checks that the amount of errors doesn't exceed the configured threshold.
@@ -156,16 +145,7 @@ func (e *StorageEngine) reportShardError(
errCount := sh.errorCount.Add(1)
sh.Shard.IncErrorCounter()
- e.reportShardErrorWithFlags(sh.Shard, errCount, msg, err, fields...)
-}
-func (e *StorageEngine) reportShardErrorWithFlags(
- sh *shard.Shard,
- errCount uint32,
- msg string,
- err error,
- fields ...zap.Field,
-) {
sid := sh.ID()
e.log.Warn(msg, append([]zap.Field{
zap.Stringer("shard_id", sid),
@@ -179,7 +159,7 @@ func (e *StorageEngine) reportShardErrorWithFlags(
req := setModeRequest{
errorCount: errCount,
- sh: sh,
+ sh: sh.Shard,
isMeta: errors.As(err, new(metaerr.Error)),
}
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index 96f54369b..c3ccb5276 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -134,7 +134,7 @@ func (e *StorageEngine) createShard(_ context.Context, opts []shard.Option) (*sh
shard.WithExpiredTombstonesCallback(e.processExpiredTombstones),
shard.WithExpiredLocksCallback(e.processExpiredLocks),
shard.WithDeletedLockCallback(e.processDeletedLocks),
- shard.WithReportErrorFunc(e.reportShardErrorBackground),
+ shard.WithReportErrorFunc(e.reportShardErrorByID),
shard.WithZeroSizeCallback(e.processZeroSizeContainers),
shard.WithZeroCountCallback(e.processZeroCountContainers),
)...)
From 4dc9a1b300b2f22dbd9628713917347bc0d3a6ee Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Fri, 4 Oct 2024 15:07:20 +0300
Subject: [PATCH 120/655] [#1413] engine: Remove error counting methods from
Shard
All error counting and handling logic is present on the engine level.
Currently, we pass engine metrics with a shard ID metric to the shard, then
export 3 methods to manipulate these metrics.
In this commit all methods are removed and the error counter is tracked on
the engine level exclusively.
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/engine/engine.go | 2 +-
pkg/local_object_storage/engine/shards.go | 6 +++---
pkg/local_object_storage/shard/metrics.go | 9 ---------
pkg/local_object_storage/shard/shard.go | 12 ------------
4 files changed, 4 insertions(+), 25 deletions(-)
diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go
index f40c9cc04..3183d6137 100644
--- a/pkg/local_object_storage/engine/engine.go
+++ b/pkg/local_object_storage/engine/engine.go
@@ -144,7 +144,7 @@ func (e *StorageEngine) reportShardError(
}
errCount := sh.errorCount.Add(1)
- sh.Shard.IncErrorCounter()
+ e.metrics.IncErrorCounter(sh.ID().String())
sid := sh.ID()
e.log.Warn(msg, append([]zap.Field{
diff --git a/pkg/local_object_storage/engine/shards.go b/pkg/local_object_storage/engine/shards.go
index c3ccb5276..aab2c423c 100644
--- a/pkg/local_object_storage/engine/shards.go
+++ b/pkg/local_object_storage/engine/shards.go
@@ -217,7 +217,7 @@ func (e *StorageEngine) removeShards(ids ...string) {
continue
}
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(id)
ss = append(ss, sh)
delete(e.shards, id)
@@ -318,7 +318,7 @@ func (e *StorageEngine) SetShardMode(id *shard.ID, m mode.Mode, resetErrorCounte
if id.String() == shID {
if resetErrorCounter {
sh.errorCount.Store(0)
- sh.Shard.ClearErrorCounter()
+ e.metrics.ClearErrorCounter(shID)
}
return sh.SetMode(m)
}
@@ -422,7 +422,7 @@ func (e *StorageEngine) deleteShards(ids []*shard.ID) ([]hashedShard, error) {
for _, sh := range ss {
idStr := sh.ID().String()
- sh.DeleteShardMetrics()
+ e.metrics.DeleteShardMetrics(idStr)
delete(e.shards, idStr)
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 6bf198048..91bf8d0ae 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -24,12 +24,6 @@ type MetricsWriter interface {
SetShardID(id string)
// SetReadonly must set shard mode.
SetMode(mode mode.Mode)
- // IncErrorCounter increment error counter.
- IncErrorCounter()
- // ClearErrorCounter clear error counter.
- ClearErrorCounter()
- // DeleteShardMetrics deletes shard metrics from registry.
- DeleteShardMetrics()
// SetContainerObjectsCount sets container object count.
SetContainerObjectsCount(cnrID string, objectType string, value uint64)
// IncContainerObjectsCount increments container object count.
@@ -57,9 +51,6 @@ func (noopMetrics) AddToPayloadSize(int64) {}
func (noopMetrics) IncObjectCounter(string) {}
func (noopMetrics) SetShardID(string) {}
func (noopMetrics) SetMode(mode.Mode) {}
-func (noopMetrics) IncErrorCounter() {}
-func (noopMetrics) ClearErrorCounter() {}
-func (noopMetrics) DeleteShardMetrics() {}
func (noopMetrics) SetContainerObjectsCount(string, string, uint64) {}
func (noopMetrics) IncContainerObjectsCount(string, string) {}
func (noopMetrics) SubContainerObjectsCount(string, string, uint64) {}
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index a57b548be..d7e723733 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -494,18 +494,6 @@ func (s *Shard) setContainerObjectsCount(cnr string, typ string, v uint64) {
}
}
-func (s *Shard) IncErrorCounter() {
- s.cfg.metricsWriter.IncErrorCounter()
-}
-
-func (s *Shard) ClearErrorCounter() {
- s.cfg.metricsWriter.ClearErrorCounter()
-}
-
-func (s *Shard) DeleteShardMetrics() {
- s.cfg.metricsWriter.DeleteShardMetrics()
-}
-
func (s *Shard) SetEvacuationInProgress(val bool) {
s.m.Lock()
defer s.m.Unlock()
From 2f710d8f945f90c5d65e4c9a0c53f0dfdcc4f291 Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Fri, 4 Oct 2024 15:23:22 +0300
Subject: [PATCH 121/655] [#1414] metabase: Check parameter for
`CountAliveObjectsInBucket`
Signed-off-by: Anton Nikiforov
---
pkg/local_object_storage/metabase/list.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 44f25246e..74a529809 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -452,10 +452,11 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
return 0, ErrDegradedMode
}
- cidRaw := prm.BucketName[1:bucketKeySize]
- if cidRaw == nil {
+ if len(prm.BucketName) != bucketKeySize {
return 0, nil
}
+
+ cidRaw := prm.BucketName[1:bucketKeySize]
var count uint64
err := db.boltDB.View(func(tx *bbolt.Tx) error {
bkt := tx.Bucket(prm.BucketName)
From fc032838c037c7c649f80181ca71d8c9f6847e7d Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 7 Oct 2024 11:50:47 +0300
Subject: [PATCH 122/655] [#1215] blobstor/test: Cover iteration behaviour
Signed-off-by: Evgenii Stratonikov
---
.../blobstor/iterate_test.go | 172 ++++++------------
1 file changed, 59 insertions(+), 113 deletions(-)
diff --git a/pkg/local_object_storage/blobstor/iterate_test.go b/pkg/local_object_storage/blobstor/iterate_test.go
index 079728380..195d0bd31 100644
--- a/pkg/local_object_storage/blobstor/iterate_test.go
+++ b/pkg/local_object_storage/blobstor/iterate_test.go
@@ -3,10 +3,13 @@ package blobstor
import (
"context"
"encoding/binary"
+ "errors"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/common"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/memstore"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor/teststore"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -90,117 +93,60 @@ func TestIterateObjects(t *testing.T) {
}
func TestIterate_IgnoreErrors(t *testing.T) {
- t.Skip()
- // dir := t.TempDir()
- //
- // const (
- // smallSize = 512
- // objCount = 5
- // )
- // bsOpts := []Option{
- // WithCompressObjects(true),
- // WithRootPath(dir),
- // WithSmallSizeLimit(smallSize * 2), // + header
- // WithBlobovniczaOpenedCacheSize(1),
- // WithBlobovniczaShallowWidth(1),
- // WithBlobovniczaShallowDepth(1)}
- // bs := New(bsOpts...)
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // addrs := make([]oid.Address, objCount)
- // for i := range addrs {
- // addrs[i] = oidtest.Address()
- //
- // obj := object.New()
- // obj.SetContainerID(addrs[i].Container())
- // obj.SetID(addrs[i].Object())
- // obj.SetPayload(make([]byte, smallSize<<(i%2)))
- //
- // objData, err := obj.Marshal()
- // require.NoError(t, err)
- //
- // _, err = bs.PutRaw(addrs[i], objData, true)
- // require.NoError(t, err)
- // }
- //
- // // Construct corrupted compressed object.
- // buf := bytes.NewBuffer(nil)
- // badObject := make([]byte, smallSize/2+1)
- // enc, err := zstd.NewWriter(buf)
- // require.NoError(t, err)
- // rawData := enc.EncodeAll(badObject, nil)
- // for i := 4; /* magic size */ i < len(rawData); i += 2 {
- // rawData[i] ^= 0xFF
- // }
- // // Will be put uncompressed but fetched as compressed because of magic.
- // _, err = bs.PutRaw(oidtest.Address(), rawData, false)
- // require.NoError(t, err)
- // require.NoError(t, bs.fsTree.Put(oidtest.Address(), rawData))
- //
- // require.NoError(t, bs.Close())
- //
- // // Increase width to have blobovnicza which is definitely empty.
- // b := New(append(bsOpts, WithBlobovniczaShallowWidth(2))...)
- // require.NoError(t, b.Open(false))
- // require.NoError(t, b.Init())
- //
- // var p string
- // for i := 0; i < 2; i++ {
- // bp := filepath.Join(bs.rootPath, "1", strconv.FormatUint(uint64(i), 10))
- // if _, ok := bs.blobovniczas.opened.Get(bp); !ok {
- // p = bp
- // break
- // }
- // }
- // require.NotEqual(t, "", p, "expected to not have at least 1 blobovnicza in cache")
- // require.NoError(t, os.Chmod(p, 0))
- //
- // require.NoError(t, b.Close())
- // require.NoError(t, bs.Open(false))
- // require.NoError(t, bs.Init())
- //
- // var prm IteratePrm
- // prm.SetIterationHandler(func(e IterationElement) error {
- // return nil
- // })
- // _, err = bs.Iterate(prm)
- // require.Error(t, err)
- //
- // prm.IgnoreErrors()
- //
- // t.Run("skip invalid objects", func(t *testing.T) {
- // actual := make([]oid.Address, 0, len(addrs))
- // prm.SetIterationHandler(func(e IterationElement) error {
- // obj := object.New()
- // err := obj.Unmarshal(e.data)
- // if err != nil {
- // return err
- // }
- //
- // var addr oid.Address
- // cnr, _ := obj.ContainerID()
- // addr.SetContainer(cnr)
- // id, _ := obj.ID()
- // addr.SetObject(id)
- // actual = append(actual, addr)
- // return nil
- // })
- //
- // _, err := bs.Iterate(prm)
- // require.NoError(t, err)
- // require.ElementsMatch(t, addrs, actual)
- // })
- // t.Run("return errors from handler", func(t *testing.T) {
- // n := 0
- // expectedErr := errors.New("expected error")
- // prm.SetIterationHandler(func(e IterationElement) error {
- // if n++; n == objCount/2 {
- // return expectedErr
- // }
- // return nil
- // })
- // _, err := bs.Iterate(prm)
- // require.ErrorIs(t, err, expectedErr)
- // })
+ ctx := context.Background()
+
+ myErr := errors.New("unique error")
+ nopIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, nil }
+ panicIter := func(common.IteratePrm) (common.IterateRes, error) { panic("unreachable") }
+ errIter := func(common.IteratePrm) (common.IterateRes, error) { return common.IterateRes{}, myErr }
+
+ var s1iter, s2iter func(common.IteratePrm) (common.IterateRes, error)
+ st1 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s1iter(prm)
+ }))
+ st2 := teststore.New(
+ teststore.WithSubstorage(memstore.New()),
+ teststore.WithIterate(func(prm common.IteratePrm) (common.IterateRes, error) {
+ return s2iter(prm)
+ }))
+
+ bsOpts := []Option{WithStorages([]SubStorage{
+ {Storage: st1},
+ {Storage: st2},
+ })}
+ bs := New(bsOpts...)
+ require.NoError(t, bs.Open(ctx, mode.ReadWrite))
+ require.NoError(t, bs.Init())
+
+ nopHandler := func(e common.IterationElement) error {
+ return nil
+ }
+
+ t.Run("no errors", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("error in the first sub storage, the second one is not iterated over", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = panicIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{Handler: nopHandler})
+ require.ErrorIs(t, err, myErr)
+ })
+
+ t.Run("ignore errors, storage 1", func(t *testing.T) {
+ s1iter = errIter
+ s2iter = nopIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
+ t.Run("ignore errors, storage 2", func(t *testing.T) {
+ s1iter = nopIter
+ s2iter = errIter
+ _, err := bs.Iterate(ctx, common.IteratePrm{IgnoreErrors: true, Handler: nopHandler})
+ require.NoError(t, err)
+ })
}
From a5de74a2492deb9e969c1d722c82bb8b050130df Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Mon, 7 Oct 2024 15:13:44 +0300
Subject: [PATCH 123/655] [#1418] go.mod: Update api-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 1023948bc..f81ba9cf7 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
- git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f
+ git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
diff --git a/go.sum b/go.sum
index 5d719a027..8aa087de4 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f h1:FZvX6CLzTQqMyMvOerIKMvIEJQbOImDjSooZx3AVRyE=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241001111852-d9a604fbc16f/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
From 5fbb2657ca9ca9bbc3aa2ca9239fbb55ea47cdc3 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 10:02:14 +0300
Subject: [PATCH 124/655] [#1419] mod: Bump sdk-go version
Signed-off-by: Dmitrii Stepanov
---
go.mod | 4 ++--
go.sum | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/go.mod b/go.mod
index f81ba9cf7..91cc55a36 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
@@ -60,7 +60,7 @@ require (
require (
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
- github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+ github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
diff --git a/go.sum b/go.sum
index 8aa087de4..728592ea5 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98 h1:ijUci3thz0EwWkuRJDocW5D1RkVAJlt9xNG4CYepC90=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98/go.mod h1:GeNpo12HcEW4J412sH5yf8xFYapxlrt5fcYzRwg0Ino=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
@@ -27,8 +27,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
-github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
From 0c49bca19c82d574c9a93681bda77362edd5b88c Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 7 Oct 2024 18:32:26 +0300
Subject: [PATCH 125/655] [#1415] lens/explorer: Add timeout for opening
database
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/blobovnicza/tui.go | 13 +------------
cmd/frostfs-lens/internal/meta/tui.go | 13 +------------
cmd/frostfs-lens/internal/tui/util.go | 13 +++++++++++++
cmd/frostfs-lens/internal/writecache/tui.go | 13 +------------
4 files changed, 16 insertions(+), 36 deletions(-)
diff --git a/cmd/frostfs-lens/internal/blobovnicza/tui.go b/cmd/frostfs-lens/internal/blobovnicza/tui.go
index eb4a5ff59..4aa281616 100644
--- a/cmd/frostfs-lens/internal/blobovnicza/tui.go
+++ b/cmd/frostfs-lens/internal/blobovnicza/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-lens/internal/meta/tui.go b/cmd/frostfs-lens/internal/meta/tui.go
index 00e8bf117..5a41f945c 100644
--- a/cmd/frostfs-lens/internal/meta/tui.go
+++ b/cmd/frostfs-lens/internal/meta/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -44,7 +43,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -70,13 +69,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
diff --git a/cmd/frostfs-lens/internal/tui/util.go b/cmd/frostfs-lens/internal/tui/util.go
index d4e13b2a9..2d1ab3e33 100644
--- a/cmd/frostfs-lens/internal/tui/util.go
+++ b/cmd/frostfs-lens/internal/tui/util.go
@@ -3,12 +3,25 @@ package tui
import (
"errors"
"strings"
+ "time"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/mr-tron/base58"
+ "go.etcd.io/bbolt"
)
+func OpenDB(path string, writable bool) (*bbolt.DB, error) {
+ db, err := bbolt.Open(path, 0o600, &bbolt.Options{
+ ReadOnly: !writable,
+ Timeout: 100 * time.Millisecond,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
func CIDParser(s string) (any, error) {
data, err := base58.Decode(s)
if err != nil {
diff --git a/cmd/frostfs-lens/internal/writecache/tui.go b/cmd/frostfs-lens/internal/writecache/tui.go
index 6b7532b08..b7e4d7c96 100644
--- a/cmd/frostfs-lens/internal/writecache/tui.go
+++ b/cmd/frostfs-lens/internal/writecache/tui.go
@@ -9,7 +9,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-lens/internal/tui"
"github.com/rivo/tview"
"github.com/spf13/cobra"
- "go.etcd.io/bbolt"
)
var tuiCMD = &cobra.Command{
@@ -43,7 +42,7 @@ func tuiFunc(cmd *cobra.Command, _ []string) {
}
func runTUI(cmd *cobra.Command) error {
- db, err := openDB(false)
+ db, err := tui.OpenDB(vPath, false)
if err != nil {
return fmt.Errorf("couldn't open database: %w", err)
}
@@ -67,13 +66,3 @@ func runTUI(cmd *cobra.Command) error {
app.SetRoot(ui, true).SetFocus(ui)
return app.Run()
}
-
-func openDB(writable bool) (*bbolt.DB, error) {
- db, err := bbolt.Open(vPath, 0o600, &bbolt.Options{
- ReadOnly: !writable,
- })
- if err != nil {
- return nil, err
- }
- return db, nil
-}
From 899cd55c277b04b974e67df29b81146528d5c293 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 13:28:46 +0300
Subject: [PATCH 126/655] [#1412] engine: PutPrm refactoring
Use fields instead of methods.
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/engine/list_test.go | 6 +----
pkg/local_object_storage/engine/put.go | 24 ++++++--------------
2 files changed, 8 insertions(+), 22 deletions(-)
diff --git a/pkg/local_object_storage/engine/list_test.go b/pkg/local_object_storage/engine/list_test.go
index 11a6c7841..d683b5475 100644
--- a/pkg/local_object_storage/engine/list_test.go
+++ b/pkg/local_object_storage/engine/list_test.go
@@ -82,11 +82,7 @@ func TestListWithCursor(t *testing.T) {
for range tt.objectNum {
containerID := cidtest.ID()
obj := testutil.GenerateObjectWithCIDWithPayload(containerID, []byte{'a'})
-
- var prm PutPrm
- prm.WithObject(obj)
-
- err := e.Put(context.Background(), prm)
+ err := e.Put(context.Background(), PutPrm{Object: obj})
require.NoError(t, err)
expected = append(expected, object.Info{Type: objectSDK.TypeRegular, Address: object.AddressOf(obj)})
}
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index bf86402a7..9ce31e791 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -22,7 +22,7 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ Object *objectSDK.Object
}
var errPutShard = errors.New("could not put object to any shard")
@@ -41,13 +41,6 @@ type putToShardRes struct {
err error
}
-// WithObject is a Put option to set object to save.
-//
-// Option is required.
-func (p *PutPrm) WithObject(obj *objectSDK.Object) {
- p.obj = obj
-}
-
// Put saves the object to local storage.
//
// Returns any error encountered that
@@ -59,7 +52,7 @@ func (p *PutPrm) WithObject(obj *objectSDK.Object) {
func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Put",
trace.WithAttributes(
- attribute.String("address", object.AddressOf(prm.obj).EncodeToString()),
+ attribute.String("address", object.AddressOf(prm.Object).EncodeToString()),
))
defer span.End()
@@ -74,13 +67,13 @@ func (e *StorageEngine) Put(ctx context.Context, prm PutPrm) (err error) {
func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
defer elapsed("Put", e.metrics.AddMethodDuration)()
- addr := object.AddressOf(prm.obj)
+ addr := object.AddressOf(prm.Object)
// In #1146 this check was parallelized, however, it became
// much slower on fast machines for 4 shards.
var parent oid.Address
- if prm.obj.ECHeader() != nil {
- parent.SetObject(prm.obj.ECHeader().Parent())
+ if prm.Object.ECHeader() != nil {
+ parent.SetObject(prm.Object.ECHeader().Parent())
parent.SetContainer(addr.Container())
}
var shPrm shard.ExistsPrm
@@ -113,7 +106,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, pool, addr, prm.obj)
+ shRes = e.putToShard(ctx, sh, pool, addr, prm.Object)
return shRes.status != putToShardUnknown
})
switch shRes.status {
@@ -202,8 +195,5 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
// Put writes provided object to local storage.
func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
- var putPrm PutPrm
- putPrm.WithObject(obj)
-
- return storage.Put(ctx, putPrm)
+ return storage.Put(ctx, PutPrm{Object: obj})
}
From 1b520f79733e3628af5d47b597b5baff60f3f36a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 15:27:06 +0300
Subject: [PATCH 127/655] [#1412] engine: Add `IsIndexedContainer` flag
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/core/container/info.go | 103 ++++++++++++++++++
pkg/core/container/util.go | 12 ++
.../engine/control_test.go | 2 +-
.../engine/delete_test.go | 8 +-
.../engine/engine_test.go | 2 +-
.../engine/inhume_test.go | 2 +-
pkg/local_object_storage/engine/lock_test.go | 14 +--
pkg/local_object_storage/engine/put.go | 7 +-
pkg/local_object_storage/engine/tree_test.go | 2 +-
pkg/services/object/common/writer/ec.go | 3 +-
pkg/services/object/common/writer/local.go | 9 +-
pkg/services/object/common/writer/writer.go | 3 +-
pkg/services/object/put/single.go | 11 +-
pkg/services/policer/check.go | 2 +-
pkg/services/policer/ec.go | 39 ++++---
pkg/services/replicator/pull.go | 3 +-
pkg/services/replicator/put.go | 3 +-
pkg/services/replicator/task.go | 3 +
19 files changed, 182 insertions(+), 50 deletions(-)
create mode 100644 pkg/core/container/info.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 47649c88b..5c322886b 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object) error {
- return engine.Put(ctx, e.engine, o)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error {
+ return engine.Put(ctx, e.engine, o, indexableContainer)
}
diff --git a/pkg/core/container/info.go b/pkg/core/container/info.go
new file mode 100644
index 000000000..62cc21553
--- /dev/null
+++ b/pkg/core/container/info.go
@@ -0,0 +1,103 @@
+package container
+
+import (
+ "sync"
+
+ utilSync "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/sync"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+)
+
+type Info struct {
+ Indexed bool
+ Removed bool
+}
+
+type infoValue struct {
+ info Info
+ err error
+}
+
+type InfoProvider interface {
+ Info(id cid.ID) (Info, error)
+}
+
+type infoProvider struct {
+ mtx *sync.RWMutex
+ cache map[cid.ID]infoValue
+ kl *utilSync.KeyLocker[cid.ID]
+
+ source Source
+ sourceErr error
+ sourceOnce *sync.Once
+ sourceFactory func() (Source, error)
+}
+
+func NewInfoProvider(sourceFactory func() (Source, error)) InfoProvider {
+ return &infoProvider{
+ mtx: &sync.RWMutex{},
+ cache: make(map[cid.ID]infoValue),
+ sourceOnce: &sync.Once{},
+ kl: utilSync.NewKeyLocker[cid.ID](),
+ sourceFactory: sourceFactory,
+ }
+}
+
+func (r *infoProvider) Info(id cid.ID) (Info, error) {
+ v, found := r.tryGetFromCache(id)
+ if found {
+ return v.info, v.err
+ }
+
+ return r.getFromSource(id)
+}
+
+func (r *infoProvider) tryGetFromCache(id cid.ID) (infoValue, bool) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ value, found := r.cache[id]
+ return value, found
+}
+
+func (r *infoProvider) getFromSource(id cid.ID) (Info, error) {
+ r.kl.Lock(id)
+ defer r.kl.Unlock(id)
+
+ if v, ok := r.tryGetFromCache(id); ok {
+ return v.info, v.err
+ }
+
+ r.sourceOnce.Do(func() {
+ r.source, r.sourceErr = r.sourceFactory()
+ })
+ if r.sourceErr != nil {
+ return Info{}, r.sourceErr
+ }
+
+ cnr, err := r.source.Get(id)
+ var civ infoValue
+ if err != nil {
+ if client.IsErrContainerNotFound(err) {
+ removed, err := WasRemoved(r.source, id)
+ if err != nil {
+ civ.err = err
+ } else {
+ civ.info.Removed = removed
+ }
+ } else {
+ civ.err = err
+ }
+ } else {
+ civ.info.Indexed = IsIndexedContainer(cnr.Value)
+ }
+ r.putToCache(id, civ)
+ return civ.info, civ.err
+}
+
+func (r *infoProvider) putToCache(id cid.ID, ct infoValue) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ r.cache[id] = ct
+}
diff --git a/pkg/core/container/util.go b/pkg/core/container/util.go
index 98919284e..d27556807 100644
--- a/pkg/core/container/util.go
+++ b/pkg/core/container/util.go
@@ -4,6 +4,7 @@ import (
"errors"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
@@ -20,3 +21,14 @@ func WasRemoved(s Source, cid cid.ID) (bool, error) {
}
return false, err
}
+
+// IsIndexedContainer returns True if container attributes should be indexed.
+func IsIndexedContainer(cnr containerSDK.Container) bool {
+ var isS3Container bool
+ cnr.IterateAttributes(func(key, _ string) {
+ if key == ".s3-location-constraint" {
+ isS3Container = true
+ }
+ })
+ return !isS3Container
+}
diff --git a/pkg/local_object_storage/engine/control_test.go b/pkg/local_object_storage/engine/control_test.go
index 2de92ae84..83babeca3 100644
--- a/pkg/local_object_storage/engine/control_test.go
+++ b/pkg/local_object_storage/engine/control_test.go
@@ -171,7 +171,7 @@ func TestExecBlocks(t *testing.T) {
addr := object.AddressOf(obj)
- require.NoError(t, Put(context.Background(), e, obj))
+ require.NoError(t, Put(context.Background(), e, obj, false))
// block executions
errBlock := errors.New("block exec err")
diff --git a/pkg/local_object_storage/engine/delete_test.go b/pkg/local_object_storage/engine/delete_test.go
index 4a6758012..0904c9820 100644
--- a/pkg/local_object_storage/engine/delete_test.go
+++ b/pkg/local_object_storage/engine/delete_test.go
@@ -58,9 +58,9 @@ func TestDeleteBigObject(t *testing.T) {
defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
@@ -126,9 +126,9 @@ func TestDeleteBigObjectWithoutGC(t *testing.T) {
defer e.Close(context.Background())
for i := range children {
- require.NoError(t, Put(context.Background(), e, children[i]))
+ require.NoError(t, Put(context.Background(), e, children[i], false))
}
- require.NoError(t, Put(context.Background(), e, link))
+ require.NoError(t, Put(context.Background(), e, link, false))
addrParent := object.AddressOf(parent)
checkGetError[*objectSDK.SplitInfoError](t, e, addrParent, true)
diff --git a/pkg/local_object_storage/engine/engine_test.go b/pkg/local_object_storage/engine/engine_test.go
index 525e17f34..88c523b76 100644
--- a/pkg/local_object_storage/engine/engine_test.go
+++ b/pkg/local_object_storage/engine/engine_test.go
@@ -54,7 +54,7 @@ func benchmarkExists(b *testing.B, shardNum int) {
addr := oidtest.Address()
for range 100 {
obj := testutil.GenerateObjectWithCID(cidtest.ID())
- err := Put(context.Background(), e, obj)
+ err := Put(context.Background(), e, obj, false)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index 9daa113f8..f87679253 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -40,7 +40,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
e := testNewEngine(t).setShardsNum(t, 1).engine
defer e.Close(context.Background())
- err := Put(context.Background(), e, parent)
+ err := Put(context.Background(), e, parent, false)
require.NoError(t, err)
var inhumePrm InhumePrm
diff --git a/pkg/local_object_storage/engine/lock_test.go b/pkg/local_object_storage/engine/lock_test.go
index 9e6758fb4..3702f567f 100644
--- a/pkg/local_object_storage/engine/lock_test.go
+++ b/pkg/local_object_storage/engine/lock_test.go
@@ -97,7 +97,7 @@ func TestLockUserScenario(t *testing.T) {
id, _ := obj.ID()
objAddr.SetObject(id)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -105,7 +105,7 @@ func TestLockUserScenario(t *testing.T) {
locker.WriteMembers([]oid.ID{id})
objectSDK.WriteLock(lockerObj, locker)
- err = Put(context.Background(), e, lockerObj)
+ err = Put(context.Background(), e, lockerObj, false)
require.NoError(t, err)
err = e.Lock(context.Background(), cnr, lockerID, []oid.ID{id})
@@ -124,7 +124,7 @@ func TestLockUserScenario(t *testing.T) {
tombObj.SetID(tombForLockID)
tombObj.SetAttributes(a)
- err = Put(context.Background(), e, tombObj)
+ err = Put(context.Background(), e, tombObj, false)
require.NoError(t, err)
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
@@ -177,7 +177,7 @@ func TestLockExpiration(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
@@ -189,7 +189,7 @@ func TestLockExpiration(t *testing.T) {
lock.SetType(objectSDK.TypeLock)
lock.SetAttributes(a)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
@@ -254,14 +254,14 @@ func TestLockForceRemoval(t *testing.T) {
// 1.
obj := testutil.GenerateObjectWithCID(cnr)
- err = Put(context.Background(), e, obj)
+ err = Put(context.Background(), e, obj, false)
require.NoError(t, err)
// 2.
lock := testutil.GenerateObjectWithCID(cnr)
lock.SetType(objectSDK.TypeLock)
- err = Put(context.Background(), e, lock)
+ err = Put(context.Background(), e, lock, false)
require.NoError(t, err)
id, _ := obj.ID()
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 9ce31e791..41e566560 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -22,7 +22,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- Object *objectSDK.Object
+ Object *objectSDK.Object
+ IsIndexedContainer bool
}
var errPutShard = errors.New("could not put object to any shard")
@@ -194,6 +195,6 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
}
// Put writes provided object to local storage.
-func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object) error {
- return storage.Put(ctx, PutPrm{Object: obj})
+func Put(ctx context.Context, storage *StorageEngine, obj *objectSDK.Object, indexedContainer bool) error {
+ return storage.Put(ctx, PutPrm{Object: obj, IsIndexedContainer: indexedContainer})
}
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 6f694f082..21fcce415 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -37,7 +37,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
for i := range objCount {
obj := testutil.GenerateObjectWithCID(cid)
testutil.AddAttribute(obj, pilorama.AttributeFilename, strconv.Itoa(i))
- err := Put(context.Background(), te.ng, obj)
+ err := Put(context.Background(), te.ng, obj, false)
if err != nil {
b.Fatal(err)
}
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index fb0a8e4e5..6b6a14cc0 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -310,7 +310,8 @@ func (e *ECWriter) putECPartToNode(ctx context.Context, obj *objectSDK.Object, n
func (e *ECWriter) writePartLocal(ctx context.Context, obj *objectSDK.Object) error {
var err error
localTarget := LocalTarget{
- Storage: e.Config.LocalStore,
+ Storage: e.Config.LocalStore,
+ Container: e.Container,
}
completed := make(chan interface{})
if poolErr := e.Config.LocalPool.Submit(func() {
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index 02fd25b9e..e219b44dd 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -4,7 +4,9 @@ import (
"context"
"fmt"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
@@ -13,7 +15,7 @@ import (
type ObjectStorage interface {
// Put must save passed object
// and return any appeared error.
- Put(context.Context, *objectSDK.Object) error
+ Put(context.Context, *objectSDK.Object, bool) error
// Delete must delete passed objects
// and return any appeared error.
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
@@ -25,7 +27,8 @@ type ObjectStorage interface {
}
type LocalTarget struct {
- Storage ObjectStorage
+ Storage ObjectStorage
+ Container containerSDK.Container
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
@@ -44,7 +47,7 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.Storage.Put(ctx, obj); err != nil {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
}
return nil
diff --git a/pkg/services/object/common/writer/writer.go b/pkg/services/object/common/writer/writer.go
index 3d50da988..0e4c4d9c6 100644
--- a/pkg/services/object/common/writer/writer.go
+++ b/pkg/services/object/common/writer/writer.go
@@ -150,7 +150,8 @@ func newDefaultObjectWriter(prm *Params, forECPlacement bool) transformer.Object
nodeTargetInitializer: func(node NodeDescriptor) preparedObjectTarget {
if node.Local {
return LocalTarget{
- Storage: prm.Config.LocalStore,
+ Storage: prm.Config.LocalStore,
+ Container: prm.Container,
}
}
diff --git a/pkg/services/object/put/single.go b/pkg/services/object/put/single.go
index 9b4163268..5f9b5d110 100644
--- a/pkg/services/object/put/single.go
+++ b/pkg/services/object/put/single.go
@@ -177,7 +177,7 @@ func (s *Service) saveToREPReplicas(ctx context.Context, placement putSinglePlac
}
return iter.ForEachNode(ctx, func(ctx context.Context, nd objectwriter.NodeDescriptor) error {
- return s.saveToPlacementNode(ctx, &nd, obj, signer, meta)
+ return s.saveToPlacementNode(ctx, &nd, obj, signer, meta, placement.container)
})
}
@@ -263,10 +263,10 @@ func (s *Service) getPutSinglePlacementOptions(obj *objectSDK.Object, copiesNumb
}
func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwriter.NodeDescriptor, obj *objectSDK.Object,
- signer *putSingleRequestSigner, meta object.ContentMeta,
+ signer *putSingleRequestSigner, meta object.ContentMeta, container containerSDK.Container,
) error {
if nodeDesc.Local {
- return s.saveLocal(ctx, obj, meta)
+ return s.saveLocal(ctx, obj, meta, container)
}
var info client.NodeInfo
@@ -281,9 +281,10 @@ func (s *Service) saveToPlacementNode(ctx context.Context, nodeDesc *objectwrite
return s.redirectPutSingleRequest(ctx, signer, obj, info, c)
}
-func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta) error {
+func (s *Service) saveLocal(ctx context.Context, obj *objectSDK.Object, meta object.ContentMeta, container containerSDK.Container) error {
localTarget := &objectwriter.LocalTarget{
- Storage: s.Config.LocalStore,
+ Storage: s.Config.LocalStore,
+ Container: container,
}
return localTarget.WriteObject(ctx, obj, meta)
}
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 06282bd8d..401977f66 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -37,7 +37,7 @@ func (p *Policer) processObject(ctx context.Context, objInfo objectcore.Info) er
policy := cnr.Value.PlacementPolicy()
if policycore.IsECPlacement(policy) {
- return p.processECContainerObject(ctx, objInfo, policy)
+ return p.processECContainerObject(ctx, objInfo, cnr.Value)
}
return p.processRepContainerObject(ctx, objInfo, policy)
}
diff --git a/pkg/services/policer/ec.go b/pkg/services/policer/ec.go
index e822d1c09..6d2c153c9 100644
--- a/pkg/services/policer/ec.go
+++ b/pkg/services/policer/ec.go
@@ -10,6 +10,7 @@ import (
objectcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/replicator"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
@@ -27,11 +28,11 @@ type ecChunkProcessResult struct {
var errInvalidECPlacement = errors.New("invalid EC placement: EC placement must have one placement vector with at least one node")
-func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
+func (p *Policer) processECContainerObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
if objInfo.ECInfo == nil {
- return p.processECContainerRepObject(ctx, objInfo, policy)
+ return p.processECContainerRepObject(ctx, objInfo, cnr.PlacementPolicy())
}
- return p.processECContainerECObject(ctx, objInfo, policy)
+ return p.processECContainerECObject(ctx, objInfo, cnr)
}
// processECContainerRepObject processes non erasure coded objects in EC container: tombstones, locks and linking objects.
@@ -67,8 +68,8 @@ func (p *Policer) processECContainerRepObject(ctx context.Context, objInfo objec
return nil
}
-func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, policy netmap.PlacementPolicy) error {
- nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, policy)
+func (p *Policer) processECContainerECObject(ctx context.Context, objInfo objectcore.Info, cnr containerSDK.Container) error {
+ nn, err := p.placementBuilder.BuildPlacement(objInfo.Address.Container(), &objInfo.ECInfo.ParentID, cnr.PlacementPolicy())
if err != nil {
return fmt.Errorf("%s: %w", logs.PolicerCouldNotBuildPlacementVectorForObject, err)
}
@@ -85,9 +86,9 @@ func (p *Policer) processECContainerECObject(ctx context.Context, objInfo object
res := p.processECChunk(ctx, objInfo, nn[0])
if !res.validPlacement {
// drop local chunk only if all required chunks are in place
- res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0])
+ res.removeLocal = res.removeLocal && p.pullRequiredECChunks(ctx, objInfo, nn[0], cnr)
}
- p.adjustECPlacement(ctx, objInfo, nn[0], policy)
+ p.adjustECPlacement(ctx, objInfo, nn[0], cnr)
if res.removeLocal {
p.log.Info(logs.PolicerRedundantLocalObjectCopyDetected, zap.Stringer("object", objInfo.Address))
@@ -138,7 +139,7 @@ func (p *Policer) processECChunk(ctx context.Context, objInfo objectcore.Info, n
}
}
-func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo) bool {
+func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) bool {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -169,8 +170,9 @@ func (p *Policer) pullRequiredECChunks(ctx context.Context, objInfo objectcore.I
addr.SetContainer(objInfo.Address.Container())
addr.SetObject(indexToObjectID[index])
p.replicator.HandlePullTask(ctx, replicator.Task{
- Addr: addr,
- Nodes: candidates,
+ Addr: addr,
+ Nodes: candidates,
+ Container: cnr,
})
}
// there was some missing chunks, it's not ok
@@ -245,7 +247,7 @@ func (p *Policer) resolveRemoteECChunks(ctx context.Context, parentAddress oid.A
return true
}
-func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, policy netmap.PlacementPolicy) {
+func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info, nodes []netmap.NodeInfo, cnr containerSDK.Container) {
var parentAddress oid.Address
parentAddress.SetContainer(objInfo.Address.Container())
parentAddress.SetObject(objInfo.ECInfo.ParentID)
@@ -292,7 +294,7 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
if !restore || uint32(len(resolved)) == objInfo.ECInfo.Total {
return
}
- if objInfo.ECInfo.Total-uint32(len(resolved)) > policy.ReplicaDescriptor(0).GetECParityCount() {
+ if objInfo.ECInfo.Total-uint32(len(resolved)) > cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount() {
var found []uint32
for i := range resolved {
found = append(found, i)
@@ -300,11 +302,13 @@ func (p *Policer) adjustECPlacement(ctx context.Context, objInfo objectcore.Info
p.log.Error(logs.PolicerCouldNotRestoreObjectNotEnoughChunks, zap.Stringer("object", parentAddress), zap.Uint32s("found_chunks", found))
return
}
- p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, policy)
+ p.restoreECObject(ctx, objInfo, parentAddress, nodes, resolved, chunkIDs, cnr)
}
-func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID, policy netmap.PlacementPolicy) {
- c, err := erasurecode.NewConstructor(int(policy.ReplicaDescriptor(0).GetECDataCount()), int(policy.ReplicaDescriptor(0).GetECParityCount()))
+func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info, parentAddress oid.Address, nodes []netmap.NodeInfo, existedChunks map[uint32][]netmap.NodeInfo, chunkIDs map[uint32]oid.ID,
+ cnr containerSDK.Container,
+) {
+ c, err := erasurecode.NewConstructor(int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECDataCount()), int(cnr.PlacementPolicy().ReplicaDescriptor(0).GetECParityCount()))
if err != nil {
p.log.Error(logs.PolicerFailedToRestoreObject, zap.Stringer("object", parentAddress), zap.Error(err))
return
@@ -339,8 +343,9 @@ func (p *Policer) restoreECObject(ctx context.Context, objInfo objectcore.Info,
targetNode := nodes[idx%len(nodes)]
if p.cfg.netmapKeys.IsLocalKey(targetNode.PublicKey()) {
p.replicator.HandleLocalPutTask(ctx, replicator.Task{
- Addr: addr,
- Obj: part,
+ Addr: addr,
+ Obj: part,
+ Container: cnr,
})
} else {
p.replicator.HandleReplicationTask(ctx, replicator.Task{
diff --git a/pkg/services/replicator/pull.go b/pkg/services/replicator/pull.go
index d178700f6..7e7090237 100644
--- a/pkg/services/replicator/pull.go
+++ b/pkg/services/replicator/pull.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
getsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/get"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
@@ -62,7 +63,7 @@ func (p *Replicator) HandlePullTask(ctx context.Context, task Task) {
return
}
- err := engine.Put(ctx, p.localStorage, obj)
+ err := engine.Put(ctx, p.localStorage, obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
diff --git a/pkg/services/replicator/put.go b/pkg/services/replicator/put.go
index c06ec3f65..537833516 100644
--- a/pkg/services/replicator/put.go
+++ b/pkg/services/replicator/put.go
@@ -5,6 +5,7 @@ import (
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ containerCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
@@ -37,7 +38,7 @@ func (p *Replicator) HandleLocalPutTask(ctx context.Context, task Task) {
return
}
- err := engine.Put(ctx, p.localStorage, task.Obj)
+ err := engine.Put(ctx, p.localStorage, task.Obj, containerCore.IsIndexedContainer(task.Container))
if err != nil {
p.log.Error(logs.ReplicatorCouldNotPutObjectToLocalStorage,
zap.Stringer("object", task.Addr),
diff --git a/pkg/services/replicator/task.go b/pkg/services/replicator/task.go
index d2b5b2506..a03f8dcaa 100644
--- a/pkg/services/replicator/task.go
+++ b/pkg/services/replicator/task.go
@@ -1,6 +1,7 @@
package replicator
import (
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -16,4 +17,6 @@ type Task struct {
Obj *objectSDK.Object
// Nodes is a list of potential object holders.
Nodes []netmap.NodeInfo
+
+ Container containerSDK.Container
}
From be744ae3e6eadb5b02952cbb110ef59f33f799bc Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 1 Oct 2024 18:19:12 +0300
Subject: [PATCH 128/655] [#1412] metabase: Index attributes for indexed
containers
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 4 +-
pkg/local_object_storage/engine/evacuate.go | 11 +--
pkg/local_object_storage/engine/put.go | 5 +-
pkg/local_object_storage/metabase/delete.go | 45 ++++++++++
.../metabase/delete_meta_test.go | 85 +++++++++++++++++++
pkg/local_object_storage/metabase/put.go | 80 ++++++++++++++++-
pkg/local_object_storage/metabase/util.go | 8 +-
pkg/local_object_storage/shard/put.go | 8 +-
8 files changed, 231 insertions(+), 15 deletions(-)
create mode 100644 pkg/local_object_storage/metabase/delete_meta_test.go
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 5c322886b..f2c4bff1d 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -535,6 +535,6 @@ func (e engineWithoutNotifications) Lock(ctx context.Context, locker oid.Address
return e.engine.Lock(ctx, locker.Container(), locker.Object(), toLock)
}
-func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexableContainer bool) error {
- return engine.Put(ctx, e.engine, o, indexableContainer)
+func (e engineWithoutNotifications) Put(ctx context.Context, o *objectSDK.Object, indexedContainer bool) error {
+ return engine.Put(ctx, e.engine, o, indexedContainer)
}
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index a618ff274..1baf237f9 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -18,6 +18,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -471,7 +472,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
default:
}
egObject.Go(func() error {
- err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate)
+ err := e.evacuateObject(ctx, shardID, objInfo, prm, res, shards, shardsToEvacuate, c.Value)
if err != nil {
cancel(err)
}
@@ -744,7 +745,7 @@ func (e *StorageEngine) getActualShards(shardIDs []string, prm EvacuateShardPrm)
}
func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objInfo *object.Info, prm EvacuateShardPrm, res *EvacuateShardRes,
- getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard,
+ getShards func() []pooledShard, shardsToEvacuate map[string]*shard.Shard, cnr containerSDK.Container,
) error {
ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.evacuateObjects")
defer span.End()
@@ -773,7 +774,7 @@ func (e *StorageEngine) evacuateObject(ctx context.Context, shardID string, objI
return err
}
- evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res)
+ evacuatedLocal, err := e.tryEvacuateObjectLocal(ctx, addr, getRes.Object(), shardsToEvacuate[shardID], shards, shardsToEvacuate, res, cnr)
if err != nil {
return err
}
@@ -817,7 +818,7 @@ func (e *StorageEngine) isNotRepOne(c *container.Container) bool {
}
func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Address, object *objectSDK.Object, sh *shard.Shard,
- shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes,
+ shards []pooledShard, shardsToEvacuate map[string]*shard.Shard, res *EvacuateShardRes, cnr containerSDK.Container,
) (bool, error) {
hrw.SortHasherSliceByValue(shards, hrw.StringHash(addr.EncodeToString()))
for j := range shards {
@@ -830,7 +831,7 @@ func (e *StorageEngine) tryEvacuateObjectLocal(ctx context.Context, addr oid.Add
if _, ok := shardsToEvacuate[shards[j].ID().String()]; ok {
continue
}
- switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object).status {
+ switch e.putToShard(ctx, shards[j].hashedShard, shards[j].pool, addr, object, container.IsIndexedContainer(cnr)).status {
case putToShardSuccess:
res.objEvacuated.Add(1)
e.log.Debug(logs.EngineObjectIsMovedToAnotherShard,
diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go
index 41e566560..a50d80b75 100644
--- a/pkg/local_object_storage/engine/put.go
+++ b/pkg/local_object_storage/engine/put.go
@@ -107,7 +107,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// Shard was concurrently removed, skip.
return false
}
- shRes = e.putToShard(ctx, sh, pool, addr, prm.Object)
+ shRes = e.putToShard(ctx, sh, pool, addr, prm.Object, prm.IsIndexedContainer)
return shRes.status != putToShardUnknown
})
switch shRes.status {
@@ -125,7 +125,7 @@ func (e *StorageEngine) put(ctx context.Context, prm PutPrm) error {
// putToShard puts object to sh.
// Return putToShardStatus and error if it is necessary to propagate an error upper.
func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool util.WorkerPool,
- addr oid.Address, obj *objectSDK.Object,
+ addr oid.Address, obj *objectSDK.Object, isIndexedContainer bool,
) (res putToShardRes) {
exitCh := make(chan struct{})
@@ -158,6 +158,7 @@ func (e *StorageEngine) putToShard(ctx context.Context, sh hashedShard, pool uti
var putPrm shard.PutPrm
putPrm.SetObject(obj)
+ putPrm.SetIndexAttributes(isIndexedContainer)
_, err = sh.Put(ctx, putPrm)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 4ad11164f..3add1f268 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -338,6 +338,11 @@ func (db *DB) deleteObject(
return fmt.Errorf("can't remove list indexes: %w", err)
}
+ err = updateFKBTIndexes(tx, obj, delFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("can't remove fake bucket tree indexes: %w", err)
+ }
+
if isParent {
// remove record from the garbage bucket, because regular object deletion does nothing for virtual object
garbageBKT := tx.Bucket(garbageBucketName)
@@ -415,6 +420,46 @@ func delListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return nil
}
+func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt := tx.Bucket(item.name)
+ if bkt == nil {
+ return nil
+ }
+
+ fkbtRoot := bkt.Bucket(item.key)
+ if fkbtRoot == nil {
+ return nil
+ }
+
+ if err := fkbtRoot.Delete(item.val); err != nil {
+ return err
+ }
+
+ if hasAnyItem(fkbtRoot) {
+ return nil
+ }
+
+ if err := bkt.DeleteBucket(item.key); err != nil {
+ return err
+ }
+
+ if hasAnyItem(bkt) {
+ return nil
+ }
+
+ return tx.DeleteBucket(item.name)
+}
+
+func hasAnyItem(b *bbolt.Bucket) bool {
+ var hasAnyItem bool
+ c := b.Cursor()
+ for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ hasAnyItem = true
+ break
+ }
+ return hasAnyItem
+}
+
func delUniqueIndexes(tx *bbolt.Tx, obj *objectSDK.Object, isParent bool) error {
addr := object.AddressOf(obj)
diff --git a/pkg/local_object_storage/metabase/delete_meta_test.go b/pkg/local_object_storage/metabase/delete_meta_test.go
new file mode 100644
index 000000000..cdfe2a203
--- /dev/null
+++ b/pkg/local_object_storage/metabase/delete_meta_test.go
@@ -0,0 +1,85 @@
+package meta
+
+import (
+ "bytes"
+ "context"
+ "path/filepath"
+ "testing"
+
+ objectCore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ "github.com/stretchr/testify/require"
+ "go.etcd.io/bbolt"
+)
+
+func TestPutDeleteIndexAttributes(t *testing.T) {
+ db := New([]Option{
+ WithPath(filepath.Join(t.TempDir(), "metabase")),
+ WithPermissions(0o600),
+ WithEpochState(epochState{}),
+ }...)
+
+ require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
+ require.NoError(t, db.Init())
+ defer func() { require.NoError(t, db.Close()) }()
+
+ cnr := cidtest.ID()
+ obj1 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj1, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj1, objectSDK.AttributeFilePath, "/path/to/object")
+
+ var putPrm PutPrm
+ putPrm.SetObject(obj1)
+
+ _, err := db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+
+ obj2 := testutil.GenerateObjectWithCID(cnr)
+ testutil.AddAttribute(obj2, "S3-Access-Box-CRDT-Name", "CRDT-Name")
+ testutil.AddAttribute(obj2, objectSDK.AttributeFilePath, "/path/to/object")
+
+ putPrm.SetObject(obj2)
+ putPrm.SetIndexAttributes(true)
+
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
+
+ objKey := objectKey(objectCore.AddressOf(obj2).Object(), make([]byte, objectKeySize))
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("CRDT-Name"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.NotNil(t, b)
+ b = b.Bucket([]byte("/path/to/object"))
+ require.NotNil(t, b)
+ require.True(t, bytes.Equal(zeroValue, b.Get(objKey)))
+ return nil
+ }))
+
+ var dPrm DeletePrm
+ dPrm.SetAddresses(objectCore.AddressOf(obj1), objectCore.AddressOf(obj2))
+ _, err = db.Delete(context.Background(), dPrm)
+ require.NoError(t, err)
+
+ require.NoError(t, db.boltDB.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(attributeBucketName(cnr, "S3-Access-Box-CRDT-Name", make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ b = tx.Bucket(attributeBucketName(cnr, objectSDK.AttributeFilePath, make([]byte, bucketKeySize)))
+ require.Nil(t, b)
+ return nil
+ }))
+}
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 087529895..0c14196b7 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -35,6 +35,8 @@ type PutPrm struct {
obj *objectSDK.Object
id []byte
+
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -52,6 +54,10 @@ func (p *PutPrm) SetStorageID(id []byte) {
p.id = id
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
var (
ErrUnknownObjectType = errors.New("unknown object type")
ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
@@ -90,7 +96,7 @@ func (db *DB) Put(ctx context.Context, prm PutPrm) (res PutRes, err error) {
err = db.boltDB.Batch(func(tx *bbolt.Tx) error {
var e error
- res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch)
+ res, e = db.put(tx, prm.obj, prm.id, nil, currEpoch, prm.indexAttributes)
return e
})
if err == nil {
@@ -108,6 +114,7 @@ func (db *DB) put(tx *bbolt.Tx,
id []byte,
si *objectSDK.SplitInfo,
currEpoch uint64,
+ indexAttributes bool,
) (PutRes, error) {
cnr, ok := obj.ContainerID()
if !ok {
@@ -129,7 +136,7 @@ func (db *DB) put(tx *bbolt.Tx,
return PutRes{}, db.updateObj(tx, obj, id, si, isParent)
}
- return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch)
+ return PutRes{Inserted: true}, db.insertObject(tx, obj, id, si, isParent, cnr, currEpoch, indexAttributes)
}
func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool) error {
@@ -152,14 +159,14 @@ func (db *DB) updateObj(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *obje
return nil
}
-func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64) error {
+func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *objectSDK.SplitInfo, isParent bool, cnr cid.ID, currEpoch uint64, indexAttributes bool) error {
if par := obj.Parent(); par != nil && !isParent { // limit depth by two
parentSI, err := splitInfoFromObject(obj)
if err != nil {
return err
}
- _, err = db.put(tx, par, id, parentSI, currEpoch)
+ _, err = db.put(tx, par, id, parentSI, currEpoch, indexAttributes)
if err != nil {
return err
}
@@ -175,6 +182,13 @@ func (db *DB) insertObject(tx *bbolt.Tx, obj *objectSDK.Object, id []byte, si *o
return fmt.Errorf("can't put list indexes: %w", err)
}
+ if indexAttributes {
+ err = updateFKBTIndexes(tx, obj, putFKBTIndexItem)
+ if err != nil {
+ return fmt.Errorf("can't put fake bucket tree indexes: %w", err)
+ }
+ }
+
// update container volume size estimation
if obj.Type() == objectSDK.TypeRegular && !isParent {
err = changeContainerSize(tx, cnr, obj.PayloadSize(), true)
@@ -381,6 +395,50 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
return nil
}
+var indexedAttributes = map[string]struct{}{
+ "S3-Access-Box-CRDT-Name": {},
+ objectSDK.AttributeFilePath: {},
+}
+
+// IsAtrributeIndexed returns true if attribute is indexed by metabase.
+func IsAtrributeIndexed(attr string) bool {
+ _, found := indexedAttributes[attr]
+ return found
+}
+
+func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
+ id, _ := obj.ID()
+ cnr, _ := obj.ContainerID()
+ objKey := objectKey(id, make([]byte, objectKeySize))
+
+ key := make([]byte, bucketKeySize)
+ var attrs []objectSDK.Attribute
+ if obj.ECHeader() != nil {
+ attrs = obj.ECHeader().ParentAttributes()
+ objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
+ } else {
+ attrs = obj.Attributes()
+ }
+
+ // user specified attributes
+ for i := range attrs {
+ if !IsAtrributeIndexed(attrs[i].Key()) {
+ continue
+ }
+ key = attributeBucketName(cnr, attrs[i].Key(), key)
+ err := f(tx, namedBucketItem{
+ name: key,
+ key: []byte(attrs[i].Value()),
+ val: objKey,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func hasExpirationEpoch(obj *objectSDK.Object) (uint64, bool) {
attributes := obj.Attributes()
if ech := obj.ECHeader(); ech != nil {
@@ -425,6 +483,20 @@ func putUniqueIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
return updateUniqueIndexItem(tx, item, func(_, val []byte) ([]byte, error) { return val, nil })
}
+func putFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
+ bkt, err := createBucketLikelyExists(tx, item.name)
+ if err != nil {
+ return fmt.Errorf("can't create index %v: %w", item.name, err)
+ }
+
+ fkbtRoot, err := createBucketLikelyExists(bkt, item.key)
+ if err != nil {
+ return fmt.Errorf("can't create fake bucket tree index %v: %w", item.key, err)
+ }
+
+ return fkbtRoot.Put(item.val, zeroValue)
+}
+
func putListIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
bkt, err := createBucketLikelyExists(tx, item.name)
if err != nil {
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index eef7210dc..4679de332 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -99,7 +99,6 @@ const (
// userAttributePrefix was used for prefixing FKBT index buckets containing objects.
// Key: attribute value
// Value: bucket containing object IDs as keys
- // removed in version 3
userAttributePrefix
// ====================
@@ -170,6 +169,13 @@ func smallBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, smallPrefix, key)
}
+// attributeBucketName returns _<attributeKey>.
+func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
+ key[0] = userAttributePrefix
+ cnr.Encode(key[1:])
+ return append(key[:bucketKeySize], attributeKey...)
+}
+
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go
index d7a9e7012..24cc75154 100644
--- a/pkg/local_object_storage/shard/put.go
+++ b/pkg/local_object_storage/shard/put.go
@@ -17,7 +17,8 @@ import (
// PutPrm groups the parameters of Put operation.
type PutPrm struct {
- obj *objectSDK.Object
+ obj *objectSDK.Object
+ indexAttributes bool
}
// PutRes groups the resulting values of Put operation.
@@ -28,6 +29,10 @@ func (p *PutPrm) SetObject(obj *objectSDK.Object) {
p.obj = obj
}
+func (p *PutPrm) SetIndexAttributes(v bool) {
+ p.indexAttributes = v
+}
+
// Put saves the object in shard.
//
// Returns any error encountered that
@@ -84,6 +89,7 @@ func (s *Shard) Put(ctx context.Context, prm PutPrm) (PutRes, error) {
var pPrm meta.PutPrm
pPrm.SetObject(prm.obj)
pPrm.SetStorageID(res.StorageID)
+ pPrm.SetIndexAttributes(prm.indexAttributes)
res, err := s.metaBase.Put(ctx, pPrm)
if err != nil {
// may we need to handle this case in a special way
From 1efa64ee72851e53ca5eb2bf643e74141b41ca46 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 13:04:29 +0300
Subject: [PATCH 129/655] [#1412] metabase: Add search by indexed attributes
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/db_test.go | 12 +-
pkg/local_object_storage/metabase/select.go | 115 ++++++++++++++++--
.../metabase/select_test.go | 81 +++++++-----
3 files changed, 169 insertions(+), 39 deletions(-)
diff --git a/pkg/local_object_storage/metabase/db_test.go b/pkg/local_object_storage/metabase/db_test.go
index 01e1ed2bc..0abb5ea89 100644
--- a/pkg/local_object_storage/metabase/db_test.go
+++ b/pkg/local_object_storage/metabase/db_test.go
@@ -32,7 +32,17 @@ func putBig(db *meta.DB, obj *objectSDK.Object) error {
}
func testSelect(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, exp ...oid.Address) {
- res, err := metaSelect(db, cnr, fs)
+ res, err := metaSelect(db, cnr, fs, false)
+ require.NoError(t, err)
+ require.Len(t, res, len(exp))
+
+ for i := range exp {
+ require.Contains(t, res, exp[i])
+ }
+}
+
+func testSelect2(t *testing.T, db *meta.DB, cnr cid.ID, fs objectSDK.SearchFilters, useAttrIndex bool, exp ...oid.Address) {
+ res, err := metaSelect(db, cnr, fs, useAttrIndex)
require.NoError(t, err)
require.Len(t, res, len(exp))
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 85d1b08ba..88ef7d5a4 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -37,8 +37,9 @@ type (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ useAttributeIndex bool
}
// SelectRes groups the resulting values of Select operation.
@@ -56,6 +57,10 @@ func (p *SelectPrm) SetFilters(fs objectSDK.SearchFilters) {
p.filters = fs
}
+func (p *SelectPrm) SetUseAttributeIndex(v bool) {
+ p.useAttributeIndex = v
+}
+
// AddressList returns list of addresses of the selected objects.
func (r SelectRes) AddressList() []oid.Address {
return r.addrList
@@ -92,14 +97,14 @@ func (db *DB) Select(ctx context.Context, prm SelectPrm) (res SelectRes, err err
currEpoch := db.epochState.CurrentEpoch()
return res, metaerr.Wrap(db.boltDB.View(func(tx *bbolt.Tx) error {
- res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch)
+ res.addrList, err = db.selectObjects(tx, prm.cnr, prm.filters, currEpoch, prm.useAttributeIndex)
success = err == nil
return err
}))
}
-func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64) ([]oid.Address, error) {
- group, err := groupFilters(fs)
+func (db *DB) selectObjects(tx *bbolt.Tx, cnr cid.ID, fs objectSDK.SearchFilters, currEpoch uint64, useAttributeIndex bool) ([]oid.Address, error) {
+ group, err := groupFilters(fs, useAttributeIndex)
if err != nil {
return nil, err
}
@@ -218,7 +223,13 @@ func (db *DB) selectFastFilter(
selectAllFromBucket(tx, primaryBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, tombstoneBucketName(cnr, bucketName), to, fNum)
selectAllFromBucket(tx, bucketNameLockers(cnr, bucketName), to, fNum)
- default:
+ default: // user attribute
+ bucketName := attributeBucketName(cnr, f.Header(), bucketName)
+ if f.Operation() == objectSDK.MatchNotPresent {
+ selectOutsideFKBT(tx, allBucketNames(cnr), bucketName, to, fNum)
+ } else {
+ db.selectFromFKBT(tx, bucketName, f, to, fNum)
+ }
}
}
@@ -228,6 +239,15 @@ var mBucketNaming = map[string][]func(cid.ID, []byte) []byte{
v2object.TypeLock.String(): {bucketNameLockers},
}
+func allBucketNames(cnr cid.ID) (names [][]byte) {
+ for _, fns := range mBucketNaming {
+ for _, fn := range fns {
+ names = append(names, fn(cnr, make([]byte, bucketKeySize)))
+ }
+ }
+ return
+}
+
func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal string) (names [][]byte) {
appendNames := func(key string) {
fns, ok := mBucketNaming[key]
@@ -259,6 +279,81 @@ func bucketNamesForType(cnr cid.ID, mType objectSDK.SearchMatchType, typeVal str
return
}
+func (db *DB) selectFromFKBT(
+ tx *bbolt.Tx,
+ name []byte, // fkbt root bucket name
+ f objectSDK.SearchFilter, // filter for operation and value
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ matchFunc, ok := db.matchers[f.Operation()]
+ if !ok {
+ db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation()))
+
+ return
+ }
+
+ fkbtRoot := tx.Bucket(name)
+ if fkbtRoot == nil {
+ return
+ }
+
+ err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ fkbtLeaf := fkbtRoot.Bucket(k)
+ if fkbtLeaf == nil {
+ return nil
+ }
+
+ return fkbtLeaf.ForEach(func(k, _ []byte) error {
+ markAddressInCache(to, fNum, string(k))
+
+ return nil
+ })
+ })
+ if err != nil {
+ db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
+ }
+}
+
+// selectOutsideFKBT iterates over the incl buckets and adds to the resulting cache
+// the addresses that are not present in the given FKBT root bucket.
+func selectOutsideFKBT(
+ tx *bbolt.Tx,
+ incl [][]byte, // buckets
+ name []byte, // fkbt root bucket name
+ to map[string]int, // resulting cache
+ fNum int, // index of filter
+) {
+ mExcl := make(map[string]struct{})
+
+ bktExcl := tx.Bucket(name)
+ if bktExcl != nil {
+ _ = bktExcl.ForEachBucket(func(k []byte) error {
+ exclBktLeaf := bktExcl.Bucket(k)
+ return exclBktLeaf.ForEach(func(k, _ []byte) error {
+ mExcl[string(k)] = struct{}{}
+
+ return nil
+ })
+ })
+ }
+
+ for i := range incl {
+ bktIncl := tx.Bucket(incl[i])
+ if bktIncl == nil {
+ continue
+ }
+
+ _ = bktIncl.ForEach(func(k, _ []byte) error {
+ if _, ok := mExcl[string(k)]; !ok {
+ markAddressInCache(to, fNum, string(k))
+ }
+
+ return nil
+ })
+ }
+}
+
// selectFromList looks into index to find list of addresses to add in
// resulting cache.
func (db *DB) selectFromList(
@@ -486,7 +581,7 @@ func attributeValue(obj *objectSDK.Object, attribute string) (string, bool) {
// groupFilters divides filters in two groups: fast and slow. Fast filters
// processed by indexes and slow filters processed after by unmarshaling
// object headers.
-func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
+func groupFilters(filters objectSDK.SearchFilters, useAttributeIndex bool) (filterGroup, error) {
res := filterGroup{
fastFilters: make(objectSDK.SearchFilters, 0, len(filters)),
slowFilters: make(objectSDK.SearchFilters, 0, len(filters)),
@@ -511,7 +606,11 @@ func groupFilters(filters objectSDK.SearchFilters) (filterGroup, error) {
v2object.FilterPropertyPhy:
res.fastFilters = append(res.fastFilters, filters[i])
default:
- res.slowFilters = append(res.slowFilters, filters[i])
+ if useAttributeIndex && IsAtrributeIndexed(filters[i].Header()) {
+ res.fastFilters = append(res.fastFilters, filters[i])
+ } else {
+ res.slowFilters = append(res.slowFilters, filters[i])
+ }
}
}
diff --git a/pkg/local_object_storage/metabase/select_test.go b/pkg/local_object_storage/metabase/select_test.go
index bee778e2b..5cc25a9f6 100644
--- a/pkg/local_object_storage/metabase/select_test.go
+++ b/pkg/local_object_storage/metabase/select_test.go
@@ -26,6 +26,16 @@ import (
func TestDB_SelectUserAttributes(t *testing.T) {
t.Parallel()
+ t.Run("with_index", func(t *testing.T) {
+ testSelectUserAttributes(t, true)
+ })
+ t.Run("without_index", func(t *testing.T) {
+ testSelectUserAttributes(t, false)
+ })
+}
+
+func testSelectUserAttributes(t *testing.T, index bool) {
+ t.Parallel()
db := newDB(t)
defer func() { require.NoError(t, db.Close()) }()
@@ -36,44 +46,52 @@ func TestDB_SelectUserAttributes(t *testing.T) {
testutil.AddAttribute(raw1, "foo", "bar")
testutil.AddAttribute(raw1, "x", "y")
- err := putBig(db, raw1)
+ var putPrm meta.PutPrm
+ putPrm.SetIndexAttributes(index)
+ putPrm.SetObject(raw1)
+ _, err := db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw2 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw2, "foo", "bar")
testutil.AddAttribute(raw2, "x", "z")
- err = putBig(db, raw2)
+ putPrm.SetObject(raw2)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw3 := testutil.GenerateObjectWithCID(cnr)
testutil.AddAttribute(raw3, "a", "b")
- err = putBig(db, raw3)
+ putPrm.SetObject(raw3)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw4 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw4, "path", "test/1/2")
+ testutil.AddAttribute(raw4, objectSDK.AttributeFilePath, "/test/1/2")
- err = putBig(db, raw4)
+ putPrm.SetObject(raw4)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw5 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw5, "path", "test/1/3")
+ testutil.AddAttribute(raw5, objectSDK.AttributeFilePath, "/test/1/3")
- err = putBig(db, raw5)
+ putPrm.SetObject(raw5)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw6 := testutil.GenerateObjectWithCID(cnr)
- testutil.AddAttribute(raw6, "path", "test/2/3")
+ testutil.AddAttribute(raw6, objectSDK.AttributeFilePath, "/test/2/3")
- err = putBig(db, raw6)
+ putPrm.SetObject(raw6)
+ _, err = db.Put(context.Background(), putPrm)
require.NoError(t, err)
raw7 := testutil.GenerateObjectWithCID(cnr)
var attr objectSDK.Attribute
- attr.SetKey("path")
- attr.SetValue("test/3/4")
+ attr.SetKey(objectSDK.AttributeFilePath)
+ attr.SetValue("/test/3/4")
attrs := raw7.Attributes()
attrs = append(attrs, attr)
ech := objectSDK.NewECHeader(objectSDK.ECParentInfo{
@@ -81,37 +99,39 @@ func TestDB_SelectUserAttributes(t *testing.T) {
Attributes: attrs,
}, 0, 3, []byte{}, 0)
raw7.SetECHeader(ech)
- require.NoError(t, putBig(db, raw7))
+ putPrm.SetObject(raw7)
+ _, err = db.Put(context.Background(), putPrm)
+ require.NoError(t, err)
var raw7Parent oid.Address
raw7Parent.SetContainer(cnr)
raw7Parent.SetObject(ech.Parent())
fs := objectSDK.SearchFilters{}
fs.AddFilter("foo", "bar", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
)
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw1))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw1))
fs = objectSDK.SearchFilters{}
fs.AddFilter("x", "y", objectSDK.MatchStringNotEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw2))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw2))
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "b", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs, object.AddressOf(raw3))
+ testSelect2(t, db, cnr, fs, index, object.AddressOf(raw3))
fs = objectSDK.SearchFilters{}
fs.AddFilter("c", "d", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs)
+ testSelect2(t, db, cnr, fs, index)
fs = objectSDK.SearchFilters{}
fs.AddFilter("foo", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw3),
object.AddressOf(raw4),
object.AddressOf(raw5),
@@ -121,7 +141,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("a", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw4),
@@ -131,7 +151,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -143,7 +163,7 @@ func TestDB_SelectUserAttributes(t *testing.T) {
fs = objectSDK.SearchFilters{}
fs.AddFilter("key", "", objectSDK.MatchNotPresent)
- testSelect(t, db, cnr, fs,
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw1),
object.AddressOf(raw2),
object.AddressOf(raw3),
@@ -154,8 +174,8 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
object.AddressOf(raw6),
@@ -163,15 +183,15 @@ func TestDB_SelectUserAttributes(t *testing.T) {
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/1", objectSDK.MatchCommonPrefix)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/1", objectSDK.MatchCommonPrefix)
+ testSelect2(t, db, cnr, fs, index,
object.AddressOf(raw4),
object.AddressOf(raw5),
)
fs = objectSDK.SearchFilters{}
- fs.AddFilter("path", "test/3/4", objectSDK.MatchStringEqual)
- testSelect(t, db, cnr, fs,
+ fs.AddFilter(objectSDK.AttributeFilePath, "/test/3/4", objectSDK.MatchStringEqual)
+ testSelect2(t, db, cnr, fs, index,
raw7Parent,
)
}
@@ -1185,11 +1205,11 @@ func TestExpiredObjects(t *testing.T) {
cidExp, _ := exp.ContainerID()
cidNonExp, _ := nonExp.ContainerID()
- objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{})
+ objs, err := metaSelect(db, cidExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.Empty(t, objs) // expired object should not be returned
- objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{})
+ objs, err = metaSelect(db, cidNonExp, objectSDK.SearchFilters{}, false)
require.NoError(t, err)
require.NotEmpty(t, objs)
})
@@ -1211,10 +1231,11 @@ func benchmarkSelect(b *testing.B, db *meta.DB, cid cidSDK.ID, fs objectSDK.Sear
}
}
-func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func metaSelect(db *meta.DB, cnr cidSDK.ID, fs objectSDK.SearchFilters, useAttributeIndex bool) ([]oid.Address, error) {
var prm meta.SelectPrm
prm.SetFilters(fs)
prm.SetContainerID(cnr)
+ prm.SetUseAttributeIndex(useAttributeIndex)
res, err := db.Select(context.Background(), prm)
return res.AddressList(), err
From 4572fa487493f21cc5ebffcdc526270452a36e6a Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 14:52:54 +0300
Subject: [PATCH 130/655] [#1412] searchSvc: Check container is indexed
For non-S3 containers it is expected that the attribute index is used for
some attributes.
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/object.go | 5 +++--
pkg/local_object_storage/engine/delete.go | 2 +-
pkg/local_object_storage/engine/inhume_test.go | 4 ++--
pkg/local_object_storage/engine/select.go | 14 ++++++++------
pkg/local_object_storage/engine/tree_test.go | 2 +-
pkg/local_object_storage/shard/select.go | 9 ++++++---
pkg/services/object/search/container.go | 10 ++++++++++
pkg/services/object/search/service.go | 4 ++++
pkg/services/object/search/util.go | 7 ++++++-
9 files changed, 41 insertions(+), 16 deletions(-)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index f2c4bff1d..68acb05d3 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -174,7 +174,7 @@ func initObjectService(c *cfg) {
sPutV2 := createPutSvcV2(sPut, keyStorage)
- sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache)
+ sSearch := createSearchSvc(c, keyStorage, traverseGen, c.clientCache, c.cfgObject.cnrSource)
sSearchV2 := createSearchSvcV2(sSearch, keyStorage)
@@ -366,7 +366,7 @@ func createPatchSvc(sGet *getsvc.Service, sPut *putsvc.Service) *patchsvc.Servic
return patchsvc.NewService(sPut.Config, sGet)
}
-func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache) *searchsvc.Service {
+func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.TraverserGenerator, coreConstructor *cache.ClientCache, containerSource containercore.Source) *searchsvc.Service {
ls := c.cfgObject.cfgLocalStorage.localStorage
return searchsvc.New(
@@ -377,6 +377,7 @@ func createSearchSvc(c *cfg, keyStorage *util.KeyStorage, traverseGen *util.Trav
),
c.netMapSource,
keyStorage,
+ containerSource,
searchsvc.WithLogger(c.log),
)
}
diff --git a/pkg/local_object_storage/engine/delete.go b/pkg/local_object_storage/engine/delete.go
index 61cb6832d..3ec3f8f9b 100644
--- a/pkg/local_object_storage/engine/delete.go
+++ b/pkg/local_object_storage/engine/delete.go
@@ -143,7 +143,7 @@ func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, fo
var selectPrm shard.SelectPrm
selectPrm.SetFilters(fs)
- selectPrm.SetContainerID(addr.Container())
+ selectPrm.SetContainerID(addr.Container(), false) // doesn't matter for search by splitID
var inhumePrm shard.InhumePrm
if force {
diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go
index f87679253..b4fbbd810 100644
--- a/pkg/local_object_storage/engine/inhume_test.go
+++ b/pkg/local_object_storage/engine/inhume_test.go
@@ -49,7 +49,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
@@ -78,7 +78,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
_, err = e.Inhume(context.Background(), inhumePrm)
require.NoError(t, err)
- addrs, err := Select(context.Background(), e, cnr, fs)
+ addrs, err := Select(context.Background(), e, cnr, false, fs)
require.NoError(t, err)
require.Empty(t, addrs)
})
diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go
index 972a4f52a..a85891f0c 100644
--- a/pkg/local_object_storage/engine/select.go
+++ b/pkg/local_object_storage/engine/select.go
@@ -14,8 +14,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ indexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -24,8 +25,9 @@ type SelectRes struct {
}
// WithContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) WithContainerID(cnr cid.ID) {
+func (p *SelectPrm) WithContainerID(cnr cid.ID, indexedContainer bool) {
p.cnr = cnr
+ p.indexedContainer = indexedContainer
}
// WithFilters is a Select option to set the object filters.
@@ -67,7 +69,7 @@ func (e *StorageEngine) _select(ctx context.Context, prm SelectPrm) (SelectRes,
var outError error
var shPrm shard.SelectPrm
- shPrm.SetContainerID(prm.cnr)
+ shPrm.SetContainerID(prm.cnr, prm.indexedContainer)
shPrm.SetFilters(prm.filters)
e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
@@ -140,9 +142,9 @@ func (e *StorageEngine) list(ctx context.Context, limit uint64) (SelectRes, erro
}
// Select selects objects from local storage using provided filters.
-func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, fs objectSDK.SearchFilters) ([]oid.Address, error) {
+func Select(ctx context.Context, storage *StorageEngine, cnr cid.ID, isIndexedContainer bool, fs objectSDK.SearchFilters) ([]oid.Address, error) {
var selectPrm SelectPrm
- selectPrm.WithContainerID(cnr)
+ selectPrm.WithContainerID(cnr, isIndexedContainer)
selectPrm.WithFilters(fs)
res, err := storage.Select(ctx, selectPrm)
diff --git a/pkg/local_object_storage/engine/tree_test.go b/pkg/local_object_storage/engine/tree_test.go
index 21fcce415..ea0a9e74e 100644
--- a/pkg/local_object_storage/engine/tree_test.go
+++ b/pkg/local_object_storage/engine/tree_test.go
@@ -50,7 +50,7 @@ func benchmarkTreeVsSearch(b *testing.B, objCount int) {
b.Run("search", func(b *testing.B) {
var prm SelectPrm
- prm.WithContainerID(cid)
+ prm.WithContainerID(cid, true)
var fs objectSDK.SearchFilters
fs.AddFilter(pilorama.AttributeFilename, strconv.Itoa(objCount/2), objectSDK.MatchStringEqual)
diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go
index 1615f5fbe..184ca9b71 100644
--- a/pkg/local_object_storage/shard/select.go
+++ b/pkg/local_object_storage/shard/select.go
@@ -15,8 +15,9 @@ import (
// SelectPrm groups the parameters of Select operation.
type SelectPrm struct {
- cnr cid.ID
- filters objectSDK.SearchFilters
+ cnr cid.ID
+ filters objectSDK.SearchFilters
+ isIndexedContainer bool
}
// SelectRes groups the resulting values of Select operation.
@@ -25,8 +26,9 @@ type SelectRes struct {
}
// SetContainerID is a Select option to set the container id to search in.
-func (p *SelectPrm) SetContainerID(cnr cid.ID) {
+func (p *SelectPrm) SetContainerID(cnr cid.ID, isIndexedContainer bool) {
p.cnr = cnr
+ p.isIndexedContainer = isIndexedContainer
}
// SetFilters is a Select option to set the object filters.
@@ -61,6 +63,7 @@ func (s *Shard) Select(ctx context.Context, prm SelectPrm) (SelectRes, error) {
var selectPrm meta.SelectPrm
selectPrm.SetFilters(prm.filters)
selectPrm.SetContainerID(prm.cnr)
+ selectPrm.SetUseAttributeIndex(prm.isIndexedContainer)
mRes, err := s.metaBase.Select(ctx, selectPrm)
if err != nil {
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index d70574156..39259b0ca 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ containerSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"go.uber.org/zap"
)
@@ -112,3 +113,12 @@ func (exec *execCtx) processCurrentEpoch(ctx context.Context) error {
return nil
}
+
+func (exec *execCtx) getContainer() (containerSDK.Container, error) {
+ cnrID := exec.containerID()
+ cnr, err := exec.svc.containerSource.Get(cnrID)
+ if err != nil {
+ return containerSDK.Container{}, err
+ }
+ return cnr.Value, nil
+}
diff --git a/pkg/services/object/search/service.go b/pkg/services/object/search/service.go
index cc388c1b2..7700f78d8 100644
--- a/pkg/services/object/search/service.go
+++ b/pkg/services/object/search/service.go
@@ -54,6 +54,8 @@ type cfg struct {
}
keyStore *util.KeyStorage
+
+ containerSource container.Source
}
// New creates, initializes and returns utility serving
@@ -63,6 +65,7 @@ func New(e *engine.StorageEngine,
tg *util.TraverserGenerator,
ns netmap.Source,
ks *util.KeyStorage,
+ cs container.Source,
opts ...Option,
) *Service {
c := &cfg{
@@ -76,6 +79,7 @@ func New(e *engine.StorageEngine,
traverserGenerator: tg,
currentEpochReceiver: ns,
keyStore: ks,
+ containerSource: cs,
}
for i := range opts {
diff --git a/pkg/services/object/search/util.go b/pkg/services/object/search/util.go
index 67b6c0d01..910384a0b 100644
--- a/pkg/services/object/search/util.go
+++ b/pkg/services/object/search/util.go
@@ -5,6 +5,7 @@ import (
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
internalclient "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/internal/client"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
@@ -112,9 +113,13 @@ func (c *clientWrapper) searchObjects(ctx context.Context, exec *execCtx, info c
}
func (e *storageEngineWrapper) search(ctx context.Context, exec *execCtx) ([]oid.ID, error) {
+ cnr, err := exec.getContainer()
+ if err != nil {
+ return nil, err
+ }
var selectPrm engine.SelectPrm
selectPrm.WithFilters(exec.searchFilters())
- selectPrm.WithContainerID(exec.containerID())
+ selectPrm.WithContainerID(exec.containerID(), container.IsIndexedContainer(cnr))
r, err := e.storage.Select(ctx, selectPrm)
if err != nil {
From 3da168f8cf96cb342c4cfe753e38c8a0bd7ad0a4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 2 Oct 2024 16:46:39 +0300
Subject: [PATCH 131/655] [#1412] shard: Resolve whether container is indexed on
metabase resync
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 23 +++++++++++++++++++++--
internal/logs/logs.go | 1 +
pkg/local_object_storage/shard/control.go | 17 +++++++++++++++++
pkg/local_object_storage/shard/shard.go | 10 ++++++++++
4 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 4ad9ec6c6..3c7e310b4 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -1058,7 +1058,9 @@ func initLocalStorage(ctx context.Context, c *cfg) {
var shardsAttached int
for _, optsWithMeta := range c.shardOpts(ctx) {
- id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts, shard.WithTombstoneSource(c.createTombstoneSource()))...)
+ id, err := ls.AddShard(ctx, append(optsWithMeta.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)))...)
if err != nil {
c.log.Error(logs.FrostFSNodeFailedToAttachShardToEngine, zap.Error(err))
} else {
@@ -1313,7 +1315,10 @@ func (c *cfg) reloadConfig(ctx context.Context) {
var rcfg engine.ReConfiguration
for _, optsWithID := range c.shardOpts(ctx) {
- rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts, shard.WithTombstoneSource(c.createTombstoneSource())))
+ rcfg.AddShard(optsWithID.configID, append(optsWithID.shOpts,
+ shard.WithTombstoneSource(c.createTombstoneSource()),
+ shard.WithContainerInfoProvider(c.createContainerInfoProvider(ctx)),
+ ))
}
err = c.cfgObject.cfgLocalStorage.localStorage.Reload(ctx, rcfg)
@@ -1414,6 +1419,20 @@ func (c *cfg) createTombstoneSource() *tombstone.ExpirationChecker {
return tombstoneSource
}
+func (c *cfg) createContainerInfoProvider(ctx context.Context) container.InfoProvider {
+ return container.NewInfoProvider(func() (container.Source, error) {
+ // thread-safe: called on init or on SIGHUP when morph is initialized
+ if c.cfgMorph.client == nil {
+ initMorphComponents(ctx, c)
+ }
+ cc, err := containerClient.NewFromMorph(c.cfgMorph.client, c.cfgContainer.scriptHash, 0, containerClient.TryNotary())
+ if err != nil {
+ return nil, err
+ }
+ return containerClient.AsContainerSource(cc), nil
+ })
+}
+
func (c *cfg) shutdown() {
old := c.swapHealthStatus(control.HealthStatus_SHUTTING_DOWN)
if old == control.HealthStatus_SHUTTING_DOWN {
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 7aef6873e..1ae4f0d3f 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -249,6 +249,7 @@ const (
ShardMetabaseFailureSwitchingMode = "metabase failure, switching mode"
ShardCantMoveShardToReadonlySwitchMode = "can't move shard to readonly, switch mode"
ShardCouldNotUnmarshalObject = "could not unmarshal object"
+ ShardSkipObjectFromResyncContainerDeleted = "object skipped from metabase resync: container deleted"
ShardCouldNotCloseShardComponent = "could not close shard component"
ShardCantOpenMetabaseMoveToADegradedMode = "can't open metabase, move to a degraded mode"
ShardCantInitializeMetabaseMoveToADegradedreadonlyMode = "can't initialize metabase, move to a degraded-read-only mode"
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index de881654a..4f9f25608 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
+ "slices"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
@@ -275,6 +276,21 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
return nil
}
+ hasIndexedAttribute := slices.IndexFunc(obj.Attributes(), func(attr objectSDK.Attribute) bool { return meta.IsAtrributeIndexed(attr.Key()) }) > 0
+
+ var isIndexedContainer bool
+ if hasIndexedAttribute {
+ info, err := s.containerInfo.Info(addr.Container())
+ if err != nil {
+ return err
+ }
+ if info.Removed {
+ s.log.Debug(logs.ShardSkipObjectFromResyncContainerDeleted, zap.Stringer("address", addr))
+ return nil
+ }
+ isIndexedContainer = info.Indexed
+ }
+
var err error
switch obj.Type() {
case objectSDK.TypeTombstone:
@@ -290,6 +306,7 @@ func (s *Shard) refillObject(ctx context.Context, data []byte, addr oid.Address,
var mPrm meta.PutPrm
mPrm.SetObject(obj)
mPrm.SetStorageID(descriptor)
+ mPrm.SetIndexAttributes(hasIndexedAttribute && isIndexedContainer)
_, err = s.metaBase.Put(ctx, mPrm)
if err != nil && !client.IsErrObjectAlreadyRemoved(err) && !errors.Is(err, meta.ErrObjectIsExpired) {
diff --git a/pkg/local_object_storage/shard/shard.go b/pkg/local_object_storage/shard/shard.go
index d7e723733..413bfd2f7 100644
--- a/pkg/local_object_storage/shard/shard.go
+++ b/pkg/local_object_storage/shard/shard.go
@@ -7,6 +7,7 @@ import (
"time"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/blobstor"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/pilorama"
@@ -95,6 +96,8 @@ type cfg struct {
metricsWriter MetricsWriter
reportErrorFunc func(selfID string, message string, err error)
+
+ containerInfo container.InfoProvider
}
func defaultCfg() *cfg {
@@ -358,6 +361,13 @@ func WithZeroCountCallback(cb EmptyContainersCallback) Option {
}
}
+// WithContainerInfoProvider returns option to set container info provider.
+func WithContainerInfoProvider(containerInfo container.InfoProvider) Option {
+ return func(c *cfg) {
+ c.containerInfo = containerInfo
+ }
+}
+
func (s *Shard) fillInfo() {
s.cfg.info.MetaBaseInfo = s.metaBase.DumpInfo()
s.cfg.info.BlobStorInfo = s.blobStor.DumpInfo()
From 8093e145b316762977aff5b2c8babda64ae7283b Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 3 Oct 2024 11:06:31 +0300
Subject: [PATCH 132/655] [#1412] adm: Resolve container type by metabase
upgrade
Signed-off-by: Dmitrii Stepanov
---
.../internal/modules/metabase/upgrade.go | 83 +++++++++++++----
pkg/local_object_storage/metabase/upgrade.go | 93 +++++++++++++++++--
.../metabase/upgrade_test.go | 21 +++--
pkg/local_object_storage/metabase/util.go | 15 +++
4 files changed, 179 insertions(+), 33 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
index 96cb62f10..00b30c9b2 100644
--- a/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
+++ b/cmd/frostfs-adm/internal/modules/metabase/upgrade.go
@@ -1,6 +1,7 @@
package metabase
import (
+ "context"
"errors"
"fmt"
"sync"
@@ -10,19 +11,24 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
engineconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine"
shardconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard"
+ morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client"
+ morphcontainer "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/client/container"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
const (
- pathFlag = "path"
noCompactFlag = "no-compact"
)
-var errNoPathsFound = errors.New("no metabase paths found")
-
-var path string
+var (
+ errNoPathsFound = errors.New("no metabase paths found")
+ errNoMorphEndpointsFound = errors.New("no morph endpoints found")
+)
var UpgradeCmd = &cobra.Command{
Use: "upgrade",
@@ -39,17 +45,10 @@ func upgrade(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
- noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
- var paths []string
- if path != "" {
- paths = append(paths, path)
- }
appCfg := config.New(configFile, configDir, config.EnvPrefix)
- if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
- paths = append(paths, sc.Metabase().Path())
- return nil
- }); err != nil {
- return fmt.Errorf("failed to get metabase paths: %w", err)
+ paths, err := getMetabasePaths(appCfg)
+ if err != nil {
+ return err
}
if len(paths) == 0 {
return errNoPathsFound
@@ -58,6 +57,16 @@ func upgrade(cmd *cobra.Command, _ []string) error {
for i, path := range paths {
cmd.Println(i+1, ":", path)
}
+ mc, err := createMorphClient(cmd.Context(), appCfg)
+ if err != nil {
+ return err
+ }
+ defer mc.Close()
+ civ, err := createContainerInfoProvider(mc)
+ if err != nil {
+ return err
+ }
+ noCompact, _ := cmd.Flags().GetBool(noCompactFlag)
result := make(map[string]bool)
var resultGuard sync.Mutex
eg, ctx := errgroup.WithContext(cmd.Context())
@@ -65,7 +74,7 @@ func upgrade(cmd *cobra.Command, _ []string) error {
eg.Go(func() error {
var success bool
cmd.Println("upgrading metabase", path, "...")
- if err := meta.Upgrade(ctx, path, !noCompact, func(a ...any) {
+ if err := meta.Upgrade(ctx, path, !noCompact, civ, func(a ...any) {
cmd.Println(append([]any{time.Now().Format(time.RFC3339), ":", path, ":"}, a...)...)
}); err != nil {
cmd.Println("error: failed to upgrade metabase", path, ":", err)
@@ -92,8 +101,50 @@ func upgrade(cmd *cobra.Command, _ []string) error {
return nil
}
+func getMetabasePaths(appCfg *config.Config) ([]string, error) {
+ var paths []string
+ if err := engineconfig.IterateShards(appCfg, false, func(sc *shardconfig.Config) error {
+ paths = append(paths, sc.Metabase().Path())
+ return nil
+ }); err != nil {
+ return nil, fmt.Errorf("get metabase paths: %w", err)
+ }
+ return paths, nil
+}
+
+func createMorphClient(ctx context.Context, appCfg *config.Config) (*client.Client, error) {
+ addresses := morphconfig.RPCEndpoint(appCfg)
+ if len(addresses) == 0 {
+ return nil, errNoMorphEndpointsFound
+ }
+ key := nodeconfig.Key(appCfg)
+ cli, err := client.New(ctx,
+ key,
+ client.WithDialTimeout(morphconfig.DialTimeout(appCfg)),
+ client.WithEndpoints(addresses...),
+ client.WithSwitchInterval(morphconfig.SwitchInterval(appCfg)),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("create morph client: %w", err)
+ }
+ return cli, nil
+}
+
+func createContainerInfoProvider(cli *client.Client) (container.InfoProvider, error) {
+ sh, err := cli.NNSContractAddress(client.NNSContainerContractName)
+ if err != nil {
+ return nil, fmt.Errorf("resolve container contract hash: %w", err)
+ }
+ cc, err := morphcontainer.NewFromMorph(cli, sh, 0, morphcontainer.TryNotary())
+ if err != nil {
+ return nil, fmt.Errorf("create morph container client: %w", err)
+ }
+ return container.NewInfoProvider(func() (container.Source, error) {
+ return morphcontainer.AsContainerSource(cc), nil
+ }), nil
+}
+
func initUpgradeCommand() {
flags := UpgradeCmd.Flags()
- flags.StringVar(&path, pathFlag, "", "Path to metabase file")
flags.Bool(noCompactFlag, false, "Do not compact upgraded metabase file")
}
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index b5de430dc..f2a0107a1 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"encoding/binary"
+ "encoding/hex"
"errors"
"fmt"
"os"
@@ -12,6 +13,7 @@ import (
"time"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.etcd.io/bbolt"
@@ -25,15 +27,15 @@ const (
upgradeTimeout = 1 * time.Second
)
-var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, log func(a ...any)) error{
+var updates = map[uint64]func(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error{
2: upgradeFromV2ToV3,
- 3: func(_ context.Context, _ *bbolt.DB, log func(a ...any)) error {
+ 3: func(_ context.Context, _ *bbolt.DB, _ container.InfoProvider, log func(a ...any)) error {
log("metabase already upgraded")
return nil
},
}
-func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any)) error {
+func Upgrade(ctx context.Context, path string, compact bool, cs container.InfoProvider, log func(a ...any)) error {
if _, err := os.Stat(path); err != nil {
return fmt.Errorf("check metabase existence: %w", err)
}
@@ -61,7 +63,7 @@ func Upgrade(ctx context.Context, path string, compact bool, log func(a ...any))
}); err != nil {
return fmt.Errorf("set upgrade key %w", err)
}
- if err := updater(ctx, db, log); err != nil {
+ if err := updater(ctx, db, cs, log); err != nil {
return fmt.Errorf("update metabase schema: %w", err)
}
if err := db.Update(func(tx *bbolt.Tx) error {
@@ -113,11 +115,11 @@ func compactDB(db *bbolt.DB) error {
return nil
}
-func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
+func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- if err := dropUserAttributes(ctx, db, log); err != nil {
+ if err := dropUserAttributes(ctx, db, cs, log); err != nil {
return err
}
if err := dropOwnerIDIndex(ctx, db, log); err != nil {
@@ -323,10 +325,81 @@ func iterateExpirationAttributeKeyBucket(ctx context.Context, b *bbolt.Bucket, i
return nil
}
-func dropUserAttributes(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
- return dropBucketsByPrefix(ctx, db, []byte{userAttributePrefix}, func(a ...any) {
- log(append([]any{"user attributes:"}, a...)...)
- })
+func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProvider, log func(a ...any)) error {
+ log("deleting user attribute buckets...")
+ const batch = 1000
+ prefix := []byte{userAttributePrefix}
+ last := prefix
+ var count uint64
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ var keys [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ c := tx.Cursor()
+ for k, _ := c.Seek(last); k != nil && bytes.HasPrefix(k, prefix) && len(keys) < batch; k, _ = c.Next() {
+ if bytes.Equal(last, k) {
+ continue
+ }
+ keys = append(keys, bytes.Clone(k))
+ }
+ return nil
+ }); err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
+ return err
+ }
+ if len(keys) == 0 {
+ log("deleting user attribute buckets completed successfully, deleted", count, "buckets")
+ return nil
+ }
+ last = keys[len(keys)-1]
+ keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ if err != nil {
+ return err
+ }
+ if err := db.Update(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ log("deleting buckets completed with an error:", err)
+ return err
+ }
+ count += uint64(len(keysToDrop))
+ log("deleted", count, "buckets")
+ }
+}
+
+func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
+ var keysToDrop [][]byte
+ for _, key := range keys {
+ attr, ok := attributeFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse attribute key from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ if !IsAtrributeIndexed(attr) {
+ keysToDrop = append(keysToDrop, key)
+ continue
+ }
+ contID, ok := cidFromAttributeBucket(key)
+ if !ok {
+ return nil, fmt.Errorf("failed to parse container ID from user attribute bucket key %s", hex.EncodeToString(key))
+ }
+ info, err := cs.Info(contID)
+ if err != nil {
+ return nil, err
+ }
+ if info.Removed || !info.Indexed {
+ keysToDrop = append(keysToDrop, key)
+ }
+ }
+ return keysToDrop, nil
}
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
diff --git a/pkg/local_object_storage/metabase/upgrade_test.go b/pkg/local_object_storage/metabase/upgrade_test.go
index 3797de0a4..9c525291a 100644
--- a/pkg/local_object_storage/metabase/upgrade_test.go
+++ b/pkg/local_object_storage/metabase/upgrade_test.go
@@ -12,6 +12,7 @@ import (
"time"
objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard/mode"
@@ -35,13 +36,19 @@ func TestUpgradeV2ToV3(t *testing.T) {
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.ErrorIs(t, db.Init(), ErrOutdatedVersion)
require.NoError(t, db.Close())
- require.NoError(t, Upgrade(context.Background(), path, true, t.Log))
+ require.NoError(t, Upgrade(context.Background(), path, true, &testContainerInfoProvider{}, t.Log))
require.NoError(t, db.Open(context.Background(), mode.ReadWrite))
require.NoError(t, db.Init())
require.NoError(t, db.Close())
fmt.Println()
}
+type testContainerInfoProvider struct{}
+
+func (p *testContainerInfoProvider) Info(id cid.ID) (container.Info, error) {
+ return container.Info{}, nil
+}
+
func createTempCopy(t *testing.T, path string) string {
src, err := os.Open(path)
require.NoError(t, err)
@@ -95,7 +102,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(obj, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
@@ -118,8 +125,8 @@ func TestGenerateMetabaseFile(t *testing.T) {
child.SetParent(parent)
idParent, _ := parent.ID()
child.SetParentID(idParent)
- testutil.AddAttribute(child, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
- testutil.AddAttribute(parent, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(child, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(parent, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
testutil.AddAttribute(child, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
testutil.AddAttribute(parent, objectV2.SysAttributeExpEpoch, strconv.FormatUint(uint64(i%minEpoch+minEpoch), 10))
_, err := db.Put(ctx, PutPrm{
@@ -138,7 +145,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -160,7 +167,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
@@ -190,7 +197,7 @@ func TestGenerateMetabaseFile(t *testing.T) {
i := i
eg.Go(func() error {
obj := testutil.GenerateObjectWithCID(containers[i%len(containers)])
- testutil.AddAttribute(obj, "FileName", strconv.FormatInt(int64(i%maxFilename), 10))
+ testutil.AddAttribute(obj, objectSDK.AttributeFilePath, strconv.FormatInt(int64(i%maxFilename), 10))
_, err := db.Put(ctx, PutPrm{
obj: obj,
id: []byte(strconv.FormatInt(int64(i%maxStorageID), 10) + "/" + strconv.FormatInt(int64(i%maxStorageID), 10)),
diff --git a/pkg/local_object_storage/metabase/util.go b/pkg/local_object_storage/metabase/util.go
index 4679de332..0a2f91a47 100644
--- a/pkg/local_object_storage/metabase/util.go
+++ b/pkg/local_object_storage/metabase/util.go
@@ -176,6 +176,21 @@ func attributeBucketName(cnr cid.ID, attributeKey string, key []byte) []byte {
return append(key[:bucketKeySize], attributeKey...)
}
+func cidFromAttributeBucket(bucketName []byte) (cid.ID, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return cid.ID{}, false
+ }
+ var result cid.ID
+ return result, result.Decode(bucketName[1:bucketKeySize]) == nil
+}
+
+func attributeFromAttributeBucket(bucketName []byte) (string, bool) {
+ if len(bucketName) < bucketKeySize || bucketName[0] != userAttributePrefix {
+ return "", false
+ }
+ return string(bucketName[bucketKeySize:]), true
+}
+
// rootBucketName returns _root.
func rootBucketName(cnr cid.ID, key []byte) []byte {
return bucketName(cnr, rootPrefix, key)
From 87f4b934d1cca9a671dc93fcc4cdb5861be35915 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Thu, 3 Oct 2024 17:57:21 +0300
Subject: [PATCH 133/655] [#1412] metabase: Run bucket drop steps on upgrade
concurrently
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 22 ++++++++++++--------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index f2a0107a1..a997b90a0 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -119,13 +119,17 @@ func upgradeFromV2ToV3(ctx context.Context, db *bbolt.DB, cs container.InfoProvi
if err := createExpirationEpochBuckets(ctx, db, log); err != nil {
return err
}
- if err := dropUserAttributes(ctx, db, cs, log); err != nil {
- return err
- }
- if err := dropOwnerIDIndex(ctx, db, log); err != nil {
- return err
- }
- if err := dropPayloadChecksumIndex(ctx, db, log); err != nil {
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ return dropUserAttributes(ctx, db, cs, log)
+ })
+ eg.Go(func() error {
+ return dropOwnerIDIndex(ctx, db, log)
+ })
+ eg.Go(func() error {
+ return dropPayloadChecksumIndex(ctx, db, log)
+ })
+ if err := eg.Wait(); err != nil {
return err
}
return db.Update(func(tx *bbolt.Tx) error {
@@ -360,7 +364,7 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
if err != nil {
return err
}
- if err := db.Update(func(tx *bbolt.Tx) error {
+ if err := db.Batch(func(tx *bbolt.Tx) error {
for _, k := range keysToDrop {
if err := tx.DeleteBucket(k); err != nil {
return err
@@ -439,7 +443,7 @@ func dropBucketsByPrefix(ctx context.Context, db *bbolt.DB, prefix []byte, log f
log("deleting buckets completed successfully, deleted", count, "buckets")
return nil
}
- if err := db.Update(func(tx *bbolt.Tx) error {
+ if err := db.Batch(func(tx *bbolt.Tx) error {
for _, k := range keys {
if err := tx.DeleteBucket(k); err != nil {
return err
From fe9f664b577f1b51797e375ef736977ed61d9757 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Fri, 4 Oct 2024 10:49:39 +0300
Subject: [PATCH 134/655] [#1412] metabase: Drop empty user attribute buckets
on upgrade
Signed-off-by: Dmitrii Stepanov
---
pkg/local_object_storage/metabase/upgrade.go | 166 +++++++++++++++++--
1 file changed, 154 insertions(+), 12 deletions(-)
diff --git a/pkg/local_object_storage/metabase/upgrade.go b/pkg/local_object_storage/metabase/upgrade.go
index a997b90a0..1f2c7956b 100644
--- a/pkg/local_object_storage/metabase/upgrade.go
+++ b/pkg/local_object_storage/metabase/upgrade.go
@@ -360,26 +360,40 @@ func dropUserAttributes(ctx context.Context, db *bbolt.DB, cs container.InfoProv
return nil
}
last = keys[len(keys)-1]
- keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ cnt, err := dropNonIndexedUserAttributeBuckets(db, cs, keys)
if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
return err
}
- if err := db.Batch(func(tx *bbolt.Tx) error {
- for _, k := range keysToDrop {
- if err := tx.DeleteBucket(k); err != nil {
- return err
- }
- }
- return nil
- }); err != nil {
- log("deleting buckets completed with an error:", err)
+ count += cnt
+ cnt, err = dropEmptyUserAttributeBuckets(ctx, db, keys)
+ if err != nil {
+ log("deleting user attribute buckets completed with an error:", err)
return err
}
- count += uint64(len(keysToDrop))
- log("deleted", count, "buckets")
+ count += cnt
+ log("deleted", count, "user attribute buckets")
}
}
+func dropNonIndexedUserAttributeBuckets(db *bbolt.DB, cs container.InfoProvider, keys [][]byte) (uint64, error) {
+ keysToDrop, err := selectUserAttributeKeysToDrop(keys, cs)
+ if err != nil {
+ return 0, fmt.Errorf("select non indexed user attributes: %w", err)
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, k := range keysToDrop {
+ if err := tx.DeleteBucket(k); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop non indexed user attributes: %w", err)
+ }
+ return uint64(len(keysToDrop)), nil
+}
+
func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([][]byte, error) {
var keysToDrop [][]byte
for _, key := range keys {
@@ -406,6 +420,134 @@ func selectUserAttributeKeysToDrop(keys [][]byte, cs container.InfoProvider) ([]
return keysToDrop, nil
}
+func dropEmptyUserAttributeBuckets(ctx context.Context, db *bbolt.DB, keys [][]byte) (uint64, error) {
+ var dropBuckets [][]byte
+ for _, key := range keys {
+ select {
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ default:
+ }
+
+ if err := dropEmptyNestedBuckets(ctx, db, key); err != nil {
+ return 0, err
+ }
+
+ empty, exists, err := bucketIsEmpty(db, key)
+ if err != nil {
+ return 0, err
+ }
+ if empty && exists {
+ dropBuckets = append(dropBuckets, key)
+ }
+ }
+ if len(dropBuckets) == 0 {
+ return 0, nil
+ }
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ for _, key := range dropBuckets {
+ if err := tx.DeleteBucket(key); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return 0, fmt.Errorf("drop empty user attributes buckets: %w", err)
+ }
+ return uint64(len(dropBuckets)), nil
+}
+
+func bucketIsEmpty(db *bbolt.DB, bucketKey []byte) (bool, bool, error) {
+ var empty bool
+ var exists bool
+ if err := db.View(func(tx *bbolt.Tx) error {
+ b := tx.Bucket(bucketKey)
+ if b == nil {
+ return nil
+ }
+ exists = true
+ empty = !hasAnyItem(b)
+ return nil
+ }); err != nil {
+ return false, false, fmt.Errorf("bucket empty check: %w", err)
+ }
+ return empty, exists, nil
+}
+
+func dropEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey []byte) error {
+ var last []byte
+ for {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var dropBuckets [][]byte
+ var err error
+ dropBuckets, last, err = selectEmptyNestedBuckets(ctx, db, rootBucketKey, last)
+ if err != nil {
+ return fmt.Errorf("select empty nested buckets: %w", err)
+ }
+ if len(dropBuckets) == 0 {
+ return nil
+ }
+
+ if err := db.Batch(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ for _, sb := range dropBuckets {
+ if err := rootBucket.DeleteBucket(sb); err != nil {
+ return err
+ }
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("drop empty nested buckets: %w", err)
+ }
+ }
+}
+
+func selectEmptyNestedBuckets(ctx context.Context, db *bbolt.DB, rootBucketKey, last []byte) ([][]byte, []byte, error) {
+ const batchSize = 1000
+ var result [][]byte
+ if err := db.View(func(tx *bbolt.Tx) error {
+ rootBucket := tx.Bucket(rootBucketKey)
+ if rootBucket == nil {
+ return nil
+ }
+ c := rootBucket.Cursor()
+ for k, v := c.Seek(last); k != nil && len(result) < batchSize; k, v = c.Next() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if bytes.Equal(last, k) {
+ continue
+ }
+ last = bytes.Clone(k)
+ if v != nil { // record
+ continue
+ }
+ nestedBucket := rootBucket.Bucket(k)
+ if nestedBucket == nil {
+ continue
+ }
+ if !hasAnyItem(nestedBucket) {
+ result = append(result, bytes.Clone(k))
+ }
+ }
+ return nil
+ }); err != nil {
+ return nil, nil, err
+ }
+ return result, last, nil
+}
+
func dropOwnerIDIndex(ctx context.Context, db *bbolt.DB, log func(a ...any)) error {
return dropBucketsByPrefix(ctx, db, []byte{ownerPrefix}, func(a ...any) {
log(append([]any{"owner ID index:"}, a...)...)
From c065d55ca31c18fa48c8d8a173237095179be732 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Mon, 7 Oct 2024 17:19:04 +0300
Subject: [PATCH 135/655] [#1412] metabase: Drop logging inside transaction
This could lead to hang the db.
Signed-off-by: Dmitrii Stepanov
---
internal/logs/logs.go | 6 ----
pkg/local_object_storage/metabase/select.go | 31 ++-------------------
2 files changed, 2 insertions(+), 35 deletions(-)
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 1ae4f0d3f..84bd023f1 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -226,12 +226,6 @@ const (
EngineFinishedSuccessfullyShardsEvacuation = "shards evacuation finished successfully"
EngineFinishedWithErrorShardsEvacuation = "shards evacuation finished with error"
EngineObjectIsMovedToAnotherShard = "object is moved to another shard"
- MetabaseMissingMatcher = "missing matcher"
- MetabaseErrorInFKBTSelection = "error in FKBT selection"
- MetabaseCantDecodeListBucketLeaf = "can't decode list bucket leaf"
- MetabaseUnknownOperation = "unknown operation"
- MetabaseCantIterateOverTheBucket = "can't iterate over the bucket"
- MetabaseCouldNotIterateOverTheBuckets = "could not iterate over the buckets"
MetabaseCreatedDirectoryForMetabase = "created directory for Metabase"
MetabaseOpenedBoltDBInstanceForMetabase = "opened boltDB instance for Metabase"
MetabaseCheckingMetabaseVersion = "checking metabase version"
diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go
index 88ef7d5a4..41f05b756 100644
--- a/pkg/local_object_storage/metabase/select.go
+++ b/pkg/local_object_storage/metabase/select.go
@@ -9,7 +9,6 @@ import (
"time"
v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/metaerr"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@@ -18,7 +17,6 @@ import (
"go.etcd.io/bbolt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
- "go.uber.org/zap"
)
type (
@@ -288,8 +286,6 @@ func (db *DB) selectFromFKBT(
) { //
matchFunc, ok := db.matchers[f.Operation()]
if !ok {
- db.log.Debug(logs.MetabaseMissingMatcher, zap.Stringer("operation", f.Operation()))
-
return
}
@@ -298,7 +294,7 @@ func (db *DB) selectFromFKBT(
return
}
- err := matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = matchFunc.matchBucket(fkbtRoot, f.Header(), f.Value(), func(k, _ []byte) error {
fkbtLeaf := fkbtRoot.Bucket(k)
if fkbtLeaf == nil {
return nil
@@ -310,9 +306,6 @@ func (db *DB) selectFromFKBT(
return nil
})
})
- if err != nil {
- db.log.Debug(logs.MetabaseErrorInFKBTSelection, zap.String("error", err.Error()))
- }
}
// selectOutsideFKBT looks into all incl buckets to find list of addresses outside to add in
@@ -377,24 +370,17 @@ func (db *DB) selectFromList(
case objectSDK.MatchStringEqual:
lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value())))
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf, zap.String("error", err.Error()))
return
}
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation, zap.Uint32("operation", uint32(op)))
-
return
}
if err = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(_, val []byte) error {
l, err := decodeList(val)
if err != nil {
- db.log.Debug(logs.MetabaseCantDecodeListBucketLeaf,
- zap.String("error", err.Error()),
- )
-
return err
}
@@ -402,10 +388,6 @@ func (db *DB) selectFromList(
return nil
}); err != nil {
- db.log.Debug(logs.MetabaseCantIterateOverTheBucket,
- zap.String("error", err.Error()),
- )
-
return
}
}
@@ -447,10 +429,6 @@ func (db *DB) selectObjectID(
default:
fMatch, ok := db.matchers[op]
if !ok {
- db.log.Debug(logs.MetabaseUnknownOperation,
- zap.Uint32("operation", uint32(f.Operation())),
- )
-
return
}
@@ -461,18 +439,13 @@ func (db *DB) selectObjectID(
return
}
- err := fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
+ _ = fMatch.matchBucket(bkt, f.Header(), f.Value(), func(k, _ []byte) error {
var id oid.ID
if err := id.Decode(k); err == nil {
appendOID(id)
}
return nil
})
- if err != nil {
- db.log.Debug(logs.MetabaseCouldNotIterateOverTheBuckets,
- zap.String("error", err.Error()),
- )
- }
}
}
}
From 936ebbb8e5c1b1967e6fd0ec5ec50bd282c1f8bc Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Tue, 8 Oct 2024 18:39:52 +0300
Subject: [PATCH 136/655] [#1423] metabase: Hide `BucketName` from upper levels
Signed-off-by: Anton Nikiforov
---
pkg/local_object_storage/engine/evacuate.go | 12 +-
pkg/local_object_storage/metabase/list.go | 104 ++++++++++--------
.../metabase/list_test.go | 57 ++++++++++
pkg/local_object_storage/shard/list.go | 32 +++---
4 files changed, 143 insertions(+), 62 deletions(-)
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index 1baf237f9..c1b9276f3 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -435,7 +435,7 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
) error {
sh := shardsToEvacuate[shardID]
var cntPrm shard.IterateOverContainersPrm
- cntPrm.Handler = func(ctx context.Context, name []byte, cnt cid.ID) error {
+ cntPrm.Handler = func(ctx context.Context, objType objectSDK.Type, cnt cid.ID) error {
select {
case <-ctx.Done():
return context.Cause(ctx)
@@ -455,8 +455,11 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
skip = e.isNotRepOne(c)
}
if skip {
- countPrm := shard.CountAliveObjectsInBucketPrm{BucketName: name}
- count, err := sh.CountAliveObjectsInBucket(ctx, countPrm)
+ countPrm := shard.CountAliveObjectsInContainerPrm{
+ ObjectType: objType,
+ ContainerID: cnt,
+ }
+ count, err := sh.CountAliveObjectsInContainer(ctx, countPrm)
if err != nil {
return err
}
@@ -464,7 +467,8 @@ func (e *StorageEngine) evacuateShardObjects(ctx context.Context, cancel context
return nil
}
var objPrm shard.IterateOverObjectsInContainerPrm
- objPrm.BucketName = name
+ objPrm.ObjectType = objType
+ objPrm.ContainerID = cnt
objPrm.Handler = func(ctx context.Context, objInfo *object.Info) error {
select {
case <-ctx.Done():
diff --git a/pkg/local_object_storage/metabase/list.go b/pkg/local_object_storage/metabase/list.go
index 74a529809..b007ef0da 100644
--- a/pkg/local_object_storage/metabase/list.go
+++ b/pkg/local_object_storage/metabase/list.go
@@ -65,21 +65,25 @@ func (l ListRes) Cursor() *Cursor {
// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
type IterateOverContainersPrm struct {
// Handler function executed upon containers in db.
- Handler func(context.Context, []byte, cid.ID) error
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
}
// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
type IterateOverObjectsInContainerPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
// Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
-// CountAliveObjectsInBucketPrm contains parameters for IterateOverObjectsInContainer operation.
-type CountAliveObjectsInBucketPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
}
// ListWithCursor lists physical objects available in metabase starting from
@@ -319,12 +323,20 @@ func (db *DB) iterateOverContainers(ctx context.Context, tx *bbolt.Tx, prm Itera
if cidRaw == nil {
continue
}
-
- bktName := make([]byte, len(name))
- copy(bktName, name)
var cnt cid.ID
copy(cnt[:], containerID[:])
- err := prm.Handler(ctx, bktName, cnt)
+ var objType objectSDK.Type
+ switch prefix[0] {
+ case primaryPrefix:
+ objType = objectSDK.TypeRegular
+ case lockersPrefix:
+ objType = objectSDK.TypeLock
+ case tombstonePrefix:
+ objType = objectSDK.TypeTombstone
+ default:
+ continue
+ }
+ err := prm.Handler(ctx, objType, cnt)
if err != nil {
return err
}
@@ -356,22 +368,29 @@ func (db *DB) IterateOverObjectsInContainer(ctx context.Context, prm IterateOver
return ErrDegradedMode
}
- var containerID cid.ID
- cidRaw, prefix := parseContainerIDWithPrefix(&containerID, prm.BucketName)
- if cidRaw == nil {
- return nil
- }
err := db.boltDB.View(func(tx *bbolt.Tx) error {
- return db.iterateOverObjectsInContainer(ctx, tx, cidRaw, prefix, containerID, prm)
+ return db.iterateOverObjectsInContainer(ctx, tx, prm)
})
success = err == nil
return metaerr.Wrap(err)
}
-func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, cidRaw []byte, prefix byte,
- containerID cid.ID, prm IterateOverObjectsInContainerPrm,
-) error {
- bkt := tx.Bucket(prm.BucketName)
+func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, prm IterateOverObjectsInContainerPrm) error {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
+ return nil
+ }
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
+
+ bkt := tx.Bucket(bucketName)
if bkt == nil {
return nil
}
@@ -380,32 +399,19 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
c := bkt.Cursor()
k, v := c.First()
- var objType objectSDK.Type
-
- switch prefix {
- case primaryPrefix:
- objType = objectSDK.TypeRegular
- case lockersPrefix:
- objType = objectSDK.TypeLock
- case tombstonePrefix:
- objType = objectSDK.TypeTombstone
- default:
- return nil
- }
-
for ; k != nil; k, v = c.Next() {
var obj oid.ID
if err := obj.Decode(k); err != nil {
break
}
- if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
continue
}
var isLinkingObj bool
var ecInfo *objectcore.ECInfo
- if objType == objectSDK.TypeRegular {
+ if prm.ObjectType == objectSDK.TypeRegular {
var o objectSDK.Object
if err := o.Unmarshal(v); err != nil {
return err
@@ -422,9 +428,9 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
}
var a oid.Address
- a.SetContainer(containerID)
+ a.SetContainer(prm.ContainerID)
a.SetObject(obj)
- objInfo := objectcore.Info{Address: a, Type: objType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
+ objInfo := objectcore.Info{Address: a, Type: prm.ObjectType, IsLinkingObject: isLinkingObj, ECInfo: ecInfo}
err := prm.Handler(ctx, &objInfo)
if err != nil {
return err
@@ -433,8 +439,8 @@ func (db *DB) iterateOverObjectsInContainer(ctx context.Context, tx *bbolt.Tx, c
return nil
}
-// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage.
-func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+// CountAliveObjectsInContainer counts objects in container which aren't in graveyard or garbage.
+func (db *DB) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
var (
startedAt = time.Now()
success = false
@@ -452,14 +458,22 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
return 0, ErrDegradedMode
}
- if len(prm.BucketName) != bucketKeySize {
+ var prefix byte
+ switch prm.ObjectType {
+ case objectSDK.TypeRegular:
+ prefix = primaryPrefix
+ case objectSDK.TypeLock:
+ prefix = lockersPrefix
+ case objectSDK.TypeTombstone:
+ prefix = tombstonePrefix
+ default:
return 0, nil
}
-
- cidRaw := prm.BucketName[1:bucketKeySize]
+ bucketName := []byte{prefix}
+ bucketName = append(bucketName, prm.ContainerID[:]...)
var count uint64
err := db.boltDB.View(func(tx *bbolt.Tx) error {
- bkt := tx.Bucket(prm.BucketName)
+ bkt := tx.Bucket(bucketName)
if bkt == nil {
return nil
}
@@ -468,7 +482,7 @@ func (db *DB) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjec
c := bkt.Cursor()
k, _ := c.First()
for ; k != nil; k, _ = c.Next() {
- if inGraveyardWithKey(append(cidRaw, k...), graveyardBkt, garbageBkt) > 0 {
+ if inGraveyardWithKey(append(prm.ContainerID[:], k...), graveyardBkt, garbageBkt) > 0 {
continue
}
count++
diff --git a/pkg/local_object_storage/metabase/list_test.go b/pkg/local_object_storage/metabase/list_test.go
index bc1726bd6..203802ec0 100644
--- a/pkg/local_object_storage/metabase/list_test.go
+++ b/pkg/local_object_storage/metabase/list_test.go
@@ -8,6 +8,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/internal/testutil"
meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
+ cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -219,3 +220,59 @@ func metaListWithCursor(db *meta.DB, count uint32, cursor *meta.Cursor) ([]objec
r, err := db.ListWithCursor(context.Background(), listPrm)
return r.AddressList(), r.Cursor(), err
}
+
+func TestIterateOver(t *testing.T) {
+ t.Parallel()
+
+ db := newDB(t)
+ defer func() { require.NoError(t, db.Close()) }()
+
+ const total uint64 = 5
+ for _, typ := range []objectSDK.Type{objectSDK.TypeRegular, objectSDK.TypeTombstone, objectSDK.TypeLock} {
+ var expected []*objectSDK.Object
+ // fill metabase with objects
+ cid := cidtest.ID()
+ for range total {
+ obj := testutil.GenerateObjectWithCID(cid)
+ obj.SetType(typ)
+ err := metaPut(db, obj, nil)
+ require.NoError(t, err)
+ expected = append(expected, obj)
+ }
+
+ var metaIter meta.IterateOverObjectsInContainerPrm
+ var count uint64
+ metaIter.Handler = func(context.Context, *object.Info) error {
+ count++
+ return nil
+ }
+ metaIter.ContainerID = cid
+ metaIter.ObjectType = typ
+ err := db.IterateOverObjectsInContainer(context.Background(), metaIter)
+ require.NoError(t, err)
+ require.Equal(t, total, count)
+
+ var metaCount meta.CountAliveObjectsInContainerPrm
+ metaCount.ContainerID = cid
+ metaCount.ObjectType = typ
+ res, err := db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, res, total)
+
+ err = metaDelete(db, object.AddressOf(expected[0]), object.AddressOf(expected[1]))
+ require.NoError(t, err)
+
+ res, err = db.CountAliveObjectsInContainer(context.Background(), metaCount)
+ require.NoError(t, err)
+ require.Equal(t, uint64(3), res)
+ }
+ var count int
+ var metaPrm meta.IterateOverContainersPrm
+ metaPrm.Handler = func(context.Context, objectSDK.Type, cidSDK.ID) error {
+ count++
+ return nil
+ }
+ err := db.IterateOverContainers(context.Background(), metaPrm)
+ require.NoError(t, err)
+ require.Equal(t, 3, count)
+}
diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go
index f5d633b77..8d09974b8 100644
--- a/pkg/local_object_storage/shard/list.go
+++ b/pkg/local_object_storage/shard/list.go
@@ -37,21 +37,25 @@ func (r ListContainersRes) Containers() []cid.ID {
// IterateOverContainersPrm contains parameters for IterateOverContainers operation.
type IterateOverContainersPrm struct {
// Handler function executed upon containers in db.
- Handler func(context.Context, []byte, cid.ID) error
+ Handler func(context.Context, objectSDK.Type, cid.ID) error
}
// IterateOverObjectsInContainerPrm contains parameters for IterateOverObjectsInContainer operation.
type IterateOverObjectsInContainerPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
// Handler function executed upon objects in db.
Handler func(context.Context, *objectcore.Info) error
}
-// CountAliveObjectsInBucketPrm contains parameters for CountAliveObjectsInBucket operation.
-type CountAliveObjectsInBucketPrm struct {
- // BucketName container's bucket name.
- BucketName []byte
+// CountAliveObjectsInContainerPrm contains parameters for CountAliveObjectsInContainer operation.
+type CountAliveObjectsInContainerPrm struct {
+ // ObjectType type of objects to iterate over.
+ ObjectType objectSDK.Type
+ // ContainerID container for objects to iterate over.
+ ContainerID cid.ID
}
// ListWithCursorPrm contains parameters for ListWithCursor operation.
@@ -226,7 +230,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
}
var metaPrm meta.IterateOverObjectsInContainerPrm
- metaPrm.BucketName = prm.BucketName
+ metaPrm.ContainerID = prm.ContainerID
+ metaPrm.ObjectType = prm.ObjectType
metaPrm.Handler = prm.Handler
err := s.metaBase.IterateOverObjectsInContainer(ctx, metaPrm)
if err != nil {
@@ -236,8 +241,8 @@ func (s *Shard) IterateOverObjectsInContainer(ctx context.Context, prm IterateOv
return nil
}
-// CountAliveObjectsInBucket count objects in bucket which aren't in graveyard or garbage.
-func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObjectsInBucketPrm) (uint64, error) {
+// CountAliveObjectsInContainer counts objects in the container which aren't in the graveyard or garbage.
+func (s *Shard) CountAliveObjectsInContainer(ctx context.Context, prm CountAliveObjectsInContainerPrm) (uint64, error) {
_, span := tracing.StartSpanFromContext(ctx, "shard.CountAliveObjectsInBucket")
defer span.End()
@@ -248,9 +253,10 @@ func (s *Shard) CountAliveObjectsInBucket(ctx context.Context, prm CountAliveObj
return 0, ErrDegradedMode
}
- var metaPrm meta.CountAliveObjectsInBucketPrm
- metaPrm.BucketName = prm.BucketName
- count, err := s.metaBase.CountAliveObjectsInBucket(ctx, metaPrm)
+ var metaPrm meta.CountAliveObjectsInContainerPrm
+ metaPrm.ObjectType = prm.ObjectType
+ metaPrm.ContainerID = prm.ContainerID
+ count, err := s.metaBase.CountAliveObjectsInContainer(ctx, metaPrm)
if err != nil {
return 0, fmt.Errorf("could not count alive objects in bucket: %w", err)
}
From 4190fba86d575ba6c05aeb919ae5bc30572ecbd2 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 8 Oct 2024 16:44:34 +0300
Subject: [PATCH 137/655] [#1425] Remove SetEACL-related code
Signed-off-by: Evgenii Stratonikov
---
.../modules/morph/container/container.go | 8 --
internal/logs/logs.go | 3 -
.../processors/container/handlers.go | 4 -
.../processors/container/processor.go | 10 ---
pkg/morph/client/container/client.go | 1 -
pkg/morph/event/container/eacl.go | 51 -------------
pkg/morph/event/container/eacl_notary.go | 75 -------------------
pkg/services/container/morph/executor_test.go | 4 -
8 files changed, 156 deletions(-)
delete mode 100644 pkg/morph/event/container/eacl.go
delete mode 100644 pkg/morph/event/container/eacl_notary.go
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index a66438975..e280bc634 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -159,9 +159,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
if err != nil {
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- if len(ea.Value) != 0 {
- cnt.EACL = ea
- }
return cnt, nil
}
@@ -258,10 +255,6 @@ func restoreOrPutContainers(containers []Container, isOK func([]byte) bool, cmd
func putContainer(bw *io.BufBinWriter, ch util.Uint160, cnt Container) {
emit.AppCall(bw.BinWriter, ch, "put", callflag.All,
cnt.Value, cnt.Signature, cnt.PublicKey, cnt.Token)
- if ea := cnt.EACL; ea != nil {
- emit.AppCall(bw.BinWriter, ch, "setEACL", callflag.All,
- ea.Value, ea.Signature, ea.PublicKey, ea.Token)
- }
}
func isContainerRestored(cmd *cobra.Command, wCtx *helper.InitializeContext, containerHash util.Uint160, bw *io.BufBinWriter, hashValue util.Uint256) (bool, error) {
@@ -322,7 +315,6 @@ type Container struct {
Signature []byte `json:"signature"`
PublicKey []byte `json:"public_key"`
Token []byte `json:"token"`
- EACL *EACL `json:"eacl"`
}
// EACL represents extended ACL struct in contract storage.
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index 84bd023f1..ca783a39d 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -308,9 +308,6 @@ const (
ContainerNonAlphabetModeIgnoreContainerDelete = "non alphabet mode, ignore container delete"
ContainerDeleteContainerCheckFailed = "delete container check failed"
ContainerCouldNotApproveDeleteContainer = "could not approve delete container"
- ContainerNonAlphabetModeIgnoreSetEACL = "non alphabet mode, ignore set EACL"
- ContainerSetEACLCheckFailed = "set EACL check failed"
- ContainerCouldNotApproveSetEACL = "could not approve set EACL"
FrostFSNonAlphabetModeIgnoreConfig = "non alphabet mode, ignore config"
FrostFSCantRelaySetConfigEvent = "can't relay set config event"
FrostFSFrostfsWorkerPool = "frostfs worker pool"
diff --git a/pkg/innerring/processors/container/handlers.go b/pkg/innerring/processors/container/handlers.go
index 8bb89abe2..a54f3c772 100644
--- a/pkg/innerring/processors/container/handlers.go
+++ b/pkg/innerring/processors/container/handlers.go
@@ -48,7 +48,3 @@ func (cp *Processor) handleDelete(ev event.Event) {
zap.Int("capacity", cp.pool.Cap()))
}
}
-
-func (cp *Processor) handleSetEACL(_ event.Event) {
- cp.log.Warn(logs.SkipDeprecatedNotification, zap.String("type", "set EACL"))
-}
diff --git a/pkg/innerring/processors/container/processor.go b/pkg/innerring/processors/container/processor.go
index 8fd9edfb8..a6fbdc707 100644
--- a/pkg/innerring/processors/container/processor.go
+++ b/pkg/innerring/processors/container/processor.go
@@ -157,11 +157,6 @@ func (cp *Processor) ListenerNotaryParsers() []event.NotaryParserInfo {
p.SetParser(containerEvent.ParseDeleteNotary)
pp = append(pp, p)
- // set EACL
- p.SetRequestType(containerEvent.SetEACLNotaryEvent)
- p.SetParser(containerEvent.ParseSetEACLNotary)
- pp = append(pp, p)
-
return pp
}
@@ -190,10 +185,5 @@ func (cp *Processor) ListenerNotaryHandlers() []event.NotaryHandlerInfo {
h.SetHandler(cp.handleDelete)
hh = append(hh, h)
- // set eACL
- h.SetRequestType(containerEvent.SetEACLNotaryEvent)
- h.SetHandler(cp.handleSetEACL)
- hh = append(hh, h)
-
return hh
}
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index 9dd3a337b..fc892aafb 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -28,7 +28,6 @@ const (
listMethod = "list"
containersOfMethod = "containersOf"
eaclMethod = "eACL"
- setEACLMethod = "setEACL"
deletionInfoMethod = "deletionInfo"
startEstimationMethod = "startContainerEstimation"
diff --git a/pkg/morph/event/container/eacl.go b/pkg/morph/event/container/eacl.go
deleted file mode 100644
index 4168d8842..000000000
--- a/pkg/morph/event/container/eacl.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package container
-
-import (
- "github.com/nspcc-dev/neo-go/pkg/network/payload"
-)
-
-// SetEACL represents structure of notification about
-// modified eACL table coming from FrostFS Container contract.
-type SetEACL struct {
- TableValue []byte
- SignatureValue []byte
- PublicKeyValue []byte
- TokenValue []byte
-
- // For notary notifications only.
- // Contains raw transactions of notary request.
- NotaryRequestValue *payload.P2PNotaryRequest
-}
-
-// MorphEvent implements Neo:Morph Event interface.
-func (SetEACL) MorphEvent() {}
-
-// Table returns returns eACL table in a binary FrostFS API format.
-func (x SetEACL) Table() []byte {
- return x.TableValue
-}
-
-// Signature returns signature of the binary table.
-func (x SetEACL) Signature() []byte {
- return x.SignatureValue
-}
-
-// PublicKey returns public keys of container
-// owner in a binary format.
-func (x SetEACL) PublicKey() []byte {
- return x.PublicKeyValue
-}
-
-// SessionToken returns binary token of the session
-// within which the eACL was set.
-func (x SetEACL) SessionToken() []byte {
- return x.TokenValue
-}
-
-// NotaryRequest returns raw notary request if notification
-// was received via notary service. Otherwise, returns nil.
-func (x SetEACL) NotaryRequest() *payload.P2PNotaryRequest {
- return x.NotaryRequestValue
-}
-
-const expectedItemNumEACL = 4
diff --git a/pkg/morph/event/container/eacl_notary.go b/pkg/morph/event/container/eacl_notary.go
deleted file mode 100644
index a4fe7c966..000000000
--- a/pkg/morph/event/container/eacl_notary.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package container
-
-import (
- "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/morph/event"
- "github.com/nspcc-dev/neo-go/pkg/vm/opcode"
-)
-
-func (x *SetEACL) setTable(v []byte) {
- if v != nil {
- x.TableValue = v
- }
-}
-
-func (x *SetEACL) setSignature(v []byte) {
- if v != nil {
- x.SignatureValue = v
- }
-}
-
-func (x *SetEACL) setPublicKey(v []byte) {
- if v != nil {
- x.PublicKeyValue = v
- }
-}
-
-func (x *SetEACL) setToken(v []byte) {
- if v != nil {
- x.TokenValue = v
- }
-}
-
-var setEACLFieldSetters = []func(*SetEACL, []byte){
- // order on stack is reversed
- (*SetEACL).setToken,
- (*SetEACL).setPublicKey,
- (*SetEACL).setSignature,
- (*SetEACL).setTable,
-}
-
-const (
- // SetEACLNotaryEvent is method name for container EACL operations
- // in `Container` contract. Is used as identificator for notary
- // EACL changing requests.
- SetEACLNotaryEvent = "setEACL"
-)
-
-// ParseSetEACLNotary from NotaryEvent into container event structure.
-func ParseSetEACLNotary(ne event.NotaryEvent) (event.Event, error) {
- var (
- ev SetEACL
- currentOp opcode.Opcode
- )
-
- fieldNum := 0
-
- for _, op := range ne.Params() {
- currentOp = op.Code()
-
- switch {
- case opcode.PUSHDATA1 <= currentOp && currentOp <= opcode.PUSHDATA4:
- if fieldNum == expectedItemNumEACL {
- return nil, event.UnexpectedArgNumErr(SetEACLNotaryEvent)
- }
-
- setEACLFieldSetters[fieldNum](&ev, op.Param())
- fieldNum++
- default:
- return nil, event.UnexpectedOpcode(SetEACLNotaryEvent, op.Code())
- }
- }
-
- ev.NotaryRequestValue = ne.Raw()
-
- return ev, nil
-}
diff --git a/pkg/services/container/morph/executor_test.go b/pkg/services/container/morph/executor_test.go
index 560c69232..c64310eb3 100644
--- a/pkg/services/container/morph/executor_test.go
+++ b/pkg/services/container/morph/executor_test.go
@@ -32,10 +32,6 @@ func (m mock) Delete(_ containerCore.RemovalWitness) error {
return nil
}
-func (m mock) PutEACL(_ containerCore.EACL) error {
- return nil
-}
-
func TestInvalidToken(t *testing.T) {
m := mock{}
e := containerSvcMorph.NewExecutor(m, m)
From cc5360a57851e27dd51d72cf0ff5fa6ac44aba2f Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Tue, 8 Oct 2024 16:46:41 +0300
Subject: [PATCH 138/655] [#1425] morph/event: Rename eacl_test.go
Signed-off-by: Evgenii Stratonikov
---
pkg/morph/event/container/{eacl_test.go => util_test.go} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename pkg/morph/event/container/{eacl_test.go => util_test.go} (100%)
diff --git a/pkg/morph/event/container/eacl_test.go b/pkg/morph/event/container/util_test.go
similarity index 100%
rename from pkg/morph/event/container/eacl_test.go
rename to pkg/morph/event/container/util_test.go
From 94302235d03ad147c9d42791de39f75aaa7fe7f9 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:03:58 +0300
Subject: [PATCH 139/655] [#1425] adm: Remove eACL fetching from
dump-containers
Signed-off-by: Evgenii Stratonikov
---
.../modules/morph/container/container.go | 62 +------------------
1 file changed, 2 insertions(+), 60 deletions(-)
diff --git a/cmd/frostfs-adm/internal/modules/morph/container/container.go b/cmd/frostfs-adm/internal/modules/morph/container/container.go
index e280bc634..6f08d1655 100644
--- a/cmd/frostfs-adm/internal/modules/morph/container/container.go
+++ b/cmd/frostfs-adm/internal/modules/morph/container/container.go
@@ -139,13 +139,12 @@ func dumpContainers(cmd *cobra.Command, _ []string) error {
func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invoker, id []byte) (*Container, error) {
bw.Reset()
emit.AppCall(bw.BinWriter, ch, "get", callflag.All, id)
- emit.AppCall(bw.BinWriter, ch, "eACL", callflag.All, id)
res, err := inv.Run(bw.Bytes())
if err != nil {
return nil, fmt.Errorf("can't get container info: %w", err)
}
- if len(res.Stack) != 2 {
- return nil, fmt.Errorf("%w: expected 2 items on stack", errInvalidContainerResponse)
+ if len(res.Stack) != 1 {
+ return nil, fmt.Errorf("%w: expected 1 items on stack", errInvalidContainerResponse)
}
cnt := new(Container)
@@ -154,11 +153,6 @@ func dumpSingleContainer(bw *io.BufBinWriter, ch util.Uint160, inv *invoker.Invo
return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
}
- ea := new(EACL)
- err = ea.FromStackItem(res.Stack[1])
- if err != nil {
- return nil, fmt.Errorf("%w: %v", errInvalidContainerResponse, err)
- }
return cnt, nil
}
@@ -317,14 +311,6 @@ type Container struct {
Token []byte `json:"token"`
}
-// EACL represents extended ACL struct in contract storage.
-type EACL struct {
- Value []byte `json:"value"`
- Signature []byte `json:"signature"`
- PublicKey []byte `json:"public_key"`
- Token []byte `json:"token"`
-}
-
// ToStackItem implements stackitem.Convertible.
func (c *Container) ToStackItem() (stackitem.Item, error) {
return stackitem.NewStruct([]stackitem.Item{
@@ -369,50 +355,6 @@ func (c *Container) FromStackItem(item stackitem.Item) error {
return nil
}
-// ToStackItem implements stackitem.Convertible.
-func (c *EACL) ToStackItem() (stackitem.Item, error) {
- return stackitem.NewStruct([]stackitem.Item{
- stackitem.NewByteArray(c.Value),
- stackitem.NewByteArray(c.Signature),
- stackitem.NewByteArray(c.PublicKey),
- stackitem.NewByteArray(c.Token),
- }), nil
-}
-
-// FromStackItem implements stackitem.Convertible.
-func (c *EACL) FromStackItem(item stackitem.Item) error {
- arr, ok := item.Value().([]stackitem.Item)
- if !ok || len(arr) != 4 {
- return errors.New("invalid stack item type")
- }
-
- value, err := arr[0].TryBytes()
- if err != nil {
- return errors.New("invalid eACL value")
- }
-
- sig, err := arr[1].TryBytes()
- if err != nil {
- return errors.New("invalid eACL signature")
- }
-
- pub, err := arr[2].TryBytes()
- if err != nil {
- return errors.New("invalid eACL public key")
- }
-
- tok, err := arr[3].TryBytes()
- if err != nil {
- return errors.New("invalid eACL token")
- }
-
- c.Value = value
- c.Signature = sig
- c.PublicKey = pub
- c.Token = tok
- return nil
-}
-
// getCIDFilterFunc returns filtering function for container IDs.
// Raw byte slices are used because it works with structures returned
// from contract.
From 02bb7159a54a9522ad0bc97d1a5456f5cfc425e4 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:50:30 +0300
Subject: [PATCH 140/655] [#1425] services/tree: Remove eACL processing
Signed-off-by: Evgenii Stratonikov
---
cmd/frostfs-node/tree.go | 1 -
pkg/services/tree/options.go | 9 --
pkg/services/tree/signature.go | 137 +------------------------
pkg/services/tree/signature_test.go | 151 ++++++++++++++++++----------
4 files changed, 100 insertions(+), 198 deletions(-)
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index d22e510de..192f08471 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -54,7 +54,6 @@ func initTreeService(c *cfg) {
cli: c.shared.cnrClient,
}),
tree.WithFrostfsidSubjectProvider(c.shared.frostfsidClient),
- tree.WithEACLSource(c.cfgObject.eaclSource),
tree.WithNetmapSource(c.netMapSource),
tree.WithPrivateKey(&c.key.PrivateKey),
tree.WithLogger(c.log),
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 6a20fe5cc..1db5607f6 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -33,7 +33,6 @@ type cfg struct {
nmSource netmap.Source
cnrSource ContainerSource
frostfsidSubjectProvider frostfsidcore.SubjectProvider
- eaclSource container.EACLSource
forest pilorama.Forest
// replication-related parameters
replicatorChannelCapacity int
@@ -65,14 +64,6 @@ func WithFrostfsidSubjectProvider(provider frostfsidcore.SubjectProvider) Option
}
}
-// WithEACLSource sets a eACL table source for a tree service.
-// This option is required.
-func WithEACLSource(src container.EACLSource) Option {
- return func(c *cfg) {
- c.eaclSource = src
- }
-}
-
// WithNetmapSource sets a netmap source for a tree service.
// This option is required.
func WithNetmapSource(src netmap.Source) Option {
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 58cab659f..305adf2d7 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -9,10 +9,8 @@ import (
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
- "git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
core "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
@@ -20,7 +18,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
- "go.uber.org/zap"
)
type message interface {
@@ -30,16 +27,11 @@ type message interface {
SetSignature(*Signature)
}
-func basicACLErr(op acl.Op) error {
- return fmt.Errorf("access to operation %s is denied by basic ACL check", op)
-}
-
func eACLErr(op eacl.Operation, err error) error {
return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
}
var (
- errBearerWrongOwner = errors.New("bearer token must be signed by the container owner")
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
)
@@ -77,56 +69,7 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get request role: %w", err)
}
- basicACL := cnr.Value.BasicACL()
- // Basic ACL mask can be unset, if a container operations are performed
- // with strict APE checks only.
- //
- // FIXME(@aarifullin): tree service temporiraly performs APE checks on
- // object verbs, because tree verbs have not been introduced yet.
- if basicACL == 0x0 {
- return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
- }
-
- if !basicACL.IsOpAllowed(op, role) {
- return basicACLErr(op)
- }
-
- if !basicACL.Extendable() {
- return nil
- }
-
- var useBearer bool
- if len(rawBearer) != 0 {
- if !basicACL.AllowedBearerRules(op) {
- s.log.Debug(logs.TreeBearerPresentedButNotAllowedByACL,
- zap.String("cid", cid.EncodeToString()),
- zap.Stringer("op", op),
- )
- } else {
- useBearer = true
- }
- }
-
- var tb eacl.Table
- signer := req.GetSignature().GetKey()
- if useBearer && !bt.Impersonate() {
- if !bearer.ResolveIssuer(*bt).Equals(cnr.Value.Owner()) {
- return eACLErr(eaclOp, errBearerWrongOwner)
- }
- tb = bt.EACLTable()
- } else {
- tbCore, err := s.eaclSource.GetEACL(cid)
- if err != nil {
- return handleGetEACLError(err)
- }
- tb = *tbCore.Value
-
- if useBearer && bt.Impersonate() {
- signer = bt.SigningKeyBytes()
- }
- }
-
- return checkEACL(tb, signer, eACLRole(role), eaclOp)
+ return s.checkAPE(ctx, bt, cnr, cid, op, role, pubKey)
}
// Returns true iff the operation is read-only and request was signed
@@ -168,14 +111,6 @@ func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*beare
return bt, nil
}
-func handleGetEACLError(err error) error {
- if client.IsErrEACLNotFound(err) {
- return nil
- }
-
- return fmt.Errorf("get eACL table: %w", err)
-}
-
func verifyMessage(m message) error {
binBody, err := m.ReadSignedData(nil)
if err != nil {
@@ -260,73 +195,3 @@ func eACLOp(op acl.Op) eacl.Operation {
panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
}
}
-
-func eACLRole(role acl.Role) eacl.Role {
- switch role {
- case acl.RoleOwner:
- return eacl.RoleUser
- case acl.RoleOthers:
- return eacl.RoleOthers
- default:
- panic(fmt.Sprintf("unexpected tree service ACL role: %s", role))
- }
-}
-
-var (
- errDENY = errors.New("DENY eACL rule")
- errNoAllowRules = errors.New("not found allowing rules for the request")
-)
-
-// checkEACL searches for the eACL rules that could be applied to the request
-// (a tuple of a signer key, his FrostFS role and a request operation).
-// It does not filter the request by the filters of the eACL table since tree
-// requests do not contain any "object" information that could be filtered and,
-// therefore, filtering leads to unexpected results.
-// The code was copied with the minor updates from the SDK repo:
-// https://github.com/nspcc-dev/frostfs-sdk-go/blob/43a57d42dd50dc60465bfd3482f7f12bcfcf3411/eacl/validator.go#L28.
-func checkEACL(tb eacl.Table, signer []byte, role eacl.Role, op eacl.Operation) error {
- for _, record := range tb.Records() {
- // check type of operation
- if record.Operation() != op {
- continue
- }
-
- // check target
- if !targetMatches(record, role, signer) {
- continue
- }
-
- switch a := record.Action(); a {
- case eacl.ActionAllow:
- return nil
- case eacl.ActionDeny:
- return eACLErr(op, errDENY)
- default:
- return eACLErr(op, fmt.Errorf("unexpected action: %s", a))
- }
- }
-
- return eACLErr(op, errNoAllowRules)
-}
-
-func targetMatches(rec eacl.Record, role eacl.Role, signer []byte) bool {
- for _, target := range rec.Targets() {
- // check public key match
- if pubs := target.BinaryKeys(); len(pubs) != 0 {
- for _, key := range pubs {
- if bytes.Equal(key, signer) {
- return true
- }
- }
-
- continue
- }
-
- // check target group match
- if role == target.Role() {
- return true
- }
- }
-
- return false
-}
diff --git a/pkg/services/tree/signature_test.go b/pkg/services/tree/signature_test.go
index 3c3ebfe89..939ff170d 100644
--- a/pkg/services/tree/signature_test.go
+++ b/pkg/services/tree/signature_test.go
@@ -4,22 +4,30 @@ import (
"context"
"crypto/ecdsa"
"crypto/sha256"
+ "encoding/hex"
"errors"
"testing"
aclV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+ "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ checkercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/common/ape"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
- eaclSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
+ "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine/inmemory"
+ "git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/nspcc-dev/neo-go/pkg/util"
"github.com/stretchr/testify/require"
)
@@ -27,6 +35,34 @@ type dummyNetmapSource struct {
netmap.Source
}
+type dummySubjectProvider struct {
+ subjects map[util.Uint160]client.SubjectExtended
+}
+
+func (s dummySubjectProvider) GetSubject(addr util.Uint160) (*client.Subject, error) {
+ res := s.subjects[addr]
+ return &client.Subject{
+ PrimaryKey: res.PrimaryKey,
+ AdditionalKeys: res.AdditionalKeys,
+ Namespace: res.Namespace,
+ Name: res.Name,
+ KV: res.KV,
+ }, nil
+}
+
+func (s dummySubjectProvider) GetSubjectExtended(addr util.Uint160) (*client.SubjectExtended, error) {
+ res := s.subjects[addr]
+ return &res, nil
+}
+
+type dummyEpochSource struct {
+ epoch uint64
+}
+
+func (s dummyEpochSource) CurrentEpoch() uint64 {
+ return s.epoch
+}
+
type dummyContainerSource map[string]*containercore.Container
func (s dummyContainerSource) List() ([]cid.ID, error) {
@@ -57,16 +93,6 @@ func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, e
return &containercore.DelInfo{}, nil
}
-type dummyEACLSource map[string]*containercore.EACL
-
-func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
- cntEACL, ok := s[id.String()]
- if !ok {
- return nil, errors.New("container not found")
- }
- return cntEACL, nil
-}
-
func testContainer(owner user.ID) container.Container {
var r netmapSDK.ReplicaDescriptor
r.SetNumberOfObjects(1)
@@ -81,6 +107,8 @@ func testContainer(owner user.ID) container.Container {
return cnt
}
+const currentEpoch = 123
+
func TestMessageSign(t *testing.T) {
privs := make([]*keys.PrivateKey, 4)
for i := range privs {
@@ -99,6 +127,15 @@ func TestMessageSign(t *testing.T) {
Value: testContainer(ownerID),
}
+ e := inmemory.NewInMemoryLocalOverrides()
+ e.MorphRuleChainStorage().AddMorphRuleChain(chain.Ingress, engine.Target{
+ Type: engine.Container,
+ Name: cid1.EncodeToString(),
+ }, testChain(privs[0].PublicKey(), privs[1].PublicKey()))
+ frostfsidProvider := dummySubjectProvider{
+ subjects: make(map[util.Uint160]client.SubjectExtended),
+ }
+
s := &Service{
cfg: cfg{
log: test.NewLogger(t),
@@ -107,12 +144,10 @@ func TestMessageSign(t *testing.T) {
cnrSource: dummyContainerSource{
cid1.String(): cnr,
},
- eaclSource: dummyEACLSource{
- cid1.String(): &containercore.EACL{
- Value: testTable(cid1, privs[0].PublicKey(), privs[1].PublicKey()),
- },
- },
+ frostfsidSubjectProvider: frostfsidProvider,
+ state: dummyEpochSource{epoch: currentEpoch},
},
+ apeChecker: checkercore.New(e.LocalStorage(), e.MorphRuleChainStorage(), frostfsidProvider, dummyEpochSource{}),
}
rawCID1 := make([]byte, sha256.Size)
@@ -235,46 +270,58 @@ func TestMessageSign(t *testing.T) {
func testBearerToken(cid cid.ID, forPutGet, forGet *keys.PublicKey) bearer.Token {
var b bearer.Token
- b.SetEACLTable(*testTable(cid, forPutGet, forGet))
+ b.SetExp(currentEpoch + 1)
+ b.SetAPEOverride(bearer.APEOverride{
+ Target: ape.ChainTarget{
+ TargetType: ape.TargetTypeContainer,
+ Name: cid.EncodeToString(),
+ },
+ Chains: []ape.Chain{{Raw: testChain(forPutGet, forGet).Bytes()}},
+ })
return b
}
-func testTable(cid cid.ID, forPutGet, forGet *keys.PublicKey) *eaclSDK.Table {
- tgtGet := eaclSDK.NewTarget()
- tgtGet.SetRole(eaclSDK.RoleUnknown)
- tgtGet.SetBinaryKeys([][]byte{forPutGet.Bytes(), forGet.Bytes()})
-
- rGet := eaclSDK.NewRecord()
- rGet.SetAction(eaclSDK.ActionAllow)
- rGet.SetOperation(eaclSDK.OperationGet)
- rGet.SetTargets(*tgtGet)
-
- tgtPut := eaclSDK.NewTarget()
- tgtPut.SetRole(eaclSDK.RoleUnknown)
- tgtPut.SetBinaryKeys([][]byte{forPutGet.Bytes()})
-
- rPut := eaclSDK.NewRecord()
- rPut.SetAction(eaclSDK.ActionAllow)
- rPut.SetOperation(eaclSDK.OperationPut)
- rPut.SetTargets(*tgtPut)
-
- tb := eaclSDK.NewTable()
- tb.AddRecord(rGet)
- tb.AddRecord(rPut)
-
- tgt := eaclSDK.NewTarget()
- tgt.SetRole(eaclSDK.RoleOthers)
-
- for _, op := range []eaclSDK.Operation{eaclSDK.OperationGet, eaclSDK.OperationPut} {
- r := eaclSDK.NewRecord()
- r.SetAction(eaclSDK.ActionDeny)
- r.SetTargets(*tgt)
- r.SetOperation(op)
- tb.AddRecord(r)
+func testChain(forPutGet, forGet *keys.PublicKey) *chain.Chain {
+ ruleGet := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodGetObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forGet.Bytes()),
+ },
+ },
+ }
+ rulePut := chain.Rule{
+ Status: chain.Allow,
+ Resources: chain.Resources{Names: []string{native.ResourceFormatAllObjects}},
+ Actions: chain.Actions{Names: []string{native.MethodPutObject}},
+ Any: true,
+ Condition: []chain.Condition{
+ {
+ Op: chain.CondStringEquals,
+ Kind: chain.KindRequest,
+ Key: native.PropertyKeyActorPublicKey,
+ Value: hex.EncodeToString(forPutGet.Bytes()),
+ },
+ },
}
- tb.SetCID(cid)
-
- return tb
+ return &chain.Chain{
+ Rules: []chain.Rule{
+ ruleGet,
+ rulePut,
+ },
+ }
}
From 11347602719a764179a74382076de4430936d7ad Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Wed, 9 Oct 2024 10:55:48 +0300
Subject: [PATCH 141/655] [#1425] services/tree: Remove eACL mentions from
bearer token parsing errors
Signed-off-by: Evgenii Stratonikov
---
pkg/services/tree/signature.go | 30 ++++++------------------------
1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/pkg/services/tree/signature.go b/pkg/services/tree/signature.go
index 305adf2d7..20a629fcc 100644
--- a/pkg/services/tree/signature.go
+++ b/pkg/services/tree/signature.go
@@ -15,7 +15,6 @@ import (
cidSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
- "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
@@ -27,10 +26,6 @@ type message interface {
SetSignature(*Signature)
}
-func eACLErr(op eacl.Operation, err error) error {
- return fmt.Errorf("access to operation %s is denied by extended ACL check: %w", op, err)
-}
-
var (
errBearerWrongContainer = errors.New("bearer token is created for another container")
errBearerSignature = errors.New("invalid bearer token signature")
@@ -57,11 +52,9 @@ func (s *Service) verifyClient(ctx context.Context, req message, cid cidSDK.ID,
return fmt.Errorf("can't get container %s: %w", cid, err)
}
- eaclOp := eACLOp(op)
-
- bt, err := parseBearer(rawBearer, cid, eaclOp)
+ bt, err := parseBearer(rawBearer, cid)
if err != nil {
- return err
+ return fmt.Errorf("access to operation %s is denied: %w", op, err)
}
role, pubKey, err := roleAndPubKeyFromReq(cnr, req, bt)
@@ -93,20 +86,20 @@ func (s *Service) isAuthorized(req message, op acl.Op) (bool, error) {
return false, nil
}
-func parseBearer(rawBearer []byte, cid cidSDK.ID, eaclOp eacl.Operation) (*bearer.Token, error) {
+func parseBearer(rawBearer []byte, cid cidSDK.ID) (*bearer.Token, error) {
if len(rawBearer) == 0 {
return nil, nil
}
bt := new(bearer.Token)
if err := bt.Unmarshal(rawBearer); err != nil {
- return nil, eACLErr(eaclOp, fmt.Errorf("invalid bearer token: %w", err))
+ return nil, fmt.Errorf("invalid bearer token: %w", err)
}
if !bt.AssertContainer(cid) {
- return nil, eACLErr(eaclOp, errBearerWrongContainer)
+ return nil, errBearerWrongContainer
}
if !bt.VerifySignature() {
- return nil, eACLErr(eaclOp, errBearerSignature)
+ return nil, errBearerSignature
}
return bt, nil
}
@@ -184,14 +177,3 @@ func roleAndPubKeyFromReq(cnr *core.Container, req message, bt *bearer.Token) (a
return role, pub, nil
}
-
-func eACLOp(op acl.Op) eacl.Operation {
- switch op {
- case acl.OpObjectGet:
- return eacl.OperationGet
- case acl.OpObjectPut:
- return eacl.OperationPut
- default:
- panic(fmt.Sprintf("unexpected tree service ACL operation: %s", op))
- }
-}
From dfb00083d07499a0e3d89076cc3f08729c00cb71 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 10 Oct 2024 14:57:39 +0300
Subject: [PATCH 142/655] [#1426] go.mod: Update sdk-go
Signed-off-by: Evgenii Stratonikov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 91cc55a36..9a64f0e81 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
- git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa
+ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/hrw v1.2.1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
diff --git a/go.sum b/go.sum
index 728592ea5..777d9b3ab 100644
--- a/go.sum
+++ b/go.sum
@@ -10,8 +10,8 @@ git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d/go.mod h1:7ZZq8iguY7qFsXajdHGmZd2AW4QbucyrJwhbsRfOfek=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573 h1:6qCcm1oqFbmf9C5AauXzrL5OPGnTbI9HoB/jAtD9274=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa h1:Jr8hXNNFECLhC7S45HuyQms4U/gim1xILoU3g4ZZnHg=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241007135805-4c310ae1c7fa/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509 h1:5gtEq4bjVgAbTOrbEquspyM3s+qsMtkpGC5m9FtfImk=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
From 5992ee901ae574d536a82bd7f1504852e17fb086 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 11 Oct 2024 11:33:36 +0300
Subject: [PATCH 143/655] [#1411] go.mod: Bump frostfs-contract to v0.20.0
Signed-off-by: Alexander Chuprov
---
go.mod | 2 +-
go.sum | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/go.mod b/go.mod
index 9a64f0e81..1468c12b2 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,7 @@ go 1.22
require (
code.gitea.io/sdk/gitea v0.17.1
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3
- git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c
+ git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
diff --git a/go.sum b/go.sum
index 777d9b3ab..5ce81807a 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ code.gitea.io/sdk/gitea v0.17.1 h1:3jCPOG2ojbl8AcfaUCRYLT5MUcBMFwS0OSK2mA5Zok8=
code.gitea.io/sdk/gitea v0.17.1/go.mod h1:aCnBqhHpoEWA180gMbaCtdX9Pl6BWBAuuP2miadoTNM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3 h1:6QXNnfBgYx81UZsBdpPnQY+ZMSKGFbFc29wV7DJ/UG4=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241007120543-29c522d5d8a3/go.mod h1:F5GS7hRb62PUy5sTYDC4ajVdeffoAfjHSSHTKUJEaYU=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c h1:9JmXxWcqWaOXModBwtj/WlPYOfSk2jRSDoRsDRYvgnA=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240909072709-3e221b973a3c/go.mod h1:DVb1AqP323wiEwCtlfoJJTERF4Xji2L39X6T8kv26RE=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0 h1:8Z5iPhieCrbcdhxBuY/Bajh6V5fki7Whh0b4S2zYJYU=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.20.0/go.mod h1:Y2Xorxc8SBO4phoek7n3XxaPZz5rIrFgDsU4TOjmlGA=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-locode-db v0.4.1-0.20240710074952-65761deb5c0d h1:uJ/wvuMdepbkaV8XMS5uN9B0FQWMep0CttSuDZiDhq0=
From 42bf03e5cc04f2b6d67465608b842693171344ba Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Fri, 11 Oct 2024 11:33:56 +0300
Subject: [PATCH 144/655] [#1411] adm/nns: Add 'delRecord'
Signed-off-by: Alexander Chuprov
---
.../internal/modules/morph/nns/record.go | 29 +++++++++++++++++++
.../internal/modules/morph/nns/root.go | 10 +++++++
2 files changed, 39 insertions(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/record.go b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
index 0e217eb61..66bb1b94f 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/record.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/record.go
@@ -47,6 +47,19 @@ func initDelRecordsCmd() {
_ = cobra.MarkFlagRequired(delRecordsCmd.Flags(), nnsRecordTypeFlag)
}
+func initDelRecordCmd() {
+ Cmd.AddCommand(delRecordCmd)
+ delRecordCmd.Flags().StringP(commonflags.EndpointFlag, commonflags.EndpointFlagShort, "", commonflags.EndpointFlagDesc)
+ delRecordCmd.Flags().String(commonflags.AlphabetWalletsFlag, "", commonflags.AlphabetWalletsFlagDesc)
+ delRecordCmd.Flags().String(nnsNameFlag, "", nnsNameFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordTypeFlag, "", nnsRecordTypeFlagDesc)
+ delRecordCmd.Flags().String(nnsRecordDataFlag, "", nnsRecordDataFlagDesc)
+
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsNameFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordTypeFlag)
+ _ = cobra.MarkFlagRequired(delRecordCmd.Flags(), nnsRecordDataFlag)
+}
+
func addRecord(cmd *cobra.Command, _ []string) {
c, actor, _ := getRPCClient(cmd)
name, _ := cmd.Flags().GetString(nnsNameFlag)
@@ -115,6 +128,22 @@ func delRecords(cmd *cobra.Command, _ []string) {
cmd.Println("Records removed successfully")
}
+func delRecord(cmd *cobra.Command, _ []string) {
+ c, actor, _ := getRPCClient(cmd)
+ name, _ := cmd.Flags().GetString(nnsNameFlag)
+ data, _ := cmd.Flags().GetString(nnsRecordDataFlag)
+ recordType, _ := cmd.Flags().GetString(nnsRecordTypeFlag)
+ typ, err := getRecordType(recordType)
+ commonCmd.ExitOnErr(cmd, "unable to parse record type: %w", err)
+ h, vub, err := c.DeleteRecord(name, typ, data)
+ commonCmd.ExitOnErr(cmd, "unable to delete record: %w", err)
+
+ cmd.Println("Waiting for transaction to persist...")
+ _, err = actor.Wait(h, vub, err)
+ commonCmd.ExitOnErr(cmd, "delete records error: %w", err)
+ cmd.Println("Record removed successfully")
+}
+
func getRecordType(recordType string) (*big.Int, error) {
switch strings.ToUpper(recordType) {
case "A":
diff --git a/cmd/frostfs-adm/internal/modules/morph/nns/root.go b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
index 56774c292..9bdeaccd9 100644
--- a/cmd/frostfs-adm/internal/modules/morph/nns/root.go
+++ b/cmd/frostfs-adm/internal/modules/morph/nns/root.go
@@ -95,6 +95,15 @@ var (
},
Run: delRecords,
}
+ delRecordCmd = &cobra.Command{
+ Use: "delete-record",
+ Short: "Removes domain record with the specified type and data",
+ PreRun: func(cmd *cobra.Command, _ []string) {
+ _ = viper.BindPFlag(commonflags.EndpointFlag, cmd.Flags().Lookup(commonflags.EndpointFlag))
+ _ = viper.BindPFlag(commonflags.AlphabetWalletsFlag, cmd.Flags().Lookup(commonflags.AlphabetWalletsFlag))
+ },
+ Run: delRecord,
+ }
)
func init() {
@@ -106,4 +115,5 @@ func init() {
initAddRecordCmd()
initGetRecordsCmd()
initDelRecordsCmd()
+ initDelRecordCmd()
}
From acd6eb18151d7e2fec413d97e273b13076a9b4bb Mon Sep 17 00:00:00 2001
From: Anton Nikiforov
Date: Fri, 11 Oct 2024 15:40:01 +0300
Subject: [PATCH 145/655] [#1427] object: Fix `Put` for `EC` object when node
unavailable
There might be a situation when the context is canceled earlier than the traverser moves to another part of the nodes.
To avoid this, we need to wait for the result of the concurrent put at each traverser iteration.
Signed-off-by: Anton Nikiforov
---
pkg/services/object/common/writer/ec.go | 20 +-
pkg/services/object/common/writer/ec_test.go | 191 +++++++++++++++++++
2 files changed, 205 insertions(+), 6 deletions(-)
create mode 100644 pkg/services/object/common/writer/ec_test.go
diff --git a/pkg/services/object/common/writer/ec.go b/pkg/services/object/common/writer/ec.go
index 6b6a14cc0..dffe52a6d 100644
--- a/pkg/services/object/common/writer/ec.go
+++ b/pkg/services/object/common/writer/ec.go
@@ -197,14 +197,15 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
if err != nil {
return err
}
+ partsProcessed := make([]atomic.Bool, len(parts))
objID, _ := obj.ID()
t, err := placement.NewTraverser(append(e.PlacementOpts, placement.ForObject(objID))...)
if err != nil {
return err
}
- eg, egCtx := errgroup.WithContext(ctx)
for {
+ eg, egCtx := errgroup.WithContext(ctx)
nodes := t.Next()
if len(nodes) == 0 {
break
@@ -216,13 +217,20 @@ func (e *ECWriter) writeRawObject(ctx context.Context, obj *objectSDK.Object) er
}
for idx := range parts {
- eg.Go(func() error {
- return e.writePart(egCtx, parts[idx], idx, nodes, visited)
- })
- t.SubmitSuccess()
+ if !partsProcessed[idx].Load() {
+ eg.Go(func() error {
+ err := e.writePart(egCtx, parts[idx], idx, nodes, visited)
+ if err == nil {
+ partsProcessed[idx].Store(true)
+ t.SubmitSuccess()
+ }
+ return err
+ })
+ }
}
+ err = eg.Wait()
}
- if err := eg.Wait(); err != nil {
+ if err != nil {
return errIncompletePut{
singleErr: err,
}
diff --git a/pkg/services/object/common/writer/ec_test.go b/pkg/services/object/common/writer/ec_test.go
new file mode 100644
index 000000000..32863d678
--- /dev/null
+++ b/pkg/services/object/common/writer/ec_test.go
@@ -0,0 +1,191 @@
+package writer
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "strconv"
+ "testing"
+
+ rawclient "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
+ netmapcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/util/logger"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
+ apiclient "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+ cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+ cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+ objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+ oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+ oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+ usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
+ "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
+ "git.frostfs.info/TrueCloudLab/tzhash/tz"
+ "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
+ "github.com/panjf2000/ants/v2"
+ "github.com/stretchr/testify/require"
+)
+
+type testPlacementBuilder struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (p *testPlacementBuilder) BuildPlacement(_ cid.ID, _ *oid.ID, _ netmap.PlacementPolicy) (
+ [][]netmap.NodeInfo, error,
+) {
+ arr := make([]netmap.NodeInfo, len(p.vectors[0]))
+ copy(arr, p.vectors[0])
+ return [][]netmap.NodeInfo{arr}, nil
+}
+
+type nmKeys struct{}
+
+func (nmKeys) IsLocalKey(_ []byte) bool {
+ return false
+}
+
+type clientConstructor struct {
+ vectors [][]netmap.NodeInfo
+}
+
+func (c clientConstructor) Get(info client.NodeInfo) (client.MultiAddressClient, error) {
+ if bytes.Equal(info.PublicKey(), c.vectors[0][0].PublicKey()) ||
+ bytes.Equal(info.PublicKey(), c.vectors[0][1].PublicKey()) {
+ return multiAddressClient{err: errors.New("node unavailable")}, nil
+ }
+ return multiAddressClient{}, nil
+}
+
+type multiAddressClient struct {
+ client.MultiAddressClient
+ err error
+}
+
+func (c multiAddressClient) ObjectPutSingle(_ context.Context, _ apiclient.PrmObjectPutSingle) (*apiclient.ResObjectPutSingle, error) {
+ if c.err != nil {
+ return nil, c.err
+ }
+ return &apiclient.ResObjectPutSingle{}, nil
+}
+
+func (c multiAddressClient) ReportError(error) {
+}
+
+func (multiAddressClient) RawForAddress(context.Context, network.Address, func(cli *rawclient.Client) error) error {
+ return nil
+}
+
+func TestECWriter(t *testing.T) {
+ // Create container with policy EC 1.1
+ cnr := container.Container{}
+ p1 := netmap.PlacementPolicy{}
+ p1.SetContainerBackupFactor(1)
+ x1 := netmap.ReplicaDescriptor{}
+ x1.SetECDataCount(1)
+ x1.SetECParityCount(1)
+ p1.AddReplicas(x1)
+ cnr.SetPlacementPolicy(p1)
+ cnr.SetAttribute("cnr", "cnr1")
+
+ cid := cidtest.ID()
+
+ // Create 4 nodes, 2 nodes for chunks,
+ // 2 nodes for the case when the first two will fail.
+ ns, _ := testNodeMatrix(t, []int{4})
+
+ data := make([]byte, 100)
+ _, _ = rand.Read(data)
+ ver := version.Current()
+
+ var csum checksum.Checksum
+ csum.SetSHA256(sha256.Sum256(data))
+
+ var csumTZ checksum.Checksum
+ csumTZ.SetTillichZemor(tz.Sum(csum.Value()))
+
+ obj := objectSDK.New()
+ obj.SetID(oidtest.ID())
+ obj.SetOwnerID(usertest.ID())
+ obj.SetContainerID(cid)
+ obj.SetVersion(&ver)
+ obj.SetPayload(data)
+ obj.SetPayloadSize(uint64(len(data)))
+ obj.SetPayloadChecksum(csum)
+ obj.SetPayloadHomomorphicHash(csumTZ)
+
+ // Builder return nodes without sort by hrw
+ builder := &testPlacementBuilder{
+ vectors: ns,
+ }
+
+ ownerKey, err := keys.NewPrivateKey()
+ require.NoError(t, err)
+
+ pool, err := ants.NewPool(4, ants.WithNonblocking(true))
+ require.NoError(t, err)
+
+ log, err := logger.NewLogger(nil)
+ require.NoError(t, err)
+
+ var n nmKeys
+ ecw := ECWriter{
+ Config: &Config{
+ NetmapKeys: n,
+ RemotePool: pool,
+ Logger: log,
+ ClientConstructor: clientConstructor{vectors: ns},
+ },
+ PlacementOpts: append(
+ []placement.Option{placement.UseBuilder(builder), placement.ForContainer(cnr)},
+ placement.WithCopyNumbers(nil)), // copies number ignored for EC
+ Container: cnr,
+ Key: &ownerKey.PrivateKey,
+ Relay: nil,
+ ObjectMetaValid: true,
+ }
+
+ err = ecw.WriteObject(context.Background(), obj)
+ require.NoError(t, err)
+}
+
+func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
+ mNodes := make([][]netmap.NodeInfo, len(dim))
+ mAddr := make([][]string, len(dim))
+
+ for i := range dim {
+ ns := make([]netmap.NodeInfo, dim[i])
+ as := make([]string, dim[i])
+
+ for j := range dim[i] {
+ a := fmt.Sprintf("/ip4/192.168.0.%s/tcp/%s",
+ strconv.Itoa(i),
+ strconv.Itoa(60000+j),
+ )
+
+ var ni netmap.NodeInfo
+ ni.SetNetworkEndpoints(a)
+ ni.SetPublicKey([]byte(a))
+
+ var na network.AddressGroup
+
+ err := na.FromIterator(netmapcore.Node(ni))
+ require.NoError(t, err)
+
+ as[j] = network.StringifyGroup(na)
+
+ ns[j] = ni
+ }
+
+ mNodes[i] = ns
+ mAddr[i] = as
+ }
+
+ return mNodes, mAddr
+}
From d2a59b2de8572952df34e2b66c3bf51d03ce13d9 Mon Sep 17 00:00:00 2001
From: Aleksey Savchuk
Date: Mon, 14 Oct 2024 15:51:37 +0300
Subject: [PATCH 146/655] [#1429] lens/explorer: Fix locked object records
display text
Display texts for a locked object and a list of its lockers were mistakenly swapped.
Signed-off-by: Aleksey Savchuk
---
cmd/frostfs-lens/internal/schema/metabase/records/string.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/cmd/frostfs-lens/internal/schema/metabase/records/string.go b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
index a6c70d537..ec0ab8e1a 100644
--- a/cmd/frostfs-lens/internal/schema/metabase/records/string.go
+++ b/cmd/frostfs-lens/internal/schema/metabase/records/string.go
@@ -38,7 +38,7 @@ func (r *ContainerVolumeRecord) String() string {
func (r *LockedRecord) String() string {
return fmt.Sprintf(
- "Locker OID %s %c Locked [%d]OID {...}",
+ "Object OID %s %c Lockers [%d]OID {...}",
common.FormatSimple(fmt.Sprintf("%-44s", r.id), tcell.ColorAqua),
tview.Borders.Vertical,
len(r.ids),
From 714ff784fa460767e82527b71fd520932b0256ed Mon Sep 17 00:00:00 2001
From: Vitaliy Potyarkin
Date: Mon, 14 Oct 2024 17:31:26 +0300
Subject: [PATCH 147/655] [#1431] objsvc: Use specific values in message about
address mismatch
This makes troubleshooting failed operations much easier
Signed-off-by: Vitaliy Potyarkin
---
pkg/services/object/common/target/target.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/services/object/common/target/target.go b/pkg/services/object/common/target/target.go
index a2d6b4d39..9e0f49297 100644
--- a/pkg/services/object/common/target/target.go
+++ b/pkg/services/object/common/target/target.go
@@ -86,7 +86,7 @@ func newTrustedTarget(prm *objectwriter.Params) (transformer.ChunkedObjectWriter
user.IDFromKey(&ownerSession, key.PublicKey)
if !ownerObj.Equals(ownerSession) {
- return nil, errors.New("session token is missing but object owner id is different from the default key")
+ return nil, fmt.Errorf("session token is missing but object owner id (%s) is different from the default key (%s)", ownerObj, ownerSession)
}
} else {
if !ownerObj.Equals(sessionInfo.Owner) {
From 3012286452e8b2bb04a6ad9b70e364b00b29919f Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:10:03 +0300
Subject: [PATCH 148/655] [#1431] metabase: Fix unreachable code
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/metabase/delete.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/metabase/delete.go b/pkg/local_object_storage/metabase/delete.go
index 3add1f268..b5ac22017 100644
--- a/pkg/local_object_storage/metabase/delete.go
+++ b/pkg/local_object_storage/metabase/delete.go
@@ -453,7 +453,7 @@ func delFKBTIndexItem(tx *bbolt.Tx, item namedBucketItem) error {
func hasAnyItem(b *bbolt.Bucket) bool {
var hasAnyItem bool
c := b.Cursor()
- for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ for k, _ := c.First(); k != nil; {
hasAnyItem = true
break
}
From d53732f663ce46ff29196782b5d836a37c1f6c7d Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:17:00 +0300
Subject: [PATCH 149/655] [#1431] engine: Delete always false condition
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/engine/evacuate.go | 3 ---
1 file changed, 3 deletions(-)
diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go
index c1b9276f3..940e30323 100644
--- a/pkg/local_object_storage/engine/evacuate.go
+++ b/pkg/local_object_storage/engine/evacuate.go
@@ -56,9 +56,6 @@ func (s EvacuateScope) String() string {
var sb strings.Builder
first := true
if s&EvacuateScopeObjects == EvacuateScopeObjects {
- if !first {
- sb.WriteString(";")
- }
sb.WriteString("objects")
first = false
}
From 63466d71b22d8b23eff4d0fc1eea5a16d36a138b Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:20:14 +0300
Subject: [PATCH 150/655] [#1431] engine: Delete unused constants
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-cli/modules/ape_manager/add_chain.go | 9 ++++-----
internal/logs/logs.go | 14 --------------
pkg/morph/client/container/client.go | 1 -
3 files changed, 4 insertions(+), 20 deletions(-)
diff --git a/cmd/frostfs-cli/modules/ape_manager/add_chain.go b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
index c6622da25..a85f3c93e 100644
--- a/cmd/frostfs-cli/modules/ape_manager/add_chain.go
+++ b/cmd/frostfs-cli/modules/ape_manager/add_chain.go
@@ -31,11 +31,10 @@ const (
)
const (
- defaultNamespace = ""
- namespaceTarget = "namespace"
- containerTarget = "container"
- userTarget = "user"
- groupTarget = "group"
+ namespaceTarget = "namespace"
+ containerTarget = "container"
+ userTarget = "user"
+ groupTarget = "group"
)
var errUnknownTargetType = errors.New("unknown target type")
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index ca783a39d..b4bc31b0c 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -14,8 +14,6 @@ const (
InterruptPlacementIterationByContext = "interrupt placement iteration by context"
Notification = "notification"
-
- SkipDeprecatedNotification = "skip deprecated notification"
)
const (
@@ -41,8 +39,6 @@ const (
InnerringCantUpdatePersistentState = "can't update persistent state"
InnerringCloserError = "closer error"
InnerringReadConfigFromBlockchain = "read config from blockchain"
- NotificatorNotificatorStartProcessingObjectNotifications = "notificator: start processing object notifications"
- NotificatorNotificatorProcessingObjectNotification = "notificator: processing object notification"
PolicerCouldNotGetContainer = "could not get container"
PolicerCouldNotConfirmContainerRemoval = "could not confirm container removal"
PolicerCouldNotInhumeObjectWithMissingContainer = "could not inhume object with missing container"
@@ -61,7 +57,6 @@ const (
ReplicatorCouldNotReplicateObject = "could not replicate object"
ReplicatorObjectSuccessfullyReplicated = "object successfully replicated"
TreeRedirectingTreeServiceQuery = "redirecting tree service query"
- TreeBearerPresentedButNotAllowedByACL = "bearer presented but not allowed by ACL"
TreeCouldNotGetLastSynchronizedHeightForATree = "could not get last synchronized height for a tree"
TreeCouldNotUpdateLastSynchronizedHeightForATree = "could not update last synchronized height for a tree"
TreeSynchronizeTree = "synchronize tree"
@@ -107,7 +102,6 @@ const (
GetUnableToGetAllPartsECObject = "unable to get all parts, continue to reconstruct with existed"
GetUnableToGetPartECObject = "unable to get part of the erasure-encoded object"
GetUnableToHeadPartECObject = "unable to head part of the erasure-encoded object"
- GetUnableToGetECObjectContainer = "unable to get container for erasure-coded object"
GetUnableToHeadPartsECObject = "unable to head parts of the erasure-encoded object"
GetAssemblingSplittedObjectCompleted = "assembling splitted object completed"
GetAssemblingECObjectCompleted = "assembling erasure-coded object completed"
@@ -271,9 +265,7 @@ const (
ShardFailureToMarkLockersAsGarbage = "failure to mark lockers as garbage"
ShardFailureToGetExpiredUnlockedObjects = "failure to get expired unlocked objects"
ShardCouldNotMarkObjectToDeleteInMetabase = "could not mark object to delete in metabase"
- WritecacheTriedToFlushItemsFromWritecache = "tried to flush items from write-cache"
WritecacheWaitingForChannelsToFlush = "waiting for channels to flush"
- WritecacheCantRemoveObjectsFromTheDatabase = "can't remove objects from the database"
WritecacheCantRemoveObjectFromWritecache = "can't remove object from write-cache"
BlobovniczatreeCouldNotGetObjectFromLevel = "could not get object from level"
BlobovniczatreeCouldNotCloseBlobovnicza = "could not close Blobovnicza"
@@ -413,11 +405,6 @@ const (
FrostFSNodeReadNewlyCreatedContainerAfterTheNotification = "read newly created container after the notification"
FrostFSNodeContainerCreationEventsReceipt = "container creation event's receipt"
FrostFSNodeContainerRemovalEventsReceipt = "container removal event's receipt"
- FrostFSNodeNotificatorCouldNotListContainers = "notificator: could not list containers"
- FrostFSNodeNotificatorCouldNotSelectObjectsFromContainer = "notificator: could not select objects from container"
- FrostFSNodeNotificatorCouldNotProcessObject = "notificator: could not process object"
- FrostFSNodeNotificatorFinishedProcessingObjectNotifications = "notificator: finished processing object notifications"
- FrostFSNodeCouldNotWriteObjectNotification = "could not write object notification"
FrostFSNodeCouldNotGetMaxObjectSizeValue = "could not get max object size value"
FrostFSNodeCouldNotInhumeMarkRedundantCopyAsGarbage = "could not inhume mark redundant copy as garbage"
FrostFSNodeFailedInitTracing = "failed init tracing"
@@ -461,7 +448,6 @@ const (
FSTreeCantUnmarshalObject = "can't unmarshal an object"
FSTreeCantFushObjectBlobstor = "can't flush an object to blobstor"
FSTreeCantUpdateID = "can't update object storage ID"
- FSTreeCantDecodeDBObjectAddress = "can't decode object address from the DB"
PutSingleRedirectFailure = "failed to redirect PutSingle request"
StorageIDRetrievalFailure = "can't get storage ID from metabase"
ObjectRemovalFailureBlobStor = "can't remove object from blobStor"
diff --git a/pkg/morph/client/container/client.go b/pkg/morph/client/container/client.go
index fc892aafb..f735a5ff7 100644
--- a/pkg/morph/client/container/client.go
+++ b/pkg/morph/client/container/client.go
@@ -33,7 +33,6 @@ const (
startEstimationMethod = "startContainerEstimation"
stopEstimationMethod = "stopContainerEstimation"
- putSizeMethod = "putContainerSize"
listSizesMethod = "listContainerSizes"
getSizeMethod = "getContainerSize"
From 00b1cecfb7486aac93e8806caa6563fe75eabc1b Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:25:00 +0300
Subject: [PATCH 151/655] [#1431] obj_storage/shard: Fix visibility of
'newMetricStore'
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/shard/control_test.go | 4 ++--
pkg/local_object_storage/shard/metrics_test.go | 4 ++--
pkg/local_object_storage/shard/reload_test.go | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go
index 44fee1636..6b9eaa550 100644
--- a/pkg/local_object_storage/shard/control_test.go
+++ b/pkg/local_object_storage/shard/control_test.go
@@ -126,7 +126,7 @@ func TestRefillMetabaseCorrupted(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
@@ -190,7 +190,7 @@ func TestRefillMetabase(t *testing.T) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go
index 56622326a..cec5a12ad 100644
--- a/pkg/local_object_storage/shard/metrics_test.go
+++ b/pkg/local_object_storage/shard/metrics_test.go
@@ -35,7 +35,7 @@ type metricsStore struct {
refillStatus string
}
-func NewMetricStore() *metricsStore {
+func newMetricStore() *metricsStore {
return &metricsStore{
objCounters: map[string]uint64{
"phy": 0,
@@ -404,7 +404,7 @@ func shardWithMetrics(t *testing.T, path string) (*Shard, *metricsStore) {
}),
}
- mm := NewMetricStore()
+ mm := newMetricStore()
sh := New(
WithID(NewIDFromBytes([]byte{})),
diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go
index 7dacbfa6c..7dd7189bb 100644
--- a/pkg/local_object_storage/shard/reload_test.go
+++ b/pkg/local_object_storage/shard/reload_test.go
@@ -51,7 +51,7 @@ func TestShardReload(t *testing.T) {
WithMetaBaseOptions(metaOpts...),
WithPiloramaOptions(
pilorama.WithPath(filepath.Join(p, "pilorama"))),
- WithMetricsWriter(NewMetricStore()),
+ WithMetricsWriter(newMetricStore()),
}
sh := New(opts...)
From f6582081a4ee67e97773f655b8f18148946c5a0c Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 17:31:15 +0300
Subject: [PATCH 152/655] [#1431] obj_storage/metabase: Delete unused variable
Signed-off-by: Alexander Chuprov
---
pkg/local_object_storage/metabase/put.go | 5 ++---
pkg/services/object/remote_reader.go | 3 ---
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/pkg/local_object_storage/metabase/put.go b/pkg/local_object_storage/metabase/put.go
index 0c14196b7..b329e8032 100644
--- a/pkg/local_object_storage/metabase/put.go
+++ b/pkg/local_object_storage/metabase/put.go
@@ -59,9 +59,8 @@ func (p *PutPrm) SetIndexAttributes(v bool) {
}
var (
- ErrUnknownObjectType = errors.New("unknown object type")
- ErrIncorrectSplitInfoUpdate = errors.New("updating split info on object without it")
- ErrIncorrectRootObject = errors.New("invalid root object")
+ ErrUnknownObjectType = errors.New("unknown object type")
+ ErrIncorrectRootObject = errors.New("invalid root object")
)
// Put saves object header in metabase. Object payload expected to be cut.
diff --git a/pkg/services/object/remote_reader.go b/pkg/services/object/remote_reader.go
index 18b6107cf..bc6ffd160 100644
--- a/pkg/services/object/remote_reader.go
+++ b/pkg/services/object/remote_reader.go
@@ -2,7 +2,6 @@ package object
import (
"context"
- "errors"
"fmt"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
@@ -35,8 +34,6 @@ type RemoteRequestPrm struct {
const remoteOpTTL = 1
-var ErrNotFound = errors.New("object header not found")
-
// NewRemoteReader creates, initializes and returns new RemoteHeader instance.
func NewRemoteReader(keyStorage *util.KeyStorage, cache ClientConstructor) *RemoteReader {
return &RemoteReader{
From d83879d4b859f016a9bfef808b19324ce593814e Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 18:05:55 +0300
Subject: [PATCH 153/655] [#1431] node: Fix comment format
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-node/config/profiler/config.go | 2 +-
cmd/frostfs-node/object.go | 2 +-
pkg/core/netmap/keys.go | 2 +-
.../netmap/nodevalidation/locode/deps.go | 16 ++++++++--------
pkg/innerring/processors/netmap/processor.go | 2 +-
pkg/local_object_storage/shard/metrics.go | 2 +-
pkg/morph/client/actor.go | 2 +-
pkg/morph/event/container/put_notary.go | 2 +-
pkg/network/group.go | 4 ++--
pkg/services/control/ir/server/deps.go | 2 +-
pkg/services/control/rpc.go | 2 +-
pkg/services/control/server/server.go | 4 ++--
pkg/services/netmap/executor.go | 4 ++--
pkg/services/object/common/writer/distributed.go | 2 +-
pkg/services/object/delete/service.go | 4 ++--
pkg/services/object/patch/service.go | 2 +-
pkg/services/policer/option.go | 2 +-
pkg/util/rand/rand.go | 2 +-
pkg/util/sdnotify/sdnotify.go | 2 +-
19 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/cmd/frostfs-node/config/profiler/config.go b/cmd/frostfs-node/config/profiler/config.go
index 191694970..6c3e8adab 100644
--- a/cmd/frostfs-node/config/profiler/config.go
+++ b/cmd/frostfs-node/config/profiler/config.go
@@ -52,7 +52,7 @@ func Address(c *config.Config) string {
return AddressDefault
}
-// BlockRates returns the value of "block_rate" config parameter
+// BlockRate returns the value of "block_rate" config parameter
// from "pprof" section.
func BlockRate(c *config.Config) int {
s := c.Sub(subsection)
diff --git a/cmd/frostfs-node/object.go b/cmd/frostfs-node/object.go
index 68acb05d3..c484c5d8c 100644
--- a/cmd/frostfs-node/object.go
+++ b/cmd/frostfs-node/object.go
@@ -117,7 +117,7 @@ func (i *delNetInfo) TombstoneLifetime() (uint64, error) {
return i.cfg.cfgObject.tombstoneLifetime.Load(), nil
}
-// returns node owner ID calculated from configured private key.
+// LocalNodeID returns node owner ID calculated from configured private key.
//
// Implements method needed for Object.Delete service.
func (i *delNetInfo) LocalNodeID() user.ID {
diff --git a/pkg/core/netmap/keys.go b/pkg/core/netmap/keys.go
index 29cb2dc94..0c64bb798 100644
--- a/pkg/core/netmap/keys.go
+++ b/pkg/core/netmap/keys.go
@@ -2,6 +2,6 @@ package netmap
// AnnouncedKeys is an interface of utility for working with the announced public keys of the storage nodes.
type AnnouncedKeys interface {
- // Checks if the key was announced by a local node.
+ // IsLocalKey checks if the key was announced by a local node.
IsLocalKey(key []byte) bool
}
diff --git a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
index 8f6667933..ba5db9205 100644
--- a/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
+++ b/pkg/innerring/processors/netmap/nodevalidation/locode/deps.go
@@ -8,38 +8,38 @@ import (
// Record is an interface of read-only
// FrostFS LOCODE database single entry.
type Record interface {
- // Must return ISO 3166-1 alpha-2
+ // CountryCode must return ISO 3166-1 alpha-2
// country code.
//
// Must not return nil.
CountryCode() *locodedb.CountryCode
- // Must return English short country name
+ // CountryName must return English short country name
// officially used by the ISO 3166
// Maintenance Agency (ISO 3166/MA).
CountryName() string
- // Must return UN/LOCODE 3-character code
+ // LocationCode must return UN/LOCODE 3-character code
// for the location (numerals 2-9 may also
// be used).
//
// Must not return nil.
LocationCode() *locodedb.LocationCode
- // Must return name of the location which
+ // LocationName must return name of the location which
// have been allocated a UN/LOCODE without
// diacritic sign.
LocationName() string
- // Must return ISO 1-3 character alphabetic
+ // SubDivCode must return ISO 1-3 character alphabetic
// and/or numeric code for the administrative
// division of the country concerned.
SubDivCode() string
- // Must return subdivision name.
+ // SubDivName must return subdivision name.
SubDivName() string
- // Must return existing continent where is
+ // Continent must return existing continent where is
// the location.
//
// Must not return nil.
@@ -49,7 +49,7 @@ type Record interface {
// DB is an interface of read-only
// FrostFS LOCODE database.
type DB interface {
- // Must find the record that corresponds to
+ // Get must find the record that corresponds to
// LOCODE and provides the Record interface.
//
// Must return an error if Record is nil.
diff --git a/pkg/innerring/processors/netmap/processor.go b/pkg/innerring/processors/netmap/processor.go
index e8fb8721b..4cecda59c 100644
--- a/pkg/innerring/processors/netmap/processor.go
+++ b/pkg/innerring/processors/netmap/processor.go
@@ -43,7 +43,7 @@ type (
// of information about the node and its finalization for adding
// to the network map.
NodeValidator interface {
- // Must verify and optionally update NodeInfo structure.
+ // VerifyAndUpdate must verify and optionally update NodeInfo structure.
//
// Must return an error if NodeInfo input is invalid.
// Must return an error if it is not possible to correctly
diff --git a/pkg/local_object_storage/shard/metrics.go b/pkg/local_object_storage/shard/metrics.go
index 91bf8d0ae..087ba42ef 100644
--- a/pkg/local_object_storage/shard/metrics.go
+++ b/pkg/local_object_storage/shard/metrics.go
@@ -22,7 +22,7 @@ type MetricsWriter interface {
// SetShardID must set (update) the shard identifier that will be used in
// metrics.
SetShardID(id string)
- // SetReadonly must set shard mode.
+ // SetMode must set the mode of the shard.
SetMode(mode mode.Mode)
// SetContainerObjectsCount sets container object count.
SetContainerObjectsCount(cnrID string, objectType string, value uint64)
diff --git a/pkg/morph/client/actor.go b/pkg/morph/client/actor.go
index b6718dea5..2849f3052 100644
--- a/pkg/morph/client/actor.go
+++ b/pkg/morph/client/actor.go
@@ -16,7 +16,7 @@ type actorProvider interface {
GetRPCActor() actor.RPCActor
}
-// Client switches an established connection with neo-go if it is broken.
+// SwitchRPCGuardedActor switches an established connection with neo-go if it is broken.
// This leads to an invalidation of an rpc actor within Client. That means the
// components that are initilized with the rpc actor may unintentionally use
// it when it is already invalidated. SwitchRPCGuardedActor is used to prevent
diff --git a/pkg/morph/event/container/put_notary.go b/pkg/morph/event/container/put_notary.go
index f5779ced6..6b2ee7b0a 100644
--- a/pkg/morph/event/container/put_notary.go
+++ b/pkg/morph/event/container/put_notary.go
@@ -46,7 +46,7 @@ const (
// put container requests.
PutNotaryEvent = "put"
- // PutNotaryEvent is an ID of notary "put named container" notification.
+ // PutNamedNotaryEvent is an ID of notary "put named container" notification.
PutNamedNotaryEvent = "putNamed"
)
diff --git a/pkg/network/group.go b/pkg/network/group.go
index a6de0653e..9843b14d4 100644
--- a/pkg/network/group.go
+++ b/pkg/network/group.go
@@ -67,11 +67,11 @@ func (x AddressGroup) Swap(i, j int) {
// MultiAddressIterator is an interface of network address group.
type MultiAddressIterator interface {
- // Must iterate over network addresses and pass each one
+ // IterateAddresses must iterate over network addresses and pass each one
// to the handler until it returns true.
IterateAddresses(func(string) bool)
- // Must return number of addresses in group.
+ // NumberOfAddresses must return number of addresses in group.
NumberOfAddresses() int
}
diff --git a/pkg/services/control/ir/server/deps.go b/pkg/services/control/ir/server/deps.go
index 0c2de5300..9d5cfefc8 100644
--- a/pkg/services/control/ir/server/deps.go
+++ b/pkg/services/control/ir/server/deps.go
@@ -5,7 +5,7 @@ import control "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/control/
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current health status of the IR application.
+ // HealthStatus must calculate and return current health status of the IR application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/control/rpc.go b/pkg/services/control/rpc.go
index 80aece008..04524a68c 100644
--- a/pkg/services/control/rpc.go
+++ b/pkg/services/control/rpc.go
@@ -292,7 +292,7 @@ func ListTargetsLocalOverrides(cli *client.Client, req *ListTargetsLocalOverride
return wResp.message, nil
}
-// RemoveChainLocalOverride executes ControlService.RemoveChainLocalOverride RPC.
+// GetChainLocalOverride executes ControlService.GetChainLocalOverride RPC.
func GetChainLocalOverride(cli *client.Client, req *GetChainLocalOverrideRequest, opts ...client.CallOption) (*GetChainLocalOverrideResponse, error) {
wResp := newResponseWrapper[GetChainLocalOverrideResponse]()
wReq := &requestWrapper{m: req}
diff --git a/pkg/services/control/server/server.go b/pkg/services/control/server/server.go
index f3fe56a46..b6fdcb246 100644
--- a/pkg/services/control/server/server.go
+++ b/pkg/services/control/server/server.go
@@ -26,13 +26,13 @@ type Server struct {
// HealthChecker is component interface for calculating
// the current health status of a node.
type HealthChecker interface {
- // Must calculate and return current status of the node in FrostFS network map.
+ // NetmapStatus must calculate and return current status of the node in FrostFS network map.
//
// If status can not be calculated for any reason,
// control.netmapStatus_STATUS_UNDEFINED should be returned.
NetmapStatus() control.NetmapStatus
- // Must calculate and return current health status of the node application.
+ // HealthStatus must calculate and return current health status of the node application.
//
// If status can not be calculated for any reason,
// control.HealthStatus_HEALTH_STATUS_UNDEFINED should be returned.
diff --git a/pkg/services/netmap/executor.go b/pkg/services/netmap/executor.go
index 496b07a98..ae2044246 100644
--- a/pkg/services/netmap/executor.go
+++ b/pkg/services/netmap/executor.go
@@ -26,7 +26,7 @@ type executorSvc struct {
// NodeState encapsulates information
// about current node state.
type NodeState interface {
- // Must return current node state
+ // LocalNodeInfo must return current node state
// in FrostFS API v2 NodeInfo structure.
LocalNodeInfo() (*netmap.NodeInfo, error)
@@ -39,7 +39,7 @@ type NodeState interface {
// NetworkInfo encapsulates source of the
// recent information about the FrostFS network.
type NetworkInfo interface {
- // Must return recent network information in FrostFS API v2 NetworkInfo structure.
+ // Dump must return recent network information in FrostFS API v2 NetworkInfo structure.
//
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
diff --git a/pkg/services/object/common/writer/distributed.go b/pkg/services/object/common/writer/distributed.go
index f62934bed..f7486eae7 100644
--- a/pkg/services/object/common/writer/distributed.go
+++ b/pkg/services/object/common/writer/distributed.go
@@ -28,7 +28,7 @@ type distributedWriter struct {
resetSuccessAfterOnBroadcast bool
}
-// parameters and state of container Traversal.
+// Traversal parameters and state of container.
type Traversal struct {
Opts []placement.Option
diff --git a/pkg/services/object/delete/service.go b/pkg/services/object/delete/service.go
index 0ba21eee3..e4f7a8c50 100644
--- a/pkg/services/object/delete/service.go
+++ b/pkg/services/object/delete/service.go
@@ -27,11 +27,11 @@ type Option func(*cfg)
type NetworkInfo interface {
netmap.State
- // Must return the lifespan of the tombstones
+ // TombstoneLifetime must return the lifespan of the tombstones
// in the FrostFS epochs.
TombstoneLifetime() (uint64, error)
- // Returns user ID of the local storage node. Result must not be nil.
+ // LocalNodeID returns user ID of the local storage node. Result must not be nil.
// New tombstone objects will have the result as an owner ID if removal is executed w/o a session.
LocalNodeID() user.ID
}
diff --git a/pkg/services/object/patch/service.go b/pkg/services/object/patch/service.go
index f1082dfff..953f82b48 100644
--- a/pkg/services/object/patch/service.go
+++ b/pkg/services/object/patch/service.go
@@ -26,7 +26,7 @@ func NewService(cfg *objectwriter.Config,
}
}
-// Put calls internal service and returns v2 object streamer.
+// Patch calls internal service and returns v2 object streamer.
func (s *Service) Patch() (object.PatchObjectStream, error) {
nodeKey, err := s.Config.KeyStorage.GetKey(nil)
if err != nil {
diff --git a/pkg/services/policer/option.go b/pkg/services/policer/option.go
index 9dbfd8b9f..336f7a0ab 100644
--- a/pkg/services/policer/option.go
+++ b/pkg/services/policer/option.go
@@ -143,7 +143,7 @@ func WithPlacementBuilder(v placement.Builder) Option {
}
}
-// WithRemoteObjectHeader returns option to set remote object header receiver of Policer.
+// WithRemoteObjectHeaderFunc returns option to set remote object header receiver of Policer.
func WithRemoteObjectHeaderFunc(v RemoteObjectHeaderFunc) Option {
return func(c *cfg) {
c.remoteHeader = v
diff --git a/pkg/util/rand/rand.go b/pkg/util/rand/rand.go
index 97508f82a..a06296a07 100644
--- a/pkg/util/rand/rand.go
+++ b/pkg/util/rand/rand.go
@@ -13,7 +13,7 @@ func Uint64() uint64 {
return source.Uint64()
}
-// Uint64 returns a random uint32 value.
+// Uint32 returns a random uint32 value.
func Uint32() uint32 {
return source.Uint32()
}
diff --git a/pkg/util/sdnotify/sdnotify.go b/pkg/util/sdnotify/sdnotify.go
index e94ff77ad..22549bc96 100644
--- a/pkg/util/sdnotify/sdnotify.go
+++ b/pkg/util/sdnotify/sdnotify.go
@@ -24,7 +24,7 @@ var (
errSocketIsNotInitialized = errors.New("socket is not initialized")
)
-// Initializes socket with provided name of
+// InitSocket initializes socket with provided name of
// environment variable.
func InitSocket() error {
notifySocket := os.Getenv("NOTIFY_SOCKET")
From 41038b2ec0fab0d9488f15330e8777f053a28c03 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 18:16:59 +0300
Subject: [PATCH 154/655] [#1431] node: Fix 'empty slice declaration using a
literal'
Signed-off-by: Alexander Chuprov
---
pkg/innerring/processors/alphabet/handlers_test.go | 6 +++---
pkg/local_object_storage/shard/control.go | 2 +-
pkg/morph/event/notary_preparator_test.go | 2 +-
scripts/populate-metabase/internal/generate.go | 8 ++++----
4 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/pkg/innerring/processors/alphabet/handlers_test.go b/pkg/innerring/processors/alphabet/handlers_test.go
index dfda37472..c7a004b54 100644
--- a/pkg/innerring/processors/alphabet/handlers_test.go
+++ b/pkg/innerring/processors/alphabet/handlers_test.go
@@ -95,7 +95,7 @@ func TestProcessorEmitsGasToNetmapIfNoParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -167,7 +167,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
t.Parallel()
var emission uint64 = 100_000
var index int = 5
- var parsedWallets []util.Uint160 = []util.Uint160{}
+ var parsedWallets []util.Uint160
alphabetContracts := innerring.NewAlphabetContracts()
for i := range index + 1 {
@@ -176,7 +176,7 @@ func TestProcessorDoesntEmitGasIfNoNetmapOrParsedWallets(t *testing.T) {
morphClient := &testMorphClient{}
- nodes := []netmap.NodeInfo{}
+ var nodes []netmap.NodeInfo
network := &netmap.NetMap{}
network.SetNodes(nodes)
diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go
index 4f9f25608..62800dbd0 100644
--- a/pkg/local_object_storage/shard/control.go
+++ b/pkg/local_object_storage/shard/control.go
@@ -368,7 +368,7 @@ func (s *Shard) Close() error {
if s.rb != nil {
s.rb.Stop(s.log)
}
- components := []interface{ Close() error }{}
+ var components []interface{ Close() error }
if s.pilorama != nil {
components = append(components, s.pilorama)
diff --git a/pkg/morph/event/notary_preparator_test.go b/pkg/morph/event/notary_preparator_test.go
index 4c269bcbd..60ddb4601 100644
--- a/pkg/morph/event/notary_preparator_test.go
+++ b/pkg/morph/event/notary_preparator_test.go
@@ -25,7 +25,7 @@ var (
alphaKeys keys.PublicKeys
wrongAlphaKeys keys.PublicKeys
- dummyAlphabetInvocationScript = []byte{} // expected to be empty if generated by Notary Actor, as requester can't fill it in
+ dummyAlphabetInvocationScript []byte
dummyAlphabetInvocationScriptOld = append([]byte{byte(opcode.PUSHDATA1), 64}, make([]byte, 64)...) // expected to be dummy if generated manually
wrongDummyInvocationScript = append([]byte{byte(opcode.PUSHDATA1), 64, 1}, make([]byte, 63)...)
diff --git a/scripts/populate-metabase/internal/generate.go b/scripts/populate-metabase/internal/generate.go
index d2004b673..8a96dcaaa 100644
--- a/scripts/populate-metabase/internal/generate.go
+++ b/scripts/populate-metabase/internal/generate.go
@@ -18,7 +18,7 @@ import (
)
func GeneratePayloadPool(count uint, size uint) [][]byte {
- pool := [][]byte{}
+ var pool [][]byte
for i := uint(0); i < count; i++ {
payload := make([]byte, size)
_, _ = rand.Read(payload)
@@ -29,7 +29,7 @@ func GeneratePayloadPool(count uint, size uint) [][]byte {
}
func GenerateAttributePool(count uint) []objectSDK.Attribute {
- pool := []objectSDK.Attribute{}
+ var pool []objectSDK.Attribute
for i := uint(0); i < count; i++ {
for j := uint(0); j < count; j++ {
attr := *objectSDK.NewAttribute()
@@ -42,7 +42,7 @@ func GenerateAttributePool(count uint) []objectSDK.Attribute {
}
func GenerateOwnerPool(count uint) []user.ID {
- pool := []user.ID{}
+ var pool []user.ID
for i := uint(0); i < count; i++ {
pool = append(pool, usertest.ID())
}
@@ -117,7 +117,7 @@ func WithPayloadFromPool(pool [][]byte) ObjectOption {
func WithAttributesFromPool(pool []objectSDK.Attribute, count uint) ObjectOption {
return func(obj *objectSDK.Object) {
- attrs := []objectSDK.Attribute{}
+ var attrs []objectSDK.Attribute
for i := uint(0); i < count; i++ {
attrs = append(attrs, pool[rand.Intn(len(pool))])
}
From 07ce40e1196a44d305390dcea8e1e0040f6a16d2 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Tue, 15 Oct 2024 12:28:58 +0300
Subject: [PATCH 155/655] [#1430] adm/morph: Add NNS address display in
'deploy'
Signed-off-by: Alexander Chuprov
---
cmd/frostfs-adm/internal/modules/morph/helper/contract.go | 1 +
1 file changed, 1 insertion(+)
diff --git a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
index 2011301d1..eea3b040e 100644
--- a/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
+++ b/cmd/frostfs-adm/internal/modules/morph/helper/contract.go
@@ -166,5 +166,6 @@ func DeployNNS(c *InitializeContext, method string) error {
return fmt.Errorf("can't send deploy transaction: %w", err)
}
+ c.Command.Println("NNS hash:", invokeHash.StringLE())
return c.AwaitTx()
}
From 90f36693995e1b411094686e4419bb7d11831f35 Mon Sep 17 00:00:00 2001
From: Alexander Chuprov
Date: Mon, 14 Oct 2024 16:07:38 +0300
Subject: [PATCH 156/655] [#1342] network/cache: Add node address to error
multiClient
Signed-off-by: Alexander Chuprov
---
pkg/network/cache/multi.go | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index 9305c143b..b83cbb217 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -169,15 +169,16 @@ func (x *multiClient) iterateClients(ctx context.Context, f func(clientcore.Clie
var siErr *objectSDK.SplitInfoError
var eiErr *objectSDK.ECInfoError
+ if err != nil {
+ err = fmt.Errorf("client connection error at %v: %w", addr, err)
+ x.ReportError(err)
+ }
+
success := err == nil || errors.Is(err, context.Canceled) || errors.As(err, &siErr) || errors.As(err, &eiErr)
if success || firstErr == nil || errors.Is(firstErr, errRecentlyFailed) {
firstErr = err
}
- if err != nil {
- x.ReportError(err)
- }
-
return success
})
From b0c5def2d934ed5b79f54fb37160560f576785f4 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 17 Oct 2024 14:16:03 +0300
Subject: [PATCH 157/655] [#1433] shard/test: Use WithDisabledGC() option where
possible
Signed-off-by: Evgenii Stratonikov
---
pkg/local_object_storage/shard/gc_internal_test.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pkg/local_object_storage/shard/gc_internal_test.go b/pkg/local_object_storage/shard/gc_internal_test.go
index 3993593ad..11db5e54e 100644
--- a/pkg/local_object_storage/shard/gc_internal_test.go
+++ b/pkg/local_object_storage/shard/gc_internal_test.go
@@ -73,10 +73,10 @@ func Test_ObjectNotFoundIfNotDeletedFromMetabase(t *testing.T) {
return pool
}),
WithGCRemoverSleepInterval(1 * time.Second),
+ WithDisabledGC(),
}
sh = New(opts...)
- sh.gcCfg.testHookRemover = func(context.Context) gcRunResult { return gcRunResult{} }
require.NoError(t, sh.Open(context.Background()))
require.NoError(t, sh.Init(context.Background()))
defer func() { require.NoError(t, sh.Close()) }()
From b42bcdc6fa6cca2cf8e5a5fbaf2c8cf82f957b37 Mon Sep 17 00:00:00 2001
From: Evgenii Stratonikov
Date: Thu, 17 Oct 2024 14:37:26 +0300
Subject: [PATCH 158/655] [#1433] services/object: Put object before auxiliary
info
Consider the following operations ordering:
1. Inhume(with tombstone A) --> add tombstone mark for an object
2. --> new epoch arrives
3. --> GCMark is added for a tombstone A, because it is unavailable
4. Put(A) --> return error, because the object already has a GCMark
It is possible, and I have successfully reproduced it with a test on the
shard level. However, the error is related to the specific
_ordering_ of operations with engine. And triggering race-conditions like
this is only possible on a shard level currently, so no tests are
written.
Signed-off-by: Evgenii Stratonikov
---
pkg/services/object/common/writer/local.go | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/pkg/services/object/common/writer/local.go b/pkg/services/object/common/writer/local.go
index e219b44dd..cf3d03275 100644
--- a/pkg/services/object/common/writer/local.go
+++ b/pkg/services/object/common/writer/local.go
@@ -32,6 +32,10 @@ type LocalTarget struct {
}
func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, meta objectCore.ContentMeta) error {
+ if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
+ return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
+ }
+
switch meta.Type() {
case objectSDK.TypeTombstone:
err := t.Storage.Delete(ctx, objectCore.AddressOf(obj), meta.Objects())
@@ -47,8 +51,5 @@ func (t LocalTarget) WriteObject(ctx context.Context, obj *objectSDK.Object, met
// objects that do not change meta storage
}
- if err := t.Storage.Put(ctx, obj, containerCore.IsIndexedContainer(t.Container)); err != nil {
- return fmt.Errorf("(%T) could not put object to local storage: %w", t, err)
- }
return nil
}
From 3304afa9d1f9893ad72bcd9445751798b6558c16 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 15:24:01 +0300
Subject: [PATCH 159/655] [#1422] config: Add multinet config
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config/multinet/config.go | 62 +++++++++++++++++++
.../config/multinet/config_test.go | 52 ++++++++++++++++
config/example/ir.env | 9 +++
config/example/ir.yaml | 15 +++++
config/example/node.env | 10 +++
config/example/node.json | 22 +++++++
config/example/node.yaml | 15 +++++
docs/storage-node-configuration.md | 39 ++++++++++--
8 files changed, 219 insertions(+), 5 deletions(-)
create mode 100644 cmd/frostfs-node/config/multinet/config.go
create mode 100644 cmd/frostfs-node/config/multinet/config_test.go
diff --git a/cmd/frostfs-node/config/multinet/config.go b/cmd/frostfs-node/config/multinet/config.go
new file mode 100644
index 000000000..f598efc51
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config.go
@@ -0,0 +1,62 @@
+package multinet
+
+import (
+ "strconv"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+)
+
+const (
+ subsection = "multinet"
+
+ FallbackDelayDefault = 300 * time.Millisecond
+)
+
+// Enabled returns the value of "enabled" config parameter from "multinet" section.
+func Enabled(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "enabled")
+}
+
+type Subnet struct {
+ Mask string
+ SourceIPs []string
+}
+
+// Subnets returns the value of "subnets" config parameter from "multinet" section.
+func Subnets(c *config.Config) []Subnet {
+ var result []Subnet
+ sub := c.Sub(subsection).Sub("subnets")
+ for i := 0; ; i++ {
+ s := sub.Sub(strconv.FormatInt(int64(i), 10))
+ mask := config.StringSafe(s, "mask")
+ if mask == "" {
+ break
+ }
+ sourceIPs := config.StringSliceSafe(s, "source_ips")
+ result = append(result, Subnet{
+ Mask: mask,
+ SourceIPs: sourceIPs,
+ })
+ }
+ return result
+}
+
+// Balancer returns the value of "balancer" config parameter from "multinet" section.
+func Balancer(c *config.Config) string {
+ return config.StringSafe(c.Sub(subsection), "balancer")
+}
+
+// Restrict returns the value of "restrict" config parameter from "multinet" section.
+func Restrict(c *config.Config) bool {
+ return config.BoolSafe(c.Sub(subsection), "restrict")
+}
+
+// FallbackDelay returns the value of "fallback_delay" config parameter from "multinet" section.
+func FallbackDelay(c *config.Config) time.Duration {
+ fd := config.DurationSafe(c.Sub(subsection), "fallback_delay")
+ if fd != 0 { // negative value means no fallback
+ return fd
+ }
+ return FallbackDelayDefault
+}
diff --git a/cmd/frostfs-node/config/multinet/config_test.go b/cmd/frostfs-node/config/multinet/config_test.go
new file mode 100644
index 000000000..5f7dc6d53
--- /dev/null
+++ b/cmd/frostfs-node/config/multinet/config_test.go
@@ -0,0 +1,52 @@
+package multinet
+
+import (
+ "testing"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config"
+ configtest "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/test"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMultinetSection(t *testing.T) {
+ t.Run("defaults", func(t *testing.T) {
+ empty := configtest.EmptyConfig()
+ require.Equal(t, false, Enabled(empty))
+ require.Equal(t, ([]Subnet)(nil), Subnets(empty))
+ require.Equal(t, "", Balancer(empty))
+ require.Equal(t, false, Restrict(empty))
+ require.Equal(t, FallbackDelayDefault, FallbackDelay(empty))
+ })
+
+ const path = "../../../../config/example/node"
+
+ fileConfigTest := func(c *config.Config) {
+ require.Equal(t, true, Enabled(c))
+ require.Equal(t, []Subnet{
+ {
+ Mask: "192.168.219.174/24",
+ SourceIPs: []string{
+ "192.168.218.185",
+ "192.168.219.185",
+ },
+ },
+ {
+ Mask: "10.78.70.74/24",
+ SourceIPs: []string{
+ "10.78.70.185",
+ "10.78.71.185",
+ },
+ },
+ }, Subnets(c))
+ require.Equal(t, "roundrobin", Balancer(c))
+ require.Equal(t, false, Restrict(c))
+ require.Equal(t, 350*time.Millisecond, FallbackDelay(c))
+ }
+
+ configtest.ForEachFileType(path, fileConfigTest)
+
+ t.Run("ENV", func(t *testing.T) {
+ configtest.ForEnvFileType(t, path, fileConfigTest)
+ })
+}
diff --git a/config/example/ir.env b/config/example/ir.env
index 7234a4b32..ebd91c243 100644
--- a/config/example/ir.env
+++ b/config/example/ir.env
@@ -80,3 +80,12 @@ FROSTFS_IR_PPROF_MUTEX_RATE=10000
FROSTFS_IR_PROMETHEUS_ENABLED=true
FROSTFS_IR_PROMETHEUS_ADDRESS=localhost:9090
FROSTFS_IR_PROMETHEUS_SHUTDOWN_TIMEOUT=30s
+
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/ir.yaml b/config/example/ir.yaml
index 4c64f088b..49f9fd324 100644
--- a/config/example/ir.yaml
+++ b/config/example/ir.yaml
@@ -123,3 +123,18 @@ prometheus:
systemdnotify:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
diff --git a/config/example/node.env b/config/example/node.env
index 6618a981a..580d343fb 100644
--- a/config/example/node.env
+++ b/config/example/node.env
@@ -206,3 +206,13 @@ FROSTFS_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
# AUDIT section
FROSTFS_AUDIT_ENABLED=true
+
+# MULTINET section
+FROSTFS_MULTINET_ENABLED=true
+FROSTFS_MULTINET_SUBNETS_0_MASK="192.168.219.174/24"
+FROSTFS_MULTINET_SUBNETS_0_SOURCE_IPS="192.168.218.185 192.168.219.185"
+FROSTFS_MULTINET_SUBNETS_1_MASK="10.78.70.74/24"
+FROSTFS_MULTINET_SUBNETS_1_SOURCE_IPS="10.78.70.185 10.78.71.185"
+FROSTFS_MULTINET_BALANCER=roundrobin
+FROSTFS_MULTINET_RESTRICT=false
+FROSTFS_MULTINET_FALLBACK_DELAY=350ms
diff --git a/config/example/node.json b/config/example/node.json
index 0d100ed80..3470d2d12 100644
--- a/config/example/node.json
+++ b/config/example/node.json
@@ -264,5 +264,27 @@
},
"audit": {
"enabled": true
+ },
+ "multinet": {
+ "enabled": true,
+ "subnets": [
+ {
+ "mask": "192.168.219.174/24",
+ "source_ips": [
+ "192.168.218.185",
+ "192.168.219.185"
+ ]
+ },
+ {
+ "mask": "10.78.70.74/24",
+          "source_ips": [
+ "10.78.70.185",
+ "10.78.71.185"
+ ]
+ }
+ ],
+ "balancer": "roundrobin",
+ "restrict": false,
+ "fallback_delay": "350ms"
}
}
diff --git a/config/example/node.yaml b/config/example/node.yaml
index 2a80fba18..2a963fc0f 100644
--- a/config/example/node.yaml
+++ b/config/example/node.yaml
@@ -240,3 +240,18 @@ runtime:
audit:
enabled: true
+
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
diff --git a/docs/storage-node-configuration.md b/docs/storage-node-configuration.md
index c74695e2b..2b94400df 100644
--- a/docs/storage-node-configuration.md
+++ b/docs/storage-node-configuration.md
@@ -25,8 +25,8 @@ There are some custom types used for brevity:
| `replicator` | [Replicator service configuration](#replicator-section) |
| `storage` | [Storage engine configuration](#storage-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
-| `audit` | [Audit configuration](#audit-section) |
-
+| `audit` | [Audit configuration](#audit-section) |
+| `multinet` | [Multinet configuration](#multinet-section) |
# `control` section
```yaml
@@ -435,6 +435,35 @@ audit:
enabled: true
```
-| Parameter | Type | Default value | Description |
-|---------------------|--------|---------------|---------------------------------------------------|
-| `soft_memory_limit` | `bool` | false | If `true` then audit event logs will be recorded. |
+| Parameter | Type | Default value | Description |
+|-----------|--------|---------------|---------------------------------------------------|
+| `enabled` | `bool` | false | If `true` then audit event logs will be recorded. |
+
+
+# `multinet` section
+Contains multinet parameters.
+
+```yaml
+multinet:
+ enabled: true
+ subnets:
+ - mask: 192.168.219.174/24
+ source_ips:
+ - 192.168.218.185
+ - 192.168.219.185
+ - mask: 10.78.70.74/24
+ source_ips:
+ - 10.78.70.185
+ - 10.78.71.185
+ balancer: roundrobin
+ restrict: false
+ fallback_delay: 350ms
+```
+
+| Parameter | Type | Default value | Description |
+| ---------------- | ---------- | ------------- | -------------------------------------------------------------------------------------------------------------------------- |
+| `enabled` | `bool` | false | If `true` then source-based routing is enabled. |
+| `subnets`        | `subnet`   | empty         | List of allowed subnets and the source IP addresses to use when dialing destinations within them.                            |
+| `balancer` | `string` | "" | Balancer to select network interfaces, allowed values are "" (no balancing, use first suitable interface) or "roundrobin". |
+| `restrict` | `bool` | false | If `true` then any requests that do not match `subnets` will fail. |
+| `fallback_delay` | `duration` | 350ms         | Delay before trying the next source IP address when a hostname resolves to multiple addresses.                               |
From 74db7352653b67e67e9345e0659fd37047fec710 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Tue, 8 Oct 2024 17:25:37 +0300
Subject: [PATCH 160/655] [#1422] node: Add dialer source to config
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 30 +++++++++++++++
go.mod | 1 +
go.sum | 2 +
internal/logs/logs.go | 1 +
internal/net/config.go | 66 +++++++++++++++++++++++++++++++++
internal/net/dial_target.go | 54 +++++++++++++++++++++++++++
internal/net/dialer.go | 35 ++++++++++++++++++
internal/net/dialer_source.go | 69 +++++++++++++++++++++++++++++++++++
8 files changed, 258 insertions(+)
create mode 100644 internal/net/config.go
create mode 100644 internal/net/dial_target.go
create mode 100644 internal/net/dialer.go
create mode 100644 internal/net/dialer_source.go
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index 3c7e310b4..dc1bad485 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -26,12 +26,14 @@ import (
fstreeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/engine/shard/blobstor/fstree"
loggerconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/logger"
morphconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/morph"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/multinet"
nodeconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/node"
objectconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/object"
replicatorconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/replicator"
tracingconfig "git.frostfs.info/TrueCloudLab/frostfs-node/cmd/frostfs-node/config/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-node/internal/metrics"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/ape/chainbase"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
@@ -436,6 +438,8 @@ type shared struct {
metricsCollector *metrics.NodeMetrics
metricsSvc *objectService.MetricCollector
+
+ dialerSource *internalNet.DialerSource
}
// dynamicConfiguration stores parameters of the
@@ -760,6 +764,9 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
persistate, err := state.NewPersistentStorage(nodeconfig.PersistentState(appCfg).Path())
fatalOnErr(err)
+ ds, err := internalNet.NewDialerSource(internalNetConfig(appCfg))
+ fatalOnErr(err)
+
cacheOpts := cache.ClientCacheOpts{
DialTimeout: apiclientconfig.DialTimeout(appCfg),
StreamTimeout: apiclientconfig.StreamTimeout(appCfg),
@@ -778,9 +785,27 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
putClientCache: cache.NewSDKClientCache(cacheOpts),
persistate: persistate,
metricsCollector: metrics.NewNodeMetrics(),
+ dialerSource: ds,
}
}
+func internalNetConfig(appCfg *config.Config) internalNet.Config {
+ result := internalNet.Config{
+ Enabled: multinet.Enabled(appCfg),
+ Balancer: multinet.Balancer(appCfg),
+ Restrict: multinet.Restrict(appCfg),
+ FallbackDelay: multinet.FallbackDelay(appCfg),
+ }
+ sn := multinet.Subnets(appCfg)
+ for _, s := range sn {
+ result.Subnets = append(result.Subnets, internalNet.Subnet{
+ Prefix: s.Mask,
+ SourceIPs: s.SourceIPs,
+ })
+ }
+ return result
+}
+
func initNetmap(appCfg *config.Config, netState *networkState, relayOnly bool) cfgNetmap {
netmapWorkerPool, err := ants.NewPool(notificationHandlerPoolSize)
fatalOnErr(err)
@@ -1336,6 +1361,11 @@ func (c *cfg) reloadConfig(ctx context.Context) {
}
}
+ if err := c.dialerSource.Update(internalNetConfig(c.appCfg)); err != nil {
+ c.log.Error(logs.FailedToUpdateMultinetConfiguration, zap.Error(err))
+ return
+ }
+
c.log.Info(logs.FrostFSNodeConfigurationHasBeenReloadedSuccessfully)
}
diff --git a/go.mod b/go.mod
index 1468c12b2..a84d3122a 100644
--- a/go.mod
+++ b/go.mod
@@ -11,6 +11,7 @@ require (
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/hrw v1.2.1
+ git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88
git.frostfs.info/TrueCloudLab/tzhash v1.8.0
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
diff --git a/go.sum b/go.sum
index 5ce81807a..43d53aa40 100644
--- a/go.sum
+++ b/go.sum
@@ -14,6 +14,8 @@ git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241010110344-99c5c5836509/go.mod h1:jmb7yxzZota9jWbC10p+7YR+6wwJPBj7J/Fl5VDkXys=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
+git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928 h1:LK3mCkNZkY48eBA9jnk1N0eQZLsZhOG+XYw4EBoKUjM=
git.frostfs.info/TrueCloudLab/neoneo-go v0.106.1-0.20240726093631-5481339d6928/go.mod h1:bZyJexBlrja4ngxiBgo8by5pVHuAbhg9l09/8yVGDyg=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240814080254-96225afacb88 h1:vgbfkcnIexZUm3vREBBSa/Gv1Whjd1SFCUd0A+IaGPQ=
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index b4bc31b0c..0e9d58f32 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -523,4 +523,5 @@ const (
WritecacheShrinkSkippedNotEmpty = "writecache shrink skipped: not empty"
BlobovniczatreeFailedToRemoveRebuildTempFile = "failed to remove rebuild temp file"
WritecacheCantGetObject = "can't get an object from fstree"
+ FailedToUpdateMultinetConfiguration = "failed to update multinet configuration"
)
diff --git a/internal/net/config.go b/internal/net/config.go
new file mode 100644
index 000000000..10450db23
--- /dev/null
+++ b/internal/net/config.go
@@ -0,0 +1,66 @@
+package net
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+ "slices"
+ "time"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+var errEmptySourceIPList = errors.New("empty source IP list")
+
+type Subnet struct {
+ Prefix string
+ SourceIPs []string
+}
+
+type Config struct {
+ Enabled bool
+ Subnets []Subnet
+ Balancer string
+ Restrict bool
+ FallbackDelay time.Duration
+}
+
+func (c Config) toMultinetConfig() (multinet.Config, error) {
+ var subnets []multinet.Subnet
+ for _, s := range c.Subnets {
+ var ms multinet.Subnet
+ p, err := netip.ParsePrefix(s.Prefix)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
+ }
+ ms.Prefix = p
+ for _, ip := range s.SourceIPs {
+ addr, err := netip.ParseAddr(ip)
+ if err != nil {
+ return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
+ }
+ ms.SourceIPs = append(ms.SourceIPs, addr)
+ }
+ if len(ms.SourceIPs) == 0 {
+ return multinet.Config{}, errEmptySourceIPList
+ }
+ subnets = append(subnets, ms)
+ }
+ return multinet.Config{
+ Subnets: subnets,
+ Balancer: multinet.BalancerType(c.Balancer),
+ Restrict: c.Restrict,
+ FallbackDelay: c.FallbackDelay,
+ Dialer: newDefaulDialer(),
+ }, nil
+}
+
+func (c Config) equals(other Config) bool {
+ return c.Enabled == other.Enabled &&
+ slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
+ return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
+ }) &&
+ c.Balancer == other.Balancer &&
+ c.Restrict == other.Restrict &&
+ c.FallbackDelay == other.FallbackDelay
+}
diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go
new file mode 100644
index 000000000..6265f1860
--- /dev/null
+++ b/internal/net/dial_target.go
@@ -0,0 +1,54 @@
+// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
+
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package net
+
+import (
+ "net/url"
+ "strings"
+)
+
+// parseDialTarget returns the network and address to pass to dialer.
+func parseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
new file mode 100644
index 000000000..4537490f6
--- /dev/null
+++ b/internal/net/dialer.go
@@ -0,0 +1,35 @@
+package net
+
+import (
+ "context"
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type Dialer interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+func newDefaulDialer() net.Dialer {
+ // From `grpc.WithContextDialer` comment:
+ //
+ // Note: All supported releases of Go (as of December 2023) override the OS
+ // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+ // with OS defaults for keepalive time and interval, use a net.Dialer that sets
+ // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+ // option to true from the Control field. For a concrete example of how to do
+ // this, see internal.NetDialerWithTCPKeepalive().
+ //
+ // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
+ return net.Dialer{
+ KeepAlive: time.Duration(-1),
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go
new file mode 100644
index 000000000..e6a142a08
--- /dev/null
+++ b/internal/net/dialer_source.go
@@ -0,0 +1,69 @@
+package net
+
+import (
+ "context"
+ "net"
+ "sync"
+
+ "git.frostfs.info/TrueCloudLab/multinet"
+)
+
+type DialerSource struct {
+ guard sync.RWMutex
+
+ c Config
+
+ md multinet.Dialer
+}
+
+func NewDialerSource(c Config) (*DialerSource, error) {
+ result := &DialerSource{}
+ if err := result.build(c); err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+func (s *DialerSource) build(c Config) error {
+ if c.Enabled {
+ mc, err := c.toMultinetConfig()
+ if err != nil {
+ return err
+ }
+ md, err := multinet.NewDialer(mc)
+ if err != nil {
+ return err
+ }
+ s.md = md
+ s.c = c
+ return nil
+ }
+ s.md = nil
+ s.c = c
+ return nil
+}
+
+// GrpcContextDialer returns grpc.WithContextDialer func.
+// Returns nil if multinet is disabled.
+func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
+ s.guard.RLock()
+ defer s.guard.RUnlock()
+
+ if s.c.Enabled {
+ return func(ctx context.Context, address string) (net.Conn, error) {
+ network, address := parseDialTarget(address)
+ return s.md.DialContext(ctx, network, address)
+ }
+ }
+ return nil
+}
+
+func (s *DialerSource) Update(c Config) error {
+ s.guard.Lock()
+ defer s.guard.Unlock()
+
+ if s.c.equals(c) {
+ return nil
+ }
+ return s.build(c)
+}
From 6c96cc2af6eb9cb64e747c5b758fbec4d90c7287 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:11:44 +0300
Subject: [PATCH 161/655] [#1422] node: Use dialer source for SDK cache
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/config.go | 1 +
internal/net/dialer.go | 6 +++++-
pkg/network/cache/client.go | 2 ++
pkg/network/cache/multi.go | 25 ++++++++++++++-----------
4 files changed, 22 insertions(+), 12 deletions(-)
diff --git a/cmd/frostfs-node/config.go b/cmd/frostfs-node/config.go
index dc1bad485..d44597857 100644
--- a/cmd/frostfs-node/config.go
+++ b/cmd/frostfs-node/config.go
@@ -773,6 +773,7 @@ func initShared(appCfg *config.Config, key *keys.PrivateKey, netState *networkSt
Key: &key.PrivateKey,
AllowExternal: apiclientconfig.AllowExternal(appCfg),
ReconnectTimeout: apiclientconfig.ReconnectTimeout(appCfg),
+ DialerSource: ds,
}
return shared{
diff --git a/internal/net/dialer.go b/internal/net/dialer.go
index 4537490f6..daf0f815f 100644
--- a/internal/net/dialer.go
+++ b/internal/net/dialer.go
@@ -13,6 +13,10 @@ type Dialer interface {
DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
+func DialContextTCP(ctx context.Context, address string, d Dialer) (net.Conn, error) {
+ return d.DialContext(ctx, "tcp", address)
+}
+
func newDefaulDialer() net.Dialer {
// From `grpc.WithContextDialer` comment:
//
@@ -28,7 +32,7 @@ func newDefaulDialer() net.Dialer {
KeepAlive: time.Duration(-1),
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
- unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
})
},
}
diff --git a/pkg/network/cache/client.go b/pkg/network/cache/client.go
index 371d3c76f..63ae0bfdb 100644
--- a/pkg/network/cache/client.go
+++ b/pkg/network/cache/client.go
@@ -5,6 +5,7 @@ import (
"sync"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
clientcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)
@@ -25,6 +26,7 @@ type (
Key *ecdsa.PrivateKey
ResponseCallback func(client.ResponseMetaInfo) error
AllowExternal bool
+ DialerSource *net.DialerSource
}
)
diff --git a/pkg/network/cache/multi.go b/pkg/network/cache/multi.go
index b83cbb217..e936ead65 100644
--- a/pkg/network/cache/multi.go
+++ b/pkg/network/cache/multi.go
@@ -60,18 +60,21 @@ func (x *multiClient) createForAddress(ctx context.Context, addr network.Address
prmInit.Key = *x.opts.Key
}
+ grpcOpts := []grpc.DialOption{
+ grpc.WithChainUnaryInterceptor(
+ metrics.NewUnaryClientInterceptor(),
+ tracing.NewUnaryClientInteceptor(),
+ ),
+ grpc.WithChainStreamInterceptor(
+ metrics.NewStreamClientInterceptor(),
+ tracing.NewStreamClientInterceptor(),
+ ),
+ grpc.WithContextDialer(x.opts.DialerSource.GrpcContextDialer()),
+ }
+
prmDial := client.PrmDial{
- Endpoint: addr.URIAddr(),
- GRPCDialOptions: []grpc.DialOption{
- grpc.WithChainUnaryInterceptor(
- metrics.NewUnaryClientInterceptor(),
- tracing.NewUnaryClientInteceptor(),
- ),
- grpc.WithChainStreamInterceptor(
- metrics.NewStreamClientInterceptor(),
- tracing.NewStreamClientInterceptor(),
- ),
- },
+ Endpoint: addr.URIAddr(),
+ GRPCDialOptions: grpcOpts,
}
if x.opts.DialTimeout > 0 {
prmDial.DialTimeout = x.opts.DialTimeout
From e314f328c4806bf1b34b6e3c31abdc4afdfaaac4 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:18:24 +0300
Subject: [PATCH 162/655] [#1422] tree: Use dialer source for tree service
connections
Signed-off-by: Dmitrii Stepanov
---
cmd/frostfs-node/tree.go | 1 +
pkg/services/tree/cache.go | 6 +++++-
pkg/services/tree/options.go | 8 ++++++++
pkg/services/tree/service.go | 2 +-
4 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/cmd/frostfs-node/tree.go b/cmd/frostfs-node/tree.go
index 192f08471..f188e2fbc 100644
--- a/cmd/frostfs-node/tree.go
+++ b/cmd/frostfs-node/tree.go
@@ -67,6 +67,7 @@ func initTreeService(c *cfg) {
tree.WithAPELocalOverrideStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.LocalStorage()),
tree.WithAPEMorphRuleStorage(c.cfgObject.cfgAccessPolicyEngine.accessPolicyEngine.MorphRuleChainStorage()),
tree.WithNetmapState(c.cfgNetmap.state),
+ tree.WithDialerSource(c.dialerSource),
)
c.cfgGRPC.performAndSave(func(_ string, _ net.Listener, s *grpc.Server) {
diff --git a/pkg/services/tree/cache.go b/pkg/services/tree/cache.go
index 38501b852..e490cb855 100644
--- a/pkg/services/tree/cache.go
+++ b/pkg/services/tree/cache.go
@@ -8,6 +8,7 @@ import (
"sync"
"time"
+ internalNet "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/network"
metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
@@ -21,6 +22,7 @@ type clientCache struct {
sync.Mutex
simplelru.LRU[string, cacheItem]
key *ecdsa.PrivateKey
+ ds *internalNet.DialerSource
}
type cacheItem struct {
@@ -36,7 +38,7 @@ const (
var errRecentlyFailed = errors.New("client has recently failed")
-func (c *clientCache) init(pk *ecdsa.PrivateKey) {
+func (c *clientCache) init(pk *ecdsa.PrivateKey, ds *internalNet.DialerSource) {
l, _ := simplelru.NewLRU(defaultClientCacheSize, func(_ string, value cacheItem) {
if conn := value.cc; conn != nil {
_ = conn.Close()
@@ -44,6 +46,7 @@ func (c *clientCache) init(pk *ecdsa.PrivateKey) {
})
c.LRU = *l
c.key = pk
+ c.ds = ds
}
func (c *clientCache) get(ctx context.Context, netmapAddr string) (TreeServiceClient, error) {
@@ -99,6 +102,7 @@ func (c *clientCache) dialTreeService(ctx context.Context, netmapAddr string) (*
metrics.NewStreamClientInterceptor(),
tracing.NewStreamClientInterceptor(),
),
+ grpc.WithContextDialer(c.ds.GrpcContextDialer()),
}
if !netAddr.IsTLSEnabled() {
diff --git a/pkg/services/tree/options.go b/pkg/services/tree/options.go
index 1db5607f6..1633ae557 100644
--- a/pkg/services/tree/options.go
+++ b/pkg/services/tree/options.go
@@ -4,6 +4,7 @@ import (
"crypto/ecdsa"
"time"
+ "git.frostfs.info/TrueCloudLab/frostfs-node/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
frostfsidcore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/frostfsid"
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/netmap"
@@ -45,6 +46,7 @@ type cfg struct {
morphChainStorage policyengine.MorphRuleChainStorageReader
metrics MetricsRegister
+ ds *net.DialerSource
}
// Option represents configuration option for a tree service.
@@ -161,3 +163,9 @@ func WithNetmapState(state netmap.State) Option {
c.state = state
}
}
+
+func WithDialerSource(ds *net.DialerSource) Option {
+ return func(c *cfg) {
+ c.ds = ds
+ }
+}
diff --git a/pkg/services/tree/service.go b/pkg/services/tree/service.go
index 60bb1a6ad..2cb2af294 100644
--- a/pkg/services/tree/service.go
+++ b/pkg/services/tree/service.go
@@ -65,7 +65,7 @@ func New(opts ...Option) *Service {
s.log = &logger.Logger{Logger: zap.NewNop()}
}
- s.cache.init(s.key)
+ s.cache.init(s.key, s.ds)
s.closeCh = make(chan struct{})
s.replicateCh = make(chan movePair, s.replicatorChannelCapacity)
s.replicateLocalCh = make(chan applyOp)
From 5b653aa65fd65759ee733fd552d17870c6660b13 Mon Sep 17 00:00:00 2001
From: Dmitrii Stepanov
Date: Wed, 9 Oct 2024 11:34:36 +0300
Subject: [PATCH 163/655] [#1422] morph: Drop single client as not used
Signed-off-by: Dmitrii Stepanov