[#52] Remove storage groups and audit
Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
parent 38b03ff28b, commit 29b188db57
13 changed files with 1 addition and 1343 deletions
audit/doc.go (26 deletions)
@@ -1,26 +0,0 @@
/*
Package audit provides features to process data audit in FrostFS system.

Result type groups values which can be gathered during data audit process:

    var res audit.Result
    res.ForEpoch(32)
    res.ForContainer(cnr)
    // ...
    res.Complete()

Result instances can be stored in a binary format. On reporter side:

    data := res.Marshal()
    // send data

On receiver side:

    var res audit.Result
    err := res.Unmarshal(data)
    // ...

Using package types in an application is recommended to potentially work with
different protocol versions with which these types are compatible.
*/
package audit
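The package comment above shows the reporter and receiver snippets in isolation. For reference, a single self-contained sketch of that round trip could look as follows; it assumes a module version from before this commit, and the random container ID from cidtest stands in for a real one:

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
)

func main() {
    // Reporter side: fill the Result and encode it.
    var res audit.Result
    res.ForEpoch(32)
    res.ForContainer(cidtest.ID()) // a random ID stands in for a real container
    res.Complete()

    data := res.Marshal()

    // Receiver side: decode and inspect.
    var received audit.Result
    if err := received.Unmarshal(data); err != nil {
        fmt.Println("decode failed:", err)
        return
    }

    fmt.Println("epoch:", received.Epoch(), "completed:", received.Completed())
}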
audit/result.go (377 deletions)
@@ -1,377 +0,0 @@
package audit

import (
    "errors"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/audit"
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
)

// Result represents report on the results of the data audit in FrostFS system.
//
// Result is mutually binary-compatible with git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/audit.DataAuditResult
// message. See Marshal / Unmarshal methods.
//
// Instances can be created using built-in var declaration.
type Result struct {
    versionEncoded bool

    v2 audit.DataAuditResult
}

// Marshal encodes Result into a canonical FrostFS binary format (Protocol Buffers
// with direct field order).
//
// Writes version.Current() protocol version into the resulting message if Result
// hasn't been already decoded from such a message using Unmarshal.
//
// See also Unmarshal.
func (r *Result) Marshal() []byte {
    if !r.versionEncoded {
        var verV2 refs.Version
        version.Current().WriteToV2(&verV2)
        r.v2.SetVersion(&verV2)
        r.versionEncoded = true
    }

    return r.v2.StableMarshal(nil)
}

var errCIDNotSet = errors.New("container ID is not set")

// Unmarshal decodes Result from its canonical FrostFS binary format (Protocol Buffers
// with direct field order). Returns an error describing a format violation.
//
// See also Marshal.
func (r *Result) Unmarshal(data []byte) error {
    err := r.v2.Unmarshal(data)
    if err != nil {
        return err
    }

    r.versionEncoded = true

    // format checks

    var cID cid.ID

    cidV2 := r.v2.GetContainerID()
    if cidV2 == nil {
        return errCIDNotSet
    }

    err = cID.ReadFromV2(*cidV2)
    if err != nil {
        return fmt.Errorf("could not convert V2 container ID: %w", err)
    }

    var (
        oID   oid.ID
        oidV2 refs.ObjectID
    )

    for _, oidV2 = range r.v2.GetPassSG() {
        err = oID.ReadFromV2(oidV2)
        if err != nil {
            return fmt.Errorf("invalid passed storage group ID: %w", err)
        }
    }

    for _, oidV2 = range r.v2.GetFailSG() {
        err = oID.ReadFromV2(oidV2)
        if err != nil {
            return fmt.Errorf("invalid failed storage group ID: %w", err)
        }
    }

    return nil
}

// Epoch returns FrostFS epoch when the data associated with the Result was audited.
//
// Zero Result has zero epoch.
//
// See also ForEpoch.
func (r Result) Epoch() uint64 {
    return r.v2.GetAuditEpoch()
}

// ForEpoch specifies FrostFS epoch when the data associated with the Result was audited.
//
// See also Epoch.
func (r *Result) ForEpoch(epoch uint64) {
    r.v2.SetAuditEpoch(epoch)
}

// Container returns identifier of the container with which the data audit Result
// is associated and a bool that indicates container ID field presence in the Result.
//
// Zero Result does not have container ID.
//
// See also ForContainer.
func (r Result) Container() (cid.ID, bool) {
    var cID cid.ID

    cidV2 := r.v2.GetContainerID()
    if cidV2 != nil {
        _ = cID.ReadFromV2(*cidV2)
        return cID, true
    }

    return cID, false
}

// ForContainer sets identifier of the container with which the data audit Result
// is associated.
//
// See also Container.
func (r *Result) ForContainer(cnr cid.ID) {
    var cidV2 refs.ContainerID
    cnr.WriteToV2(&cidV2)

    r.v2.SetContainerID(&cidV2)
}

// AuditorKey returns public key of the auditing FrostFS Inner Ring node in
// a FrostFS binary key format.
//
// Zero Result has nil key. Return value MUST NOT be mutated; to modify it,
// first make a copy.
//
// See also SetAuditorKey.
func (r Result) AuditorKey() []byte {
    return r.v2.GetPublicKey()
}

// SetAuditorKey specifies public key of the auditing FrostFS Inner Ring node in
// a FrostFS binary key format.
//
// Argument MUST NOT be mutated at least until the end of using the Result.
//
// See also AuditorKey.
func (r *Result) SetAuditorKey(key []byte) {
    r.v2.SetPublicKey(key)
}

// Completed returns completion state of the data audit associated with the Result.
//
// Zero Result corresponds to incomplete data audit.
//
// See also Complete.
func (r Result) Completed() bool {
    return r.v2.GetComplete()
}

// Complete marks the data audit associated with the Result as completed.
//
// See also Completed.
func (r *Result) Complete() {
    r.v2.SetComplete(true)
}

// RequestsPoR returns number of requests made by Proof-of-Retrievability
// audit check to get all headers of the objects inside storage groups.
//
// Zero Result has zero requests.
//
// See also SetRequestsPoR.
func (r Result) RequestsPoR() uint32 {
    return r.v2.GetRequests()
}

// SetRequestsPoR sets number of requests made by Proof-of-Retrievability
// audit check to get all headers of the objects inside storage groups.
//
// See also RequestsPoR.
func (r *Result) SetRequestsPoR(v uint32) {
    r.v2.SetRequests(v)
}

// RetriesPoR returns number of retries made by Proof-of-Retrievability
// audit check to get all headers of the objects inside storage groups.
//
// Zero Result has zero retries.
//
// See also SetRetriesPoR.
func (r Result) RetriesPoR() uint32 {
    return r.v2.GetRetries()
}

// SetRetriesPoR sets number of retries made by Proof-of-Retrievability
// audit check to get all headers of the objects inside storage groups.
//
// See also RetriesPoR.
func (r *Result) SetRetriesPoR(v uint32) {
    r.v2.SetRetries(v)
}

// IteratePassedStorageGroups iterates over all storage groups that passed
// Proof-of-Retrievability audit check and passes them into f. Breaks on f's
// false return, f MUST NOT be nil.
//
// Zero Result has no passed storage groups and doesn't call f.
//
// See also SubmitPassedStorageGroup.
func (r Result) IteratePassedStorageGroups(f func(oid.ID) bool) {
    r2 := r.v2.GetPassSG()

    var id oid.ID

    for i := range r2 {
        _ = id.ReadFromV2(r2[i])

        if !f(id) {
            return
        }
    }
}

// SubmitPassedStorageGroup marks storage group as passed Proof-of-Retrievability
// audit check.
//
// See also IteratePassedStorageGroups.
func (r *Result) SubmitPassedStorageGroup(sg oid.ID) {
    var idV2 refs.ObjectID
    sg.WriteToV2(&idV2)

    r.v2.SetPassSG(append(r.v2.GetPassSG(), idV2))
}

// IterateFailedStorageGroups is similar to IteratePassedStorageGroups but for failed groups.
//
// See also SubmitFailedStorageGroup.
func (r Result) IterateFailedStorageGroups(f func(oid.ID) bool) {
    v := r.v2.GetFailSG()
    var id oid.ID

    for i := range v {
        _ = id.ReadFromV2(v[i])
        if !f(id) {
            return
        }
    }
}

// SubmitFailedStorageGroup is similar to SubmitPassedStorageGroup but for failed groups.
//
// See also IterateFailedStorageGroups.
func (r *Result) SubmitFailedStorageGroup(sg oid.ID) {
    var idV2 refs.ObjectID
    sg.WriteToV2(&idV2)

    r.v2.SetFailSG(append(r.v2.GetFailSG(), idV2))
}

// Hits returns number of sampled objects under audit placed
// in an optimal way according to the container's placement policy
// when checking Proof-of-Placement.
//
// Zero Result has zero hits.
//
// See also SetHits.
func (r Result) Hits() uint32 {
    return r.v2.GetHit()
}

// SetHits sets number of sampled objects under audit placed
// in an optimal way according to the container's placement policy
// when checking Proof-of-Placement.
//
// See also Hits.
func (r *Result) SetHits(hit uint32) {
    r.v2.SetHit(hit)
}

// Misses returns number of sampled objects under audit placed
// in suboptimal way according to the container's placement policy,
// but still at a satisfactory level when checking Proof-of-Placement.
//
// Zero Result has zero misses.
//
// See also SetMisses.
func (r Result) Misses() uint32 {
    return r.v2.GetMiss()
}

// SetMisses sets number of sampled objects under audit placed
// in suboptimal way according to the container's placement policy,
// but still at a satisfactory level when checking Proof-of-Placement.
//
// See also Misses.
func (r *Result) SetMisses(miss uint32) {
    r.v2.SetMiss(miss)
}

// Failures returns number of sampled objects under audit stored
// in a way not confirming placement policy or not found at all
// when checking Proof-of-Placement.
//
// Zero Result has zero failures.
//
// See also SetFailures.
func (r Result) Failures() uint32 {
    return r.v2.GetFail()
}

// SetFailures sets number of sampled objects under audit stored
// in a way not confirming placement policy or not found at all
// when checking Proof-of-Placement.
//
// See also Failures.
func (r *Result) SetFailures(fail uint32) {
    r.v2.SetFail(fail)
}

// IteratePassedStorageNodes iterates over all storage nodes that passed at least one
// Proof-of-Data-Possession audit check and passes their public keys into f. Breaks on
// f's false return.
//
// f MUST NOT be nil and MUST NOT mutate parameter passed into it at least until
// the end of using the Result.
//
// Zero Result has no passed storage nodes and doesn't call f.
//
// See also SubmitPassedStorageNodes.
func (r Result) IteratePassedStorageNodes(f func([]byte) bool) {
    v := r.v2.GetPassNodes()

    for i := range v {
        if !f(v[i]) {
            return
        }
    }
}

// SubmitPassedStorageNodes marks storage node list as passed Proof-of-Data-Possession
// audit check. The list contains public keys.
//
// Argument and its elements MUST NOT be mutated at least until the end of using the Result.
//
// See also IteratePassedStorageNodes.
func (r *Result) SubmitPassedStorageNodes(list [][]byte) {
    r.v2.SetPassNodes(list)
}

// IterateFailedStorageNodes is similar to IteratePassedStorageNodes but for failed nodes.
//
// See also SubmitFailedStorageNodes.
func (r Result) IterateFailedStorageNodes(f func([]byte) bool) {
    v := r.v2.GetFailNodes()

    for i := range v {
        if !f(v[i]) {
            return
        }
    }
}

// SubmitFailedStorageNodes is similar to SubmitPassedStorageNodes but for failed nodes.
//
// See also IterateFailedStorageNodes.
func (r *Result) SubmitFailedStorageNodes(list [][]byte) {
    r.v2.SetFailNodes(list)
}
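The iterator and submit methods above are only described in prose. A brief hedged sketch of how a consumer could drain them into plain slices; the wrapper function and package name are illustrative, not part of the SDK:

package auditexample

import (
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// collectOutcome gathers pass/fail data from an audit.Result into plain slices.
func collectOutcome(res audit.Result) (passedSG []oid.ID, failedNodes [][]byte) {
    res.IteratePassedStorageGroups(func(id oid.ID) bool {
        passedSG = append(passedSG, id)
        return true // returning false would stop the iteration early
    })

    res.IterateFailedStorageNodes(func(key []byte) bool {
        // The callback argument must not be mutated (see the doc above), so copy it.
        failedNodes = append(failedNodes, append([]byte(nil), key...))
        return true
    })

    return
}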
@@ -1,191 +0,0 @@
package audit_test

import (
    "bytes"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
    audittest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit/test"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "github.com/stretchr/testify/require"
)

func TestResultData(t *testing.T) {
    var r audit.Result

    countSG := func(passed bool, f func(oid.ID)) int {
        called := 0

        ff := func(arg oid.ID) bool {
            called++

            if f != nil {
                f(arg)
            }

            return true
        }

        if passed {
            r.IteratePassedStorageGroups(ff)
        } else {
            r.IterateFailedStorageGroups(ff)
        }

        return called
    }

    countPassSG := func(f func(oid.ID)) int { return countSG(true, f) }
    countFailSG := func(f func(oid.ID)) int { return countSG(false, f) }

    countNodes := func(passed bool, f func([]byte)) int {
        called := 0

        ff := func(arg []byte) bool {
            called++

            if f != nil {
                f(arg)
            }

            return true
        }

        if passed {
            r.IteratePassedStorageNodes(ff)
        } else {
            r.IterateFailedStorageNodes(ff)
        }

        return called
    }

    countPassNodes := func(f func([]byte)) int { return countNodes(true, f) }
    countFailNodes := func(f func([]byte)) int { return countNodes(false, f) }

    require.Zero(t, r.Epoch())
    _, set := r.Container()
    require.False(t, set)
    require.Nil(t, r.AuditorKey())
    require.False(t, r.Completed())
    require.Zero(t, r.RequestsPoR())
    require.Zero(t, r.RetriesPoR())
    require.Zero(t, countPassSG(nil))
    require.Zero(t, countFailSG(nil))
    require.Zero(t, countPassNodes(nil))
    require.Zero(t, countFailNodes(nil))

    epoch := uint64(13)
    r.ForEpoch(epoch)
    require.Equal(t, epoch, r.Epoch())

    cnr := cidtest.ID()
    r.ForContainer(cnr)
    cID, set := r.Container()
    require.True(t, set)
    require.Equal(t, cnr, cID)

    key := []byte{1, 2, 3}
    r.SetAuditorKey(key)
    require.Equal(t, key, r.AuditorKey())

    r.Complete()
    require.True(t, r.Completed())

    requests := uint32(2)
    r.SetRequestsPoR(requests)
    require.Equal(t, requests, r.RequestsPoR())

    retries := uint32(1)
    r.SetRetriesPoR(retries)
    require.Equal(t, retries, r.RetriesPoR())

    passSG1, passSG2 := oidtest.ID(), oidtest.ID()
    r.SubmitPassedStorageGroup(passSG1)
    r.SubmitPassedStorageGroup(passSG2)

    called1, called2 := false, false

    require.EqualValues(t, 2, countPassSG(func(id oid.ID) {
        if id.Equals(passSG1) {
            called1 = true
        } else if id.Equals(passSG2) {
            called2 = true
        }
    }))
    require.True(t, called1)
    require.True(t, called2)

    failSG1, failSG2 := oidtest.ID(), oidtest.ID()
    r.SubmitFailedStorageGroup(failSG1)
    r.SubmitFailedStorageGroup(failSG2)

    called1, called2 = false, false

    require.EqualValues(t, 2, countFailSG(func(id oid.ID) {
        if id.Equals(failSG1) {
            called1 = true
        } else if id.Equals(failSG2) {
            called2 = true
        }
    }))
    require.True(t, called1)
    require.True(t, called2)

    hit := uint32(1)
    r.SetHits(hit)
    require.Equal(t, hit, r.Hits())

    miss := uint32(2)
    r.SetMisses(miss)
    require.Equal(t, miss, r.Misses())

    fail := uint32(3)
    r.SetFailures(fail)
    require.Equal(t, fail, r.Failures())

    passNodes := [][]byte{{1}, {2}}
    r.SubmitPassedStorageNodes(passNodes)

    called1, called2 = false, false

    require.EqualValues(t, 2, countPassNodes(func(arg []byte) {
        if bytes.Equal(arg, passNodes[0]) {
            called1 = true
        } else if bytes.Equal(arg, passNodes[1]) {
            called2 = true
        }
    }))
    require.True(t, called1)
    require.True(t, called2)

    failNodes := [][]byte{{3}, {4}}
    r.SubmitFailedStorageNodes(failNodes)

    called1, called2 = false, false

    require.EqualValues(t, 2, countFailNodes(func(arg []byte) {
        if bytes.Equal(arg, failNodes[0]) {
            called1 = true
        } else if bytes.Equal(arg, failNodes[1]) {
            called2 = true
        }
    }))
    require.True(t, called1)
    require.True(t, called2)
}

func TestResultEncoding(t *testing.T) {
    r := *audittest.Result()

    t.Run("binary", func(t *testing.T) {
        data := r.Marshal()

        var r2 audit.Result
        require.NoError(t, r2.Unmarshal(data))

        require.Equal(t, r, r2)
    })
}
@@ -1,13 +0,0 @@
/*
Package audittest provides functions for convenient testing of audit package API.

Note that importing the package into source files is highly discouraged.

Random instance generation functions can be useful when testing expects any value, e.g.:

    import audittest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit/test"

    dec := audittest.Result()
    // test the value
*/
package audittest
@@ -1,36 +0,0 @@
package audittest

import (
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/audit"
    cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
)

// Result returns random audit.Result.
func Result() *audit.Result {
    var x audit.Result

    x.ForContainer(cidtest.ID())
    x.SetAuditorKey([]byte("key"))
    x.Complete()
    x.ForEpoch(44)
    x.SetHits(55)
    x.SetMisses(66)
    x.SetFailures(77)
    x.SetRequestsPoR(88)
    x.SetRetriesPoR(99)
    x.SubmitFailedStorageNodes([][]byte{
        []byte("node1"),
        []byte("node2"),
    })
    x.SubmitPassedStorageNodes([][]byte{
        []byte("node3"),
        []byte("node4"),
    })
    x.SubmitPassedStorageGroup(oidtest.ID())
    x.SubmitPassedStorageGroup(oidtest.ID())
    x.SubmitFailedStorageGroup(oidtest.ID())
    x.SubmitFailedStorageGroup(oidtest.ID())

    return &x
}
@@ -220,16 +220,6 @@ func TestObject_SetSessionToken(t *testing.T) {
    require.Equal(t, tok, obj.SessionToken())
}

func TestObject_SetType(t *testing.T) {
    obj := New()

    typ := TypeStorageGroup

    obj.SetType(typ)

    require.Equal(t, typ, obj.Type())
}

func TestObject_CutPayload(t *testing.T) {
    o1 := New()
@@ -9,7 +9,7 @@ type Type object.Type
const (
    TypeRegular Type = iota
    TypeTombstone
    TypeStorageGroup
    _
    TypeLock
)
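The const block above keeps a blank identifier in the slot that TypeStorageGroup used to occupy, presumably so the numeric value of TypeLock (and its wire encoding) does not shift. An illustrative copy of the resulting enum with the values spelled out; the annotations are inferred, not stated in the diff:

package main

import "fmt"

// Type mirrors the iota layout in the hunk above (an illustrative copy,
// not the SDK's own declaration).
type Type int

const (
    TypeRegular Type = iota // 0
    TypeTombstone           // 1
    _                       // 2, the slot formerly held by TypeStorageGroup
    TypeLock                // 3
)

func main() {
    // TypeLock keeps the value 3, so previously encoded values stay valid.
    fmt.Println(int(TypeLock))
}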
@@ -25,7 +25,6 @@ func TypeFromV2(t object.Type) Type {
//
// String mapping:
// - TypeTombstone: TOMBSTONE;
// - TypeStorageGroup: STORAGE_GROUP;
// - TypeLock: LOCK;
// - TypeRegular, default: REGULAR.
func (t Type) String() string {
@@ -21,10 +21,6 @@ func TestType_ToV2(t *testing.T) {
            t:  object.TypeTombstone,
            t2: v2object.TypeTombstone,
        },
        {
            t:  object.TypeStorageGroup,
            t2: v2object.TypeStorageGroup,
        },
        {
            t:  object.TypeLock,
            t2: v2object.TypeLock,

@@ -47,7 +43,6 @@ func TestType_String(t *testing.T) {

    testEnumStrings(t, new(object.Type), []enumStringItem{
        {val: toPtr(object.TypeTombstone), str: "TOMBSTONE"},
        {val: toPtr(object.TypeStorageGroup), str: "STORAGE_GROUP"},
        {val: toPtr(object.TypeRegular), str: "REGULAR"},
        {val: toPtr(object.TypeLock), str: "LOCK"},
    })
@@ -1,38 +0,0 @@
/*
Package storagegroup provides features to work with information that is
used for proof of storage in FrostFS system.

StorageGroup type groups verification values for Data Audit sessions:

    // receive sg info

    sg.ExpirationEpoch()    // expiration of the storage group
    sg.Members()            // objects in the group
    sg.ValidationDataHash() // hash for objects validation
    sg.ValidationDataSize() // total objects' payload size

Instances can be also used to process FrostFS API V2 protocol messages
(see neo.fs.v2.storagegroup package in https://git.frostfs.info/TrueCloudLab/frostfs-api).

On client side:

    import "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/storagegroup"

    var msg storagegroup.StorageGroup
    sg.WriteToV2(&msg)

    // send msg

On server side:

    // recv msg

    var sg StorageGroup
    sg.ReadFromV2(msg)

    // process sg

Using package types in an application is recommended to potentially work with
different protocol versions with which these types are compatible.
*/
package storagegroup
@@ -1,329 +0,0 @@
package storagegroup

import (
    "errors"
    "fmt"
    "strconv"

    objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/storagegroup"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// StorageGroup represents storage group of the FrostFS objects.
//
// StorageGroup is mutually compatible with git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/storagegroup.StorageGroup
// message. See ReadFromV2 / WriteToV2 methods.
//
// Instances can be created using built-in var declaration.
//
// Note that direct typecast is not safe and may result in loss of compatibility:
//
//    _ = StorageGroup(storagegroup.StorageGroup) // not recommended
type StorageGroup storagegroup.StorageGroup

// readFromV2 reads StorageGroup from the storagegroup.StorageGroup message. If checkFieldPresence is set,
// returns an error on absence of any protocol-required field.
func (sg *StorageGroup) readFromV2(m storagegroup.StorageGroup, checkFieldPresence bool) error {
    var err error

    h := m.GetValidationHash()
    if h != nil {
        err = new(checksum.Checksum).ReadFromV2(*h)
        if err != nil {
            return fmt.Errorf("invalid hash: %w", err)
        }
    } else if checkFieldPresence {
        return errors.New("missing hash")
    }

    members := m.GetMembers()
    if len(members) > 0 {
        var member oid.ID
        mMembers := make(map[oid.ID]struct{}, len(members))
        var exists bool

        for i := range members {
            err = member.ReadFromV2(members[i])
            if err != nil {
                return fmt.Errorf("invalid member: %w", err)
            }

            _, exists = mMembers[member]
            if exists {
                return fmt.Errorf("duplicated member %s", member)
            }

            mMembers[member] = struct{}{}
        }
    } else if checkFieldPresence {
        return errors.New("missing members")
    }

    *sg = StorageGroup(m)

    return nil
}

// ReadFromV2 reads StorageGroup from the storagegroup.StorageGroup message.
// Checks if the message conforms to FrostFS API V2 protocol.
//
// See also WriteToV2.
func (sg *StorageGroup) ReadFromV2(m storagegroup.StorageGroup) error {
    return sg.readFromV2(m, true)
}

// WriteToV2 writes StorageGroup to the storagegroup.StorageGroup message.
// The message must not be nil.
//
// See also ReadFromV2.
func (sg StorageGroup) WriteToV2(m *storagegroup.StorageGroup) {
    *m = (storagegroup.StorageGroup)(sg)
}

// ValidationDataSize returns total size of the payloads
// of objects in the storage group.
//
// Zero StorageGroup has 0 data size.
//
// See also SetValidationDataSize.
func (sg StorageGroup) ValidationDataSize() uint64 {
    v2 := (storagegroup.StorageGroup)(sg)
    return v2.GetValidationDataSize()
}

// SetValidationDataSize sets total size of the payloads
// of objects in the storage group.
//
// See also ValidationDataSize.
func (sg *StorageGroup) SetValidationDataSize(size uint64) {
    (*storagegroup.StorageGroup)(sg).SetValidationDataSize(size)
}

// ValidationDataHash returns homomorphic hash from the
// concatenation of the payloads of the storage group members
// and bool that indicates checksum presence in the storage
// group.
//
// Zero StorageGroup does not have validation data checksum.
//
// See also SetValidationDataHash.
func (sg StorageGroup) ValidationDataHash() (v checksum.Checksum, isSet bool) {
    v2 := (storagegroup.StorageGroup)(sg)
    if checksumV2 := v2.GetValidationHash(); checksumV2 != nil {
        _ = v.ReadFromV2(*checksumV2) // FIXME(@cthulhu-rider): #226 handle error
        isSet = true
    }

    return
}

// SetValidationDataHash sets homomorphic hash from the
// concatenation of the payloads of the storage group members.
//
// See also ValidationDataHash.
func (sg *StorageGroup) SetValidationDataHash(hash checksum.Checksum) {
    var v2 refs.Checksum
    hash.WriteToV2(&v2)

    (*storagegroup.StorageGroup)(sg).SetValidationHash(&v2)
}

// ExpirationEpoch returns last FrostFS epoch number
// of the storage group lifetime.
//
// Zero StorageGroup has 0 expiration epoch.
//
// See also SetExpirationEpoch.
func (sg StorageGroup) ExpirationEpoch() uint64 {
    v2 := (storagegroup.StorageGroup)(sg)
    return v2.GetExpirationEpoch()
}

// SetExpirationEpoch sets last FrostFS epoch number
// of the storage group lifetime.
//
// See also ExpirationEpoch.
func (sg *StorageGroup) SetExpirationEpoch(epoch uint64) {
    (*storagegroup.StorageGroup)(sg).SetExpirationEpoch(epoch)
}

// Members returns strictly ordered list of
// storage group member objects.
//
// Zero StorageGroup has nil members value.
//
// See also SetMembers.
func (sg StorageGroup) Members() []oid.ID {
    v2 := (storagegroup.StorageGroup)(sg)
    mV2 := v2.GetMembers()

    if mV2 == nil {
        return nil
    }

    m := make([]oid.ID, len(mV2))

    for i := range mV2 {
        _ = m[i].ReadFromV2(mV2[i])
    }

    return m
}

// SetMembers sets strictly ordered list of
// storage group member objects.
//
// See also Members.
func (sg *StorageGroup) SetMembers(members []oid.ID) {
    mV2 := (*storagegroup.StorageGroup)(sg).GetMembers()

    if members == nil {
        mV2 = nil
    } else {
        ln := len(members)

        if cap(mV2) >= ln {
            mV2 = mV2[:0]
        } else {
            mV2 = make([]refs.ObjectID, 0, ln)
        }

        var oidV2 refs.ObjectID

        for i := 0; i < ln; i++ {
            members[i].WriteToV2(&oidV2)
            mV2 = append(mV2, oidV2)
        }
    }

    (*storagegroup.StorageGroup)(sg).SetMembers(mV2)
}

// Marshal marshals StorageGroup into a protobuf binary form.
//
// See also Unmarshal.
func (sg StorageGroup) Marshal() ([]byte, error) {
    return (*storagegroup.StorageGroup)(&sg).StableMarshal(nil), nil
}

// Unmarshal unmarshals protobuf binary representation of StorageGroup.
//
// See also Marshal.
func (sg *StorageGroup) Unmarshal(data []byte) error {
    v2 := (*storagegroup.StorageGroup)(sg)
    err := v2.Unmarshal(data)
    if err != nil {
        return err
    }

    return sg.readFromV2(*v2, false)
}

// MarshalJSON encodes StorageGroup to protobuf JSON format.
//
// See also UnmarshalJSON.
func (sg StorageGroup) MarshalJSON() ([]byte, error) {
    v2 := (storagegroup.StorageGroup)(sg)
    return v2.MarshalJSON()
}

// UnmarshalJSON decodes StorageGroup from protobuf JSON format.
//
// See also MarshalJSON.
func (sg *StorageGroup) UnmarshalJSON(data []byte) error {
    v2 := (*storagegroup.StorageGroup)(sg)
    err := v2.UnmarshalJSON(data)
    if err != nil {
        return err
    }

    return sg.readFromV2(*v2, false)
}

// ReadFromObject assembles StorageGroup from a regular
// Object structure. Object must contain unambiguous information
// about its expiration epoch, otherwise behaviour is undefined.
//
// Returns any error that appeared during storage group parsing; returns
// error if object is not of TypeStorageGroup type.
func ReadFromObject(sg *StorageGroup, o objectSDK.Object) error {
    if typ := o.Type(); typ != objectSDK.TypeStorageGroup {
        return fmt.Errorf("object is not of StorageGroup type: %s", typ)
    }

    err := sg.Unmarshal(o.Payload())
    if err != nil {
        return fmt.Errorf("could not unmarshal object: %w", err)
    }

    var expObj uint64

    for _, attr := range o.Attributes() {
        if attr.Key() == objectV2.SysAttributeExpEpoch {
            expObj, err = strconv.ParseUint(attr.Value(), 10, 64)
            if err != nil {
                return fmt.Errorf("could not get expiration from object: %w", err)
            }

            break
        }
    }

    // Supporting deprecated functionality.
    // See https://github.com/nspcc-dev/neofs-api/pull/205.
    if expSG := sg.ExpirationEpoch(); expObj != expSG {
        return fmt.Errorf(
            "expiration does not match: from object: %d, from payload: %d",
            expObj, expSG)
    }

    return nil
}

// WriteToObject writes StorageGroup to a regular
// Object structure. Object must not contain ambiguous
// information about its expiration epoch, or must not
// have it at all.
//
// Written information:
// - expiration epoch;
// - object type (TypeStorageGroup);
// - raw payload.
func WriteToObject(sg StorageGroup, o *objectSDK.Object) {
    sgRaw, err := sg.Marshal()
    if err != nil {
        // Marshal() does not return errors
        // in the next API release
        panic(fmt.Errorf("could not marshal storage group: %w", err))
    }

    o.SetPayload(sgRaw)
    o.SetType(objectSDK.TypeStorageGroup)

    attrs := o.Attributes()
    var expAttrFound bool

    for i := range attrs {
        if attrs[i].Key() == objectV2.SysAttributeExpEpoch {
            expAttrFound = true
            attrs[i].SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10))

            break
        }
    }

    if !expAttrFound {
        var attr objectSDK.Attribute

        attr.SetKey(objectV2.SysAttributeExpEpoch)
        attr.SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10))

        attrs = append(attrs, attr)
    }

    o.SetAttributes(attrs...)
}
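ReadFromObject and WriteToObject above describe in prose how a storage group travels inside a regular object (payload, object type, and the system expiration attribute). A minimal hedged sketch of the round trip they enable, assuming the pre-removal SDK; the function and package names are illustrative:

package sgexample

import (
    "fmt"

    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
)

// packStorageGroup wraps a StorageGroup into an object and immediately reads it back,
// mirroring what a reporter and a verifier would do on opposite ends.
func packStorageGroup(members []oid.ID, expEpoch uint64) error {
    var sg storagegroup.StorageGroup
    sg.SetMembers(members)
    sg.SetExpirationEpoch(expEpoch)

    // Writer side: payload, object type and expiration attribute are filled in.
    var obj objectSDK.Object
    storagegroup.WriteToObject(sg, &obj)

    // Reader side: fails if the object type or expiration attribute is inconsistent.
    var restored storagegroup.StorageGroup
    if err := storagegroup.ReadFromObject(&restored, obj); err != nil {
        return fmt.Errorf("read storage group back: %w", err)
    }

    fmt.Println("restored members:", len(restored.Members()))
    return nil
}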
@@ -1,283 +0,0 @@
package storagegroup_test

import (
    "crypto/sha256"
    "strconv"
    "testing"

    objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
    storagegroupV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/storagegroup"
    storagegroupV2test "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/storagegroup/test"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
    checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
    objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
    storagegrouptest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup/test"
    "github.com/stretchr/testify/require"
)

func TestStorageGroup(t *testing.T) {
    var sg storagegroup.StorageGroup

    sz := uint64(13)
    sg.SetValidationDataSize(sz)
    require.Equal(t, sz, sg.ValidationDataSize())

    cs := checksumtest.Checksum()
    sg.SetValidationDataHash(cs)
    cs2, set := sg.ValidationDataHash()

    require.True(t, set)
    require.Equal(t, cs, cs2)

    exp := uint64(33)
    sg.SetExpirationEpoch(exp)
    require.Equal(t, exp, sg.ExpirationEpoch())

    members := []oid.ID{oidtest.ID(), oidtest.ID()}
    sg.SetMembers(members)
    require.Equal(t, members, sg.Members())
}

func TestStorageGroup_ReadFromV2(t *testing.T) {
    t.Run("from zero", func(t *testing.T) {
        var (
            x  storagegroup.StorageGroup
            v2 storagegroupV2.StorageGroup
        )

        require.Error(t, x.ReadFromV2(v2))
    })

    t.Run("from non-zero", func(t *testing.T) {
        var (
            x  storagegroup.StorageGroup
            v2 = storagegroupV2test.GenerateStorageGroup(false)
        )

        // https://github.com/nspcc-dev/neofs-api-go/issues/394
        v2.SetMembers(generateOIDList())

        size := v2.GetValidationDataSize()
        epoch := v2.GetExpirationEpoch()
        mm := v2.GetMembers()
        hashV2 := v2.GetValidationHash()

        require.NoError(t, x.ReadFromV2(*v2))

        require.Equal(t, epoch, x.ExpirationEpoch())
        require.Equal(t, size, x.ValidationDataSize())

        var hash checksum.Checksum
        require.NoError(t, hash.ReadFromV2(*hashV2))
        h, set := x.ValidationDataHash()
        require.True(t, set)
        require.Equal(t, hash, h)

        var oidV2 refs.ObjectID

        for i, m := range mm {
            x.Members()[i].WriteToV2(&oidV2)
            require.Equal(t, m, oidV2)
        }
    })
}

func TestStorageGroupEncoding(t *testing.T) {
    sg := storagegrouptest.StorageGroup()

    t.Run("binary", func(t *testing.T) {
        data, err := sg.Marshal()
        require.NoError(t, err)

        var sg2 storagegroup.StorageGroup
        require.NoError(t, sg2.Unmarshal(data))

        require.Equal(t, sg, sg2)
    })

    t.Run("json", func(t *testing.T) {
        data, err := sg.MarshalJSON()
        require.NoError(t, err)

        var sg2 storagegroup.StorageGroup
        require.NoError(t, sg2.UnmarshalJSON(data))

        require.Equal(t, sg, sg2)
    })
}

func TestStorageGroup_WriteToV2(t *testing.T) {
    t.Run("zero to v2", func(t *testing.T) {
        var (
            x  storagegroup.StorageGroup
            v2 storagegroupV2.StorageGroup
        )

        x.WriteToV2(&v2)

        require.Nil(t, v2.GetValidationHash())
        require.Nil(t, v2.GetMembers())
        require.Zero(t, v2.GetValidationDataSize())
        require.Zero(t, v2.GetExpirationEpoch())
    })

    t.Run("non-zero to v2", func(t *testing.T) {
        var (
            x  = storagegrouptest.StorageGroup()
            v2 storagegroupV2.StorageGroup
        )

        x.WriteToV2(&v2)

        require.Equal(t, x.ExpirationEpoch(), v2.GetExpirationEpoch())
        require.Equal(t, x.ValidationDataSize(), v2.GetValidationDataSize())

        var hash checksum.Checksum
        require.NoError(t, hash.ReadFromV2(*v2.GetValidationHash()))

        h, set := x.ValidationDataHash()
        require.True(t, set)
        require.Equal(t, h, hash)

        var oidV2 refs.ObjectID

        for i, m := range x.Members() {
            m.WriteToV2(&oidV2)
            require.Equal(t, oidV2, v2.GetMembers()[i])
        }
    })
}

func TestNew(t *testing.T) {
    t.Run("default values", func(t *testing.T) {
        var sg storagegroup.StorageGroup

        // check initial values
        require.Nil(t, sg.Members())
        _, set := sg.ValidationDataHash()
        require.False(t, set)
        require.Zero(t, sg.ExpirationEpoch())
        require.Zero(t, sg.ValidationDataSize())
    })
}

func generateOIDList() []refs.ObjectID {
    const size = 3

    mmV2 := make([]refs.ObjectID, size)
    for i := 0; i < size; i++ {
        oidV2 := make([]byte, sha256.Size)
        oidV2[i] = byte(i)

        mmV2[i].SetValue(oidV2)
    }

    return mmV2
}

func TestStorageGroup_SetMembers_DoubleSetting(t *testing.T) {
    var sg storagegroup.StorageGroup

    mm := []oid.ID{oidtest.ID(), oidtest.ID(), oidtest.ID()} // cap is 3 at least
    sg.SetMembers(mm)

    // the previous cap is more than the new length;
    // slicing should not lead to `out of range`
    // and should apply the update correctly
    sg.SetMembers(mm[:1])
}

func TestStorageGroupFromObject(t *testing.T) {
    sg := storagegrouptest.StorageGroup()

    var o objectSDK.Object

    var expAttr objectSDK.Attribute
    expAttr.SetKey(objectV2.SysAttributeExpEpoch)
    expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch(), 10))

    sgRaw, err := sg.Marshal()
    require.NoError(t, err)

    o.SetPayload(sgRaw)
    o.SetType(objectSDK.TypeStorageGroup)

    t.Run("correct object", func(t *testing.T) {
        o.SetAttributes(objectSDK.Attribute{}, expAttr, objectSDK.Attribute{})

        var sg2 storagegroup.StorageGroup
        require.NoError(t, storagegroup.ReadFromObject(&sg2, o))
        require.Equal(t, sg, sg2)
    })

    t.Run("incorrect exp attr", func(t *testing.T) {
        var sg2 storagegroup.StorageGroup

        expAttr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10))
        o.SetAttributes(expAttr)

        require.Error(t, storagegroup.ReadFromObject(&sg2, o))
    })

    t.Run("incorrect object type", func(t *testing.T) {
        var sg2 storagegroup.StorageGroup

        o.SetType(objectSDK.TypeTombstone)
        require.Error(t, storagegroup.ReadFromObject(&sg2, o))
    })
}

func TestStorageGroupToObject(t *testing.T) {
    sg := storagegrouptest.StorageGroup()

    sgRaw, err := sg.Marshal()
    require.NoError(t, err)

    t.Run("empty object", func(t *testing.T) {
        var o objectSDK.Object
        storagegroup.WriteToObject(sg, &o)

        exp, found := expFromObj(t, o)
        require.True(t, found)

        require.Equal(t, sgRaw, o.Payload())
        require.Equal(t, sg.ExpirationEpoch(), exp)
        require.Equal(t, objectSDK.TypeStorageGroup, o.Type())
    })

    t.Run("obj already has exp attr", func(t *testing.T) {
        var o objectSDK.Object

        var attr objectSDK.Attribute
        attr.SetKey(objectV2.SysAttributeExpEpoch)
        attr.SetValue(strconv.FormatUint(sg.ExpirationEpoch()+1, 10))

        o.SetAttributes(objectSDK.Attribute{}, attr, objectSDK.Attribute{})

        storagegroup.WriteToObject(sg, &o)

        exp, found := expFromObj(t, o)
        require.True(t, found)

        require.Equal(t, sgRaw, o.Payload())
        require.Equal(t, sg.ExpirationEpoch(), exp)
        require.Equal(t, objectSDK.TypeStorageGroup, o.Type())
    })
}

func expFromObj(t *testing.T, o objectSDK.Object) (uint64, bool) {
    for _, attr := range o.Attributes() {
        if attr.Key() == objectV2.SysAttributeExpEpoch {
            exp, err := strconv.ParseUint(attr.Value(), 10, 64)
            require.NoError(t, err)

            return exp, true
        }
    }

    return 0, false
}
@@ -1,13 +0,0 @@
/*
Package storagegrouptest provides functions for convenient testing of storagegroup package API.

Note that importing the package into source files is highly discouraged.

Random instance generation functions can be useful when testing expects any value, e.g.:

    import storagegrouptest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup/test"

    val := storagegrouptest.StorageGroup()
    // test the value
*/
package storagegrouptest
@@ -1,20 +0,0 @@
package storagegrouptest

import (
    checksumtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum/test"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/storagegroup"
)

// StorageGroup returns random storagegroup.StorageGroup.
func StorageGroup() storagegroup.StorageGroup {
    var x storagegroup.StorageGroup

    x.SetExpirationEpoch(66)
    x.SetValidationDataSize(322)
    x.SetValidationDataHash(checksumtest.Checksum())
    x.SetMembers([]oid.ID{oidtest.ID(), oidtest.ID()})

    return x
}