forked from TrueCloudLab/frostfs-sdk-go
Compare commits
24 commits
support/v1 ... master
Commits (SHA1):
ebd8fcd168, 717a7d00ef, dd23c6fd2b, 6a52487edd, c5c6272029, 3de256d05e, 09b79d13f3, d4e6f4e125,
b2ad1f3b3e, 32a975a20d, eaf36706a2, 02c936f397, 99e02858af, 12ddefe078, 20ab57bf7e, 3790142b10,
ec0cb2169f, 425d48f68b, 6d0da3f861, 1af9b6d18b, bd2d350b09, e9be3e6d94, 70e9e40c7f, d33b54d280
56 changed files with 2926 additions and 371 deletions
ape/chain.go (new file, 52 lines)

package ape

import (
	"errors"
	"fmt"

	apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
)

var (
	ErrInvalidChainRepresentation = errors.New("invalid chain representation")
)

// ChainID is Chain's identifier.
type ChainID []byte

// Chain is an SDK representation for v2's Chain.
//
// Note that Chain (as well as v2's Chain) and all related entities
// are NOT operated by Access-Policy-Engine (APE). The client is responsible
// to convert these types to policy-engine entities.
type Chain struct {
	// Raw is the encoded chain kind.
	// It assumes that Raw's bytes are the result of encoding provided by
	// policy-engine package.
	Raw []byte
}

// ToV2 converts Chain to v2.
func (c *Chain) ToV2() *apeV2.Chain {
	v2ct := new(apeV2.Chain)

	if c.Raw != nil {
		v2Raw := new(apeV2.ChainRaw)
		v2Raw.SetRaw(c.Raw)
		v2ct.SetKind(v2Raw)
	}

	return v2ct
}

// ReadFromV2 fills Chain from v2.
func (c *Chain) ReadFromV2(v2ct *apeV2.Chain) error {
	switch v := v2ct.GetKind().(type) {
	default:
		return fmt.Errorf("unsupported chain kind: %T", v)
	case *apeV2.ChainRaw:
		raw := v.GetRaw()
		c.Raw = raw
	}
	return nil
}
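The conversions above are symmetric, so an encoded chain survives a round trip through the v2 message unchanged. A minimal sketch under that assumption follows; the JSON blob is only an illustrative stand-in for bytes produced by the policy-engine encoder.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
)

func main() {
	// Raw is expected to hold a chain already encoded elsewhere
	// (by the policy-engine package); a small JSON blob stands in for it here.
	src := ape.Chain{Raw: []byte(`{"ID":"","Rules":[],"MatchType":"DenyPriority"}`)}

	msg := src.ToV2() // wraps the raw bytes into the v2 ChainRaw kind

	var dst ape.Chain
	if err := dst.ReadFromV2(msg); err != nil {
		fmt.Println("unsupported chain kind:", err)
		return
	}

	fmt.Println(string(dst.Raw)) // prints the original encoding
}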
ape/chain_target.go (new file, 53 lines)

package ape

import (
	apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
)

// TargetType is an SDK representation for v2's TargetType.
type TargetType apeV2.TargetType

const (
	TargetTypeUndefined TargetType = iota
	TargetTypeNamespace
	TargetTypeContainer
	TargetTypeUser
	TargetTypeGroup
)

// ToV2 converts TargetType to v2.
func (targetType TargetType) ToV2() apeV2.TargetType {
	return apeV2.TargetType(targetType)
}

// FromV2 reads TargetType from v2.
func (targetType *TargetType) FromV2(v2targetType apeV2.TargetType) {
	*targetType = TargetType(v2targetType)
}

// ChainTarget is an SDK representation for v2's ChainTarget.
//
// Note that ChainTarget (as well as v2's ChainTarget) and all related entities
// are NOT operated by Access-Policy-Engine (APE). The client is responsible
// to convert these types to policy-engine entities.
type ChainTarget struct {
	TargetType TargetType

	Name string
}

// ToV2 converts ChainTarget to v2.
func (ct *ChainTarget) ToV2() *apeV2.ChainTarget {
	v2ct := new(apeV2.ChainTarget)

	v2ct.SetTargetType(ct.TargetType.ToV2())
	v2ct.SetName(ct.Name)

	return v2ct
}

// FromV2 reads ChainTarget from v2.
func (ct *ChainTarget) FromV2(v2ct *apeV2.ChainTarget) {
	ct.TargetType.FromV2(v2ct.GetTargetType())
	ct.Name = v2ct.GetName()
}
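A short sketch of the target conversion, mirroring what the tests in the next file exercise; the target name is a placeholder for a real container identifier.

package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
)

func main() {
	ct := ape.ChainTarget{
		TargetType: ape.TargetTypeContainer,
		Name:       "container-id-placeholder", // hypothetical target name
	}

	msg := ct.ToV2() // SDK -> v2

	var back ape.ChainTarget
	back.FromV2(msg) // v2 -> SDK

	fmt.Println(back.TargetType == ct.TargetType, back.Name == ct.Name) // true true
}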
ape/chain_target_test.go (new file, 65 lines)

package ape_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	"github.com/stretchr/testify/require"

	apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
)

var (
	m = map[ape.TargetType]apeV2.TargetType{
		ape.TargetTypeUndefined: apeV2.TargetTypeUndefined,
		ape.TargetTypeNamespace: apeV2.TargetTypeNamespace,
		ape.TargetTypeContainer: apeV2.TargetTypeContainer,
		ape.TargetTypeUser:      apeV2.TargetTypeUser,
		ape.TargetTypeGroup:     apeV2.TargetTypeGroup,
	}
)

func TestTargetType(t *testing.T) {
	for typesdk, typev2 := range m {
		t.Run("from sdk to v2 "+typev2.String(), func(t *testing.T) {
			v2 := typesdk.ToV2()
			require.Equal(t, v2, typev2)
		})

		t.Run("from v2 to sdk "+typev2.String(), func(t *testing.T) {
			var typ ape.TargetType
			typ.FromV2(typev2)
			require.Equal(t, typesdk, typ)
		})
	}
}

func TestChainTarget(t *testing.T) {
	var (
		typ  = ape.TargetTypeNamespace
		name = "namespaceXXYYZZ"
	)

	t.Run("from sdk to v2", func(t *testing.T) {
		ct := ape.ChainTarget{
			TargetType: typ,
			Name:       name,
		}

		v2 := ct.ToV2()
		require.Equal(t, m[typ], v2.GetTargetType())
		require.Equal(t, name, v2.GetName())
	})

	t.Run("from v2 to sdk", func(t *testing.T) {
		v2 := &apeV2.ChainTarget{}
		v2.SetTargetType(m[typ])
		v2.SetName(name)

		var ct ape.ChainTarget
		ct.FromV2(v2)

		require.Equal(t, typ, ct.TargetType)
		require.Equal(t, name, ct.Name)
	})
}
ape/chain_test.go (new file, 43 lines)

package ape_test

import (
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	"github.com/stretchr/testify/require"

	apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
)

const (
	encoded = `{"ID":"","Rules":[{"Status":"Allow","Actions":{"Inverted":false,"Names":["GetObject"]},"Resources":{"Inverted":false,"Names":["native:object/*"]},"Any":false,"Condition":[{"Op":"StringEquals","Object":"Resource","Key":"Department","Value":"HR"}]}],"MatchType":"DenyPriority"}`
)

func TestChainData(t *testing.T) {
	t.Run("raw chain", func(t *testing.T) {
		var c ape.Chain

		b := []byte(encoded)
		c.Raw = b

		v2, ok := c.ToV2().GetKind().(*apeV2.ChainRaw)
		require.True(t, ok)
		require.Equal(t, b, v2.Raw)
	})
}

func TestChainMessageV2(t *testing.T) {
	b := []byte(encoded)

	v2Raw := &apeV2.ChainRaw{}
	v2Raw.SetRaw(b)

	v2 := &apeV2.Chain{}
	v2.SetKind(v2Raw)

	var c ape.Chain
	c.ReadFromV2(v2)

	require.NotNil(t, c.Raw)
	require.Equal(t, b, c.Raw)
}
bearer/bearer.go (114 changes)

@@ -6,7 +6,9 @@ import (
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+	apeV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/ape"
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+	apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	frostfscrypto "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto"
 	frostfsecdsa "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/crypto/ecdsa"
@@ -33,9 +35,85 @@ type Token struct {
 	sigSet bool
 	sig    refs.Signature

+	apeOverrideSet bool
+	apeOverride    APEOverride
+
 	impersonate bool
 }

+// APEOverride is the list of APE chains defined for a target.
+// These chains are meant to serve as overrides to the already defined (or even undefined)
+// APE chains for the target (see contract `Policy`).
+//
+// The server-side processing of the bearer token with set APE overrides must verify if a client is permitted
+// to override chains for the target, preventing unauthorized access through the APE mechanism.
+type APEOverride struct {
+	// Target for which chains are applied.
+	Target apeSDK.ChainTarget
+
+	// The list of APE chains.
+	Chains []apeSDK.Chain
+}
+
+// Marshal marshals APEOverride into a protobuf binary form.
+func (c *APEOverride) Marshal() ([]byte, error) {
+	return c.ToV2().StableMarshal(nil), nil
+}
+
+// Unmarshal unmarshals protobuf binary representation of APEOverride.
+func (c *APEOverride) Unmarshal(data []byte) error {
+	overrideV2 := new(acl.APEOverride)
+	if err := overrideV2.Unmarshal(data); err != nil {
+		return err
+	}
+
+	return c.FromV2(overrideV2)
+}
+
+// MarshalJSON encodes APEOverride to protobuf JSON format.
+func (c *APEOverride) MarshalJSON() ([]byte, error) {
+	return c.ToV2().MarshalJSON()
+}
+
+// UnmarshalJSON decodes APEOverride from protobuf JSON format.
+func (c *APEOverride) UnmarshalJSON(data []byte) error {
+	overrideV2 := new(acl.APEOverride)
+	if err := overrideV2.UnmarshalJSON(data); err != nil {
+		return err
+	}
+
+	return c.FromV2(overrideV2)
+}
+
+func (c *APEOverride) FromV2(tokenAPEChains *acl.APEOverride) error {
+	c.Target.FromV2(tokenAPEChains.GetTarget())
+	if chains := tokenAPEChains.GetChains(); len(chains) > 0 {
+		c.Chains = make([]apeSDK.Chain, len(chains))
+		for i := range chains {
+			if err := c.Chains[i].ReadFromV2(chains[i]); err != nil {
+				return fmt.Errorf("invalid APE chain: %w", err)
+			}
+		}
+	}
+	return nil
+}
+
+func (c *APEOverride) ToV2() *acl.APEOverride {
+	if c == nil {
+		return nil
+	}
+
+	apeOverride := new(acl.APEOverride)
+	apeOverride.SetTarget(c.Target.ToV2())
+	chains := make([]*apeV2.Chain, len(c.Chains))
+	for i := range c.Chains {
+		chains[i] = c.Chains[i].ToV2()
+	}
+	apeOverride.SetChains(chains)
+
+	return apeOverride
+}
+
 // reads Token from the acl.BearerToken message. If checkFieldPresence is set,
 // returns an error on absence of any protocol-required field.
 func (b *Token) readFromV2(m acl.BearerToken, checkFieldPresence bool) error {
@@ -48,10 +126,11 @@ func (b *Token) readFromV2(m acl.BearerToken, checkFieldPresence bool) error {

 	b.impersonate = body.GetImpersonate()

+	apeOverrides := body.GetAPEOverride()
 	eaclTable := body.GetEACL()
 	if b.eaclTableSet = eaclTable != nil; b.eaclTableSet {
 		b.eaclTable = *eacl.NewTableFromV2(eaclTable)
-	} else if checkFieldPresence && !b.impersonate {
+	} else if checkFieldPresence && !b.impersonate && apeOverrides == nil {
 		return errors.New("missing eACL table")
 	}

@@ -72,6 +151,14 @@ func (b *Token) readFromV2(m acl.BearerToken, checkFieldPresence bool) error {
 		return errors.New("missing token lifetime")
 	}

+	if b.apeOverrideSet = apeOverrides != nil; b.apeOverrideSet {
+		if err = b.apeOverride.FromV2(apeOverrides); err != nil {
+			return err
+		}
+	} else if checkFieldPresence && !b.impersonate && !b.eaclTableSet {
+		return errors.New("missing APE override")
+	}
+
 	sig := m.GetSignature()
 	if b.sigSet = sig != nil; sig != nil {
 		b.sig = *sig
@@ -90,7 +177,7 @@ func (b *Token) ReadFromV2(m acl.BearerToken) error {
 }

 func (b Token) fillBody() *acl.BearerTokenBody {
-	if !b.eaclTableSet && !b.targetUserSet && !b.lifetimeSet && !b.impersonate {
+	if !b.eaclTableSet && !b.targetUserSet && !b.lifetimeSet && !b.impersonate && !b.apeOverrideSet {
 		return nil
 	}

@@ -116,6 +203,10 @@ func (b Token) fillBody() *acl.BearerTokenBody {
 		body.SetLifetime(&lifetime)
 	}

+	if b.apeOverrideSet {
+		body.SetAPEOverride(b.apeOverride.ToV2())
+	}
+
 	body.SetImpersonate(b.impersonate)

 	return &body
@@ -214,6 +305,25 @@ func (b Token) EACLTable() eacl.Table {
 	return eacl.Table{}
 }

+// SetAPEOverride sets APE override to the bearer token.
+//
+// See also: APEOverride.
+func (b *Token) SetAPEOverride(v APEOverride) {
+	b.apeOverride = v
+	b.apeOverrideSet = true
+}
+
+// APEOverride returns APE override set by SetAPEOverride.
+//
+// Zero Token has zero APEOverride.
+func (b *Token) APEOverride() APEOverride {
+	if b.apeOverrideSet {
+		return b.apeOverride
+	}
+
+	return APEOverride{}
+}
+
 // SetImpersonate mark token as impersonate to consider token signer as request owner.
 // If this field is true extended EACLTable in token body isn't processed.
 func (b *Token) SetImpersonate(v bool) {
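A hedged sketch of how these pieces fit together: attach an APE override to a bearer token and sign it. The container ID and the chain bytes are placeholders, and Token.Sign/VerifySignature are assumed to behave as elsewhere in this SDK.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
)

func main() {
	override := bearer.APEOverride{
		Target: ape.ChainTarget{
			TargetType: ape.TargetTypeContainer,
			Name:       "F8JsMnChywiPvbDvpxMbjTjx5KhWHHp6gCDt8BhzL9kF", // sample container ID
		},
		// Raw is assumed to be produced by the policy-engine encoder.
		Chains: []ape.Chain{{Raw: []byte(`{"ID":"","Rules":[],"MatchType":"DenyPriority"}`)}},
	}

	var token bearer.Token
	token.SetIat(1)
	token.SetNbf(1)
	token.SetExp(100)
	token.SetAPEOverride(override)

	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Sign is assumed to take the issuer's ECDSA private key, as in the rest of the package.
	if err := token.Sign(*key); err != nil {
		panic(err)
	}

	fmt.Println(token.VerifySignature()) // true for a freshly signed token
}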
@@ -3,6 +3,7 @@ package bearer_test
 import (
 	"bytes"
 	"math/rand"
+	"reflect"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
@@ -81,6 +82,58 @@ func TestToken_SetEACLTable(t *testing.T) {
 	require.True(t, isEqualEACLTables(eaclTable, val.EACLTable()))
 }

+func TestToken_SetAPEOverrides(t *testing.T) {
+	var val bearer.Token
+	var m acl.BearerToken
+	filled := bearertest.Token()
+
+	val.WriteToV2(&m)
+	require.Zero(t, m.GetBody())
+
+	val2 := filled
+
+	require.NoError(t, val2.Unmarshal(val.Marshal()))
+	require.Zero(t, val2.APEOverride())
+
+	val2 = filled
+
+	jd, err := val.MarshalJSON()
+	require.NoError(t, err)
+
+	require.NoError(t, val2.UnmarshalJSON(jd))
+	require.Zero(t, val2.APEOverride())
+
+	// set value
+
+	tApe := bearertest.APEOverride()
+
+	val.SetAPEOverride(tApe)
+	require.Equal(t, tApe, val.APEOverride())
+
+	val.WriteToV2(&m)
+	require.NotNil(t, m.GetBody().GetAPEOverride())
+	require.True(t, tokenAPEOverridesEqual(tApe.ToV2(), m.GetBody().GetAPEOverride()))
+
+	val2 = filled
+
+	require.NoError(t, val2.Unmarshal(val.Marshal()))
+	apeOverride := val2.APEOverride()
+	require.True(t, tokenAPEOverridesEqual(tApe.ToV2(), apeOverride.ToV2()))
+
+	val2 = filled
+
+	jd, err = val.MarshalJSON()
+	require.NoError(t, err)
+
+	require.NoError(t, val2.UnmarshalJSON(jd))
+	apeOverride = val.APEOverride()
+	require.True(t, tokenAPEOverridesEqual(tApe.ToV2(), apeOverride.ToV2()))
+}
+
+func tokenAPEOverridesEqual(lhs, rhs *acl.APEOverride) bool {
+	return reflect.DeepEqual(lhs, rhs)
+}
+
 func TestToken_ForUser(t *testing.T) {
 	var val bearer.Token
 	var m acl.BearerToken
@@ -1,6 +1,7 @@ package bearertest
 package bearertest

 import (
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	eacltest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl/test"
 	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
@@ -15,6 +16,17 @@ func Token() (t bearer.Token) {
 	t.SetIat(1)
 	t.ForUser(usertest.ID())
 	t.SetEACLTable(*eacltest.Table())
+	t.SetAPEOverride(APEOverride())

 	return t
 }

+func APEOverride() bearer.APEOverride {
+	return bearer.APEOverride{
+		Target: ape.ChainTarget{
+			TargetType: ape.TargetTypeContainer,
+			Name:       "F8JsMnChywiPvbDvpxMbjTjx5KhWHHp6gCDt8BhzL9kF",
+		},
+		Chains: []ape.Chain{{Raw: []byte("{}")}},
+	}
+}
client/apemanager_add_chain.go (new file, 79 lines)

package client

import (
	"context"
	"fmt"

	apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
	rpcapi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
	sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
	apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// PrmAPEManagerAddChain groups parameters of APEManagerAddChain operation.
type PrmAPEManagerAddChain struct {
	XHeaders []string

	ChainTarget apeSDK.ChainTarget

	Chain apeSDK.Chain
}

func (prm *PrmAPEManagerAddChain) buildRequest(c *Client) (*apemanagerV2.AddChainRequest, error) {
	if len(prm.XHeaders)%2 != 0 {
		return nil, errorInvalidXHeaders
	}

	req := new(apemanagerV2.AddChainRequest)
	reqBody := new(apemanagerV2.AddChainRequestBody)

	reqBody.SetTarget(prm.ChainTarget.ToV2())
	reqBody.SetChain(prm.Chain.ToV2())

	req.SetBody(reqBody)

	var meta sessionV2.RequestMetaHeader
	writeXHeadersToMeta(prm.XHeaders, &meta)

	c.prepareRequest(req, &meta)

	return req, nil
}

type ResAPEManagerAddChain struct {
	statusRes

	// ChainID of set Chain. If Chain does not contain chainID before request, then
	// ChainID is generated.
	ChainID apeSDK.ChainID
}

// APEManagerAddChain sets Chain for ChainTarget.
func (c *Client) APEManagerAddChain(ctx context.Context, prm PrmAPEManagerAddChain) (*ResAPEManagerAddChain, error) {
	req, err := prm.buildRequest(c)
	if err != nil {
		return nil, err
	}

	if err := signature.SignServiceMessage(&c.prm.Key, req); err != nil {
		return nil, fmt.Errorf("sign request: %w", err)
	}

	resp, err := rpcapi.AddChain(&c.c, req, client.WithContext(ctx))
	if err != nil {
		return nil, err
	}

	var res ResAPEManagerAddChain
	res.st, err = c.processResponse(resp)
	if err != nil || !apistatus.IsSuccessful(res.st) {
		return &res, err
	}

	res.ChainID = resp.GetBody().GetChainID()

	return &res, nil
}
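A hedged usage sketch for the new call, assuming a Client that has already been constructed and dialed elsewhere (client setup is outside this diff); the raw chain bytes are assumed to come from the policy-engine encoder.

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)

// addChain attaches a raw APE chain to a container target and returns the
// chain ID assigned by the server.
func addChain(ctx context.Context, c *client.Client, containerID string, rawChain []byte) (ape.ChainID, error) {
	res, err := c.APEManagerAddChain(ctx, client.PrmAPEManagerAddChain{
		ChainTarget: ape.ChainTarget{
			TargetType: ape.TargetTypeContainer,
			Name:       containerID,
		},
		Chain: ape.Chain{Raw: rawChain},
	})
	if err != nil {
		return nil, err
	}

	return res.ChainID, nil
}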
client/apemanager_list_chains.go (new file, 80 lines)

package client

import (
	"context"
	"fmt"

	apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
	rpcapi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
	sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
	apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// PrmAPEManagerListChains groups parameters of APEManagerListChains operation.
type PrmAPEManagerListChains struct {
	XHeaders []string

	ChainTarget apeSDK.ChainTarget
}

func (prm *PrmAPEManagerListChains) buildRequest(c *Client) (*apemanagerV2.ListChainsRequest, error) {
	if len(prm.XHeaders)%2 != 0 {
		return nil, errorInvalidXHeaders
	}

	req := new(apemanagerV2.ListChainsRequest)
	reqBody := new(apemanagerV2.ListChainsRequestBody)

	reqBody.SetTarget(prm.ChainTarget.ToV2())

	req.SetBody(reqBody)

	var meta sessionV2.RequestMetaHeader
	writeXHeadersToMeta(prm.XHeaders, &meta)

	c.prepareRequest(req, &meta)

	return req, nil
}

type ResAPEManagerListChains struct {
	statusRes

	Chains []apeSDK.Chain
}

// APEManagerListChains lists Chains for ChainTarget.
func (c *Client) APEManagerListChains(ctx context.Context, prm PrmAPEManagerListChains) (*ResAPEManagerListChains, error) {
	req, err := prm.buildRequest(c)
	if err != nil {
		return nil, err
	}

	if err := signature.SignServiceMessage(&c.prm.Key, req); err != nil {
		return nil, fmt.Errorf("sign request: %w", err)
	}

	resp, err := rpcapi.ListChains(&c.c, req, client.WithContext(ctx))
	if err != nil {
		return nil, err
	}

	var res ResAPEManagerListChains
	res.st, err = c.processResponse(resp)
	if err != nil || !apistatus.IsSuccessful(res.st) {
		return nil, err
	}

	for _, ch := range resp.GetBody().GetChains() {
		var chSDK apeSDK.Chain
		if err := chSDK.ReadFromV2(ch); err != nil {
			return nil, err
		}
		res.Chains = append(res.Chains, chSDK)
	}

	return &res, nil
}
client/apemanager_remove_chain.go (new file, 73 lines)

package client

import (
	"context"
	"fmt"

	apemanagerV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
	rpcapi "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/rpc/client"
	sessionV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/signature"
	apeSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// PrmAPEManagerRemoveChain groups parameters of APEManagerRemoveChain operation.
type PrmAPEManagerRemoveChain struct {
	XHeaders []string

	ChainTarget apeSDK.ChainTarget

	ChainID apeSDK.ChainID
}

func (prm *PrmAPEManagerRemoveChain) buildRequest(c *Client) (*apemanagerV2.RemoveChainRequest, error) {
	if len(prm.XHeaders)%2 != 0 {
		return nil, errorInvalidXHeaders
	}

	req := new(apemanagerV2.RemoveChainRequest)
	reqBody := new(apemanagerV2.RemoveChainRequestBody)

	reqBody.SetTarget(prm.ChainTarget.ToV2())
	reqBody.SetChainID(prm.ChainID)

	req.SetBody(reqBody)

	var meta sessionV2.RequestMetaHeader
	writeXHeadersToMeta(prm.XHeaders, &meta)

	c.prepareRequest(req, &meta)

	return req, nil
}

type ResAPEManagerRemoveChain struct {
	statusRes
}

// APEManagerRemoveChain removes Chain with ChainID defined for ChainTarget.
func (c *Client) APEManagerRemoveChain(ctx context.Context, prm PrmAPEManagerRemoveChain) (*ResAPEManagerRemoveChain, error) {
	req, err := prm.buildRequest(c)
	if err != nil {
		return nil, err
	}

	if err := signature.SignServiceMessage(&c.prm.Key, req); err != nil {
		return nil, fmt.Errorf("sign request: %w", err)
	}

	resp, err := rpcapi.RemoveChain(&c.c, req, client.WithContext(ctx))
	if err != nil {
		return nil, err
	}

	var res ResAPEManagerRemoveChain
	res.st, err = c.processResponse(resp)
	if err != nil || !apistatus.IsSuccessful(res.st) {
		return &res, err
	}

	return &res, nil
}
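The list and remove calls pair naturally; a hedged sketch follows. Extracting a chain ID out of the opaque Raw bytes is delegated to the caller here (idOf is a placeholder), since the SDK Chain only carries an encoded payload.

package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ape"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
)

// dropAllChains lists every chain attached to the target and removes them one by one.
func dropAllChains(ctx context.Context, c *client.Client, target ape.ChainTarget, idOf func(ape.Chain) ape.ChainID) error {
	res, err := c.APEManagerListChains(ctx, client.PrmAPEManagerListChains{ChainTarget: target})
	if err != nil {
		return err
	}

	for _, ch := range res.Chains {
		id := idOf(ch) // hypothetical helper: decode Raw and read its chain ID
		if _, err := c.APEManagerRemoveChain(ctx, client.PrmAPEManagerRemoveChain{
			ChainTarget: target,
			ChainID:     id,
		}); err != nil {
			return fmt.Errorf("remove chain %x: %w", id, err)
		}
	}

	return nil
}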
@@ -61,6 +61,12 @@ func IsErrSessionNotFound(err error) bool {
 	return wrapsErrType[*apistatus.SessionTokenNotFound](err)
 }

+// IsErrAPEManagerAccessDenied checks if err corresponds to FrostFS status return
+// corresponding to apemanager access deny. Supports wrapped errors.
+func IsErrAPEManagerAccessDenied(err error) bool {
+	return wrapsErrType[*apistatus.APEManagerAccessDenied](err)
+}
+
 // returns error describing missing field with the given name.
 func newErrMissingResponseField(name string) error {
 	return fmt.Errorf("missing %s field in the response", name)
@@ -150,6 +150,9 @@ func (x *ObjectReader) ReadHeader(dst *object.Object) bool {
 	case *v2object.SplitInfo:
 		x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v))
 		return false
+	case *v2object.ECInfo:
+		x.err = object.NewECInfoError(object.NewECInfoFromV2(v))
+		return false
 	case *v2object.GetObjectPartInit:
 		partInit = v
 	}
@@ -258,6 +261,7 @@ func (x *ObjectReader) close(ignoreEOF bool) (*ResObjectGet, error) {
 // Return errors:
 //
 //	*object.SplitInfoError (returned on virtual objects with PrmObjectGet.MakeRaw).
+//	*object.ECInfoError (returned on erasure-coded objects with PrmObjectGet.MakeRaw).
 //
 // Return statuses:
 //   - global (see Client docs);
@@ -454,6 +458,7 @@ func (prm *PrmObjectHead) buildRequest(c *Client) (*v2object.HeadRequest, error)
 // Return errors:
 //
 //	*object.SplitInfoError (returned on virtual objects with PrmObjectHead.MakeRaw).
+//	*object.ECInfoError (returned on erasure-coded objects with PrmObjectHead.MakeRaw).
 //
 // Return statuses:
 //   - global (see Client docs);
@@ -502,6 +507,8 @@ func (c *Client) ObjectHead(ctx context.Context, prm PrmObjectHead) (*ResObjectH
 		return nil, fmt.Errorf("unexpected header type %T", v)
 	case *v2object.SplitInfo:
 		return nil, object.NewSplitInfoError(object.NewSplitInfoFromV2(v))
+	case *v2object.ECInfo:
+		return nil, object.NewECInfoError(object.NewECInfoFromV2(v))
 	case *v2object.HeaderWithSignature:
 		res.hdr = v
 	}
@@ -665,6 +672,9 @@ func (x *ObjectRangeReader) readChunk(buf []byte) (int, bool) {
 	case *v2object.SplitInfo:
 		x.err = object.NewSplitInfoError(object.NewSplitInfoFromV2(v))
 		return read, false
+	case *v2object.ECInfo:
+		x.err = object.NewECInfoError(object.NewECInfoFromV2(v))
+		return read, false
 	case *v2object.GetRangePartChunk:
 		partChunk = v
 	}
@@ -725,6 +735,7 @@ func (x *ObjectRangeReader) close(ignoreEOF bool) (*ResObjectRange, error) {
 // Return errors:
 //
 //	*object.SplitInfoError (returned on virtual objects with PrmObjectRange.MakeRaw).
+//	*object.ECInfoError (returned on erasure-coded objects with PrmObjectRange.MakeRaw).
 //
 // Return statuses:
 //   - global (see Client docs);
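The hunks above mean that raw GET/HEAD/RANGE calls can now surface an ECInfoError in addition to the existing SplitInfoError. A hedged sketch of handling it, assuming ECInfoError is detected via errors.As the same way SplitInfoError usually is, and that prm is filled with the container/object IDs elsewhere:

package main

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// headRaw issues a HEAD with MakeRaw semantics and reports whether the object
// turned out to be erasure-coded or split.
func headRaw(ctx context.Context, c *client.Client, prm client.PrmObjectHead) error {
	_, err := c.ObjectHead(ctx, prm)

	var ecErr *object.ECInfoError
	var splitErr *object.SplitInfoError
	switch {
	case errors.As(err, &ecErr):
		fmt.Println("object is erasure-coded; it must be assembled from EC chunks")
	case errors.As(err, &splitErr):
		fmt.Println("object is split into parts")
	case err != nil:
		return err
	}

	return nil
}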
@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/ecdsa"

+	buffPool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -36,6 +37,8 @@ type PrmObjectPutInit struct {
 	WithoutHomomorphHash bool

 	Key *ecdsa.PrivateKey
+
+	Pool *buffPool.BufferPool
 }

 // SetCopiesNumber sets number of object copies that is enough to consider put successful.
@@ -3,6 +3,7 @@ package client
 import (
 	"context"

+	buffPool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
@@ -26,6 +27,7 @@ func (c *Client) objectPutInitTransformer(prm PrmObjectPutInit) (*objectWriterTr
 		MaxSize:                prm.MaxSize,
 		WithoutHomomorphicHash: prm.WithoutHomomorphHash,
 		NetworkState:           prm.EpochSource,
+		Pool:                   prm.Pool,
 	})
 	return &w, nil
 }
@@ -114,6 +116,7 @@ func (it *internalTarget) tryPutSingle(ctx context.Context, o *object.Object) (b
 	}

 	if err == nil {
+		it.returnBuffPool(o.Payload())
 		id, _ := o.ID()
 		it.res = &ResObjectPut{
 			statusRes: res.statusRes,
@@ -126,3 +129,12 @@ func (it *internalTarget) tryPutSingle(ctx context.Context, o *object.Object) (b
 	}
 	return true, err
 }

+func (it *internalTarget) returnBuffPool(playback []byte) {
+	if it.prm.Pool == nil {
+		return
+	}
+	var buffer buffPool.Buffer
+	buffer.Data = playback
+	it.prm.Pool.Put(&buffer)
+}
client/status/apemanager.go (new file, 53 lines)

package apistatus

import (
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/status"
)

// APEManagerAccessDenied describes status of the failure because of the access control violation.
// Instances provide Status and StatusV2 interfaces.
type APEManagerAccessDenied struct {
	v2 status.Status
}

const defaultAPEManagerAccessDeniedMsg = "apemanager access denied"

func (x *APEManagerAccessDenied) Error() string {
	msg := x.v2.Message()
	if msg == "" {
		msg = defaultAPEManagerAccessDeniedMsg
	}

	return errMessageStatusV2(
		globalizeCodeV2(apemanager.StatusAPEManagerAccessDenied, apemanager.GlobalizeFail),
		msg,
	)
}

func (x *APEManagerAccessDenied) fromStatusV2(st *status.Status) {
	x.v2 = *st
}

// ToStatusV2 converts APEManagerAccessDenied to v2's Status.
// If the value was returned by FromStatusV2, returns the source message.
// Otherwise, returns message with
// - code: APE_MANAGER_ACCESS_DENIED;
// - string message: "apemanager access denied";
// - details: empty.
func (x APEManagerAccessDenied) ToStatusV2() *status.Status {
	x.v2.SetCode(globalizeCodeV2(apemanager.StatusAPEManagerAccessDenied, apemanager.GlobalizeFail))
	x.v2.SetMessage(defaultAPEManagerAccessDeniedMsg)
	return &x.v2
}

// WriteReason writes human-readable access rejection reason.
func (x *APEManagerAccessDenied) WriteReason(reason string) {
	apemanager.WriteAccessDeniedDesc(&x.v2, reason)
}

// Reason returns human-readable access rejection reason returned by the server.
// Returns empty value if the reason is not present.
func (x APEManagerAccessDenied) Reason() string {
	return apemanager.ReadAccessDeniedDesc(x.v2)
}
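Together with the IsErrAPEManagerAccessDenied helper from the earlier hunk, callers can react to a denial and read its reason. A hedged sketch, assuming the client is configured so that failed statuses are surfaced as errors:

package main

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
)

// explainDenied runs an apemanager call and prints the server-provided reason
// if the request was rejected by access control.
func explainDenied(ctx context.Context, c *client.Client, prm client.PrmAPEManagerAddChain) {
	_, err := c.APEManagerAddChain(ctx, prm)
	if err == nil {
		return
	}

	if client.IsErrAPEManagerAccessDenied(err) {
		var denied *apistatus.APEManagerAccessDenied
		if errors.As(err, &denied) {
			fmt.Println("access denied:", denied.Reason())
		}
	}
}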
@@ -3,6 +3,7 @@ package apistatus
 import (
 	"fmt"

+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/apemanager"
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
@@ -92,6 +93,12 @@ func FromStatusV2(st *status.Status) Status {
 		case session.StatusTokenExpired:
 			decoder = new(SessionTokenExpired)
 		}
+	case apemanager.LocalizeFailStatus(&code):
+		//nolint:exhaustive
+		switch code {
+		case apemanager.StatusAPEManagerAccessDenied:
+			decoder = new(APEManagerAccessDenied)
+		}
 	}

 	if decoder == nil {
@@ -125,6 +125,12 @@ func TestToStatusV2(t *testing.T) {
 		}),
 		codeV2: 4097,
 	},
+	{
+		status: (statusConstructor)(func() apistatus.Status {
+			return new(apistatus.APEManagerAccessDenied)
+		}),
+		codeV2: 5120,
+	},
 	{
 		status: (statusConstructor)(func() apistatus.Status {
 			return new(apistatus.NodeUnderMaintenance)
@@ -278,6 +284,12 @@ func TestFromStatusV2(t *testing.T) {
 		}),
 		codeV2: 4097,
 	},
+	{
+		status: (statusConstructor)(func() apistatus.Status {
+			return new(apistatus.APEManagerAccessDenied)
+		}),
+		codeV2: 5120,
+	},
 	{
 		status: (statusConstructor)(func() apistatus.Status {
 			return new(apistatus.NodeUnderMaintenance)
go.mod (5 changes)

@@ -3,7 +3,7 @@ module git.frostfs.info/TrueCloudLab/frostfs-sdk-go
 go 1.20

 require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240306101814-c1c7b344b9c0
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3
 	git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
@@ -11,6 +11,7 @@ require (
 	github.com/antlr4-go/antlr/v4 v4.13.0
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/golang-lru/v2 v2.0.2
+	github.com/klauspost/reedsolomon v1.12.1
 	github.com/mr-tron/base58 v1.2.0
 	github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc
 	github.com/stretchr/testify v1.8.3
@@ -28,10 +29,12 @@ require (
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/golang-lru v0.6.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
 	github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
 	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce // indirect
 	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.1 // indirect
 	github.com/twmb/murmur3 v1.1.8 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.2.1 // indirect
go.sum (13 changes)

@@ -31,8 +31,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240306101814-c1c7b344b9c0 h1:4iyAj9k7W29YpyzUTwMuMBbL3G3M96kMbX62OwGNGfE=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3 h1:H5GvrVlowIMWfzqQkhY0p0myooJxQ1sMRVSFfXawwWg=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240306101814-c1c7b344b9c0/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
 git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
 git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
@@ -227,6 +227,10 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
+github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -296,6 +300,7 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -328,8 +333,9 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -548,6 +554,7 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -209,6 +209,25 @@ func (m NetMap) SelectFilterNodes(expr *SelectFilterExpr) ([][]NodeInfo, error)
 	return ret, nil
 }

+func countNodes(r netmap.Replica) uint32 {
+	if r.GetCount() != 0 {
+		return r.GetCount()
+	}
+	return r.GetECDataCount() + r.GetECParityCount()
+}
+
+func (p PlacementPolicy) isUnique() bool {
+	if p.unique {
+		return true
+	}
+	for _, r := range p.replicas {
+		if r.GetECDataCount() != 0 || r.GetECParityCount() != 0 {
+			return true
+		}
+	}
+	return false
+}
+
 // ContainerNodes returns two-dimensional list of nodes as a result of applying
 // given PlacementPolicy to the NetMap. Each line of the list corresponds to a
 // replica descriptor. Line order corresponds to order of ReplicaDescriptor list
@@ -230,6 +249,7 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, pivot []byte) ([][]NodeInfo, e
 		return nil, err
 	}

+	unique := p.isUnique()
 	result := make([][]NodeInfo, len(p.replicas))

 	// Note that the cached selectors are not used when the policy contains the UNIQUE flag.
@@ -240,7 +260,7 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, pivot []byte) ([][]NodeInfo, e
 		sName := p.replicas[i].GetSelector()
 		if sName == "" && !(len(p.replicas) == 1 && len(p.selectors) == 1) {
 			var s netmap.Selector
-			s.SetCount(p.replicas[i].GetCount())
+			s.SetCount(countNodes(p.replicas[i]))
 			s.SetFilter(mainFilterName)

 			nodes, err := c.getSelection(s)
@@ -250,14 +270,14 @@ func (m NetMap) ContainerNodes(p PlacementPolicy, pivot []byte) ([][]NodeInfo, e

 			result[i] = append(result[i], flattenNodes(nodes)...)

-			if p.unique {
+			if unique {
 				c.addUsedNodes(result[i]...)
 			}

 			continue
 		}

-		if p.unique {
+		if unique {
 			if c.processedSelectors[sName] == nil {
 				return nil, fmt.Errorf("selector not found: '%s'", sName)
 			}
@@ -62,6 +62,8 @@ func (x *NetworkInfo) readFromV2(m netmap.NetworkInfo, checkFieldPresence bool)
 			configEpochDuration,
 			configIRCandidateFee,
 			configMaxObjSize,
+			configMaxECDataCount,
+			configMaxECParityCount,
 			configWithdrawalFee:
 			_, err = decodeConfigValueUint64(prm.GetValue())
 		case configHomomorphicHashingDisabled,
@@ -234,6 +236,8 @@ func (x *NetworkInfo) IterateRawNetworkParameters(f func(name string, value []by
 			configEpochDuration,
 			configIRCandidateFee,
 			configMaxObjSize,
+			configMaxECDataCount,
+			configMaxECParityCount,
 			configWithdrawalFee,
 			configHomomorphicHashingDisabled,
 			configMaintenanceModeAllowed:
@@ -432,6 +436,34 @@ func (x NetworkInfo) MaxObjectSize() uint64 {
 	return x.configUint64(configMaxObjSize)
 }

+const configMaxECDataCount = "MaxECDataCount"
+
+// SetMaxECDataCount sets maximum number of data shards for erasure codes.
+//
+// Zero means no restrictions.
+func (x *NetworkInfo) SetMaxECDataCount(dataCount uint64) {
+	x.setConfigUint64(configMaxECDataCount, dataCount)
+}
+
+// MaxECDataCount returns maximum number of data shards for erasure codes.
+func (x NetworkInfo) MaxECDataCount() uint64 {
+	return x.configUint64(configMaxECDataCount)
+}
+
+const configMaxECParityCount = "MaxECParityCount"
+
+// SetMaxECParityCount sets maximum number of parity shards for erasure codes.
+//
+// Zero means no restrictions.
+func (x *NetworkInfo) SetMaxECParityCount(parityCount uint64) {
+	x.setConfigUint64(configMaxECParityCount, parityCount)
+}
+
+// MaxECParityCount returns maximum number of parity shards for erasure codes.
+func (x NetworkInfo) MaxECParityCount() uint64 {
+	return x.configUint64(configMaxECParityCount)
+}
+
 const configWithdrawalFee = "WithdrawFee"

 // SetWithdrawalFee sets fee for withdrawals from the FrostFS accounts that
@@ -173,6 +173,32 @@ func TestNetworkInfo_MaxObjectSize(t *testing.T) {
 	)
 }

+func TestNetworkInfo_MaxECDataCount(t *testing.T) {
+	testConfigValue(t,
+		func(x NetworkInfo) any { return x.MaxECDataCount() },
+		func(info *NetworkInfo, val any) { info.SetMaxECDataCount(val.(uint64)) },
+		uint64(1), uint64(2),
+		"MaxECDataCount", func(val any) []byte {
+			data := make([]byte, 8)
+			binary.LittleEndian.PutUint64(data, val.(uint64))
+			return data
+		},
+	)
+}
+
+func TestNetworkInfo_MaxECParityCount(t *testing.T) {
+	testConfigValue(t,
+		func(x NetworkInfo) any { return x.MaxECParityCount() },
+		func(info *NetworkInfo, val any) { info.SetMaxECParityCount(val.(uint64)) },
+		uint64(1), uint64(2),
+		"MaxECParityCount", func(val any) []byte {
+			data := make([]byte, 8)
+			binary.LittleEndian.PutUint64(data, val.(uint64))
+			return data
+		},
+	)
+}
+
 func TestNetworkInfo_WithdrawalFee(t *testing.T) {
 	testConfigValue(t,
 		func(x NetworkInfo) any { return x.WithdrawalFee() },
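On the client side the two new limits read like any other uint64 network parameter. A minimal sketch, assuming a NetworkInfo value already fetched from a node:

    package example

    import (
    	"fmt"

    	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    )

    // printECLimits prints the erasure-coding limits advertised by the network.
    // Zero means the network imposes no restriction.
    func printECLimits(ni netmap.NetworkInfo) {
    	fmt.Printf("EC limits: data<=%d, parity<=%d\n", ni.MaxECDataCount(), ni.MaxECParityCount())
    }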
@@ -4,10 +4,14 @@ options {
     tokenVocab = QueryLexer;
 }

-policy: UNIQUE? repStmt+ cbfStmt? selectStmt* filterStmt* EOF;
+policy: UNIQUE? (repStmt | ecStmt)+ cbfStmt? selectStmt* filterStmt* EOF;

 selectFilterExpr: cbfStmt? selectStmt? filterStmt* EOF;

+ecStmt:
+    EC Data = NUMBER1 DOT Parity = NUMBER1 // erasure code configuration
+    (IN Selector = ident)?; // optional selector name
+
 repStmt:
     REP Count = NUMBER1 // number of object replicas
     (IN Selector = ident)?; // optional selector name
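With the ecStmt production in place, a policy can request an erasure-coded group instead of plain replicas. The same EC policy used by the decode tests further below looks like this (the selector name X is arbitrary):

    EC 1.2 IN X
    SELECT 3 IN City FROM * AS X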
Binary file not shown.
Binary file not shown.
@@ -7,6 +7,7 @@ SIMPLE_OP : 'EQ' | 'NE' | 'GE' | 'GT' | 'LT' | 'LE';

 UNIQUE : 'UNIQUE';
 REP : 'REP';
+EC : 'EC';
 IN : 'IN';
 AS : 'AS';
 CBF : 'CBF';
@@ -14,6 +15,7 @@ SELECT : 'SELECT';
 FROM : 'FROM';
 FILTER : 'FILTER';
 WILDCARD : '*';
+DOT : '.';

 CLAUSE_SAME : 'SAME';
 CLAUSE_DISTINCT : 'DISTINCT';
Binary file not shown.
Binary file not shown.
@@ -1,4 +1,4 @@
-// Code generated from Query.g4 by ANTLR 4.13.0. DO NOT EDIT.
+// Code generated from /repo/frostfs/sdk-go/netmap/parser/Query.g4 by ANTLR 4.13.0. DO NOT EDIT.

 package parser // Query

@@ -16,6 +16,10 @@ func (v *BaseQueryVisitor) VisitSelectFilterExpr(ctx *SelectFilterExprContext) i
 	return v.VisitChildren(ctx)
 }

+func (v *BaseQueryVisitor) VisitEcStmt(ctx *EcStmtContext) interface{} {
+	return v.VisitChildren(ctx)
+}
+
 func (v *BaseQueryVisitor) VisitRepStmt(ctx *RepStmtContext) interface{} {
 	return v.VisitChildren(ctx)
 }
@@ -1,4 +1,4 @@
-// Code generated from QueryLexer.g4 by ANTLR 4.13.0. DO NOT EDIT.
+// Code generated from /repo/frostfs/sdk-go/netmap/parser/QueryLexer.g4 by ANTLR 4.13.0. DO NOT EDIT.

 package parser

@@ -43,119 +43,123 @@ func querylexerLexerInit() {
 		"DEFAULT_MODE",
 	}
 	staticData.LiteralNames = []string{
-		"", "'NOT'", "'AND'", "'OR'", "", "'UNIQUE'", "'REP'", "'IN'", "'AS'",
-		"'CBF'", "'SELECT'", "'FROM'", "'FILTER'", "'*'", "'SAME'", "'DISTINCT'",
-		"'('", "')'", "'@'", "", "", "'0'",
+		"", "'NOT'", "'AND'", "'OR'", "", "'UNIQUE'", "'REP'", "'EC'", "'IN'",
+		"'AS'", "'CBF'", "'SELECT'", "'FROM'", "'FILTER'", "'*'", "'.'", "'SAME'",
+		"'DISTINCT'", "'('", "')'", "'@'", "", "", "'0'",
 	}
 	staticData.SymbolicNames = []string{
-		"", "NOT_OP", "AND_OP", "OR_OP", "SIMPLE_OP", "UNIQUE", "REP", "IN",
-		"AS", "CBF", "SELECT", "FROM", "FILTER", "WILDCARD", "CLAUSE_SAME",
+		"", "NOT_OP", "AND_OP", "OR_OP", "SIMPLE_OP", "UNIQUE", "REP", "EC",
+		"IN", "AS", "CBF", "SELECT", "FROM", "FILTER", "WILDCARD", "DOT", "CLAUSE_SAME",
 		"CLAUSE_DISTINCT", "L_PAREN", "R_PAREN", "AT", "IDENT", "NUMBER1", "ZERO",
 		"STRING", "WS",
 	}
 	staticData.RuleNames = []string{
-		"NOT_OP", "AND_OP", "OR_OP", "SIMPLE_OP", "UNIQUE", "REP", "IN", "AS",
-		"CBF", "SELECT", "FROM", "FILTER", "WILDCARD", "CLAUSE_SAME", "CLAUSE_DISTINCT",
-		"L_PAREN", "R_PAREN", "AT", "IDENT", "Digit", "Nondigit", "NUMBER1",
-		"ZERO", "STRING", "ESC", "UNICODE", "HEX", "SAFECODEPOINTSINGLE", "SAFECODEPOINTDOUBLE",
-		"WS",
+		"NOT_OP", "AND_OP", "OR_OP", "SIMPLE_OP", "UNIQUE", "REP", "EC", "IN",
+		"AS", "CBF", "SELECT", "FROM", "FILTER", "WILDCARD", "DOT", "CLAUSE_SAME",
+		"CLAUSE_DISTINCT", "L_PAREN", "R_PAREN", "AT", "IDENT", "Digit", "Nondigit",
+		"NUMBER1", "ZERO", "STRING", "ESC", "UNICODE", "HEX", "SAFECODEPOINTSINGLE",
+		"SAFECODEPOINTDOUBLE", "WS",
 	}
 	staticData.PredictionContextCache = antlr.NewPredictionContextCache()
 	staticData.serializedATN = []int32{
4, 0, 23, 213, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
|
4, 0, 25, 222, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
|
||||||
4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
|
4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
|
||||||
10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
|
10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
|
||||||
7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
|
7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
|
||||||
20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
|
20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
|
||||||
2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 1, 0, 1, 0, 1,
|
2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
|
||||||
0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1,
|
31, 7, 31, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2,
|
||||||
3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 85, 8, 3, 1, 4,
|
1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
|
||||||
1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6,
|
1, 3, 3, 3, 89, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 5, 1,
|
||||||
1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9,
|
5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1,
|
||||||
1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 11,
|
9, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1,
|
||||||
1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1,
|
11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12,
|
||||||
13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
|
1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1,
|
||||||
1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 5, 18, 152, 8,
|
16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 17, 1, 17,
|
||||||
18, 10, 18, 12, 18, 155, 9, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21,
|
1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 5, 20, 161, 8, 20, 10,
|
||||||
5, 21, 163, 8, 21, 10, 21, 12, 21, 166, 9, 21, 1, 22, 1, 22, 1, 23, 1,
|
20, 12, 20, 164, 9, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 5, 23,
|
||||||
23, 1, 23, 5, 23, 173, 8, 23, 10, 23, 12, 23, 176, 9, 23, 1, 23, 1, 23,
|
172, 8, 23, 10, 23, 12, 23, 175, 9, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1,
|
||||||
1, 23, 1, 23, 5, 23, 182, 8, 23, 10, 23, 12, 23, 185, 9, 23, 1, 23, 3,
|
25, 5, 25, 182, 8, 25, 10, 25, 12, 25, 185, 9, 25, 1, 25, 1, 25, 1, 25,
|
||||||
23, 188, 8, 23, 1, 24, 1, 24, 1, 24, 3, 24, 193, 8, 24, 1, 25, 1, 25, 1,
|
1, 25, 5, 25, 191, 8, 25, 10, 25, 12, 25, 194, 9, 25, 1, 25, 3, 25, 197,
|
||||||
25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29,
|
8, 25, 1, 26, 1, 26, 1, 26, 3, 26, 202, 8, 26, 1, 27, 1, 27, 1, 27, 1,
|
||||||
4, 29, 208, 8, 29, 11, 29, 12, 29, 209, 1, 29, 1, 29, 0, 0, 30, 1, 1, 3,
|
27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 4, 31,
|
||||||
2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12,
|
217, 8, 31, 11, 31, 12, 31, 218, 1, 31, 1, 31, 0, 0, 32, 1, 1, 3, 2, 5,
|
||||||
25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 0, 41, 0, 43,
|
3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25,
|
||||||
20, 45, 21, 47, 22, 49, 0, 51, 0, 53, 0, 55, 0, 57, 0, 59, 23, 1, 0, 8,
|
13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43,
|
||||||
1, 0, 48, 57, 3, 0, 65, 90, 95, 95, 97, 122, 1, 0, 49, 57, 9, 0, 34, 34,
|
0, 45, 0, 47, 22, 49, 23, 51, 24, 53, 0, 55, 0, 57, 0, 59, 0, 61, 0, 63,
|
||||||
39, 39, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114, 114, 116, 116,
|
25, 1, 0, 8, 1, 0, 48, 57, 3, 0, 65, 90, 95, 95, 97, 122, 1, 0, 49, 57,
|
||||||
3, 0, 48, 57, 65, 70, 97, 102, 3, 0, 0, 31, 39, 39, 92, 92, 3, 0, 0, 31,
|
9, 0, 34, 34, 39, 39, 47, 47, 92, 92, 98, 98, 102, 102, 110, 110, 114,
|
||||||
34, 34, 92, 92, 3, 0, 9, 10, 13, 13, 32, 32, 220, 0, 1, 1, 0, 0, 0, 0,
|
114, 116, 116, 3, 0, 48, 57, 65, 70, 97, 102, 3, 0, 0, 31, 39, 39, 92,
|
||||||
3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0,
|
92, 3, 0, 0, 31, 34, 34, 92, 92, 3, 0, 9, 10, 13, 13, 32, 32, 229, 0, 1,
|
||||||
11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0,
|
1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9,
|
||||||
0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0,
|
1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0,
|
||||||
0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0,
|
17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0,
|
||||||
0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1,
|
0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0,
|
||||||
0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 1, 61, 1, 0, 0, 0, 3, 65,
|
0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0,
|
||||||
1, 0, 0, 0, 5, 69, 1, 0, 0, 0, 7, 84, 1, 0, 0, 0, 9, 86, 1, 0, 0, 0, 11,
|
0, 0, 0, 41, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1,
|
||||||
93, 1, 0, 0, 0, 13, 97, 1, 0, 0, 0, 15, 100, 1, 0, 0, 0, 17, 103, 1, 0,
|
0, 0, 0, 0, 63, 1, 0, 0, 0, 1, 65, 1, 0, 0, 0, 3, 69, 1, 0, 0, 0, 5, 73,
|
||||||
0, 0, 19, 107, 1, 0, 0, 0, 21, 114, 1, 0, 0, 0, 23, 119, 1, 0, 0, 0, 25,
|
1, 0, 0, 0, 7, 88, 1, 0, 0, 0, 9, 90, 1, 0, 0, 0, 11, 97, 1, 0, 0, 0, 13,
|
||||||
126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 133, 1, 0, 0, 0, 31, 142, 1,
|
101, 1, 0, 0, 0, 15, 104, 1, 0, 0, 0, 17, 107, 1, 0, 0, 0, 19, 110, 1,
|
||||||
0, 0, 0, 33, 144, 1, 0, 0, 0, 35, 146, 1, 0, 0, 0, 37, 148, 1, 0, 0, 0,
|
0, 0, 0, 21, 114, 1, 0, 0, 0, 23, 121, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0,
|
||||||
39, 156, 1, 0, 0, 0, 41, 158, 1, 0, 0, 0, 43, 160, 1, 0, 0, 0, 45, 167,
|
27, 133, 1, 0, 0, 0, 29, 135, 1, 0, 0, 0, 31, 137, 1, 0, 0, 0, 33, 142,
|
||||||
1, 0, 0, 0, 47, 187, 1, 0, 0, 0, 49, 189, 1, 0, 0, 0, 51, 194, 1, 0, 0,
|
1, 0, 0, 0, 35, 151, 1, 0, 0, 0, 37, 153, 1, 0, 0, 0, 39, 155, 1, 0, 0,
|
||||||
0, 53, 200, 1, 0, 0, 0, 55, 202, 1, 0, 0, 0, 57, 204, 1, 0, 0, 0, 59, 207,
|
0, 41, 157, 1, 0, 0, 0, 43, 165, 1, 0, 0, 0, 45, 167, 1, 0, 0, 0, 47, 169,
|
||||||
1, 0, 0, 0, 61, 62, 5, 78, 0, 0, 62, 63, 5, 79, 0, 0, 63, 64, 5, 84, 0,
|
1, 0, 0, 0, 49, 176, 1, 0, 0, 0, 51, 196, 1, 0, 0, 0, 53, 198, 1, 0, 0,
|
||||||
0, 64, 2, 1, 0, 0, 0, 65, 66, 5, 65, 0, 0, 66, 67, 5, 78, 0, 0, 67, 68,
|
0, 55, 203, 1, 0, 0, 0, 57, 209, 1, 0, 0, 0, 59, 211, 1, 0, 0, 0, 61, 213,
|
||||||
5, 68, 0, 0, 68, 4, 1, 0, 0, 0, 69, 70, 5, 79, 0, 0, 70, 71, 5, 82, 0,
|
1, 0, 0, 0, 63, 216, 1, 0, 0, 0, 65, 66, 5, 78, 0, 0, 66, 67, 5, 79, 0,
|
||||||
0, 71, 6, 1, 0, 0, 0, 72, 73, 5, 69, 0, 0, 73, 85, 5, 81, 0, 0, 74, 75,
|
0, 67, 68, 5, 84, 0, 0, 68, 2, 1, 0, 0, 0, 69, 70, 5, 65, 0, 0, 70, 71,
|
||||||
5, 78, 0, 0, 75, 85, 5, 69, 0, 0, 76, 77, 5, 71, 0, 0, 77, 85, 5, 69, 0,
|
5, 78, 0, 0, 71, 72, 5, 68, 0, 0, 72, 4, 1, 0, 0, 0, 73, 74, 5, 79, 0,
|
||||||
0, 78, 79, 5, 71, 0, 0, 79, 85, 5, 84, 0, 0, 80, 81, 5, 76, 0, 0, 81, 85,
|
0, 74, 75, 5, 82, 0, 0, 75, 6, 1, 0, 0, 0, 76, 77, 5, 69, 0, 0, 77, 89,
|
||||||
5, 84, 0, 0, 82, 83, 5, 76, 0, 0, 83, 85, 5, 69, 0, 0, 84, 72, 1, 0, 0,
|
5, 81, 0, 0, 78, 79, 5, 78, 0, 0, 79, 89, 5, 69, 0, 0, 80, 81, 5, 71, 0,
|
||||||
0, 84, 74, 1, 0, 0, 0, 84, 76, 1, 0, 0, 0, 84, 78, 1, 0, 0, 0, 84, 80,
|
0, 81, 89, 5, 69, 0, 0, 82, 83, 5, 71, 0, 0, 83, 89, 5, 84, 0, 0, 84, 85,
|
||||||
1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 8, 1, 0, 0, 0, 86, 87, 5, 85, 0, 0,
|
5, 76, 0, 0, 85, 89, 5, 84, 0, 0, 86, 87, 5, 76, 0, 0, 87, 89, 5, 69, 0,
|
||||||
87, 88, 5, 78, 0, 0, 88, 89, 5, 73, 0, 0, 89, 90, 5, 81, 0, 0, 90, 91,
|
0, 88, 76, 1, 0, 0, 0, 88, 78, 1, 0, 0, 0, 88, 80, 1, 0, 0, 0, 88, 82,
|
||||||
5, 85, 0, 0, 91, 92, 5, 69, 0, 0, 92, 10, 1, 0, 0, 0, 93, 94, 5, 82, 0,
|
1, 0, 0, 0, 88, 84, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 89, 8, 1, 0, 0, 0,
|
||||||
0, 94, 95, 5, 69, 0, 0, 95, 96, 5, 80, 0, 0, 96, 12, 1, 0, 0, 0, 97, 98,
|
90, 91, 5, 85, 0, 0, 91, 92, 5, 78, 0, 0, 92, 93, 5, 73, 0, 0, 93, 94,
|
||||||
5, 73, 0, 0, 98, 99, 5, 78, 0, 0, 99, 14, 1, 0, 0, 0, 100, 101, 5, 65,
|
5, 81, 0, 0, 94, 95, 5, 85, 0, 0, 95, 96, 5, 69, 0, 0, 96, 10, 1, 0, 0,
|
||||||
0, 0, 101, 102, 5, 83, 0, 0, 102, 16, 1, 0, 0, 0, 103, 104, 5, 67, 0, 0,
|
0, 97, 98, 5, 82, 0, 0, 98, 99, 5, 69, 0, 0, 99, 100, 5, 80, 0, 0, 100,
|
||||||
104, 105, 5, 66, 0, 0, 105, 106, 5, 70, 0, 0, 106, 18, 1, 0, 0, 0, 107,
|
12, 1, 0, 0, 0, 101, 102, 5, 69, 0, 0, 102, 103, 5, 67, 0, 0, 103, 14,
|
||||||
108, 5, 83, 0, 0, 108, 109, 5, 69, 0, 0, 109, 110, 5, 76, 0, 0, 110, 111,
|
1, 0, 0, 0, 104, 105, 5, 73, 0, 0, 105, 106, 5, 78, 0, 0, 106, 16, 1, 0,
|
||||||
5, 69, 0, 0, 111, 112, 5, 67, 0, 0, 112, 113, 5, 84, 0, 0, 113, 20, 1,
|
0, 0, 107, 108, 5, 65, 0, 0, 108, 109, 5, 83, 0, 0, 109, 18, 1, 0, 0, 0,
|
||||||
0, 0, 0, 114, 115, 5, 70, 0, 0, 115, 116, 5, 82, 0, 0, 116, 117, 5, 79,
|
110, 111, 5, 67, 0, 0, 111, 112, 5, 66, 0, 0, 112, 113, 5, 70, 0, 0, 113,
|
||||||
0, 0, 117, 118, 5, 77, 0, 0, 118, 22, 1, 0, 0, 0, 119, 120, 5, 70, 0, 0,
|
20, 1, 0, 0, 0, 114, 115, 5, 83, 0, 0, 115, 116, 5, 69, 0, 0, 116, 117,
|
||||||
120, 121, 5, 73, 0, 0, 121, 122, 5, 76, 0, 0, 122, 123, 5, 84, 0, 0, 123,
|
5, 76, 0, 0, 117, 118, 5, 69, 0, 0, 118, 119, 5, 67, 0, 0, 119, 120, 5,
|
||||||
124, 5, 69, 0, 0, 124, 125, 5, 82, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127,
|
84, 0, 0, 120, 22, 1, 0, 0, 0, 121, 122, 5, 70, 0, 0, 122, 123, 5, 82,
|
||||||
5, 42, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 83, 0, 0, 129, 130, 5, 65,
|
0, 0, 123, 124, 5, 79, 0, 0, 124, 125, 5, 77, 0, 0, 125, 24, 1, 0, 0, 0,
|
||||||
0, 0, 130, 131, 5, 77, 0, 0, 131, 132, 5, 69, 0, 0, 132, 28, 1, 0, 0, 0,
|
126, 127, 5, 70, 0, 0, 127, 128, 5, 73, 0, 0, 128, 129, 5, 76, 0, 0, 129,
|
||||||
133, 134, 5, 68, 0, 0, 134, 135, 5, 73, 0, 0, 135, 136, 5, 83, 0, 0, 136,
|
130, 5, 84, 0, 0, 130, 131, 5, 69, 0, 0, 131, 132, 5, 82, 0, 0, 132, 26,
|
||||||
137, 5, 84, 0, 0, 137, 138, 5, 73, 0, 0, 138, 139, 5, 78, 0, 0, 139, 140,
|
1, 0, 0, 0, 133, 134, 5, 42, 0, 0, 134, 28, 1, 0, 0, 0, 135, 136, 5, 46,
|
||||||
5, 67, 0, 0, 140, 141, 5, 84, 0, 0, 141, 30, 1, 0, 0, 0, 142, 143, 5, 40,
|
0, 0, 136, 30, 1, 0, 0, 0, 137, 138, 5, 83, 0, 0, 138, 139, 5, 65, 0, 0,
|
||||||
0, 0, 143, 32, 1, 0, 0, 0, 144, 145, 5, 41, 0, 0, 145, 34, 1, 0, 0, 0,
|
139, 140, 5, 77, 0, 0, 140, 141, 5, 69, 0, 0, 141, 32, 1, 0, 0, 0, 142,
|
||||||
146, 147, 5, 64, 0, 0, 147, 36, 1, 0, 0, 0, 148, 153, 3, 41, 20, 0, 149,
|
143, 5, 68, 0, 0, 143, 144, 5, 73, 0, 0, 144, 145, 5, 83, 0, 0, 145, 146,
|
||||||
152, 3, 39, 19, 0, 150, 152, 3, 41, 20, 0, 151, 149, 1, 0, 0, 0, 151, 150,
|
5, 84, 0, 0, 146, 147, 5, 73, 0, 0, 147, 148, 5, 78, 0, 0, 148, 149, 5,
|
||||||
1, 0, 0, 0, 152, 155, 1, 0, 0, 0, 153, 151, 1, 0, 0, 0, 153, 154, 1, 0,
|
67, 0, 0, 149, 150, 5, 84, 0, 0, 150, 34, 1, 0, 0, 0, 151, 152, 5, 40,
|
||||||
0, 0, 154, 38, 1, 0, 0, 0, 155, 153, 1, 0, 0, 0, 156, 157, 7, 0, 0, 0,
|
0, 0, 152, 36, 1, 0, 0, 0, 153, 154, 5, 41, 0, 0, 154, 38, 1, 0, 0, 0,
|
||||||
157, 40, 1, 0, 0, 0, 158, 159, 7, 1, 0, 0, 159, 42, 1, 0, 0, 0, 160, 164,
|
155, 156, 5, 64, 0, 0, 156, 40, 1, 0, 0, 0, 157, 162, 3, 45, 22, 0, 158,
|
||||||
7, 2, 0, 0, 161, 163, 3, 39, 19, 0, 162, 161, 1, 0, 0, 0, 163, 166, 1,
|
161, 3, 43, 21, 0, 159, 161, 3, 45, 22, 0, 160, 158, 1, 0, 0, 0, 160, 159,
|
||||||
0, 0, 0, 164, 162, 1, 0, 0, 0, 164, 165, 1, 0, 0, 0, 165, 44, 1, 0, 0,
|
1, 0, 0, 0, 161, 164, 1, 0, 0, 0, 162, 160, 1, 0, 0, 0, 162, 163, 1, 0,
|
||||||
0, 166, 164, 1, 0, 0, 0, 167, 168, 5, 48, 0, 0, 168, 46, 1, 0, 0, 0, 169,
|
0, 0, 163, 42, 1, 0, 0, 0, 164, 162, 1, 0, 0, 0, 165, 166, 7, 0, 0, 0,
|
||||||
174, 5, 34, 0, 0, 170, 173, 3, 49, 24, 0, 171, 173, 3, 57, 28, 0, 172,
|
166, 44, 1, 0, 0, 0, 167, 168, 7, 1, 0, 0, 168, 46, 1, 0, 0, 0, 169, 173,
|
||||||
170, 1, 0, 0, 0, 172, 171, 1, 0, 0, 0, 173, 176, 1, 0, 0, 0, 174, 172,
|
7, 2, 0, 0, 170, 172, 3, 43, 21, 0, 171, 170, 1, 0, 0, 0, 172, 175, 1,
|
||||||
1, 0, 0, 0, 174, 175, 1, 0, 0, 0, 175, 177, 1, 0, 0, 0, 176, 174, 1, 0,
|
0, 0, 0, 173, 171, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 48, 1, 0, 0,
|
||||||
0, 0, 177, 188, 5, 34, 0, 0, 178, 183, 5, 39, 0, 0, 179, 182, 3, 49, 24,
|
0, 175, 173, 1, 0, 0, 0, 176, 177, 5, 48, 0, 0, 177, 50, 1, 0, 0, 0, 178,
|
||||||
0, 180, 182, 3, 55, 27, 0, 181, 179, 1, 0, 0, 0, 181, 180, 1, 0, 0, 0,
|
183, 5, 34, 0, 0, 179, 182, 3, 53, 26, 0, 180, 182, 3, 61, 30, 0, 181,
|
||||||
182, 185, 1, 0, 0, 0, 183, 181, 1, 0, 0, 0, 183, 184, 1, 0, 0, 0, 184,
|
179, 1, 0, 0, 0, 181, 180, 1, 0, 0, 0, 182, 185, 1, 0, 0, 0, 183, 181,
|
||||||
186, 1, 0, 0, 0, 185, 183, 1, 0, 0, 0, 186, 188, 5, 39, 0, 0, 187, 169,
|
1, 0, 0, 0, 183, 184, 1, 0, 0, 0, 184, 186, 1, 0, 0, 0, 185, 183, 1, 0,
|
||||||
1, 0, 0, 0, 187, 178, 1, 0, 0, 0, 188, 48, 1, 0, 0, 0, 189, 192, 5, 92,
|
0, 0, 186, 197, 5, 34, 0, 0, 187, 192, 5, 39, 0, 0, 188, 191, 3, 53, 26,
|
||||||
0, 0, 190, 193, 7, 3, 0, 0, 191, 193, 3, 51, 25, 0, 192, 190, 1, 0, 0,
|
0, 189, 191, 3, 59, 29, 0, 190, 188, 1, 0, 0, 0, 190, 189, 1, 0, 0, 0,
|
||||||
0, 192, 191, 1, 0, 0, 0, 193, 50, 1, 0, 0, 0, 194, 195, 5, 117, 0, 0, 195,
|
191, 194, 1, 0, 0, 0, 192, 190, 1, 0, 0, 0, 192, 193, 1, 0, 0, 0, 193,
|
||||||
196, 3, 53, 26, 0, 196, 197, 3, 53, 26, 0, 197, 198, 3, 53, 26, 0, 198,
|
195, 1, 0, 0, 0, 194, 192, 1, 0, 0, 0, 195, 197, 5, 39, 0, 0, 196, 178,
|
||||||
199, 3, 53, 26, 0, 199, 52, 1, 0, 0, 0, 200, 201, 7, 4, 0, 0, 201, 54,
|
1, 0, 0, 0, 196, 187, 1, 0, 0, 0, 197, 52, 1, 0, 0, 0, 198, 201, 5, 92,
|
||||||
1, 0, 0, 0, 202, 203, 8, 5, 0, 0, 203, 56, 1, 0, 0, 0, 204, 205, 8, 6,
|
0, 0, 199, 202, 7, 3, 0, 0, 200, 202, 3, 55, 27, 0, 201, 199, 1, 0, 0,
|
||||||
0, 0, 205, 58, 1, 0, 0, 0, 206, 208, 7, 7, 0, 0, 207, 206, 1, 0, 0, 0,
|
0, 201, 200, 1, 0, 0, 0, 202, 54, 1, 0, 0, 0, 203, 204, 5, 117, 0, 0, 204,
|
||||||
208, 209, 1, 0, 0, 0, 209, 207, 1, 0, 0, 0, 209, 210, 1, 0, 0, 0, 210,
|
205, 3, 57, 28, 0, 205, 206, 3, 57, 28, 0, 206, 207, 3, 57, 28, 0, 207,
|
||||||
211, 1, 0, 0, 0, 211, 212, 6, 29, 0, 0, 212, 60, 1, 0, 0, 0, 12, 0, 84,
|
208, 3, 57, 28, 0, 208, 56, 1, 0, 0, 0, 209, 210, 7, 4, 0, 0, 210, 58,
|
||||||
151, 153, 164, 172, 174, 181, 183, 187, 192, 209, 1, 6, 0, 0,
|
1, 0, 0, 0, 211, 212, 8, 5, 0, 0, 212, 60, 1, 0, 0, 0, 213, 214, 8, 6,
|
||||||
|
0, 0, 214, 62, 1, 0, 0, 0, 215, 217, 7, 7, 0, 0, 216, 215, 1, 0, 0, 0,
|
||||||
|
217, 218, 1, 0, 0, 0, 218, 216, 1, 0, 0, 0, 218, 219, 1, 0, 0, 0, 219,
|
||||||
|
220, 1, 0, 0, 0, 220, 221, 6, 31, 0, 0, 221, 64, 1, 0, 0, 0, 12, 0, 88,
|
||||||
|
160, 162, 173, 181, 183, 190, 192, 196, 201, 218, 1, 6, 0, 0,
|
||||||
 	}
 	deserializer := antlr.NewATNDeserializer(nil)
 	staticData.atn = deserializer.Deserialize(staticData.serializedATN)
@@ -202,21 +206,23 @@ const (
 	QueryLexerSIMPLE_OP = 4
 	QueryLexerUNIQUE = 5
 	QueryLexerREP = 6
-	QueryLexerIN = 7
-	QueryLexerAS = 8
-	QueryLexerCBF = 9
-	QueryLexerSELECT = 10
-	QueryLexerFROM = 11
-	QueryLexerFILTER = 12
-	QueryLexerWILDCARD = 13
-	QueryLexerCLAUSE_SAME = 14
-	QueryLexerCLAUSE_DISTINCT = 15
-	QueryLexerL_PAREN = 16
-	QueryLexerR_PAREN = 17
-	QueryLexerAT = 18
-	QueryLexerIDENT = 19
-	QueryLexerNUMBER1 = 20
-	QueryLexerZERO = 21
-	QueryLexerSTRING = 22
-	QueryLexerWS = 23
+	QueryLexerEC = 7
+	QueryLexerIN = 8
+	QueryLexerAS = 9
+	QueryLexerCBF = 10
+	QueryLexerSELECT = 11
+	QueryLexerFROM = 12
+	QueryLexerFILTER = 13
+	QueryLexerWILDCARD = 14
+	QueryLexerDOT = 15
+	QueryLexerCLAUSE_SAME = 16
+	QueryLexerCLAUSE_DISTINCT = 17
+	QueryLexerL_PAREN = 18
+	QueryLexerR_PAREN = 19
+	QueryLexerAT = 20
+	QueryLexerIDENT = 21
+	QueryLexerNUMBER1 = 22
+	QueryLexerZERO = 23
+	QueryLexerSTRING = 24
+	QueryLexerWS = 25
 )
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
-// Code generated from Query.g4 by ANTLR 4.13.0. DO NOT EDIT.
+// Code generated from /repo/frostfs/sdk-go/netmap/parser/Query.g4 by ANTLR 4.13.0. DO NOT EDIT.

 package parser // Query

@@ -14,6 +14,9 @@ type QueryVisitor interface {
 	// Visit a parse tree produced by Query#selectFilterExpr.
 	VisitSelectFilterExpr(ctx *SelectFilterExprContext) interface{}

+	// Visit a parse tree produced by Query#ecStmt.
+	VisitEcStmt(ctx *EcStmtContext) interface{}
+
 	// Visit a parse tree produced by Query#repStmt.
 	VisitRepStmt(ctx *RepStmtContext) interface{}

102
netmap/policy.go
@@ -34,8 +34,17 @@ type PlacementPolicy struct {

 func (p *PlacementPolicy) readFromV2(m netmap.PlacementPolicy, checkFieldPresence bool) error {
 	p.replicas = m.GetReplicas()
-	if checkFieldPresence && len(p.replicas) == 0 {
-		return errors.New("missing replicas")
+	if checkFieldPresence {
+		if len(p.replicas) == 0 {
+			return errors.New("missing replicas")
+		}
+		if len(p.replicas) != 1 {
+			for i := range p.replicas {
+				if p.replicas[i].GetECDataCount() != 0 || p.replicas[i].GetECParityCount() != 0 {
+					return errors.New("erasure code group must be used exclusively")
+				}
+			}
+		}
 	}

 	p.backupFactor = m.GetContainerBackupFactor()
@@ -130,6 +139,14 @@ func (r *ReplicaDescriptor) SetNumberOfObjects(c uint32) {
 	r.m.SetCount(c)
 }

+func (r *ReplicaDescriptor) SetECDataCount(v uint32) {
+	r.m.SetECDataCount(v)
+}
+
+func (r *ReplicaDescriptor) SetECParityCount(v uint32) {
+	r.m.SetECParityCount(v)
+}
+
 // NumberOfObjects returns number set using SetNumberOfObjects.
 //
 // Zero ReplicaDescriptor has zero number of objects.
@@ -137,6 +154,19 @@ func (r ReplicaDescriptor) NumberOfObjects() uint32 {
 	return r.m.GetCount()
 }

+func (r ReplicaDescriptor) GetECDataCount() uint32 {
+	return r.m.GetECDataCount()
+}
+
+func (r ReplicaDescriptor) GetECParityCount() uint32 {
+	return r.m.GetECParityCount()
+}
+
+// TotalECPartCount returns total sum of ECDataCount and ECParityCount.
+func (r ReplicaDescriptor) TotalECPartCount() uint32 {
+	return r.m.GetECDataCount() + r.m.GetECParityCount()
+}
+
 // SetSelectorName sets name of the related Selector.
 //
 // Zero ReplicaDescriptor references to the root bucket's selector: it contains
@@ -170,10 +200,19 @@ func (p PlacementPolicy) NumberOfReplicas() int {
 // descriptor. Index MUST be in range [0; NumberOfReplicas()).
 //
 // Zero PlacementPolicy has no replicas.
+//
+// Deprecated: Use PlacementPolicy.ReplicaDescriptor(int).NumberOfObjects() instead.
 func (p PlacementPolicy) ReplicaNumberByIndex(i int) uint32 {
 	return p.replicas[i].GetCount()
 }

+// ReplicaDescriptor returns i-th replica descriptor. Index MUST be in range [0; NumberOfReplicas()).
+func (p PlacementPolicy) ReplicaDescriptor(i int) ReplicaDescriptor {
+	return ReplicaDescriptor{
+		m: p.replicas[i],
+	}
+}
+
 // SetContainerBackupFactor sets container backup factor: it controls how deep
 // FrostFS will search for nodes alternatives to include into container's nodes subset.
 //
@@ -393,10 +432,14 @@ func (p PlacementPolicy) WriteStringTo(w io.StringWriter) (err error) {
 		c := p.replicas[i].GetCount()
 		s := p.replicas[i].GetSelector()

-		if s != "" {
-			_, err = w.WriteString(fmt.Sprintf("%sREP %d IN %s", delim, c, s))
-		} else {
+		if c != 0 {
 			_, err = w.WriteString(fmt.Sprintf("%sREP %d", delim, c))
+		} else {
+			ecx, ecy := p.replicas[i].GetECDataCount(), p.replicas[i].GetECParityCount()
+			_, err = w.WriteString(fmt.Sprintf("%sEC %d.%d", delim, ecx, ecy))
+		}
+		if s != "" {
+			_, err = w.WriteString(fmt.Sprintf(" IN %s", s))
 		}

 		if err != nil {
@@ -630,6 +673,8 @@ var (
 		"make sure to pair REP and SELECT clauses: \"REP .. IN X\" + \"SELECT ... AS X\"")
 	// errRedundantSelector is returned for errors found by filters policy validator.
 	errRedundantFilter = errors.New("policy: found redundant filter")
+	// errECFewSelectors is returned when EC keyword is used without UNIQUE keyword.
+	errECFewSelectors = errors.New("policy: too few nodes to select")
 )

 type policyVisitor struct {
@@ -657,11 +702,18 @@ func (p *policyVisitor) VisitPolicy(ctx *parser.PolicyContext) any {

 	pl.unique = ctx.UNIQUE() != nil

-	repStmts := ctx.AllRepStmt()
-	pl.replicas = make([]netmap.Replica, 0, len(repStmts))
-
-	for _, r := range repStmts {
-		res, ok := r.Accept(p).(*netmap.Replica)
+	stmts := ctx.GetChildren()
+	for _, r := range stmts {
+		var res *netmap.Replica
+		var ok bool
+		switch r := r.(type) {
+		case parser.IRepStmtContext:
+			res, ok = r.Accept(p).(*netmap.Replica)
+		case parser.IEcStmtContext:
+			res, ok = r.Accept(p).(*netmap.Replica)
+		default:
+			continue
+		}
 		if !ok {
 			return nil
 		}
@@ -758,6 +810,28 @@ func (p *policyVisitor) VisitRepStmt(ctx *parser.RepStmtContext) any {
 	return rs
 }

+// VisitRepStmt implements parser.QueryVisitor interface.
+func (p *policyVisitor) VisitEcStmt(ctx *parser.EcStmtContext) any {
+	dataCount, err := strconv.ParseUint(ctx.GetData().GetText(), 10, 32)
+	if err != nil {
+		return p.reportError(errInvalidNumber)
+	}
+	parityCount, err := strconv.ParseUint(ctx.GetParity().GetText(), 10, 32)
+	if err != nil {
+		return p.reportError(errInvalidNumber)
+	}
+
+	rs := new(netmap.Replica)
+	rs.SetECDataCount(uint32(dataCount))
+	rs.SetECParityCount(uint32(parityCount))
+
+	if sel := ctx.GetSelector(); sel != nil {
+		rs.SetSelector(sel.GetText())
+	}
+
+	return rs
+}
+
 // VisitSelectStmt implements parser.QueryVisitor interface.
 func (p *policyVisitor) VisitSelectStmt(ctx *parser.SelectStmtContext) any {
 	res, err := strconv.ParseUint(ctx.GetCount().GetText(), 10, 32)
@@ -910,6 +984,14 @@ func validatePolicy(p PlacementPolicy) error {
 			if seenSelectors[selName] == nil {
 				return fmt.Errorf("%w: '%s'", errUnknownSelector, selName)
 			}
+
+			dataCount := p.replicas[i].GetECDataCount()
+			parityCount := p.replicas[i].GetECParityCount()
+			if dataCount != 0 || parityCount != 0 {
+				if c := seenSelectors[selName].GetCount(); c < dataCount+parityCount {
+					return fmt.Errorf("%w: %d < %d + %d", errECFewSelectors, c, dataCount, parityCount)
+				}
+			}
 		}
 	}

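Taken together, the grammar, visitor and descriptor changes let an EC policy round-trip through the same PlacementPolicy API as replica policies. A minimal sketch (policy text borrowed from the test cases that follow; the exact re-encoded string is assumed rather than verified here):

    package main

    import (
    	"fmt"
    	"strings"

    	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    )

    func main() {
    	var p netmap.PlacementPolicy
    	// Same shape as the EC test case in policy_test.go.
    	if err := p.DecodeString("EC 1.2 IN X\nSELECT 3 IN City FROM * AS X"); err != nil {
    		panic(err)
    	}

    	r := p.ReplicaDescriptor(0)
    	fmt.Println(r.GetECDataCount(), r.GetECParityCount(), r.TotalECPartCount()) // 1 2 3

    	var b strings.Builder
    	_ = p.WriteStringTo(&b) // expected to re-encode as an equivalent "EC 1.2 IN X ..." policy
    	fmt.Println(b.String())
    }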
@@ -7,48 +7,50 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func TestDecodeString(t *testing.T) {
-	testCases := []string{
-		`REP 2
+var validPlacementPolicy = []string{
+	`REP 2
 CBF 2
 SELECT 2 FROM *`,
 	`REP 1 IN X
 CBF 1
 SELECT 2 IN SAME Location FROM * AS X`,

 	`REP 1 IN X
 REP 2 IN Y
 CBF 1
 SELECT 2 FROM * AS X
 SELECT 3 FROM * AS Y`,

 	`REP 1 IN X
 SELECT 2 IN City FROM Good AS X
 FILTER Country EQ RU AS FromRU
 FILTER Country EQ EN AS FromEN
 FILTER @FromRU AND @FromEN AND Rating GT 7 AS Good`,

 	`REP 7 IN SPB
 SELECT 1 IN City FROM SPBSSD AS SPB
 FILTER City EQ SPB AND SSD EQ true OR City EQ SPB AND Rating GE 5 AS SPBSSD`,

 	`REP 7 IN SPB
 SELECT 1 IN City FROM SPBSSD AS SPB
 FILTER NOT (NOT (City EQ SPB) AND SSD EQ true OR City EQ SPB AND Rating GE 5) AS SPBSSD`,

 	`REP 1 IN FNODE
 CBF 1
 SELECT 1 FROM F AS FNODE
 FILTER Node EQ '10.78.8.11' AS F`,

 	`UNIQUE
 REP 1
 REP 1`,
-	}
+	`EC 1.2 IN X
+SELECT 3 IN City FROM * AS X`,
+}
+
+func TestDecodeString(t *testing.T) {
 	var p PlacementPolicy

-	for _, testCase := range testCases {
+	for _, testCase := range validPlacementPolicy {
 		require.NoError(t, p.DecodeString(testCase), "unable parse %s", testCase)
 		var b strings.Builder
 		require.NoError(t, p.WriteStringTo(&b))
14
netmap/policy_fuzz.go
Normal file
@@ -0,0 +1,14 @@
+//go:build gofuzz
+// +build gofuzz
+
+package netmap
+
+func DoFuzzPlacementPolicyDecode(data []byte) int {
+	p := string(data)
+	if p == "" {
+		return 0
+	}
+	var pp PlacementPolicy
+	_ = pp.DecodeString(p)
+	return 1
+}
17
netmap/policy_fuzz_test.go
Normal file
@@ -0,0 +1,17 @@
+//go:build gofuzz
+// +build gofuzz
+
+package netmap
+
+import (
+	"testing"
+)
+
+func FuzzDecodeString(f *testing.F) {
+	for _, pp := range validPlacementPolicy {
+		f.Add([]byte(pp))
+	}
+	f.Fuzz(func(t *testing.T, data []byte) {
+		DoFuzzPlacementPolicyDecode(data)
+	})
+}
69
object/ecinfo.go
Normal file
@@ -0,0 +1,69 @@
+package object
+
+import (
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+type ECChunk object.ECChunk
+
+func (c *ECChunk) SetID(id oid.ID) {
+	objV2 := new(refs.ObjectID)
+	id.WriteToV2(objV2)
+	c.ID = *objV2
+}
+
+// ToV2 converts ECChunk to v2 ECChunk message.
+//
+// Nil ECChunk converts to nil.
+func (c *ECChunk) ToV2() *object.ECChunk {
+	return (*object.ECChunk)(c)
+}
+
+func NewECChunkFromV2(v2 *object.ECChunk) *ECChunk {
+	return (*ECChunk)(v2)
+}
+
+type ECInfo object.ECInfo
+
+// NewECInfoFromV2 wraps v2 ECInfo message to ECInfo.
+//
+// Nil object.ECInfo converts to nil.
+func NewECInfoFromV2(v2 *object.ECInfo) *ECInfo {
+	return (*ECInfo)(v2)
+}
+
+// NewECInfo creates and initializes blank ECInfo.
+func NewECInfo() *ECInfo {
+	return NewECInfoFromV2(new(object.ECInfo))
+}
+
+// ToV2 converts ECInfo to v2 ECInfo message.
+//
+// Nil ECInfo converts to nil.
+func (s *ECInfo) ToV2() *object.ECInfo {
+	return (*object.ECInfo)(s)
+}
+
+func (s *ECInfo) Marshal() ([]byte, error) {
+	return (*object.ECInfo)(s).StableMarshal(nil), nil
+}
+
+func (s *ECInfo) Unmarshal(data []byte) error {
+	return (*object.ECInfo)(s).Unmarshal(data)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s *ECInfo) MarshalJSON() ([]byte, error) {
+	return (*object.ECInfo)(s).MarshalJSON()
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *ECInfo) UnmarshalJSON(data []byte) error {
+	return (*object.ECInfo)(s).UnmarshalJSON(data)
+}
+
+func (s *ECInfo) AddChunk(chunk ECChunk) {
+	s.Chunks = append(s.Chunks, *chunk.ToV2())
+}
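A small sketch of how a node-side caller might fill the new ECInfo message when reporting which EC chunk it stores. The oidtest helper and the single-chunk setup are illustrative only, not part of this change:

    package example

    import (
    	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    )

    // buildECInfo assembles an ECInfo message advertising a single stored chunk.
    func buildECInfo() *objectSDK.ECInfo {
    	var chunk objectSDK.ECChunk
    	chunk.SetID(oidtest.ID()) // random ID, stands in for a real chunk ID

    	info := objectSDK.NewECInfo()
    	info.AddChunk(chunk)
    	return info
    }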
193
object/erasure_code.go
Normal file
@@ -0,0 +1,193 @@
+package object
+
+import (
+	"errors"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	refs "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+)
+
+// ECHeader represents erasure coding header.
+type ECHeader struct {
+	parent              oid.ID
+	parentSplitID       *SplitID
+	parentSplitParentID *oid.ID
+	parentAttributes    []Attribute
+	index               uint32
+	total               uint32
+	header              []byte
+	headerLength        uint32
+}
+
+type ECParentInfo struct {
+	// EC-parent's ID.
+	ID oid.ID
+
+	// EC-parent's split ID if the parent is a part of Split itself.
+	SplitID *SplitID
+
+	// EC-parent's parent split ID if the parent is a part of Split itself.
+	SplitParentID *oid.ID
+
+	// EC-parent's attributes.
+	Attributes []Attribute
+}
+
+// NewECHeader constructs new erasure coding header.
+func NewECHeader(ecParentInfo ECParentInfo, index, total uint32, header []byte, headerLength uint32) *ECHeader {
+	return &ECHeader{
+		parent:              ecParentInfo.ID,
+		parentSplitID:       ecParentInfo.SplitID,
+		parentSplitParentID: ecParentInfo.SplitParentID,
+		parentAttributes:    ecParentInfo.Attributes,
+		index:               index,
+		total:               total,
+		header:              header,
+		headerLength:        headerLength,
+	}
+}
+
+// WriteToV2 converts SDK structure to v2-api one.
+func (e *ECHeader) WriteToV2(h *object.ECHeader) {
+	var parent refs.ObjectID
+	e.parent.WriteToV2(&parent)
+	h.ParentSplitID = e.parentSplitID.ToV2()
+
+	if e.parentSplitParentID != nil {
+		parentSplitParentID := new(refs.ObjectID)
+		e.parentSplitParentID.WriteToV2(parentSplitParentID)
+		h.ParentSplitParentID = parentSplitParentID
+	}
+
+	h.Parent = &parent
+
+	attrs := make([]object.Attribute, len(e.parentAttributes))
+	for i := range e.parentAttributes {
+		attrs[i] = *e.parentAttributes[i].ToV2()
+	}
+	h.ParentAttributes = attrs
+
+	h.Index = e.index
+	h.Total = e.total
+	h.Header = e.header
+	h.HeaderLength = e.headerLength
+}
+
+// ReadFromV2 converts v2-api structure to SDK one.
+func (e *ECHeader) ReadFromV2(h *object.ECHeader) error {
+	if h == nil {
+		return nil
+	}
+	if h.Parent == nil {
+		return errors.New("empty parent")
+	}
+
+	attrs := make([]Attribute, len(h.ParentAttributes))
+	for i := range h.ParentAttributes {
+		attrs[i] = *NewAttributeFromV2(&h.ParentAttributes[i])
+	}
+	e.parentAttributes = attrs
+
+	_ = e.parent.ReadFromV2(*h.Parent)
+	e.parentSplitID = NewSplitIDFromV2(h.ParentSplitID)
+	if h.ParentSplitParentID != nil {
+		if e.parentSplitParentID == nil {
+			e.parentSplitParentID = new(oid.ID)
+		}
+		_ = e.parentSplitParentID.ReadFromV2(*h.ParentSplitParentID)
+	}
+	e.index = h.Index
+	e.total = h.Total
+	e.header = h.Header
+	e.headerLength = h.HeaderLength
+	return nil
+}
+
+func (o *Object) ECHeader() *ECHeader {
+	ec := (*object.Object)(o).GetHeader().GetEC()
+	if ec == nil {
+		return nil
+	}
+
+	h := new(ECHeader)
+	_ = h.ReadFromV2(ec)
+	return h
+}
+
+func (o *Object) SetECHeader(ec *ECHeader) {
+	o.setHeaderField(func(h *object.Header) {
+		if ec == nil {
+			h.SetEC(nil)
+			return
+		}
+
+		v2 := new(object.ECHeader)
+		ec.WriteToV2(v2)
+		h.SetEC(v2)
+	})
+}
+
+func (e *ECHeader) Parent() oid.ID {
+	return e.parent
+}
+
+func (e *ECHeader) SetParent(id oid.ID) {
+	e.parent = id
+}
+
+func (e *ECHeader) ParentSplitID() *SplitID {
+	return e.parentSplitID
+}
+
+func (e *ECHeader) SetParentSplitID(parentSplitID *SplitID) {
+	e.parentSplitID = parentSplitID
+}
+
+func (e *ECHeader) ParentSplitParentID() *oid.ID {
+	return e.parentSplitParentID
+}
+
+func (e *ECHeader) SetParentSplitParentID(parentSplitParentID *oid.ID) {
+	e.parentSplitParentID = parentSplitParentID
+}
+
+func (e *ECHeader) ParentAttributes() []Attribute {
+	return e.parentAttributes
+}
+
+func (e *ECHeader) SetParentAttributes(attrs []Attribute) {
+	e.parentAttributes = attrs
+}
+
+func (e *ECHeader) Index() uint32 {
+	return e.index
+}
+
+func (e *ECHeader) SetIndex(i uint32) {
+	e.index = i
+}
+
+func (e *ECHeader) Total() uint32 {
+	return e.total
+}
+
+func (e *ECHeader) SetTotal(i uint32) {
+	e.total = i
+}
+
+func (e *ECHeader) Header() []byte {
+	return e.header
+}
+
+func (e *ECHeader) SetHeader(header []byte) {
+	e.header = header
+}
+
+func (e *ECHeader) HeaderLength() uint32 {
+	return e.headerLength
+}
+
+func (e *ECHeader) SetHeaderLength(l uint32) {
+	e.headerLength = l
+}
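A short sketch of attaching the new header to a chunk object; the parent ID comes from oidtest and is purely illustrative:

    package example

    import (
    	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
    )

    // tagECPart marks part as chunk index of total belonging to the given EC parent.
    func tagECPart(part *objectSDK.Object, index, total uint32, parentHeader []byte) {
    	ecParent := objectSDK.ECParentInfo{ID: oidtest.ID()} // parent ID is illustrative
    	ec := objectSDK.NewECHeader(ecParent, index, total, parentHeader, uint32(len(parentHeader)))
    	part.SetECHeader(ec)
    }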
87
object/erasurecode/constructor.go
Normal file
@@ -0,0 +1,87 @@
+package erasurecode
+
+import (
+	"errors"
+
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	"github.com/klauspost/reedsolomon"
+)
+
+var (
+	// ErrMalformedSlice is returned when a slice of EC chunks is inconsistent.
+	ErrMalformedSlice = errors.New("inconsistent EC headers")
+	// ErrInvShardNum is returned from NewConstructor when the number of shards is invalid.
+	ErrInvShardNum = reedsolomon.ErrInvShardNum
+	// ErrMaxShardNum is returned from NewConstructor when the number of shards is too big.
+	ErrMaxShardNum = reedsolomon.ErrMaxShardNum
+)
+
+// MaxShardCount is the maximum number of shards.
+const MaxShardCount = 256
+
+// Constructor is a wrapper around encoder allowing to reconstruct objects.
+// It's methods are not thread-safe.
+type Constructor struct {
+	enc           reedsolomon.Encoder
+	headerLength  uint32
+	payloadShards [][]byte
+	headerShards  [][]byte
+}
+
+// NewConstructor returns new constructor instance.
+func NewConstructor(dataCount int, parityCount int) (*Constructor, error) {
+	// The library supports up to 65536 shards with some restrictions.
+	// This can easily result in OOM or panic, thus SDK declares it's own restriction.
+	if dataCount+parityCount > MaxShardCount {
+		return nil, ErrMaxShardNum
+	}
+
+	enc, err := reedsolomon.New(dataCount, parityCount)
+	if err != nil {
+		return nil, err
+	}
+	return &Constructor{enc: enc}, nil
+}
+
+// clear clears internal state of the constructor, so it can be reused.
+func (c *Constructor) clear() {
+	c.headerLength = 0
+	c.payloadShards = nil
+	c.headerShards = nil
+}
+
+func (c *Constructor) fillHeader(parts []*objectSDK.Object) error {
+	shards := make([][]byte, len(parts))
+	headerLength := 0
+	for i := range parts {
+		if parts[i] == nil {
+			continue
+		}
+
+		var err error
+		headerLength, err = validatePart(parts, i, headerLength)
+		if err != nil {
+			return err
+		}
+
+		shards[i] = parts[i].GetECHeader().Header()
+	}
+
+	c.headerLength = uint32(headerLength)
+	c.headerShards = shards
+	return nil
+}
+
+// fillPayload fills the payload shards.
+// Currently there is no case when it can be called without reconstructing header,
+// thus fillHeader() must be called before and this function performs no validation.
+func (c *Constructor) fillPayload(parts []*objectSDK.Object) {
+	shards := make([][]byte, len(parts))
+	for i := range parts {
+		if parts[i] == nil {
+			continue
+		}
+		shards[i] = parts[i].Payload()
+	}
+	c.payloadShards = shards
+}
31
object/erasurecode/constructor_test.go
Normal file
@@ -0,0 +1,31 @@
+package erasurecode_test
+
+import (
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
+	"github.com/stretchr/testify/require"
+)
+
+func TestErasureConstruct(t *testing.T) {
+	t.Run("negative, no panic", func(t *testing.T) {
+		_, err := erasurecode.NewConstructor(-1, 2)
+		require.ErrorIs(t, err, erasurecode.ErrInvShardNum)
+	})
+	t.Run("negative, no panic", func(t *testing.T) {
+		_, err := erasurecode.NewConstructor(2, -1)
+		require.ErrorIs(t, err, erasurecode.ErrInvShardNum)
+	})
+	t.Run("zero parity", func(t *testing.T) {
+		_, err := erasurecode.NewConstructor(1, 0)
+		require.NoError(t, err)
+	})
+	t.Run("max shard num", func(t *testing.T) {
+		_, err := erasurecode.NewConstructor(erasurecode.MaxShardCount, 0)
+		require.NoError(t, err)
+	})
+	t.Run("max+1 shard num", func(t *testing.T) {
+		_, err := erasurecode.NewConstructor(erasurecode.MaxShardCount+1, 0)
+		require.ErrorIs(t, err, erasurecode.ErrMaxShardNum)
+	})
+}
146
object/erasurecode/reconstruct.go
Normal file
@@ -0,0 +1,146 @@
+package erasurecode
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"fmt"
+
+	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	"github.com/klauspost/reedsolomon"
+)
+
+// Reconstruct returns full object reconstructed from parts.
+// All non-nil objects in parts must have EC header with the same `total` field equal to len(parts).
+// The slice must contain at least one non nil object.
+// Index of the objects in parts must be equal to it's index field in the EC header.
+// The parts slice isn't changed and can be used concurrently for reading.
+func (c *Constructor) Reconstruct(parts []*objectSDK.Object) (*objectSDK.Object, error) {
+	res, err := c.ReconstructHeader(parts)
+	if err != nil {
+		return nil, err
+	}
+
+	c.fillPayload(parts)
+
+	payload, err := reconstructExact(c.enc, int(res.PayloadSize()), c.payloadShards)
+	if err != nil {
+		return nil, fmt.Errorf("%w: %w", ErrMalformedSlice, err)
+	}
+
+	res.SetPayload(payload)
+	return res, nil
+}
+
+// ReconstructHeader returns object header reconstructed from parts.
+// All non-nil objects in parts must have EC header with the same `total` field equal to len(parts).
+// The slice must contain at least one non nil object.
+// Index of the objects in parts must be equal to it's index field in the EC header.
+// The parts slice isn't changed and can be used concurrently for reading.
+func (c *Constructor) ReconstructHeader(parts []*objectSDK.Object) (*objectSDK.Object, error) {
+	c.clear()
+
+	if err := c.fillHeader(parts); err != nil {
+		return nil, err
+	}
+
+	obj, err := c.reconstructHeader()
+	if err != nil {
+		return nil, fmt.Errorf("%w: %w", ErrMalformedSlice, err)
+	}
+	return obj, nil
+}
+
+// ReconstructParts reconstructs specific EC parts without reconstructing full object.
+// All non-nil objects in parts must have EC header with the same `total` field equal to len(parts).
+// The slice must contain at least one non nil object.
+// Index of the objects in parts must be equal to it's index field in the EC header.
+// Those parts for which corresponding element in required is true must be nil and will be overwritten.
+// Because partial reconstruction only makes sense for full objects, all parts must have non-empty payload.
+// If key is not nil, all reconstructed parts are signed with this key.
+func (c *Constructor) ReconstructParts(parts []*objectSDK.Object, required []bool, key *ecdsa.PrivateKey) error {
+	if len(required) != len(parts) {
+		return fmt.Errorf("len(parts) != len(required): %d != %d", len(parts), len(required))
+	}
+
+	c.clear()
+
+	if err := c.fillHeader(parts); err != nil {
+		return err
+	}
+	c.fillPayload(parts)
+
+	if err := c.enc.ReconstructSome(c.payloadShards, required); err != nil {
+		return fmt.Errorf("%w: %w", ErrMalformedSlice, err)
+	}
+	if err := c.enc.ReconstructSome(c.headerShards, required); err != nil {
+		return fmt.Errorf("%w: %w", ErrMalformedSlice, err)
+	}
+
+	nonNilPart := 0
+	for i := range parts {
+		if parts[i] != nil {
+			nonNilPart = i
+			break
+		}
+	}
+
+	ec := parts[nonNilPart].GetECHeader()
+	ecParentInfo := objectSDK.ECParentInfo{
+		ID:            ec.Parent(),
+		SplitID:       ec.ParentSplitID(),
+		SplitParentID: ec.ParentSplitParentID(),
+		Attributes:    ec.ParentAttributes(),
+	}
+	total := ec.Total()
+
+	for i := range required {
+		if parts[i] != nil || !required[i] {
+			continue
+		}
+
+		part := objectSDK.New()
+		copyRequiredFields(part, parts[nonNilPart])
+		part.SetPayload(c.payloadShards[i])
+		part.SetPayloadSize(uint64(len(c.payloadShards[i])))
+		part.SetECHeader(objectSDK.NewECHeader(ecParentInfo, uint32(i), total, c.headerShards[i], c.headerLength))
+
+		if err := setIDWithSignature(part, key); err != nil {
+			return err
+		}
+		parts[i] = part
+	}
+	return nil
+}
+
+func (c *Constructor) reconstructHeader() (*objectSDK.Object, error) {
+	data, err := reconstructExact(c.enc, int(c.headerLength), c.headerShards)
+	if err != nil {
+		return nil, err
+	}
+
+	var obj objectSDK.Object
+	return &obj, obj.Unmarshal(data)
+}
+
+func reconstructExact(enc reedsolomon.Encoder, size int, shards [][]byte) ([]byte, error) {
+	if err := enc.ReconstructData(shards); err != nil {
+		return nil, err
+	}
+
+	// Technically, this error will be returned from enc.Join().
+	// However, allocating based on unvalidated user data is an easy attack vector.
+	// Preallocating seems to have enough benefits to justify a slight increase in code complexity.
+	maxSize := 0
+	for i := range shards {
+		maxSize += len(shards[i])
+	}
+	if size > maxSize {
+		return nil, reedsolomon.ErrShortData
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, size))
+	if err := enc.Join(buf, shards, size); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
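A minimal end-to-end sketch of the flow these methods describe: split an object into 3 data and 2 parity chunks (Constructor.Split is exercised by the test file below), drop both parity chunks and restore the original. Error handling is collapsed for brevity:

    package example

    import (
    	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
    	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    )

    // rebuild splits obj into 3 data + 2 parity chunks, simulates the loss of both
    // parity chunks and restores the original object from what remains.
    func rebuild(obj *objectSDK.Object) (*objectSDK.Object, error) {
    	pk, err := keys.NewPrivateKey()
    	if err != nil {
    		return nil, err
    	}

    	c, err := erasurecode.NewConstructor(3, 2)
    	if err != nil {
    		return nil, err
    	}

    	parts, err := c.Split(obj, &pk.PrivateKey) // 5 parts, indexed by their EC headers
    	if err != nil {
    		return nil, err
    	}

    	parts[3], parts[4] = nil, nil // lose the parity shards
    	return c.Reconstruct(parts)   // any 3 of the 5 parts are enough
    }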
284
object/erasurecode/reconstruct_test.go
Normal file
284
object/erasurecode/reconstruct_test.go
Normal file
|
@ -0,0 +1,284 @@
|
||||||
|
package erasurecode_test

import (
	"context"
	"crypto/rand"
	"math"
	"testing"

	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
)

func TestErasureCodeReconstruct(t *testing.T) {
	const payloadSize = 99
	const dataCount = 3
	const parityCount = 2

	// We would also like to test padding behaviour,
	// so ensure padding is done.
	require.NotZero(t, payloadSize%(dataCount+parityCount))

	pk, err := keys.NewPrivateKey()
	require.NoError(t, err)

	original := newObject(t, payloadSize, pk)

	c, err := erasurecode.NewConstructor(dataCount, parityCount)
	require.NoError(t, err)

	parts, err := c.Split(original, &pk.PrivateKey)
	require.NoError(t, err)

	t.Run("reconstruct header", func(t *testing.T) {
		original := original.CutPayload()
		parts := cloneSlice(parts)
		for i := range parts {
			parts[i] = parts[i].CutPayload()
		}
		t.Run("from data", func(t *testing.T) {
			parts := cloneSlice(parts)
			for i := dataCount; i < dataCount+parityCount; i++ {
				parts[i] = nil
			}
			reconstructed, err := c.ReconstructHeader(parts)
			require.NoError(t, err)
			verifyReconstruction(t, original, reconstructed)
		})
		t.Run("from parity", func(t *testing.T) {
			parts := cloneSlice(parts)
			for i := 0; i < parityCount; i++ {
				parts[i] = nil
			}
			reconstructed, err := c.ReconstructHeader(parts)
			require.NoError(t, err)
			verifyReconstruction(t, original, reconstructed)

			t.Run("not enough shards", func(t *testing.T) {
				parts[parityCount] = nil
				_, err := c.ReconstructHeader(parts)
				require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
			})
		})
		t.Run("only nil parts", func(t *testing.T) {
			parts := make([]*objectSDK.Object, len(parts))
			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
		t.Run("missing EC header", func(t *testing.T) {
			parts := cloneSlice(parts)
			parts[0] = deepCopy(t, parts[0])
			parts[0].SetECHeader(nil)

			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
		t.Run("invalid index", func(t *testing.T) {
			parts := cloneSlice(parts)
			parts[0] = deepCopy(t, parts[0])

			ec := parts[0].GetECHeader()
			ec.SetIndex(1)
			parts[0].SetECHeader(ec)

			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
		t.Run("invalid total", func(t *testing.T) {
			parts := cloneSlice(parts)
			parts[0] = deepCopy(t, parts[0])

			ec := parts[0].GetECHeader()
			ec.SetTotal(uint32(len(parts) + 1))
			parts[0].SetECHeader(ec)

			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
		t.Run("inconsistent header length", func(t *testing.T) {
			parts := cloneSlice(parts)
			parts[0] = deepCopy(t, parts[0])

			ec := parts[0].GetECHeader()
			ec.SetHeaderLength(ec.HeaderLength() - 1)
			parts[0].SetECHeader(ec)

			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
		t.Run("invalid header length", func(t *testing.T) {
			parts := cloneSlice(parts)
			for i := range parts {
				parts[i] = deepCopy(t, parts[i])

				ec := parts[0].GetECHeader()
				ec.SetHeaderLength(math.MaxUint32)
				parts[0].SetECHeader(ec)
			}

			_, err := c.ReconstructHeader(parts)
			require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
		})
	})
	t.Run("reconstruct data", func(t *testing.T) {
		t.Run("from data", func(t *testing.T) {
			parts := cloneSlice(parts)
			for i := dataCount; i < dataCount+parityCount; i++ {
				parts[i] = nil
			}
			reconstructed, err := c.Reconstruct(parts)
			require.NoError(t, err)
			verifyReconstruction(t, original, reconstructed)
		})
		t.Run("from parity", func(t *testing.T) {
			parts := cloneSlice(parts)
			for i := 0; i < parityCount; i++ {
				parts[i] = nil
			}
			reconstructed, err := c.Reconstruct(parts)
			require.NoError(t, err)
			verifyReconstruction(t, original, reconstructed)

			t.Run("not enough shards", func(t *testing.T) {
				parts[parityCount] = nil
				_, err := c.Reconstruct(parts)
				require.ErrorIs(t, err, erasurecode.ErrMalformedSlice)
			})
		})
	})
	t.Run("reconstruct parts", func(t *testing.T) {
		// We would like to also test that ReconstructParts doesn't perform
		// excessive work, so ensure this test makes sense.
		require.GreaterOrEqual(t, parityCount, 2)

		t.Run("from data", func(t *testing.T) {
			oldParts := parts
			parts := cloneSlice(parts)
			for i := dataCount; i < dataCount+parityCount; i++ {
				parts[i] = nil
			}

			required := make([]bool, len(parts))
			required[dataCount] = true

			require.NoError(t, c.ReconstructParts(parts, required, nil))

			old := deepCopy(t, oldParts[dataCount])
			old.SetSignature(nil)
			require.Equal(t, old, parts[dataCount])

			for i := dataCount + 1; i < dataCount+parityCount; i++ {
				require.Nil(t, parts[i])
			}
		})
		t.Run("from parity", func(t *testing.T) {
			oldParts := parts
			parts := cloneSlice(parts)
			for i := 0; i < parityCount; i++ {
				parts[i] = nil
			}

			required := make([]bool, len(parts))
			required[0] = true

			require.NoError(t, c.ReconstructParts(parts, required, nil))

			old := deepCopy(t, oldParts[0])
			old.SetSignature(nil)
			require.Equal(t, old, parts[0])

			for i := 1; i < parityCount; i++ {
				require.Nil(t, parts[i])
			}
		})
	})
}

func newObject(t *testing.T, size uint64, pk *keys.PrivateKey) *objectSDK.Object {
	// Use transformer to form object to avoid potential bugs with yet another helper object creation in tests.
	tt := &testTarget{}
	p := transformer.NewPayloadSizeLimiter(transformer.Params{
		Key:                    &pk.PrivateKey,
		NextTargetInit:         func() transformer.ObjectWriter { return tt },
		NetworkState:           dummyEpochSource(123),
		MaxSize:                size + 1,
		WithoutHomomorphicHash: true,
	})
	cnr := cidtest.ID()
	ver := version.Current()
	hdr := objectSDK.New()
	hdr.SetContainerID(cnr)
	hdr.SetType(objectSDK.TypeRegular)
	hdr.SetVersion(&ver)

	var owner user.ID
	user.IDFromKey(&owner, pk.PrivateKey.PublicKey)
	hdr.SetOwnerID(owner)

	var attr objectSDK.Attribute
	attr.SetKey("somekey")
	attr.SetValue("somevalue")
	hdr.SetAttributes(attr)

	expectedPayload := make([]byte, size)
	_, _ = rand.Read(expectedPayload)
	writeObject(t, context.Background(), p, hdr, expectedPayload)
	require.Len(t, tt.objects, 1)
	return tt.objects[0]
}

func writeObject(t *testing.T, ctx context.Context, target transformer.ChunkedObjectWriter, header *objectSDK.Object, payload []byte) *transformer.AccessIdentifiers {
	require.NoError(t, target.WriteHeader(ctx, header))

	_, err := target.Write(ctx, payload)
	require.NoError(t, err)

	ids, err := target.Close(ctx)
	require.NoError(t, err)

	return ids
}

func verifyReconstruction(t *testing.T, original, reconstructed *objectSDK.Object) {
	require.True(t, reconstructed.VerifyIDSignature())
	reconstructed.ToV2().SetMarshalData(nil)
	original.ToV2().SetMarshalData(nil)

	require.Equal(t, original, reconstructed)
}

func deepCopy(t *testing.T, obj *objectSDK.Object) *objectSDK.Object {
	data, err := obj.Marshal()
	require.NoError(t, err)

	res := objectSDK.New()
	require.NoError(t, res.Unmarshal(data))
	return res
}

func cloneSlice[T any](src []T) []T {
	dst := make([]T, len(src))
	copy(dst, src)
	return dst
}

type dummyEpochSource uint64

func (s dummyEpochSource) CurrentEpoch() uint64 {
	return uint64(s)
}

type testTarget struct {
	objects []*objectSDK.Object
}

func (tt *testTarget) WriteObject(_ context.Context, o *objectSDK.Object) error {
	tt.objects = append(tt.objects, o)
	return nil // AccessIdentifiers should not be used.
}
85
object/erasurecode/split.go
Normal file
@@ -0,0 +1,85 @@
package erasurecode

import (
	"crypto/ecdsa"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// Split splits a fully formed object into multiple chunks.
func (c *Constructor) Split(obj *objectSDK.Object, key *ecdsa.PrivateKey) ([]*objectSDK.Object, error) {
	c.clear()

	header, err := obj.CutPayload().Marshal()
	if err != nil {
		return nil, err
	}

	headerShards, err := c.encodeRaw(header)
	if err != nil {
		return nil, err
	}
	payloadShards, err := c.encodeRaw(obj.Payload())
	if err != nil {
		return nil, err
	}

	parts := make([]*objectSDK.Object, len(payloadShards))
	parent, _ := obj.ID()
	for i := range parts {
		chunk := objectSDK.New()
		copyRequiredFields(chunk, obj)
		chunk.SetPayload(payloadShards[i])
		chunk.SetPayloadSize(uint64(len(payloadShards[i])))

		var parentSplitParentID *oid.ID
		if obj.HasParent() {
			splitParentID, ok := obj.Parent().ID()
			if ok {
				parentSplitParentID = &splitParentID
			}
		}

		ecParentInfo := objectSDK.ECParentInfo{
			ID:            parent,
			SplitID:       obj.SplitID(),
			SplitParentID: parentSplitParentID,
			Attributes:    obj.Attributes(),
		}

		ec := objectSDK.NewECHeader(ecParentInfo, uint32(i), uint32(len(payloadShards)), headerShards[i], uint32(len(header)))
		chunk.SetECHeader(ec)
		if err := setIDWithSignature(chunk, key); err != nil {
			return nil, err
		}

		parts[i] = chunk
	}
	return parts, nil
}

func setIDWithSignature(obj *objectSDK.Object, key *ecdsa.PrivateKey) error {
	objectSDK.CalculateAndSetPayloadChecksum(obj)

	if err := objectSDK.CalculateAndSetID(obj); err != nil {
		return err
	}

	if key == nil {
		return nil
	}

	return objectSDK.CalculateAndSetSignature(*key, obj)
}

func (c *Constructor) encodeRaw(data []byte) ([][]byte, error) {
	shards, err := c.enc.Split(data)
	if err != nil {
		return nil, err
	}
	if err := c.enc.Encode(shards); err != nil {
		return nil, err
	}
	return shards, nil
}
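A caller-side sketch (illustrative, not part of the diff; the 3+2 shard counts and the helper name are assumptions): Split returns dataCount+parityCount chunk objects, each signed and carrying an EC header with its own index and the shard total.

func splitIntoChunks(obj *objectSDK.Object, key *ecdsa.PrivateKey) ([]*objectSDK.Object, error) {
	c, err := erasurecode.NewConstructor(3, 2) // 3 data + 2 parity shards
	if err != nil {
		return nil, err
	}
	return c.Split(obj, key) // 5 chunk objects on success
}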
138
object/erasurecode/split_test.go
Normal file
@@ -0,0 +1,138 @@
package erasurecode_test

import (
	"testing"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/erasurecode"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
)

// The library can behave differently for big shard counts.
// This test checks that we support the maximum number of chunks we promise.
func TestSplitMaxShardCount(t *testing.T) {
	pk, err := keys.NewPrivateKey()
	require.NoError(t, err)

	original := newObject(t, 1024, pk)

	t.Run("only data", func(t *testing.T) {
		c, err := erasurecode.NewConstructor(erasurecode.MaxShardCount, 0)
		require.NoError(t, err)

		parts, err := c.Split(original, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts, erasurecode.MaxShardCount)

		for _, part := range parts {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))
		}
	})
	t.Run("data + parity", func(t *testing.T) {
		c, err := erasurecode.NewConstructor(1, erasurecode.MaxShardCount-1)
		require.NoError(t, err)

		parts, err := c.Split(original, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts, erasurecode.MaxShardCount)

		for _, part := range parts {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))
		}
	})
	t.Run("ec parents are children of last Split part", func(t *testing.T) {
		c, err := erasurecode.NewConstructor(1, erasurecode.MaxShardCount-1)
		require.NoError(t, err)

		splitted1 := newObject(t, 1024, pk)
		splitted2 := newObject(t, 1024, pk)

		splittedId1, _ := splitted1.ID()
		splittedId2, _ := splitted2.ID()

		splitID := objectSDK.NewSplitID()
		parent := objectSDK.New()
		parent.SetID(oidtest.ID())
		parent.SetChildren(splittedId1, splittedId2)
		parent.SetSplitID(splitID)

		splitted1.SetSplitID(splitID)
		splitted1.SetParent(parent)
		splitted2.SetSplitID(splitID)
		splitted2.SetParent(parent)

		parts1, err := c.Split(splitted1, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts1, erasurecode.MaxShardCount)

		for _, part := range parts1 {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))

			require.NotNil(t, part.ECHeader().ParentSplitID())
			require.Equal(t, *splitID, *part.ECHeader().ParentSplitID())
			require.NotNil(t, part.ECHeader().ParentSplitParentID())
		}

		parts2, err := c.Split(splitted2, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts2, erasurecode.MaxShardCount)

		for _, part := range parts2 {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))

			require.NotNil(t, part.ECHeader().ParentSplitID())
			require.Equal(t, *splitID, *part.ECHeader().ParentSplitID())
			require.NotNil(t, part.ECHeader().ParentSplitParentID())
		}
	})
	t.Run("ec parents are children of non-last Split part", func(t *testing.T) {
		c, err := erasurecode.NewConstructor(1, erasurecode.MaxShardCount-1)
		require.NoError(t, err)

		splitted1 := newObject(t, 1024, pk)
		splitted2 := newObject(t, 1024, pk)

		splittedId1, _ := splitted1.ID()
		splittedId2, _ := splitted2.ID()

		splitID := objectSDK.NewSplitID()
		parent := objectSDK.New()
		// Such a parent has no ID.
		parent.SetChildren(splittedId1, splittedId2)
		parent.SetSplitID(splitID)

		splitted1.SetSplitID(splitID)
		splitted1.SetParent(parent)
		splitted2.SetSplitID(splitID)
		splitted2.SetParent(parent)

		parts1, err := c.Split(splitted1, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts1, erasurecode.MaxShardCount)

		for _, part := range parts1 {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))

			require.NotNil(t, part.ECHeader().ParentSplitID())
			require.Equal(t, *splitID, *part.ECHeader().ParentSplitID())
			require.Nil(t, part.ECHeader().ParentSplitParentID())
		}

		parts2, err := c.Split(splitted2, &pk.PrivateKey)
		require.NoError(t, err)
		require.Len(t, parts2, erasurecode.MaxShardCount)

		for _, part := range parts2 {
			require.NoError(t, objectSDK.CheckHeaderVerificationFields(part))

			require.NotNil(t, part.ECHeader().ParentSplitID())
			require.Equal(t, *splitID, *part.ECHeader().ParentSplitID())
			require.Nil(t, part.ECHeader().ParentSplitParentID())
		}
	})
}
44
object/erasurecode/target.go
Normal file
@@ -0,0 +1,44 @@
package erasurecode

import (
	"context"
	"crypto/ecdsa"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
)

// Target accepts regular objects and splits them into erasure-coded chunks.
type Target struct {
	c    *Constructor
	key  *ecdsa.PrivateKey
	next transformer.ObjectWriter
}

// ObjectWriter is an interface of the object writer that writes a prepared object.
type ObjectWriter interface {
	WriteObject(context.Context, *objectSDK.Object) error
}

// NewTarget returns a new Target instance.
func NewTarget(c *Constructor, key *ecdsa.PrivateKey, next ObjectWriter) *Target {
	return &Target{
		c:    c,
		key:  key,
		next: next,
	}
}

// WriteObject implements the transformer.ObjectWriter interface.
func (t *Target) WriteObject(ctx context.Context, obj *objectSDK.Object) error {
	parts, err := t.c.Split(obj, t.key)
	if err != nil {
		return err
	}
	for i := range parts {
		if err := t.next.WriteObject(ctx, parts[i]); err != nil {
			return err
		}
	}
	return nil
}
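A wiring sketch (illustrative, not part of the diff): Target can be plugged in as the next writer of the payload size limiter so that every formed object is immediately erasure-coded. The helper name is an assumption, and other transformer.Params fields (e.g. NetworkState) are omitted for brevity but would be set in real code.

func newECWritePipeline(c *erasurecode.Constructor, key *ecdsa.PrivateKey, sink erasurecode.ObjectWriter, maxSize uint64) transformer.ChunkedObjectWriter {
	return transformer.NewPayloadSizeLimiter(transformer.Params{
		Key:     key,
		MaxSize: maxSize,
		// Each object produced by the limiter is split into EC chunks and passed to sink.
		NextTargetInit: func() transformer.ObjectWriter { return erasurecode.NewTarget(c, key, sink) },
	})
}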
104
object/erasurecode/verify.go
Normal file
@@ -0,0 +1,104 @@
package erasurecode

import (
	"fmt"

	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// Verify verifies that parts are well-formed.
// All parts are expected to be non-nil.
// The number of parts must be equal to the `total` field of the EC header
// and parts must be sorted by index.
func (c *Constructor) Verify(parts []*objectSDK.Object) error {
	c.clear()

	var headerLength int
	for i := range parts {
		if parts[i] == nil {
			return ErrMalformedSlice
		}

		var err error
		headerLength, err = validatePart(parts, i, headerLength)
		if err != nil {
			return err
		}
	}

	p0 := parts[0]
	for i := 1; i < len(parts); i++ {
		// This part must be kept in sync with copyRequiredFields().
		pi := parts[i]
		if !p0.OwnerID().Equals(pi.OwnerID()) {
			return fmt.Errorf("%w: owner id mismatch: %s != %s", ErrMalformedSlice, p0.OwnerID(), pi.OwnerID())
		}
		if p0.Version() == nil && pi.Version() != nil || !p0.Version().Equal(*pi.Version()) {
			return fmt.Errorf("%w: version mismatch: %s != %s", ErrMalformedSlice, p0.Version(), pi.Version())
		}

		cnr0, _ := p0.ContainerID()
		cnri, _ := pi.ContainerID()
		if !cnr0.Equals(cnri) {
			return fmt.Errorf("%w: container id mismatch: %s != %s", ErrMalformedSlice, cnr0, cnri)
		}
	}

	if err := c.fillHeader(parts); err != nil {
		return err
	}
	c.fillPayload(parts)

	ok, err := c.enc.Verify(c.headerShards)
	if err != nil {
		return err
	}
	if !ok {
		return ErrMalformedSlice
	}

	ok, err = c.enc.Verify(c.payloadShards)
	if err != nil {
		return err
	}
	if !ok {
		return ErrMalformedSlice
	}
	return nil
}

// copyRequiredFields sets all fields in dst which are copied from src and shared among all chunks.
// src can be either another chunk or a full object.
// dst must be a chunk.
func copyRequiredFields(dst *objectSDK.Object, src *objectSDK.Object) {
	dst.SetVersion(src.Version())
	dst.SetOwnerID(src.OwnerID())
	dst.SetCreationEpoch(src.CreationEpoch())
	dst.SetSessionToken(src.SessionToken())

	cnr, _ := src.ContainerID()
	dst.SetContainerID(cnr)
}

// validatePart checks that the i-th part is consistent with the rest.
// If headerLength is not zero, it is asserted to be equal to the one in the EC header.
// Otherwise, the new headerLength is returned.
func validatePart(parts []*objectSDK.Object, i int, headerLength int) (int, error) {
	ec := parts[i].GetECHeader()
	if ec == nil {
		return headerLength, fmt.Errorf("%w: missing EC header", ErrMalformedSlice)
	}
	if ec.Index() != uint32(i) {
		return headerLength, fmt.Errorf("%w: index=%d, ec.index=%d", ErrMalformedSlice, i, ec.Index())
	}
	if ec.Total() != uint32(len(parts)) {
		return headerLength, fmt.Errorf("%w: len(parts)=%d, total=%d", ErrMalformedSlice, len(parts), ec.Total())
	}
	if headerLength == 0 {
		return int(ec.HeaderLength()), nil
	}
	if ec.HeaderLength() != uint32(headerLength) {
		return headerLength, fmt.Errorf("%w: header length mismatch %d != %d", ErrMalformedSlice, headerLength, ec.HeaderLength())
	}
	return headerLength, nil
}
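A round-trip sketch (illustrative, not part of the diff; the helper name is an assumption): Verify expects the complete, index-sorted set of non-nil chunks, while Reconstruct also tolerates missing (nil) entries.

func verifyThenReconstruct(c *erasurecode.Constructor, parts []*objectSDK.Object) (*objectSDK.Object, error) {
	if err := c.Verify(parts); err != nil {
		return nil, err // typically wraps ErrMalformedSlice
	}
	return c.Reconstruct(parts)
}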
@@ -17,3 +17,21 @@ func (s *SplitInfoError) SplitInfo() *SplitInfo {
 func NewSplitInfoError(v *SplitInfo) *SplitInfoError {
 	return &SplitInfoError{si: v}
 }
+
+type ECInfoError struct {
+	ei *ECInfo
+}
+
+const ecInfoErrorMsg = "object not found, ec info has been provided"
+
+func (e *ECInfoError) Error() string {
+	return ecInfoErrorMsg
+}
+
+func (e *ECInfoError) ECInfo() *ECInfo {
+	return e.ei
+}
+
+func NewECInfoError(v *ECInfo) *ECInfoError {
+	return &ECInfoError{ei: v}
+}
@@ -1,9 +1,12 @@
 package object_test
 
 import (
+	"crypto/rand"
 	"errors"
 	"testing"
 
+	objectV2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/refs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/stretchr/testify/require"
 )
@@ -31,3 +34,43 @@ func generateSplitInfo() *object.SplitInfo {
 
 	return si
 }
+
+func TestNewECInfoError(t *testing.T) {
+	var (
+		ei = generateECInfo()
+
+		err         error = object.NewECInfoError(ei)
+		expectedErr *object.ECInfoError
+	)
+
+	require.True(t, errors.As(err, &expectedErr))
+
+	eiErr, ok := err.(*object.ECInfoError)
+	require.True(t, ok)
+	require.Equal(t, ei, eiErr.ECInfo())
+}
+
+func generateECInfo() *object.ECInfo {
+	ei := object.NewECInfo()
+	ei.Chunks = append(ei.Chunks, objectV2.ECChunk{
+		ID:    generateV2ID(),
+		Index: 0,
+		Total: 2,
+	})
+	ei.Chunks = append(ei.Chunks, objectV2.ECChunk{
+		ID:    generateV2ID(),
+		Index: 1,
+		Total: 2,
+	})
+	return ei
+}
+
+func generateV2ID() refs.ObjectID {
+	var buf [32]byte
+	_, _ = rand.Read(buf[:])
+
+	var id refs.ObjectID
+	id.SetValue(buf[:])
+
+	return id
+}
@@ -366,6 +366,14 @@ func (o *Object) Children() []oid.ID {
 	return res
 }
 
+func (o *Object) GetECHeader() *ECHeader {
+	v2 := (*object.Object)(o).GetHeader().GetEC()
+
+	var ec ECHeader
+	_ = ec.ReadFromV2(v2) // Error is checked on unmarshal.
+	return &ec
+}
+
 // SetChildren sets list of the identifiers of the child objects.
 func (o *Object) SetChildren(v ...oid.ID) {
 	var (
@@ -120,6 +120,7 @@ const (
 	fKeySplitID
 	fKeyPropRoot
 	fKeyPropPhy
+	fKeyECParent
 )
 
 func (k filterKey) String() string {
@@ -152,6 +153,8 @@ func (k filterKey) String() string {
 		return v2object.FilterPropertyRoot
 	case fKeyPropPhy:
 		return v2object.FilterPropertyPhy
+	case fKeyECParent:
+		return v2object.FilterHeaderECParent
 	}
 }
 
@@ -277,6 +280,10 @@ func (f *SearchFilters) AddSplitIDFilter(m SearchMatchType, id *SplitID) {
 	f.addReservedFilter(m, fKeySplitID, staticStringer(id.String()))
 }
 
+func (f *SearchFilters) AddECParentFilter(m SearchMatchType, parentID oid.ID) {
+	f.addReservedFilter(m, fKeyECParent, staticStringer(parentID.String()))
+}
+
 // AddTypeFilter adds filter by object type.
 func (f *SearchFilters) AddTypeFilter(m SearchMatchType, typ Type) {
 	f.addReservedFilter(m, fKeyType, staticStringer(typ.String()))
@@ -160,6 +160,23 @@ func TestSearchFilters_AddSplitIDFilter(t *testing.T) {
 	})
 }
 
+func TestSearchFilters_AddECParentFilter(t *testing.T) {
+	id := testOID()
+
+	fs := new(object.SearchFilters)
+	fs.AddECParentFilter(object.MatchStringEqual, id)
+
+	t.Run("v2", func(t *testing.T) {
+		fsV2 := fs.ToV2()
+
+		require.Len(t, fsV2, 1)
+
+		require.Equal(t, v2object.FilterHeaderECParent, fsV2[0].GetKey())
+		require.Equal(t, id.String(), fsV2[0].GetValue())
+		require.Equal(t, v2object.MatchStringEqual, fsV2[0].GetMatchType())
+	})
+}
+
 func TestSearchFilters_AddTypeFilter(t *testing.T) {
 	typ := object.TypeTombstone
@@ -6,6 +6,7 @@ import (
 	"crypto/sha256"
 	"fmt"
 
+	buffPool "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/util/pool"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -45,6 +46,7 @@ type Params struct {
 	// functionality. Primary usecases are providing file size when putting an object
 	// with the frostfs-cli or using Content-Length header in gateways.
 	SizeHint uint64
+	Pool     *buffPool.BufferPool
 }
 
 // NewPayloadSizeLimiter returns ObjectTarget instance that restricts payload length
@@ -137,7 +139,13 @@ func (s *payloadSizeLimiter) initializeCurrent() {
 			payloadSize = remaining % s.MaxSize
 		}
 	}
-	s.payload = make([]byte, 0, payloadSize)
+
+	if s.Pool == nil {
+		s.payload = make([]byte, 0, payloadSize)
+	} else {
+		buffer := s.Pool.Get(uint32(payloadSize))
+		s.payload = buffer.Data[:0]
+	}
 }
 
 func (s *payloadSizeLimiter) initPayloadHashers() {
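A usage sketch (illustrative, not part of the diff; the helper name is an assumption and other Params fields are omitted): with Pool set, payload buffers are taken from the shared buffer pool instead of being allocated per object, and leaving it nil keeps the previous make-based behaviour.

func newLimiterWithBufferPool(key *ecdsa.PrivateKey, maxSize uint64, pool *buffPool.BufferPool) transformer.ChunkedObjectWriter {
	return transformer.NewPayloadSizeLimiter(transformer.Params{
		Key:     key,
		MaxSize: maxSize,
		Pool:    pool, // nil disables pooling
	})
}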
@@ -1182,6 +1182,10 @@ func needCountError(ctx context.Context, err error) bool {
 	if errors.As(err, &siErr) {
 		return false
 	}
+	var eiErr *object.ECInfoError
+	if errors.As(err, &eiErr) {
+		return false
+	}
 
 	if errors.Is(ctx.Err(), context.Canceled) {
 		return false
@@ -1,13 +1,18 @@
 package pool
 
-import "math/rand"
+import (
+	"math/rand"
+	"sync"
+)
 
 // sampler implements weighted random number generation using Vose's Alias
 // Method (https://www.keithschwarz.com/darts-dice-coins/).
 type sampler struct {
+	mu              sync.Mutex
 	randomGenerator *rand.Rand
-	probabilities []float64
-	alias []int
+	probabilities   []float64
+	alias           []int
 }
 
 // newSampler creates new sampler with a given set of probabilities using
@@ -58,10 +63,16 @@ func newSampler(probabilities []float64, source rand.Source) *sampler {
 }
 
 // Next returns the next (not so) random number from sampler.
+// This method is safe for concurrent use by multiple goroutines.
 func (g *sampler) Next() int {
 	n := len(g.alias)
+
+	g.mu.Lock()
 	i := g.randomGenerator.Intn(n)
-	if g.randomGenerator.Float64() < g.probabilities[i] {
+	f := g.randomGenerator.Float64()
+	g.mu.Unlock()
+
+	if f < g.probabilities[i] {
 		return i
 	}
 	return g.alias[i]
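An in-package sketch (illustrative, not part of the diff; the helper name and weights are assumptions): with the added mutex, a single sampler instance can now be shared by concurrent goroutines.

func pickWeightedIndex(weights []float64, seed int64) int {
	s := newSampler(weights, rand.NewSource(seed))
	return s.Next() // safe to call from multiple goroutines sharing s
}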
@@ -26,6 +26,16 @@ const (
 	defaultStreamTimeout = 10 * time.Second
 )
 
+// SubTreeSort defines an order of nodes returned from GetSubTree RPC.
+type SubTreeSort int32
+
+const (
+	// NoneOrder does not specify order of nodes returned in GetSubTree RPC.
+	NoneOrder SubTreeSort = iota
+	// AscendingOrder specifies ascending alphabetical order of nodes based on FilePath attribute.
+	AscendingOrder
+)
+
 var (
 	// ErrNodeNotFound is returned from Tree service in case of not found error.
 	ErrNodeNotFound = errors.New("not found")
@@ -119,6 +129,7 @@ type GetSubTreeParams struct {
 	RootID      uint64
 	Depth       uint32
 	BearerToken []byte
+	Order       SubTreeSort
 }
 
 // AddNodeParams groups parameters of Pool.AddNode operation.
@@ -388,12 +399,17 @@ func (p *Pool) GetSubTree(ctx context.Context, prm GetSubTreeParams) (*SubTreeRe
 			RootId:      prm.RootID,
 			Depth:       prm.Depth,
 			BearerToken: prm.BearerToken,
-			OrderBy: &grpcService.GetSubTreeRequest_Body_Order{
-				Direction: grpcService.GetSubTreeRequest_Body_Order_Asc,
-			},
+			OrderBy:     new(grpcService.GetSubTreeRequest_Body_Order),
 		},
 	}
 
+	switch prm.Order {
+	case AscendingOrder:
+		request.Body.OrderBy.Direction = grpcService.GetSubTreeRequest_Body_Order_Asc
+	default:
+		request.Body.OrderBy.Direction = grpcService.GetSubTreeRequest_Body_Order_None
+	}
+
 	if err := p.signRequest(request.Body, func(key, sign []byte) {
 		request.Signature = &grpcService.Signature{
 			Key: key,
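A parameter-building sketch (illustrative, not part of the diff; only fields visible in this diff are set, and the helper name is an assumption): the zero value NoneOrder preserves the previous unordered traversal, while AscendingOrder requests FilePath-sorted results.

func orderedSubTreeParams(rootID uint64, depth uint32, bearer []byte) GetSubTreeParams {
	return GetSubTreeParams{
		RootID:      rootID,
		Depth:       depth,
		BearerToken: bearer,
		Order:       AscendingOrder, // nodes are returned sorted by the FilePath attribute
	}
}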
@@ -583,7 +599,7 @@ func handleError(msg string, err error) error {
 	}
 	if strings.Contains(err.Error(), "not found") {
 		return fmt.Errorf("%w: %s", ErrNodeNotFound, err.Error())
-	} else if strings.Contains(err.Error(), "is denied by") {
+	} else if strings.Contains(err.Error(), "denied") {
 		return fmt.Errorf("%w: %s", ErrNodeAccessDenied, err.Error())
 	}
 	return fmt.Errorf("%s: %w", msg, err)
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"testing"
 
+	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 	grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
 	"github.com/stretchr/testify/require"
@@ -68,6 +69,10 @@ func TestHandleError(t *testing.T) {
 			err:           errors.New("something is denied by some acl rule"),
 			expectedError: ErrNodeAccessDenied,
 		},
+		{
+			err:           &apistatus.APEManagerAccessDenied{},
+			expectedError: ErrNodeAccessDenied,
+		},
 	} {
 		t.Run("", func(t *testing.T) {
 			err := handleError("err message", tc.err)