[#11] Trim the old functionality

Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>
Leonard Lyubich 2020-08-21 14:32:03 +03:00 committed by Alex Vanin
parent 783ec72d56
commit a87fdab324
235 changed files with 39 additions and 36211 deletions


@@ -1,178 +0,0 @@
package placement
import (
"github.com/gogo/protobuf/proto"
"github.com/multiformats/go-multiaddr"
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
)
// copy returns a copy of the current graph.
func (g *graph) copy() *graph {
var (
place *netmap.PlacementRule
roots = make([]*netmap.Bucket, 0, len(g.roots))
items = make([]netmapcore.Info, len(g.items))
)
copy(items, g.items)
for _, root := range g.roots {
var r *netmap.Bucket
if root != nil {
tmp := root.Copy()
r = &tmp
}
roots = append(roots, r)
}
place = proto.Clone(g.place).(*netmap.PlacementRule)
return &graph{
roots: roots,
items: items,
place: place,
}
}
func (g *graph) Exclude(list []multiaddr.Multiaddr) Graph {
if len(list) == 0 {
return g
}
var (
sub = g.copy()
ignore = make([]uint32, 0, len(list))
)
for i := range list {
for j := range sub.items {
if list[i].String() == sub.items[j].Address() {
ignore = append(ignore, uint32(j))
}
}
}
return sub.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
group.Exclude = ignore
return bucket.GetMaxSelection(group)
})
}
// Filter applies the rule to each root bucket and returns the resulting graph.
func (g *graph) Filter(rule FilterRule) Graph {
if rule == nil {
return g
}
var (
sub = g.copy()
roots = make([]*netmap.Bucket, len(g.roots))
items = make([]netmapcore.Info, len(g.items))
)
for i := range g.place.SFGroups {
if g.roots[i] == nil {
continue
}
root := g.roots[i].Copy()
roots[i] = rule(g.place.SFGroups[i], &root)
}
copy(items, g.items)
return &graph{
roots: roots,
items: items,
place: sub.place,
}
}
// NodeList returns the multiaddresses of the nodes in the current graph.
func (g *graph) NodeList() ([]multiaddr.Multiaddr, error) {
var (
ln = uint32(len(g.items))
result = make([]multiaddr.Multiaddr, 0, ln)
items = make([]netmapcore.Info, len(g.items))
)
if ln == 0 {
return nil, ErrEmptyNodes
}
copy(items, g.items)
for _, root := range g.roots {
if root == nil {
continue
}
list := root.Nodelist()
if len(list) == 0 {
continue
}
for _, idx := range list {
if ln <= idx.N {
return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx)
}
addr, err := multiaddr.NewMultiaddr(items[idx.N].Address())
if err != nil {
return nil, errors.Wrapf(err, "could not convert multi address(%s)", g.items[idx.N].Address())
}
result = append(result, addr)
}
}
if len(result) == 0 {
return nil, ErrEmptyNodes
}
return result, nil
}
// NodeInfo returns the node Info entries of the current graph.
func (g *graph) NodeInfo() ([]netmapcore.Info, error) {
var (
ln = uint32(len(g.items))
result = make([]netmapcore.Info, 0, ln)
items = make([]netmapcore.Info, len(g.items))
)
if ln == 0 {
return nil, ErrEmptyNodes
}
copy(items, g.items)
for _, root := range g.roots {
if root == nil {
continue
}
list := root.Nodelist()
if len(list) == 0 {
continue
}
for _, idx := range list {
if ln <= idx.N {
return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx)
}
result = append(result, items[idx.N])
}
}
if len(result) == 0 {
return nil, ErrEmptyNodes
}
return result, nil
}
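For orientation, here is a minimal in-package sketch of how this Graph implementation was typically chained; the excl and oid arguments are hypothetical inputs, and the pattern mirrors the wrapper code further down in this commit:

// pickNodes narrows a placement graph down to the nodes responsible for one object.
// g is assumed to come from Component.Query; excl and oid are caller-supplied.
func pickNodes(g Graph, excl []multiaddr.Multiaddr, oid []byte) ([]multiaddr.Multiaddr, error) {
	return g.
		Exclude(excl). // drop the listed addresses from every bucket
		Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
			// seed the selection with the object ID so every node picks the same subset
			return bucket.GetSelection(group.Selectors, oid)
		}).
		NodeList()
}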


@@ -1,106 +0,0 @@
package placement
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/bootstrap"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-node/pkg/core/container/storage"
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
"github.com/nspcc-dev/netmap"
"go.uber.org/atomic"
"go.uber.org/zap"
)
type (
// Component is the interface of the placement service
Component interface {
// TODO leave for feature request
NetworkState() *bootstrap.SpreadMap
Neighbours(seed, epoch uint64, full bool) []peers.ID
Update(epoch uint64, nm *netmapcore.NetMap) error
Query(ctx context.Context, opts ...QueryOption) (Graph, error)
}
// QueryOptions for query request
QueryOptions struct {
CID refs.CID
Previous int
Excludes []multiaddr.Multiaddr
}
// QueryOption settings closure
QueryOption func(*QueryOptions)
// FilterRule bucket callback handler
FilterRule func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket
// Graph is result of request to Placement-component
Graph interface {
Filter(rule FilterRule) Graph
Exclude(list []multiaddr.Multiaddr) Graph
NodeList() ([]multiaddr.Multiaddr, error)
NodeInfo() ([]netmapcore.Info, error)
}
// Key to fetch node-list
Key []byte
// Params to create Placement component
Params struct {
Log *zap.Logger
Netmap *NetMap
Peerstore peers.Store
Fetcher storage.Storage
ChronologyDuration uint64 // storing number of past epochs states
}
networkState struct {
nm *NetMap
epoch uint64
}
// placement is implementation of placement.Component
placement struct {
log *zap.Logger
cnr storage.Storage
chronologyDur uint64
nmStore *netMapStore
ps peers.Store
healthy *atomic.Bool
}
// graph is implementation of placement.Graph
graph struct {
roots []*netmap.Bucket
items []netmapcore.Info
place *netmap.PlacementRule
}
)
// ExcludeNodes returns an option to ignore the listed nodes.
func ExcludeNodes(list []multiaddr.Multiaddr) QueryOption {
return func(opt *QueryOptions) {
opt.Excludes = list
}
}
// ContainerID returns an option that sets the container ID of the query.
func ContainerID(cid refs.CID) QueryOption {
return func(opt *QueryOptions) {
opt.CID = cid
}
}
// UsePreviousNetmap returns an option to use the netmap diff epochs back.
func UsePreviousNetmap(diff int) QueryOption {
return func(opt *QueryOptions) {
opt.Previous = diff
}
}
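For illustration, a hedged sketch of how these options compose into a single Query call; the Component, context, CID and address values are assumptions:

// queryPrevious asks the placement component for a container graph
// built from the previous epoch's netmap, ignoring the given nodes.
func queryPrevious(ctx context.Context, p Component, cid refs.CID, addrs []multiaddr.Multiaddr) (Graph, error) {
	return p.Query(ctx,
		ContainerID(cid),     // container to resolve placement for
		UsePreviousNetmap(1), // look one epoch back
		ExcludeNodes(addrs),  // drop these addresses from the result
	)
}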


@@ -1,69 +0,0 @@
package placement
import (
"math"
"github.com/nspcc-dev/hrw"
"github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
"go.uber.org/zap"
)
func calculateCount(n int) int {
if n < 30 {
return n
}
return int(1.4*math.Log(float64(n))+9) + 1
}
// Neighbours returns the peers distributed by hrw(seed).
// If the full flag is set, the whole set of peers is returned.
// Otherwise, the result size depends on the calculateCount function.
func (p *placement) Neighbours(seed, epoch uint64, full bool) []peers.ID {
nm := p.nmStore.get(epoch)
if nm == nil {
p.log.Error("could not receive network state",
zap.Uint64("epoch", epoch),
)
return nil
}
rPeers := p.listPeers(nm.Nodes(), !full)
hrw.SortSliceByValue(rPeers, seed)
if full {
return rPeers
}
var (
ln = len(rPeers)
cut = calculateCount(ln)
)
if cut > ln {
cut = ln
}
return rPeers[:cut]
}
func (p *placement) listPeers(nodes []netmap.Info, exclSelf bool) []peers.ID {
var (
id = p.ps.SelfID()
result = make([]peers.ID, 0, len(nodes))
)
for i := range nodes {
key := peers.IDFromBinary(nodes[i].PublicKey())
if exclSelf && id.Equal(key) {
continue
}
result = append(result, key)
}
return result
}
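For a sense of scale, a small in-package sketch with hypothetical netmap sizes; the expected values follow directly from the calculateCount formula above:

// neighbourCounts evaluates the formula above for several hypothetical netmap sizes:
//   calculateCount(10)   == 10 (below the threshold every node is a neighbour)
//   calculateCount(100)  == 16 (int(1.4*ln(100)+9) + 1)
//   calculateCount(1000) == 19 (int(1.4*ln(1000)+9) + 1)
func neighbourCounts(sizes []int) []int {
	counts := make([]int, 0, len(sizes))
	for _, n := range sizes {
		counts = append(counts, calculateCount(n))
	}
	return counts
}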


@@ -1,187 +0,0 @@
package placement
import (
"crypto/ecdsa"
"strconv"
"testing"
"bou.ke/monkey"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/bootstrap"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neofs-node/pkg/util/test"
"github.com/stretchr/testify/require"
)
func testAddress(t *testing.T) multiaddr.Multiaddr {
addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0")
require.NoError(t, err)
return addr
}
// -- -- //
func testPeerstore(t *testing.T) peers.Store {
p, err := peers.NewStore(peers.StoreParams{
Key: test.DecodeKey(-1),
Logger: testlogger.NewLogger(false),
Addr: testAddress(t),
})
require.NoError(t, err)
return p
}
const address = "/ip4/0.0.0.0/tcp/0/p2p/"
func TestPlacement_Neighbours(t *testing.T) {
t.Run("Placement component NPE fix test", func(t *testing.T) {
nodes := []bootstrap.NodeInfo{
{Address: address + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}},
{Address: address + idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}},
{Address: address + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}},
}
ps := testPeerstore(t)
nm := testNetmap(t, nodes)
p := New(Params{
Log: testlogger.NewLogger(false),
Peerstore: ps,
})
require.NotPanics(t, func() {
require.NoError(t, p.Update(1, nm))
})
})
t.Run("Placement Neighbours TestSuite", func(t *testing.T) {
keys := []*ecdsa.PrivateKey{
test.DecodeKey(0),
test.DecodeKey(1),
test.DecodeKey(2),
}
nodes := []bootstrap.NodeInfo{
{
Address: address + idFromString(t, "USA1"),
PubKey: crypto.MarshalPublicKey(&keys[0].PublicKey),
Options: []string{"/Location:Europe/Country:USA/City:NewYork"},
},
{
Address: address + idFromString(t, "ITL1"),
PubKey: crypto.MarshalPublicKey(&keys[1].PublicKey),
Options: []string{"/Location:Europe/Country:Italy/City:Rome"},
},
{
Address: address + idFromString(t, "RUS1"),
PubKey: crypto.MarshalPublicKey(&keys[2].PublicKey),
Options: []string{"/Location:Europe/Country:Russia/City:SPB"},
},
}
ps := testPeerstore(t)
nm := testNetmap(t, nodes)
p := New(Params{
Log: testlogger.NewLogger(false),
Netmap: nm,
Peerstore: ps,
})
t.Run("check, that items have expected length (< 30)", func(t *testing.T) {
items := p.Neighbours(1, 0, false)
require.Len(t, items, len(nm.Nodes()))
})
t.Run("check, that items have expected length ( > 30)", func(t *testing.T) {
opts := []string{"/Location:Europe/Country:Russia/City:SPB"}
key, err := ps.GetPublicKey(ps.SelfID())
require.NoError(t, err)
keyBytes := crypto.MarshalPublicKey(key)
addr := address + idFromString(t, "NewRUS")
info := netmap.Info{}
info.SetAddress(addr)
info.SetPublicKey(keyBytes)
info.SetOptions(opts)
require.NoError(t, nm.AddNode(info))
for i := 0; i < 30; i++ {
addr := address + idFromString(t, "RUS"+strconv.Itoa(i+2))
key := test.DecodeKey(i + len(nodes))
pub := crypto.MarshalPublicKey(&key.PublicKey)
info := netmap.Info{}
info.SetAddress(addr)
info.SetPublicKey(pub)
info.SetOptions(opts)
require.NoError(t, nm.AddNode(info))
}
ln := calculateCount(len(nm.Nodes()))
items := p.Neighbours(1, 0, false)
require.Len(t, items, ln)
})
t.Run("check, that items is shuffled", func(t *testing.T) {
var cur, pre []peers.ID
for i := uint64(0); i < 10; i++ {
cur = p.Neighbours(i, 0, false)
require.NotEqual(t, pre, cur)
pre = cur
}
})
t.Run("check, that we can request more items that we have", func(t *testing.T) {
require.NotPanics(t, func() {
monkey.Patch(calculateCount, func(i int) int { return i + 1 })
defer monkey.Unpatch(calculateCount)
p.Neighbours(1, 0, false)
})
})
})
t.Run("unknown epoch", func(t *testing.T) {
s := &placement{
log: testlogger.NewLogger(false),
nmStore: newNetMapStore(),
ps: testPeerstore(t),
}
require.Empty(t, s.Neighbours(1, 1, false))
})
t.Run("neighbors w/ set full flag", func(t *testing.T) {
var (
n = 3
e uint64 = 5
nm = netmap.New()
nms = newNetMapStore()
)
for i := 0; i < n; i++ {
info := netmap.Info{}
info.SetAddress("node" + strconv.Itoa(i))
info.SetPublicKey([]byte{byte(i)})
require.NoError(t, nm.AddNode(info))
}
nms.put(e, nm)
s := &placement{
log: testlogger.NewLogger(false),
nmStore: nms,
ps: testPeerstore(t),
}
neighbors := s.Neighbours(1, e, true)
require.Len(t, neighbors, n)
})
}


@@ -1,260 +0,0 @@
package placement
import (
"bytes"
"context"
"strings"
"github.com/nspcc-dev/neofs-api-go/bootstrap"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
libnetmap "github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
"go.uber.org/atomic"
"go.uber.org/zap"
)
const defaultChronologyDuration = 1
var (
// ErrEmptyNodes is returned when the container does not contain any nodes
ErrEmptyNodes = errors.New("container doesn't contain nodes")
// ErrNodesBucketOmitted is returned when the selectors of a PlacementRule do not end with a NodesBucket
ErrNodesBucketOmitted = errors.New("nodes-bucket is omitted")
// ErrEmptyContainer is returned when GetMaxSelection or GetSelection returns an empty result
ErrEmptyContainer = errors.New("could not get container, it's empty")
)
var errNilNetMap = errors.New("network map is nil")
// New is a placement component constructor.
func New(p Params) Component {
if p.Netmap == nil {
p.Netmap = netmap.New()
}
if p.ChronologyDuration <= 0 {
p.ChronologyDuration = defaultChronologyDuration
}
pl := &placement{
log: p.Log,
cnr: p.Fetcher,
chronologyDur: p.ChronologyDuration,
nmStore: newNetMapStore(),
ps: p.Peerstore,
healthy: atomic.NewBool(false),
}
pl.nmStore.put(0, p.Netmap)
return pl
}
func (p *placement) Name() string { return "PresentInNetwork" }
func (p *placement) Healthy() bool { return p.healthy.Load() }
type strNodes []netmap.Info
func (n strNodes) String() string {
list := make([]string, 0, len(n))
for i := range n {
list = append(list, n[i].Address())
}
return `[` + strings.Join(list, ",") + `]`
}
func (p *placement) Update(epoch uint64, nm *NetMap) error {
cnm := p.nmStore.get(p.nmStore.epoch())
if cnm == nil {
return errNilNetMap
}
items := nm.Nodes()
p.log.Debug("update to new netmap",
zap.Stringer("nodes", strNodes(items)))
p.log.Debug("update peerstore")
if err := p.ps.Update(nm); err != nil {
return err
}
var (
pubkeyBinary []byte
healthy bool
)
// a storage node must be present in the network map to be considered healthy
pubkey, err := p.ps.GetPublicKey(p.ps.SelfID())
if err != nil {
p.log.Error("can't get my own public key")
}
pubkeyBinary = crypto.MarshalPublicKey(pubkey)
for i := range items {
if bytes.Equal(pubkeyBinary, items[i].PublicKey()) {
healthy = true
}
p.log.Debug("new peer for dht",
zap.Stringer("peer", peers.IDFromBinary(items[i].PublicKey())),
zap.String("addr", items[i].Address()))
}
// keep no more than chronologyDur previous netmaps
p.log.Debug("update previous netmap")
if epoch > p.chronologyDur {
p.nmStore.trim(epoch - p.chronologyDur)
}
p.log.Debug("update current netmap")
p.nmStore.put(epoch, nm)
p.log.Debug("update current epoch")
p.healthy.Store(healthy)
return nil
}
// NetworkState returns copy of current NetworkMap.
func (p *placement) NetworkState() *bootstrap.SpreadMap {
ns := p.networkState(p.nmStore.epoch())
if ns == nil {
ns = &networkState{nm: netmap.New()}
}
nodes := ns.nm.Nodes()
res := &bootstrap.SpreadMap{
Epoch: ns.epoch,
NetMap: make([]bootstrap.NodeInfo, 0, len(nodes)),
}
for i := range nodes {
res.NetMap = append(res.NetMap, bootstrap.NodeInfo{
Address: nodes[i].Address(),
PubKey: nodes[i].PublicKey(),
Options: nodes[i].Options(),
})
}
return res
}
func (p *placement) networkState(epoch uint64) *networkState {
nm := p.nmStore.get(epoch)
if nm == nil {
return nil
}
return &networkState{
nm: nm,
epoch: epoch,
}
}
// Query returns graph based on container.
func (p *placement) Query(ctx context.Context, opts ...QueryOption) (Graph, error) {
var (
query QueryOptions
ignore []uint32
)
for _, opt := range opts {
opt(&query)
}
epoch := p.nmStore.epoch()
if query.Previous > 0 {
epoch -= uint64(query.Previous)
}
state := p.networkState(epoch)
if state == nil {
return nil, errors.Errorf("could not get network state for epoch #%d", epoch)
}
items := state.nm.Nodes()
cnr, err := p.cnr.Get(query.CID)
if err != nil {
return nil, errors.Wrap(err, "could not fetch container")
}
for i := range query.Excludes {
for j := range items {
if query.Excludes[i].String() == items[j].Address() {
ignore = append(ignore, uint32(j))
}
}
}
rule := cnr.PlacementRule()
return ContainerGraph(state.nm, &rule, ignore, query.CID)
}
// ContainerGraph applies the placement rule to the network map and returns the container graph.
func ContainerGraph(nm *NetMap, rule *libnetmap.PlacementRule, ignore []uint32, cid refs.CID) (Graph, error) {
root := nm.Root()
roots := make([]*netmap.Bucket, 0, len(rule.SFGroups))
for i := range rule.SFGroups {
rule.SFGroups[i].Exclude = ignore
if ln := len(rule.SFGroups[i].Selectors); ln <= 0 ||
rule.SFGroups[i].Selectors[ln-1].Key != libnetmap.NodesBucket {
return nil, errors.Wrapf(ErrNodesBucketOmitted, "container (%s)", cid)
}
bigSelectors := make([]libnetmap.Select, len(rule.SFGroups[i].Selectors))
for j := range rule.SFGroups[i].Selectors {
bigSelectors[j] = libnetmap.Select{
Key: rule.SFGroups[i].Selectors[j].Key,
Count: rule.SFGroups[i].Selectors[j].Count,
}
if rule.ReplFactor > 1 && rule.SFGroups[i].Selectors[j].Key == libnetmap.NodesBucket {
bigSelectors[j].Count *= rule.ReplFactor
}
}
sf := libnetmap.SFGroup{
Selectors: bigSelectors,
Filters: rule.SFGroups[i].Filters,
Exclude: ignore,
}
if tree := root.Copy().GetMaxSelection(sf); tree != nil {
// fetch graph for replication factor seeded by ContainerID
if tree = tree.GetSelection(bigSelectors, cid[:]); tree == nil {
return nil, errors.Wrapf(ErrEmptyContainer, "for container(%s) with repl-factor(%d)",
cid, rule.ReplFactor)
}
roots = append(roots, tree)
continue
}
return nil, errors.Wrap(ErrEmptyContainer, "empty for bigSelector")
}
return &graph{
roots: roots,
items: nm.Nodes(),
place: rule,
}, nil
}
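To make the replication-factor handling concrete, a hedged sketch with hypothetical rule values, mirroring the placement rules used in the tests later in this commit:

// exampleRule builds a rule whose NodesBucket count gets scaled by ContainerGraph:
// with ReplFactor 2, bigSelectors becomes {"City": 2, NodesBucket: 2},
// so the selection tree holds up to 2 cities x 2 nodes = 4 candidate nodes per SFGroup.
func exampleRule() *libnetmap.PlacementRule {
	return &libnetmap.PlacementRule{
		ReplFactor: 2,
		SFGroups: []libnetmap.SFGroup{{
			Selectors: []libnetmap.Select{
				{Key: "City", Count: 2},                // pick two cities
				{Key: libnetmap.NodesBucket, Count: 1}, // one node per city before scaling
			},
		}},
	}
}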

View file

@@ -1,411 +0,0 @@
package placement
import (
"context"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/mr-tron/base58"
"github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multihash"
"github.com/nspcc-dev/neofs-api-go/bootstrap"
"github.com/nspcc-dev/neofs-api-go/refs"
crypto "github.com/nspcc-dev/neofs-crypto"
libcnr "github.com/nspcc-dev/neofs-node/pkg/core/container"
"github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic"
"github.com/nspcc-dev/neofs-node/pkg/core/container/storage"
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test"
"github.com/nspcc-dev/neofs-node/pkg/util/test"
"github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
type (
fakeDHT struct {
}
fakeContainerStorage struct {
storage.Storage
*sync.RWMutex
items map[refs.CID]*storage.Container
}
)
var (
testDHTCapacity = 100
)
// -- -- //
func testContainerStorage() *fakeContainerStorage {
return &fakeContainerStorage{
RWMutex: new(sync.RWMutex),
items: make(map[refs.CID]*storage.Container, testDHTCapacity),
}
}
func (f *fakeContainerStorage) Get(cid storage.CID) (*storage.Container, error) {
f.RLock()
val, ok := f.items[cid]
f.RUnlock()
if !ok {
return nil, errors.New("value for requested key not found in DHT")
}
return val, nil
}
func (f *fakeContainerStorage) Put(c *storage.Container) (*storage.CID, error) {
id, err := libcnr.CalculateID(c)
if err != nil {
return nil, err
}
f.Lock()
f.items[*id] = c
f.Unlock()
return id, nil
}
func (f *fakeDHT) UpdatePeers([]peers.ID) {
// do nothing
}
func (f *fakeDHT) GetValue(ctx context.Context, key string) ([]byte, error) {
panic("implement me")
}
func (f *fakeDHT) PutValue(ctx context.Context, key string, val []byte) error {
panic("implement me")
}
func (f *fakeDHT) Get(ctx context.Context, key string) ([]byte, error) {
panic("implement me")
}
func (f *fakeDHT) Put(ctx context.Context, key string, val []byte) error {
panic("implement me")
}
// -- -- //
func testNetmap(t *testing.T, nodes []bootstrap.NodeInfo) *NetMap {
nm := netmapcore.New()
for i := range nodes {
info := netmapcore.Info{}
info.SetAddress(nodes[i].Address)
info.SetOptions(nodes[i].Options)
info.SetPublicKey(crypto.MarshalPublicKey(&test.DecodeKey(i).PublicKey))
err := nm.AddNode(info)
require.NoError(t, err)
}
return nm
}
// -- -- //
func idFromString(t *testing.T, id string) string {
buf, err := multihash.Encode([]byte(id), multihash.ID)
require.NoError(t, err)
return (multihash.Multihash(buf)).B58String()
}
func idFromAddress(t *testing.T, addr multiaddr.Multiaddr) string {
id, err := addr.ValueForProtocol(multiaddr.P_P2P)
require.NoError(t, err)
buf, err := base58.Decode(id)
require.NoError(t, err)
hs, err := multihash.Decode(buf)
require.NoError(t, err)
return string(hs.Digest)
}
// -- -- //
func TestPlacement(t *testing.T) {
multiaddr.SwapToP2pMultiaddrs()
testAddress := "/ip4/0.0.0.0/tcp/0/p2p/"
key := test.DecodeKey(-1)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ids := map[string]struct{}{
"GRM1": {}, "GRM2": {}, "GRM3": {}, "GRM4": {},
"SPN1": {}, "SPN2": {}, "SPN3": {}, "SPN4": {},
}
nodes := []bootstrap.NodeInfo{
{Address: testAddress + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}},
{Address: testAddress + idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}},
{Address: testAddress + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}},
}
for id := range ids {
var opts []string
switch {
case strings.Contains(id, "GRM"):
opts = append(opts, "/Location:Europe/Country:Germany/City:"+id)
case strings.Contains(id, "SPN"):
opts = append(opts, "/Location:Europe/Country:Spain/City:"+id)
}
for i := 0; i < 4; i++ {
id := id + strconv.Itoa(i)
nodes = append(nodes, bootstrap.NodeInfo{
Address: testAddress + idFromString(t, id),
Options: opts,
})
}
}
sort.Slice(nodes, func(i, j int) bool {
return strings.Compare(nodes[i].Address, nodes[j].Address) == -1
})
nm := testNetmap(t, nodes)
cnrStorage := testContainerStorage()
p := New(Params{
Log: testlogger.NewLogger(false),
Peerstore: testPeerstore(t),
Fetcher: cnrStorage,
})
require.NoError(t, p.Update(1, nm))
oid, err := refs.NewObjectID()
require.NoError(t, err)
// filter over oid
filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
return bucket.GetSelection(group.Selectors, oid[:])
}
owner, err := refs.NewOwnerID(&key.PublicKey)
require.NoError(t, err)
cnr1 := new(storage.Container)
cnr1.SetOwnerID(owner)
cnr1.SetBasicACL(basic.FromUint32(0))
cnr1.SetPlacementRule(netmap.PlacementRule{
ReplFactor: 2,
SFGroups: []netmap.SFGroup{
{
Selectors: []netmap.Select{
{Key: "Country", Count: 1},
{Key: "City", Count: 2},
{Key: netmap.NodesBucket, Count: 1},
},
Filters: []netmap.Filter{
{Key: "Country", F: netmap.FilterIn("Germany", "Spain")},
},
},
},
})
cid1, err := cnrStorage.Put(cnr1)
require.NoError(t, err)
cnr2 := new(storage.Container)
cnr2.SetOwnerID(owner)
cnr2.SetBasicACL(basic.FromUint32(0))
cnr2.SetPlacementRule(netmap.PlacementRule{
ReplFactor: 2,
SFGroups: []netmap.SFGroup{
{
Selectors: []netmap.Select{
{Key: "Country", Count: 1},
{Key: netmap.NodesBucket, Count: 10},
},
Filters: []netmap.Filter{
{Key: "Country", F: netmap.FilterIn("Germany", "Spain")},
},
},
},
})
cid2, err := cnrStorage.Put(cnr2)
require.NoError(t, err)
cnr3 := new(storage.Container)
cnr3.SetOwnerID(owner)
cnr3.SetBasicACL(basic.FromUint32(0))
cnr3.SetPlacementRule(netmap.PlacementRule{
ReplFactor: 2,
SFGroups: []netmap.SFGroup{
{
Selectors: []netmap.Select{
{Key: "Country", Count: 1},
},
Filters: []netmap.Filter{
{Key: "Country", F: netmap.FilterIn("Germany", "Spain")},
},
},
},
})
cid3, err := cnrStorage.Put(cnr3)
require.NoError(t, err)
t.Run("Should fail on empty container", func(t *testing.T) {
_, err = p.Query(ctx, ContainerID(*cid2))
require.EqualError(t, errors.Cause(err), ErrEmptyContainer.Error())
})
t.Run("Should fail on Nodes Bucket is omitted in container", func(t *testing.T) {
_, err = p.Query(ctx, ContainerID(*cid3))
require.EqualError(t, errors.Cause(err), ErrNodesBucketOmitted.Error())
})
t.Run("Should fail on unknown container (dht error)", func(t *testing.T) {
_, err = p.Query(ctx, ContainerID(refs.CID{5}))
require.Error(t, err)
})
g, err := p.Query(ctx, ContainerID(*cid1))
require.NoError(t, err)
t.Run("Should return error on empty items", func(t *testing.T) {
_, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket {
return &netmap.Bucket{}
}).NodeList()
require.EqualError(t, err, ErrEmptyNodes.Error())
})
t.Run("Should ignore some nodes", func(t *testing.T) {
g1, err := p.Query(ctx, ContainerID(*cid1))
require.NoError(t, err)
expect, err := g1.
Filter(filter).
NodeList()
require.NoError(t, err)
g2, err := p.Query(ctx, ContainerID(*cid1))
require.NoError(t, err)
actual, err := g2.
Filter(filter).
NodeList()
require.NoError(t, err)
require.Equal(t, expect, actual)
g3, err := p.Query(ctx, ContainerID(*cid1))
require.NoError(t, err)
actual, err = g3.
Exclude(expect).
Filter(filter).
NodeList()
require.NoError(t, err)
for _, item := range expect {
require.NotContains(t, actual, item)
}
g4, err := p.Query(ctx,
ContainerID(*cid1),
ExcludeNodes(expect))
require.NoError(t, err)
actual, err = g4.
Filter(filter).
NodeList()
require.NoError(t, err)
for _, item := range expect {
require.NotContains(t, actual, item)
}
})
t.Run("Should return error on nil Buckets", func(t *testing.T) {
_, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket {
return nil
}).NodeList()
require.EqualError(t, err, ErrEmptyNodes.Error())
})
t.Run("Should return error on empty NodeInfo's", func(t *testing.T) {
cp := g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket {
return nil
})
cp.(*graph).items = nil
_, err := cp.NodeList()
require.EqualError(t, err, ErrEmptyNodes.Error())
})
t.Run("Should return error on unknown items", func(t *testing.T) {
cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket {
return b
})
cp.(*graph).items = cp.(*graph).items[:5]
_, err := cp.NodeList()
require.Error(t, err)
})
t.Run("Should return error on bad items", func(t *testing.T) {
cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket {
return b
})
for i := range cp.(*graph).items {
cp.(*graph).items[i].SetAddress("BadAddress")
}
_, err := cp.NodeList()
require.EqualError(t, errors.Cause(err), "failed to parse multiaddr \"BadAddress\": must begin with /")
})
list, err := g.
Filter(filter).
// must return same graph on empty filter
Filter(nil).
NodeList()
require.NoError(t, err)
// 1 Country, 2 Cities, 1 Node = 2 Nodes
require.Len(t, list, 2)
for _, item := range list {
id := idFromAddress(t, item)
require.Contains(t, ids, id[:4]) // exclude our postfix (0-4)
}
}
func TestContainerGraph(t *testing.T) {
t.Run("selectors index out-of-range", func(t *testing.T) {
rule := new(netmap.PlacementRule)
rule.SFGroups = append(rule.SFGroups, netmap.SFGroup{})
require.NotPanics(t, func() {
_, _ = ContainerGraph(
netmapcore.New(),
rule,
nil,
refs.CID{},
)
})
})
}


@@ -1,66 +0,0 @@
package placement
import (
"sync"
"github.com/nspcc-dev/neofs-node/pkg/core/netmap"
)
type (
// NetMap is a type alias of
// NetMap from netmap package.
NetMap = netmap.NetMap
netMapStore struct {
*sync.RWMutex
items map[uint64]*NetMap
curEpoch uint64
}
)
func newNetMapStore() *netMapStore {
return &netMapStore{
RWMutex: new(sync.RWMutex),
items: make(map[uint64]*NetMap),
}
}
func (s *netMapStore) put(epoch uint64, nm *NetMap) {
s.Lock()
s.items[epoch] = nm
s.curEpoch = epoch
s.Unlock()
}
func (s *netMapStore) get(epoch uint64) *NetMap {
s.RLock()
nm := s.items[epoch]
s.RUnlock()
return nm
}
// trim removes all network states older than the given epoch.
func (s *netMapStore) trim(epoch uint64) {
s.Lock()
m := make(map[uint64]struct{}, len(s.items))
for e := range s.items {
if e < epoch {
m[e] = struct{}{}
}
}
for e := range m {
delete(s.items, e)
}
s.Unlock()
}
func (s *netMapStore) epoch() uint64 {
s.RLock()
defer s.RUnlock()
return s.curEpoch
}
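A minimal in-package sketch of the store's lifecycle; the epoch numbers are hypothetical:

// exampleStoreLifecycle shows how the placement component drives the store:
// every epoch gets its own netmap, and trim drops states that fall out of the window.
func exampleStoreLifecycle(nm *NetMap) *NetMap {
	s := newNetMapStore()
	s.put(1, nm) // remember the netmap of epoch 1
	s.put(2, nm) // the current epoch becomes 2
	s.trim(2)    // drop every state older than epoch 2 (epoch 1 here)
	return s.get(s.epoch()) // the netmap of the current epoch (2)
}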


@@ -1,127 +0,0 @@
package placement
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/container"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
/*
This file contains implementations of placement-related solutions.
Highly specialized interfaces make it possible to hide the placement implementation behind a black box, because:
* placement is an implementation-tied entity that works with graphs, filters, etc.;
* NeoFS components usually need only a small part of the solutions provided by placement;
* avoiding a direct dependency on placement shields other components from breaking changes in it.
*/
type (
// CID is a type alias of
// CID from refs package of neofs-api-go.
CID = refs.CID
// SGID is a type alias of
// SGID from refs package of neofs-api-go.
SGID = refs.SGID
// ObjectID is a type alias of
// ObjectID from refs package of neofs-api-go.
ObjectID = refs.ObjectID
// Object is a type alias of
// Object from object package of neofs-api-go.
Object = object.Object
// Address is a type alias of
// Address from refs package of neofs-api-go.
Address = refs.Address
PlacementWrapper struct {
pl Component
}
)
var errEmptyPlacement = errors.New("could not create storage lister: empty placement component")
// NewObjectPlacer wraps Component and returns ObjectPlacer interface.
func NewObjectPlacer(pl Component) (*PlacementWrapper, error) {
if pl == nil {
return nil, errEmptyPlacement
}
return &PlacementWrapper{pl}, nil
}
func (v PlacementWrapper) ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) {
graph, err := v.pl.Query(ctx, ContainerID(cid))
if err != nil {
return nil, errors.Wrap(err, "objectPlacer.ContainerNodes failed on graph query")
}
return graph.NodeList()
}
func (v PlacementWrapper) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]netmapcore.Info, error) {
graph, err := v.pl.Query(ctx, ContainerID(cid), UsePreviousNetmap(prev))
if err != nil {
return nil, errors.Wrap(err, "objectPlacer.ContainerNodesInfo failed on graph query")
}
return graph.NodeInfo()
}
func (v PlacementWrapper) GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) {
queryOptions := make([]QueryOption, 1, 2)
queryOptions[0] = ContainerID(addr.CID)
if usePreviousNetMap {
queryOptions = append(queryOptions, UsePreviousNetmap(1))
}
graph, err := v.pl.Query(ctx, queryOptions...)
if err != nil {
if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.NotFound {
return nil, container.ErrNotFound
}
return nil, errors.Wrap(err, "placer.GetNodes failed on graph query")
}
filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
return bucket
}
if !addr.ObjectID.Empty() {
filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes())
}
}
return graph.Exclude(excl).Filter(filter).NodeList()
}
func (v PlacementWrapper) IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) {
nodes, err := v.GetNodes(ctx, Address{
CID: cid,
}, previousNetMap)
if err != nil {
return false, errors.Wrap(err, "placer.FromContainer failed on placer.GetNodes")
}
for i := range nodes {
if nodes[i].Equal(addr) {
return true, nil
}
}
return false, nil
}
func (v PlacementWrapper) Epoch() uint64 { return v.pl.NetworkState().Epoch }
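A hedged usage sketch of the wrapper; the Component, context, container ID and address values are assumptions:

// exampleWrapper resolves a container's node list and checks membership of one address.
func exampleWrapper(ctx context.Context, pl Component, cid CID, self multiaddr.Multiaddr) (bool, error) {
	placer, err := NewObjectPlacer(pl)
	if err != nil {
		return false, err
	}
	// multiaddresses of the container's current storage nodes
	if _, err := placer.ContainerNodes(ctx, cid); err != nil {
		return false, err
	}
	// does the given address belong to the container's node set in the current netmap?
	return placer.IsContainerNode(ctx, self, cid, false)
}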


@@ -1,197 +0,0 @@
package replication
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
// CID is a type alias of
// CID from refs package of neofs-api-go.
CID = refs.CID
// Object is a type alias of
// Object from object package of neofs-api-go.
Object = object.Object
// OwnerID is a type alias of
// OwnerID from object package of neofs-api-go.
OwnerID = object.OwnerID
// Address is a type alias of
// Address from refs package of neofs-api-go.
Address = refs.Address
// ObjectVerificationParams groups the parameters of stored object verification.
ObjectVerificationParams struct {
Address
Node multiaddr.Multiaddr
Handler func(valid bool, obj *Object)
LocalInvalid bool
}
// ObjectVerifier is an interface of stored object verifier.
ObjectVerifier interface {
Verify(ctx context.Context, params *ObjectVerificationParams) bool
}
// ObjectSource is an interface of the object storage with read access.
ObjectSource interface {
Get(ctx context.Context, addr Address) (*Object, error)
}
// ObjectStoreParams groups the parameters for object storing.
ObjectStoreParams struct {
*Object
Nodes []ObjectLocation
Handler func(ObjectLocation, bool)
}
// ObjectReceptacle is an interface of object storage with write access.
ObjectReceptacle interface {
Put(ctx context.Context, params ObjectStoreParams) error
}
// ObjectCleaner is an entity that removes an object by its address.
ObjectCleaner interface {
Del(Address) error
}
// ContainerActualityChecker is an interface of an entity
// that checks the local node's presence in a container.
// Actual returns true if there were no errors and the local node is in the container.
ContainerActualityChecker interface {
Actual(ctx context.Context, cid CID) bool
}
// ObjectPool is a queue of objects selected for data audit.
// It is updated once per epoch.
ObjectPool interface {
Update([]Address)
Pop() (Address, error)
Undone() int
}
// Scheduler selects a slice of object addresses for data audit.
// These addresses are put into the ObjectPool.
Scheduler interface {
SelectForReplication(limit int) ([]Address, error)
}
// ReservationRatioReceiver is an interface of entity
// for getting reservation ratio value of object by address.
ReservationRatioReceiver interface {
ReservationRatio(ctx context.Context, objAddr Address) (int, error)
}
// RemoteStorageSelector is an interface of an entity
// that resolves, via placement, the remote storage nodes for an object by its address.
// The result does not contain nodes from the exclude list.
RemoteStorageSelector interface {
SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error)
}
// MultiSolver is an interface that encapsulates other different utilities.
MultiSolver interface {
AddressStore
RemoteStorageSelector
ReservationRatioReceiver
ContainerActualityChecker
EpochReceiver
WeightComparator
}
// ObjectLocator is an interface of an entity
// that builds the list of an object's current remote nodes by its address.
ObjectLocator interface {
LocateObject(ctx context.Context, objAddr Address) ([]multiaddr.Multiaddr, error)
}
// WeightComparator is an interface of an entity
// that compares, for a given address, the weight of the local node with that of the passed node:
// it returns -1 if the local node is weightier or on error,
// 0 if the weights are equal,
// and 1 if the passed node is weightier.
WeightComparator interface {
CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int
}
// EpochReceiver is an interface of entity for getting current epoch number.
EpochReceiver interface {
Epoch() uint64
}
// ObjectLocation groups the information about object current remote location.
ObjectLocation struct {
Node multiaddr.Multiaddr
WeightGreater bool // true if the Node has a lower index in the placement vector than the local node
}
// ObjectLocationRecord groups the information about all current locations.
ObjectLocationRecord struct {
Address
ReservationRatio int
Locations []ObjectLocation
}
// ReplicateTask groups the information about an object replication task.
// The task solver should not process nodes from the exclude list
// and should perform up to Shortage replications.
ReplicateTask struct {
Address
Shortage int
ExcludeNodes []multiaddr.Multiaddr
}
// ReplicateResult groups the information about object replication task result.
ReplicateResult struct {
*ReplicateTask
NewStorages []multiaddr.Multiaddr
}
// PresenceChecker is an interface of object storage with presence check access.
PresenceChecker interface {
Has(address Address) (bool, error)
}
// AddressStore is an interface of local peer's network address storage.
AddressStore interface {
SelfAddr() (multiaddr.Multiaddr, error)
}
)
const (
writeResultTimeout = "write result timeout"
taskChanClosed = " process finish finish: task channel closed"
ctxDoneMsg = " process finish: context done"
objectPoolPart = "object pool"
loggerPart = "logger"
objectVerifierPart = "object verifier"
objectReceptaclePart = "object receptacle"
remoteStorageSelectorPart = "remote storage elector"
objectSourcePart = "object source"
reservationRatioReceiverPart = "reservation ratio receiver"
objectLocatorPart = "object locator"
epochReceiverPart = "epoch receiver"
presenceCheckerPart = "object presence checker"
weightComparatorPart = "weight comparator"
addrStorePart = "address store"
)
func instanceError(entity, part string) error {
return errors.Errorf("could not instantiate %s: empty %s", entity, part)
}
func addressFields(addr Address) []zap.Field {
return []zap.Field{
zap.Stringer("oid", addr.ObjectID),
zap.Stringer("cid", addr.CID),
}
}
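Most of these contracts are single-method interfaces; below is a minimal sketch of one of them, a static AddressStore, together with the addressFields logging helper above (the fixed address and log message are assumptions):

// staticAddrStore is a trivial AddressStore that always reports one fixed address.
type staticAddrStore struct {
	addr multiaddr.Multiaddr
}

func (s staticAddrStore) SelfAddr() (multiaddr.Multiaddr, error) { return s.addr, nil }

// logObject attaches the object and container IDs to a log entry via addressFields.
func logObject(log *zap.Logger, addr Address) {
	log.Info("processing object", addressFields(addr)...)
}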


@@ -1,27 +0,0 @@
package replication
import (
"sync"
)
type (
garbageStore struct {
*sync.RWMutex
items []Address
}
)
func (s *garbageStore) put(addr Address) {
s.Lock()
defer s.Unlock()
for i := range s.items {
if s.items[i].Equal(&addr) {
return
}
}
s.items = append(s.items, addr)
}
func newGarbageStore() *garbageStore { return &garbageStore{RWMutex: new(sync.RWMutex)} }


@@ -1,294 +0,0 @@
package replication
import (
"context"
"sync"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
"github.com/nspcc-dev/neofs-node/pkg/util/rand"
"github.com/nspcc-dev/netmap"
"github.com/pkg/errors"
)
type (
replicationScheduler struct {
cac ContainerActualityChecker
ls localstore.Iterator
}
// SchedulerParams groups the parameters of scheduler constructor.
SchedulerParams struct {
ContainerActualityChecker
localstore.Iterator
}
objectPool struct {
mu *sync.Mutex
tasks []Address
}
multiSolver struct {
as AddressStore
pl placement.Component
}
// MultiSolverParams groups the parameters of multi solver constructor.
MultiSolverParams struct {
AddressStore
Placement placement.Component
}
)
const (
objectPoolInstanceFailMsg = "could not create object pool"
multiSolverInstanceFailMsg = "could not create multi solver"
replicationSchedulerEntity = "replication scheduler"
)
var (
errPoolExhausted = errors.New("object pool is exhausted")
errEmptyLister = errors.New("empty local objects lister")
errEmptyContainerActual = errors.New("empty container actuality checker")
errEmptyAddressStore = errors.New("empty address store")
errEmptyPlacement = errors.New("empty placement")
)
// NewObjectPool is an object pool constructor.
func NewObjectPool() ObjectPool {
return &objectPool{mu: new(sync.Mutex)}
}
// NewReplicationScheduler is a replication scheduler constructor.
func NewReplicationScheduler(p SchedulerParams) (Scheduler, error) {
switch {
case p.ContainerActualityChecker == nil:
return nil, errors.Wrap(errEmptyContainerActual, objectPoolInstanceFailMsg)
case p.Iterator == nil:
return nil, errors.Wrap(errEmptyLister, objectPoolInstanceFailMsg)
}
return &replicationScheduler{
cac: p.ContainerActualityChecker,
ls: p.Iterator,
}, nil
}
// NewMultiSolver is a multi solver constructor.
func NewMultiSolver(p MultiSolverParams) (MultiSolver, error) {
switch {
case p.Placement == nil:
return nil, errors.Wrap(errEmptyPlacement, multiSolverInstanceFailMsg)
case p.AddressStore == nil:
return nil, errors.Wrap(errEmptyAddressStore, multiSolverInstanceFailMsg)
}
return &multiSolver{
as: p.AddressStore,
pl: p.Placement,
}, nil
}
func (s *objectPool) Update(pool []Address) {
s.mu.Lock()
defer s.mu.Unlock()
s.tasks = pool
}
func (s *objectPool) Undone() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.tasks)
}
func (s *objectPool) Pop() (Address, error) {
s.mu.Lock()
defer s.mu.Unlock()
if len(s.tasks) == 0 {
return Address{}, errPoolExhausted
}
head := s.tasks[0]
s.tasks = s.tasks[1:]
return head, nil
}
func (s *replicationScheduler) SelectForReplication(limit int) ([]Address, error) {
// Attention! This routine might be inefficient with a big number of objects
// and containers. Consider using fast traversal and filtering algorithms
// with a sieve of Bloom filters.
migration := make([]Address, 0, limit)
replication := make([]Address, 0)
ctx := context.Background()
if err := s.ls.Iterate(nil, func(meta *localstore.ObjectMeta) bool {
if s.cac.Actual(ctx, meta.Object.SystemHeader.CID) {
replication = append(replication, *meta.Object.Address())
} else {
migration = append(migration, *meta.Object.Address())
}
return len(migration) >= limit
}); err != nil {
return nil, err
}
lnM := len(migration)
lnR := len(replication)
edge := 0
// I considered using rand.Perm() and appending elements in a `for` loop,
// but shuffling seems efficient even when `limit-lnM`
// is 1000 times smaller than `lnR`. This can be discussed and changed
// later anyway.
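// Worked example (hypothetical numbers): with limit = 10, suppose iteration
// found 4 addresses whose containers no longer include this node (migration)
// and 50 addresses that still do (replication). Since lnM = 4 < limit, the
// replication slice is shuffled and edge = min(10-4, 50) = 6, so the result
// is the 4 migration addresses followed by 6 randomly chosen replication ones.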
if lnM < limit {
r := rand.New()
r.Shuffle(lnR, func(i, j int) {
replication[i], replication[j] = replication[j], replication[i]
})
edge = min(limit-lnM, lnR)
}
return append(migration, replication[:edge]...), nil
}
func (s *multiSolver) Epoch() uint64 { return s.pl.NetworkState().Epoch }
func (s *multiSolver) SelfAddr() (multiaddr.Multiaddr, error) { return s.as.SelfAddr() }
func (s *multiSolver) ReservationRatio(ctx context.Context, addr Address) (int, error) {
graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID))
if err != nil {
return 0, errors.Wrap(err, "reservation ratio computation failed on placement query")
}
nodes, err := graph.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes())
}).NodeList()
if err != nil {
return 0, errors.Wrap(err, "reservation ratio computation failed on graph node list")
}
return len(nodes), nil
}
func (s *multiSolver) SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error) {
selfAddr, err := s.as.SelfAddr()
if err != nil {
return nil, errors.Wrap(err, "select remote storage nodes failed on get self address")
}
nodes, err := s.selectNodes(ctx, addr, excl...)
if err != nil {
return nil, errors.Wrap(err, "select remote storage nodes failed on get node list")
}
var (
metSelf bool
selfIndex = -1
res = make([]ObjectLocation, 0, len(nodes))
)
for i := range nodes {
if nodes[i].Equal(selfAddr) {
metSelf = true
selfIndex = i
}
res = append(res, ObjectLocation{
Node: nodes[i],
WeightGreater: !metSelf,
})
}
if selfIndex != -1 {
res = append(res[:selfIndex], res[selfIndex+1:]...)
}
return res, nil
}
func (s *multiSolver) selectNodes(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) {
graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID))
if err != nil {
return nil, errors.Wrap(err, "select remote storage nodes failed on placement query")
}
filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { return bucket }
if !addr.ObjectID.Empty() {
filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes())
}
}
return graph.Exclude(excl).Filter(filter).NodeList()
}
func (s *multiSolver) Actual(ctx context.Context, cid CID) bool {
graph, err := s.pl.Query(ctx, placement.ContainerID(cid))
if err != nil {
return false
}
nodes, err := graph.NodeList()
if err != nil {
return false
}
selfAddr, err := s.as.SelfAddr()
if err != nil {
return false
}
for i := range nodes {
if nodes[i].Equal(selfAddr) {
return true
}
}
return false
}
func (s *multiSolver) CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int {
selfAddr, err := s.as.SelfAddr()
if err != nil {
return -1
}
if selfAddr.Equal(node) {
return 0
}
excl := make([]multiaddr.Multiaddr, 0)
for {
nodes, err := s.selectNodes(ctx, addr, excl...)
if err != nil {
return -1
}
for j := range nodes {
if nodes[j].Equal(selfAddr) {
return -1
} else if nodes[j].Equal(node) {
return 1
}
}
excl = append(excl, nodes[0]) // TODO: when it will become relevant to append full nodes slice
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}


@@ -1,154 +0,0 @@
package replication
import (
"context"
"time"
"go.uber.org/zap"
)
type (
// ObjectLocationDetector is an interface of an entity
// that listens for tasks to detect an object's current locations in the network.
ObjectLocationDetector interface {
Process(ctx context.Context) chan<- Address
Subscribe(ch chan<- *ObjectLocationRecord)
}
objectLocationDetector struct {
weightComparator WeightComparator
objectLocator ObjectLocator
reservationRatioReceiver ReservationRatioReceiver
presenceChecker PresenceChecker
log *zap.Logger
taskChanCap int
resultTimeout time.Duration
resultChan chan<- *ObjectLocationRecord
}
// LocationDetectorParams groups the parameters of location detector's constructor.
LocationDetectorParams struct {
WeightComparator
ObjectLocator
ReservationRatioReceiver
PresenceChecker
*zap.Logger
TaskChanCap int
ResultTimeout time.Duration
}
)
const (
defaultLocationDetectorChanCap = 10
defaultLocationDetectorResultTimeout = time.Second
locationDetectorEntity = "object location detector"
)
func (s *objectLocationDetector) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch }
func (s *objectLocationDetector) Process(ctx context.Context) chan<- Address {
ch := make(chan Address, s.taskChanCap)
go s.processRoutine(ctx, ch)
return ch
}
func (s *objectLocationDetector) writeResult(locationRecord *ObjectLocationRecord) {
if s.resultChan == nil {
return
}
select {
case s.resultChan <- locationRecord:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *objectLocationDetector) processRoutine(ctx context.Context, taskChan <-chan Address) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(locationDetectorEntity+ctxDoneMsg, zap.Error(ctx.Err()))
break loop
case addr, ok := <-taskChan:
if !ok {
s.log.Warn(locationDetectorEntity + taskChanClosed)
break loop
} else if has, err := s.presenceChecker.Has(addr); err != nil || !has {
continue loop
}
s.handleTask(ctx, addr)
}
}
close(s.resultChan)
}
func (s *objectLocationDetector) handleTask(ctx context.Context, addr Address) {
var (
err error
log = s.log.With(addressFields(addr)...)
locationRecord = &ObjectLocationRecord{addr, 0, nil}
)
if locationRecord.ReservationRatio, err = s.reservationRatioReceiver.ReservationRatio(ctx, addr); err != nil {
log.Error("reservation ratio computation failure", zap.Error(err))
return
}
nodes, err := s.objectLocator.LocateObject(ctx, addr)
if err != nil {
log.Error("locate object failure", zap.Error(err))
return
}
for i := range nodes {
locationRecord.Locations = append(locationRecord.Locations, ObjectLocation{
Node: nodes[i],
WeightGreater: s.weightComparator.CompareWeight(ctx, addr, nodes[i]) == 1,
})
}
log.Debug("current location record created",
zap.Int("reservation ratio", locationRecord.ReservationRatio),
zap.Any("storage nodes exclude self", locationRecord.Locations))
s.writeResult(locationRecord)
}
// NewLocationDetector is an object location detector's constructor.
func NewLocationDetector(p *LocationDetectorParams) (ObjectLocationDetector, error) {
switch {
case p.PresenceChecker == nil:
return nil, instanceError(locationDetectorEntity, presenceCheckerPart)
case p.ObjectLocator == nil:
return nil, instanceError(locationDetectorEntity, objectLocatorPart)
case p.ReservationRatioReceiver == nil:
return nil, instanceError(locationDetectorEntity, reservationRatioReceiverPart)
case p.Logger == nil:
return nil, instanceError(locationDetectorEntity, loggerPart)
case p.WeightComparator == nil:
return nil, instanceError(locationDetectorEntity, weightComparatorPart)
}
if p.TaskChanCap <= 0 {
p.TaskChanCap = defaultLocationDetectorChanCap
}
if p.ResultTimeout <= 0 {
p.ResultTimeout = defaultLocationDetectorResultTimeout
}
return &objectLocationDetector{
weightComparator: p.WeightComparator,
objectLocator: p.ObjectLocator,
reservationRatioReceiver: p.ReservationRatioReceiver,
presenceChecker: p.PresenceChecker,
log: p.Logger,
taskChanCap: p.TaskChanCap,
resultTimeout: p.ResultTimeout,
resultChan: nil,
}, nil
}


@@ -1,347 +0,0 @@
package replication
import (
"context"
"fmt"
"time"
"go.uber.org/zap"
)
type (
// Manager is an interface of the object manager.
Manager interface {
Process(ctx context.Context)
HandleEpoch(ctx context.Context, epoch uint64)
}
manager struct {
objectPool ObjectPool
managerTimeout time.Duration
objectVerifier ObjectVerifier
log *zap.Logger
locationDetector ObjectLocationDetector
storageValidator StorageValidator
replicator ObjectReplicator
restorer ObjectRestorer
placementHonorer PlacementHonorer
// internal task channels
detectLocationTaskChan chan<- Address
restoreTaskChan chan<- Address
pushTaskTimeout time.Duration
// internal result channels
replicationResultChan <-chan *ReplicateResult
restoreResultChan <-chan Address
garbageChanCap int
replicateResultChanCap int
restoreResultChanCap int
garbageChan <-chan Address
garbageStore *garbageStore
epochCh chan uint64
scheduler Scheduler
poolSize int
poolExpansionRate float64
}
// ManagerParams groups the parameters of object manager's constructor.
ManagerParams struct {
Interval time.Duration
PushTaskTimeout time.Duration
PlacementHonorerEnabled bool
ReplicateTaskChanCap int
RestoreTaskChanCap int
GarbageChanCap int
InitPoolSize int
ExpansionRate float64
ObjectPool
ObjectVerifier
PlacementHonorer
ObjectLocationDetector
StorageValidator
ObjectReplicator
ObjectRestorer
*zap.Logger
Scheduler
}
)
const (
managerEntity = "replication manager"
redundantCopiesBeagleName = "BEAGLE_REDUNDANT_COPIES"
defaultInterval = 3 * time.Second
defaultPushTaskTimeout = time.Second
defaultGarbageChanCap = 10
defaultReplicateResultChanCap = 10
defaultRestoreResultChanCap = 10
)
func (s *manager) Name() string { return redundantCopiesBeagleName }
func (s *manager) HandleEpoch(ctx context.Context, epoch uint64) {
select {
case s.epochCh <- epoch:
case <-ctx.Done():
return
case <-time.After(s.managerTimeout):
// this timeout must never happen
// if timeout happens in runtime, then something is definitely wrong!
s.log.Warn("replication scheduler is busy")
}
}
func (s *manager) Process(ctx context.Context) {
// starting object restorer
// bind manager to push restore tasks to restorer
s.restoreTaskChan = s.restorer.Process(ctx)
// bind manager to listen object restorer results
restoreResultChan := make(chan Address, s.restoreResultChanCap)
s.restoreResultChan = restoreResultChan
s.restorer.Subscribe(restoreResultChan)
// starting location detector
// bind manager to push locate tasks to location detector
s.detectLocationTaskChan = s.locationDetector.Process(ctx)
locationsHandlerStartFn := s.storageValidator.Process
if s.placementHonorer != nil {
locationsHandlerStartFn = s.placementHonorer.Process
// starting storage validator
// bind placement honorer to push validate tasks to storage validator
s.placementHonorer.Subscribe(s.storageValidator.Process(ctx))
}
// starting location handler component
// bind location detector to push tasks to location handler component
s.locationDetector.Subscribe(locationsHandlerStartFn(ctx))
// bind manager to listen object replicator results
replicateResultChan := make(chan *ReplicateResult, s.replicateResultChanCap)
s.replicationResultChan = replicateResultChan
s.replicator.Subscribe(replicateResultChan)
// starting replicator
// bind storage validator to push replicate tasks to replicator
s.storageValidator.SubscribeReplication(s.replicator.Process(ctx))
garbageChan := make(chan Address, s.garbageChanCap)
s.garbageChan = garbageChan
s.storageValidator.SubscribeGarbage(garbageChan)
go s.taskRoutine(ctx)
go s.resultRoutine(ctx)
s.processRoutine(ctx)
}
func resultLog(s1, s2 string) string {
return fmt.Sprintf(managerEntity+" %s process finish: %s", s1, s2)
}
func (s *manager) writeDetectLocationTask(addr Address) {
if s.detectLocationTaskChan == nil {
return
}
select {
case s.detectLocationTaskChan <- addr:
case <-time.After(s.pushTaskTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *manager) writeRestoreTask(addr Address) {
if s.restoreTaskChan == nil {
return
}
select {
case s.restoreTaskChan <- addr:
case <-time.After(s.pushTaskTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *manager) resultRoutine(ctx context.Context) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(resultLog("result", ctxDoneMsg), zap.Error(ctx.Err()))
break loop
case addr, ok := <-s.restoreResultChan:
if !ok {
s.log.Warn(resultLog("result", "restorer result channel closed"))
break loop
}
s.log.Info("object successfully restored", addressFields(addr)...)
case res, ok := <-s.replicationResultChan:
if !ok {
s.log.Warn(resultLog("result", "replicator result channel closed"))
break loop
} else if len(res.NewStorages) > 0 {
s.log.Info("object successfully replicated",
append(addressFields(res.Address), zap.Any("new storages", res.NewStorages))...)
}
case addr, ok := <-s.garbageChan:
if !ok {
s.log.Warn(resultLog("result", "garbage channel closed"))
break loop
}
s.garbageStore.put(addr)
}
}
}
func (s *manager) taskRoutine(ctx context.Context) {
loop:
for {
if task, err := s.objectPool.Pop(); err == nil {
select {
case <-ctx.Done():
s.log.Warn(resultLog("task", ctxDoneMsg), zap.Error(ctx.Err()))
break loop
default:
s.distributeTask(ctx, task)
}
} else {
// if object pool is empty, check it again after a while
time.Sleep(s.managerTimeout)
}
}
close(s.restoreTaskChan)
close(s.detectLocationTaskChan)
}
func (s *manager) processRoutine(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case epoch := <-s.epochCh:
var delta int
// undone - amount of objects we couldn't process in last epoch
undone := s.objectPool.Undone()
if undone > 0 {
// if there are unprocessed objects, then lower your estimation
delta = -undone
} else {
// otherwise try to expand
delta = int(float64(s.poolSize) * s.poolExpansionRate)
}
tasks, err := s.scheduler.SelectForReplication(s.poolSize + delta)
if err != nil {
s.log.Warn("can't select objects for replication", zap.Error(err))
}
// if there are NOT enough objects to fill the pool, do not change it
// otherwise expand or shrink it with the delta value
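// Worked example (hypothetical numbers): with poolSize = 100 and
// poolExpansionRate = 0.1, a fully drained pool (undone == 0) requests
// 100 + 10 = 110 tasks and grows to 110 only if the scheduler returns at
// least that many; with undone = 20, delta = -20, the request is for 80
// tasks and the pool shrinks to 80 only if at least 80 tasks come back.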
if len(tasks) >= s.poolSize+delta {
s.poolSize += delta
}
s.objectPool.Update(tasks)
s.log.Info("replication schedule updated",
zap.Int("unprocessed_tasks", undone),
zap.Int("next_tasks", len(tasks)),
zap.Int("pool_size", s.poolSize),
zap.Uint64("new_epoch", epoch))
}
}
}
// distributeTask verifies the object stored under the given address.
// If verification fails, a restore task is planned;
// otherwise a location detection task is planned.
func (s *manager) distributeTask(ctx context.Context, addr Address) {
if !s.objectVerifier.Verify(ctx, &ObjectVerificationParams{Address: addr}) {
s.writeRestoreTask(addr)
return
}
s.writeDetectLocationTask(addr)
}
// NewManager is an object manager's constructor.
func NewManager(p ManagerParams) (Manager, error) {
switch {
case p.ObjectPool == nil:
return nil, instanceError(managerEntity, objectPoolPart)
case p.ObjectVerifier == nil:
return nil, instanceError(managerEntity, objectVerifierPart)
case p.Logger == nil:
return nil, instanceError(managerEntity, loggerPart)
case p.ObjectLocationDetector == nil:
return nil, instanceError(managerEntity, locationDetectorEntity)
case p.StorageValidator == nil:
return nil, instanceError(managerEntity, storageValidatorEntity)
case p.ObjectReplicator == nil:
return nil, instanceError(managerEntity, objectReplicatorEntity)
case p.ObjectRestorer == nil:
return nil, instanceError(managerEntity, objectRestorerEntity)
case p.PlacementHonorer == nil && p.PlacementHonorerEnabled:
return nil, instanceError(managerEntity, placementHonorerEntity)
case p.Scheduler == nil:
return nil, instanceError(managerEntity, replicationSchedulerEntity)
}
if p.Interval <= 0 {
p.Interval = defaultInterval
}
if p.PushTaskTimeout <= 0 {
p.PushTaskTimeout = defaultPushTaskTimeout
}
if p.GarbageChanCap <= 0 {
p.GarbageChanCap = defaultGarbageChanCap
}
if p.ReplicateTaskChanCap <= 0 {
p.ReplicateTaskChanCap = defaultReplicateResultChanCap
}
if p.RestoreTaskChanCap <= 0 {
p.RestoreTaskChanCap = defaultRestoreResultChanCap
}
if !p.PlacementHonorerEnabled {
p.PlacementHonorer = nil
}
return &manager{
objectPool: p.ObjectPool,
managerTimeout: p.Interval,
objectVerifier: p.ObjectVerifier,
log: p.Logger,
locationDetector: p.ObjectLocationDetector,
storageValidator: p.StorageValidator,
replicator: p.ObjectReplicator,
restorer: p.ObjectRestorer,
placementHonorer: p.PlacementHonorer,
pushTaskTimeout: p.PushTaskTimeout,
garbageChanCap: p.GarbageChanCap,
replicateResultChanCap: p.ReplicateTaskChanCap,
restoreResultChanCap: p.RestoreTaskChanCap,
garbageStore: newGarbageStore(),
epochCh: make(chan uint64),
scheduler: p.Scheduler,
poolSize: p.InitPoolSize,
poolExpansionRate: p.ExpansionRate,
}, nil
}


@@ -1,188 +0,0 @@
package replication
import (
"context"
"time"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
)
type (
// ObjectReplicator is an interface of an entity
// that listens for object replication tasks.
// The result includes the list of new object storages.
ObjectReplicator interface {
Process(ctx context.Context) chan<- *ReplicateTask
Subscribe(ch chan<- *ReplicateResult)
}
objectReplicator struct {
objectReceptacle ObjectReceptacle
remoteStorageSelector RemoteStorageSelector
objectSource ObjectSource
presenceChecker PresenceChecker
log *zap.Logger
taskChanCap int
resultTimeout time.Duration
resultChan chan<- *ReplicateResult
}
// ObjectReplicatorParams groups the parameters of replicator's constructor.
ObjectReplicatorParams struct {
RemoteStorageSelector
ObjectSource
ObjectReceptacle
PresenceChecker
*zap.Logger
TaskChanCap int
ResultTimeout time.Duration
}
)
const (
defaultReplicatorChanCap = 10
defaultReplicatorResultTimeout = time.Second
objectReplicatorEntity = "object replicator"
)
func (s *objectReplicator) Subscribe(ch chan<- *ReplicateResult) { s.resultChan = ch }
func (s *objectReplicator) Process(ctx context.Context) chan<- *ReplicateTask {
ch := make(chan *ReplicateTask, s.taskChanCap)
go s.processRoutine(ctx, ch)
return ch
}
func (s *objectReplicator) writeResult(replicateResult *ReplicateResult) {
if s.resultChan == nil {
return
}
select {
case s.resultChan <- replicateResult:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *objectReplicator) processRoutine(ctx context.Context, taskChan <-chan *ReplicateTask) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(objectReplicatorEntity+" process finish: context completed",
zap.Error(ctx.Err()))
break loop
case replicateTask, ok := <-taskChan:
if !ok {
s.log.Warn(objectReplicatorEntity + " process finish: task channel closed")
break loop
} else if has, err := s.presenceChecker.Has(replicateTask.Address); err != nil || !has {
continue loop
}
s.handleTask(ctx, replicateTask)
}
}
close(s.resultChan)
}
func (s *objectReplicator) handleTask(ctx context.Context, task *ReplicateTask) {
obj, err := s.objectSource.Get(ctx, task.Address)
if err != nil {
s.log.Warn("get object from storage failure", zap.Error(err))
return
}
res := &ReplicateResult{
ReplicateTask: task,
NewStorages: make([]multiaddr.Multiaddr, 0, task.Shortage),
}
for len(res.NewStorages) < task.Shortage {
nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, task.Address, task.ExcludeNodes...)
if err != nil {
break
}
for i := 0; i < len(nodesInfo); i++ {
if contains(res.NewStorages, nodesInfo[i].Node) {
nodesInfo = append(nodesInfo[:i], nodesInfo[i+1:]...)
i--
continue
}
}
if len(nodesInfo) > task.Shortage {
nodesInfo = nodesInfo[:task.Shortage]
}
if len(nodesInfo) == 0 {
break
}
if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{
Object: obj,
Nodes: nodesInfo,
Handler: func(location ObjectLocation, success bool) {
if success {
res.NewStorages = append(res.NewStorages, location.Node)
} else {
task.ExcludeNodes = append(task.ExcludeNodes, location.Node)
}
},
}); err != nil {
s.log.Warn("replicate object failure", zap.Error(err))
break
}
}
s.writeResult(res)
}
func contains(list []multiaddr.Multiaddr, item multiaddr.Multiaddr) bool {
for i := range list {
if list[i].Equal(item) {
return true
}
}
return false
}
// NewReplicator is an object replicator's constructor.
func NewReplicator(p ObjectReplicatorParams) (ObjectReplicator, error) {
switch {
case p.ObjectReceptacle == nil:
return nil, instanceError(objectReplicatorEntity, objectReceptaclePart)
case p.ObjectSource == nil:
return nil, instanceError(objectReplicatorEntity, objectSourcePart)
case p.RemoteStorageSelector == nil:
return nil, instanceError(objectReplicatorEntity, remoteStorageSelectorPart)
case p.PresenceChecker == nil:
return nil, instanceError(objectReplicatorEntity, presenceCheckerPart)
case p.Logger == nil:
return nil, instanceError(objectReplicatorEntity, loggerPart)
}
if p.TaskChanCap <= 0 {
p.TaskChanCap = defaultReplicatorChanCap
}
if p.ResultTimeout <= 0 {
p.ResultTimeout = defaultReplicatorResultTimeout
}
return &objectReplicator{
objectReceptacle: p.ObjectReceptacle,
remoteStorageSelector: p.RemoteStorageSelector,
objectSource: p.ObjectSource,
presenceChecker: p.PresenceChecker,
log: p.Logger,
taskChanCap: p.TaskChanCap,
resultTimeout: p.ResultTimeout,
}, nil
}
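The writeResult pattern above (shared by the restorer, placement honorer and storage validator below) delivers a result to an optional subscriber without blocking the worker for longer than the configured timeout. A minimal generic sketch of that pattern, assuming a plain string result type:
package main
import (
	"fmt"
	"time"
)
// deliver sends res to out, dropping it if the subscriber does not read it
// within timeout; a nil channel means nobody subscribed.
func deliver(out chan<- string, res string, timeout time.Duration) {
	if out == nil {
		return
	}
	select {
	case out <- res:
	case <-time.After(timeout):
		fmt.Println("write result timeout")
	}
}
func main() {
	ch := make(chan string, 1)
	deliver(ch, "replicated", time.Second)
	fmt.Println(<-ch)                    // replicated
	deliver(nil, "ignored", time.Second) // no subscriber: no-op
}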

View file

@ -1,173 +0,0 @@
package replication
import (
"context"
"time"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore"
"go.uber.org/zap"
)
type (
// ObjectRestorer is an interface of entity
// that listens for tasks to restore an object by address.
// Restorer doesn't recheck if the object is actually corrupted.
// Restorer writes result to subscriber only if restoration was successful.
ObjectRestorer interface {
Process(ctx context.Context) chan<- Address
Subscribe(ch chan<- Address)
}
objectRestorer struct {
objectVerifier ObjectVerifier
remoteStorageSelector RemoteStorageSelector
objectReceptacle ObjectReceptacle
epochReceiver EpochReceiver
presenceChecker PresenceChecker
log *zap.Logger
taskChanCap int
resultTimeout time.Duration
resultChan chan<- Address
}
// ObjectRestorerParams groups the parameters of object restorer's constructor.
ObjectRestorerParams struct {
ObjectVerifier
ObjectReceptacle
EpochReceiver
RemoteStorageSelector
PresenceChecker
*zap.Logger
TaskChanCap int
ResultTimeout time.Duration
}
)
const (
defaultRestorerChanCap = 10
defaultRestorerResultTimeout = time.Second
objectRestorerEntity = "object restorer"
)
func (s *objectRestorer) Subscribe(ch chan<- Address) { s.resultChan = ch }
func (s *objectRestorer) Process(ctx context.Context) chan<- Address {
ch := make(chan Address, s.taskChanCap)
go s.processRoutine(ctx, ch)
return ch
}
func (s *objectRestorer) writeResult(refInfo Address) {
if s.resultChan == nil {
return
}
select {
case s.resultChan <- refInfo:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *objectRestorer) processRoutine(ctx context.Context, taskChan <-chan Address) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(objectRestorerEntity+ctxDoneMsg, zap.Error(ctx.Err()))
break loop
case addr, ok := <-taskChan:
if !ok {
s.log.Warn(objectRestorerEntity + taskChanClosed)
break loop
} else if has, err := s.presenceChecker.Has(addr); err != nil || !has {
continue loop
}
s.handleTask(ctx, addr)
}
}
close(s.resultChan)
}
func (s *objectRestorer) handleTask(ctx context.Context, addr Address) {
var (
receivedObj *Object
exclNodes = make([]multiaddr.Multiaddr, 0)
)
loop:
for {
nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, addr, exclNodes...)
if err != nil {
break
}
for i := range nodesInfo {
info := nodesInfo[i]
if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{
Address: addr,
Node: nodesInfo[i].Node,
Handler: func(valid bool, obj *Object) {
if valid {
receivedObj = obj
} else {
exclNodes = append(exclNodes, info.Node)
}
},
LocalInvalid: true,
}) {
break loop
}
}
}
if err := s.objectReceptacle.Put(
context.WithValue(ctx, localstore.StoreEpochValue, s.epochReceiver.Epoch()),
ObjectStoreParams{Object: receivedObj},
); err != nil {
s.log.Warn("put object to local storage failure", append(addressFields(addr), zap.Error(err))...)
return
}
s.writeResult(addr)
}
// NewObjectRestorer is an object restorer's constructor.
func NewObjectRestorer(p *ObjectRestorerParams) (ObjectRestorer, error) {
switch {
case p.Logger == nil:
return nil, instanceError(objectRestorerEntity, loggerPart)
case p.ObjectVerifier == nil:
return nil, instanceError(objectRestorerEntity, objectVerifierPart)
case p.ObjectReceptacle == nil:
return nil, instanceError(objectRestorerEntity, objectReceptaclePart)
case p.RemoteStorageSelector == nil:
return nil, instanceError(objectRestorerEntity, remoteStorageSelectorPart)
case p.EpochReceiver == nil:
return nil, instanceError(objectRestorerEntity, epochReceiverPart)
case p.PresenceChecker == nil:
return nil, instanceError(objectRestorerEntity, presenceCheckerPart)
}
if p.TaskChanCap <= 0 {
p.TaskChanCap = defaultRestorerChanCap
}
if p.ResultTimeout <= 0 {
p.ResultTimeout = defaultRestorerResultTimeout
}
return &objectRestorer{
objectVerifier: p.ObjectVerifier,
remoteStorageSelector: p.RemoteStorageSelector,
objectReceptacle: p.ObjectReceptacle,
epochReceiver: p.EpochReceiver,
presenceChecker: p.PresenceChecker,
log: p.Logger,
taskChanCap: p.TaskChanCap,
resultTimeout: p.ResultTimeout,
}, nil
}
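A simplified sketch of the restore loop above: candidate sources are requested in rounds, sources that return an invalid copy are excluded from the next selection, and probing stops at the first valid copy. selectSources and verify stand in for RemoteStorageSelector and ObjectVerifier and are assumptions of this sketch.
package main
import "fmt"
// restoreFrom probes candidate sources until one yields a valid copy,
// excluding every source that failed verification from later rounds.
func restoreFrom(selectSources func(exclude []string) []string, verify func(string) bool) (string, bool) {
	var excluded []string
	for {
		sources := selectSources(excluded)
		if len(sources) == 0 {
			return "", false
		}
		for _, src := range sources {
			if verify(src) {
				return src, true
			}
			excluded = append(excluded, src)
		}
	}
}
func main() {
	all := []string{"/ip4/10.0.0.1/tcp/8080", "/ip4/10.0.0.2/tcp/8080", "/ip4/10.0.0.3/tcp/8080"}
	selectSources := func(exclude []string) []string {
		var out []string
		for _, a := range all {
			skip := false
			for _, e := range exclude {
				if a == e {
					skip = true
				}
			}
			if !skip {
				out = append(out, a)
			}
		}
		return out
	}
	// only the third node holds an intact copy
	verify := func(src string) bool { return src == all[2] }
	fmt.Println(restoreFrom(selectSources, verify))
}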

View file

@ -1,198 +0,0 @@
package replication
import (
"context"
"time"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
)
type (
// PlacementHonorer is an interface of entity
// that listens for tasks to fulfill the container placement rule for a particular object.
PlacementHonorer interface {
Process(ctx context.Context) chan<- *ObjectLocationRecord
Subscribe(ch chan<- *ObjectLocationRecord)
}
placementHonorer struct {
objectSource ObjectSource
objectReceptacle ObjectReceptacle
remoteStorageSelector RemoteStorageSelector
presenceChecker PresenceChecker
log *zap.Logger
taskChanCap int
resultTimeout time.Duration
resultChan chan<- *ObjectLocationRecord
}
// PlacementHonorerParams groups the parameters of placement honorer's constructor.
PlacementHonorerParams struct {
ObjectSource
ObjectReceptacle
RemoteStorageSelector
PresenceChecker
*zap.Logger
TaskChanCap int
ResultTimeout time.Duration
}
)
const (
defaultPlacementHonorerChanCap = 10
defaultPlacementHonorerResultTimeout = time.Second
placementHonorerEntity = "placement honorer"
)
func (s *placementHonorer) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch }
func (s *placementHonorer) Process(ctx context.Context) chan<- *ObjectLocationRecord {
ch := make(chan *ObjectLocationRecord, s.taskChanCap)
go s.processRoutine(ctx, ch)
return ch
}
func (s *placementHonorer) writeResult(locationRecord *ObjectLocationRecord) {
if s.resultChan == nil {
return
}
select {
case s.resultChan <- locationRecord:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *placementHonorer) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(placementHonorerEntity+ctxDoneMsg, zap.Error(ctx.Err()))
break loop
case locationRecord, ok := <-taskChan:
if !ok {
s.log.Warn(placementHonorerEntity + taskChanClosed)
break loop
} else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has {
continue loop
}
s.handleTask(ctx, locationRecord)
}
}
close(s.resultChan)
}
func (s *placementHonorer) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) {
defer s.writeResult(locationRecord)
var (
err error
log = s.log.With(addressFields(locationRecord.Address)...)
copiesShortage = locationRecord.ReservationRatio - 1
exclNodes = make([]multiaddr.Multiaddr, 0)
procLocations []ObjectLocation
)
obj, err := s.objectSource.Get(ctx, locationRecord.Address)
if err != nil {
log.Warn("get object failure", zap.Error(err))
return
}
tombstone := obj.IsTombstone()
for copiesShortage > 0 {
nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, locationRecord.Address, exclNodes...)
if err != nil {
log.Warn("select remote storage nodes failure",
zap.Stringer("object", locationRecord.Address),
zap.Any("exclude nodes", exclNodes),
zap.String("error", err.Error()),
)
return
}
if !tombstone {
procLocations = make([]ObjectLocation, 0, len(nodesInfo))
loop:
for i := range nodesInfo {
for j := range locationRecord.Locations {
if locationRecord.Locations[j].Node.Equal(nodesInfo[i].Node) {
copiesShortage--
continue loop
}
}
procLocations = append(procLocations, nodesInfo[i])
}
if len(procLocations) == 0 {
return
}
} else {
procLocations = nodesInfo
}
if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{
Object: obj,
Nodes: procLocations,
Handler: func(loc ObjectLocation, success bool) {
if success {
copiesShortage--
if tombstone {
for i := range locationRecord.Locations {
if locationRecord.Locations[i].Node.Equal(loc.Node) {
return
}
}
}
locationRecord.Locations = append(locationRecord.Locations, loc)
} else {
exclNodes = append(exclNodes, loc.Node)
}
},
}); err != nil {
s.log.Warn("put object to new nodes failure", zap.Error(err))
return
}
}
}
// NewPlacementHonorer is a placement honorer's constructor.
func NewPlacementHonorer(p PlacementHonorerParams) (PlacementHonorer, error) {
switch {
case p.RemoteStorageSelector == nil:
return nil, instanceError(placementHonorerEntity, remoteStorageSelectorPart)
case p.ObjectSource == nil:
return nil, instanceError(placementHonorerEntity, objectSourcePart)
case p.ObjectReceptacle == nil:
return nil, instanceError(placementHonorerEntity, objectReceptaclePart)
case p.Logger == nil:
return nil, instanceError(placementHonorerEntity, loggerPart)
case p.PresenceChecker == nil:
return nil, instanceError(placementHonorerEntity, presenceCheckerPart)
}
if p.TaskChanCap <= 0 {
p.TaskChanCap = defaultPlacementHonorerChanCap
}
if p.ResultTimeout <= 0 {
p.ResultTimeout = defaultPlacementHonorerResultTimeout
}
return &placementHonorer{
objectSource: p.ObjectSource,
objectReceptacle: p.ObjectReceptacle,
remoteStorageSelector: p.RemoteStorageSelector,
presenceChecker: p.PresenceChecker,
log: p.Logger,
taskChanCap: p.TaskChanCap,
resultTimeout: p.ResultTimeout,
}, nil
}
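A small sketch of the shortage accounting in handleTask above: selected nodes that already appear among the known locations reduce the copies shortage and are skipped, while the remaining nodes become the put targets for this round. The names below are illustrative, not the original types.
package main
import "fmt"
// splitCandidates separates freshly selected nodes into new put targets and
// decrements the shortage for nodes that already hold a copy.
func splitCandidates(candidates, known []string, shortage int) (targets []string, left int) {
	left = shortage
	for _, c := range candidates {
		already := false
		for _, k := range known {
			if c == k {
				already = true
				break
			}
		}
		if already {
			left--
			continue
		}
		targets = append(targets, c)
	}
	return targets, left
}
func main() {
	candidates := []string{"/ip4/10.0.0.1/tcp/8080", "/ip4/10.0.0.2/tcp/8080"}
	known := []string{"/ip4/10.0.0.1/tcp/8080"}
	fmt.Println(splitCandidates(candidates, known, 2)) // [/ip4/10.0.0.2/tcp/8080] 1
}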

View file

@ -1,80 +0,0 @@
package storage
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/query"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-node/pkg/core/object"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
locator struct {
executor transport.SelectiveContainerExecutor
log *zap.Logger
}
// LocatorParams groups the parameters of ObjectLocator constructor.
LocatorParams struct {
SelectiveContainerExecutor transport.SelectiveContainerExecutor
Logger *zap.Logger
}
)
const locatorInstanceFailMsg = "could not create object locator"
var errEmptyObjectsContainerHandler = errors.New("empty container objects container handler")
func (s *locator) LocateObject(ctx context.Context, addr refs.Address) (res []multiaddr.Multiaddr, err error) {
queryBytes, err := (&query.Query{
Filters: []query.Filter{
{
Type: query.Filter_Exact,
Name: transport.KeyID,
Value: addr.ObjectID.String(),
},
},
}).Marshal()
if err != nil {
return nil, errors.Wrap(err, "locate object failed on query marshal")
}
err = s.executor.Search(ctx, &transport.SearchParams{
SelectiveParams: transport.SelectiveParams{
CID: addr.CID,
TTL: service.NonForwardingTTL,
IDList: make([]object.ID, 1),
},
SearchCID: addr.CID,
SearchQuery: queryBytes,
Handler: func(node multiaddr.Multiaddr, addrList []refs.Address) {
if len(addrList) > 0 {
res = append(res, node)
}
},
})
return
}
// NewObjectLocator constructs replication.ObjectLocator from SelectiveContainerExecutor.
func NewObjectLocator(p LocatorParams) (replication.ObjectLocator, error) {
switch {
case p.SelectiveContainerExecutor == nil:
return nil, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg)
case p.Logger == nil:
return nil, errors.Wrap(logger.ErrNilLogger, locatorInstanceFailMsg)
}
return &locator{
executor: p.SelectiveContainerExecutor,
log: p.Logger,
}, nil
}

View file

@ -1,40 +0,0 @@
package storage
import (
"testing"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
type testExecutor struct {
transport.SelectiveContainerExecutor
}
func TestNewObjectLocator(t *testing.T) {
validParams := LocatorParams{
SelectiveContainerExecutor: new(testExecutor),
Logger: zap.L(),
}
t.Run("valid params", func(t *testing.T) {
s, err := NewObjectLocator(validParams)
require.NoError(t, err)
require.NotNil(t, s)
})
t.Run("empty logger", func(t *testing.T) {
p := validParams
p.Logger = nil
_, err := NewObjectLocator(p)
require.EqualError(t, err, errors.Wrap(logger.ErrNilLogger, locatorInstanceFailMsg).Error())
})
t.Run("empty container handler", func(t *testing.T) {
p := validParams
p.SelectiveContainerExecutor = nil
_, err := NewObjectLocator(p)
require.EqualError(t, err, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg).Error())
})
}

View file

@ -1,133 +0,0 @@
package storage
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
// ObjectStorage is an interface of encapsulated ObjectReceptacle and ObjectSource pair.
ObjectStorage interface {
replication.ObjectReceptacle
replication.ObjectSource
}
objectStorage struct {
ls localstore.Localstore
executor transport.SelectiveContainerExecutor
log *zap.Logger
}
// ObjectStorageParams groups the parameters of ObjectStorage constructor.
ObjectStorageParams struct {
Localstore localstore.Localstore
SelectiveContainerExecutor transport.SelectiveContainerExecutor
Logger *zap.Logger
}
)
const objectSourceInstanceFailMsg = "could not create object source"
var errNilObject = errors.New("object is nil")
var errCouldNotGetObject = errors.New("could not get object from any node")
func (s *objectStorage) Put(ctx context.Context, params replication.ObjectStoreParams) error {
if params.Object == nil {
return errNilObject
} else if len(params.Nodes) == 0 {
if s.ls == nil {
return errEmptyLocalstore
}
return s.ls.Put(ctx, params.Object)
}
nodes := make([]multiaddr.Multiaddr, len(params.Nodes))
for i := range params.Nodes {
nodes[i] = params.Nodes[i].Node
}
return s.executor.Put(ctx, &transport.PutParams{
SelectiveParams: transport.SelectiveParams{
CID: params.Object.SystemHeader.CID,
Nodes: nodes,
TTL: service.NonForwardingTTL,
IDList: make([]object.ID, 1),
},
Object: params.Object,
Handler: func(node multiaddr.Multiaddr, valid bool) {
if params.Handler == nil {
return
}
for i := range params.Nodes {
if params.Nodes[i].Node.Equal(node) {
params.Handler(params.Nodes[i], valid)
return
}
}
},
})
}
func (s *objectStorage) Get(ctx context.Context, addr object.Address) (res *object.Object, err error) {
if s.ls != nil {
if has, err := s.ls.Has(addr); err == nil && has {
if res, err = s.ls.Get(addr); err == nil {
return res, err
}
}
}
if err = s.executor.Get(ctx, &transport.GetParams{
SelectiveParams: transport.SelectiveParams{
CID: addr.CID,
TTL: service.NonForwardingTTL,
IDList: []object.ID{addr.ObjectID},
Breaker: func(refs.Address) (cFlag transport.ProgressControlFlag) {
if res != nil {
cFlag = transport.BreakProgress
}
return
},
},
Handler: func(node multiaddr.Multiaddr, obj *object.Object) { res = obj },
}); err != nil {
return
} else if res == nil {
return nil, errCouldNotGetObject
}
return
}
// NewObjectStorage encapsulates Localstore and SelectiveContainerExecutor
// and returns ObjectStorage interface.
func NewObjectStorage(p ObjectStorageParams) (ObjectStorage, error) {
if p.Logger == nil {
return nil, errors.Wrap(logger.ErrNilLogger, objectSourceInstanceFailMsg)
}
if p.Localstore == nil {
p.Logger.Warn("local storage not provided")
}
if p.SelectiveContainerExecutor == nil {
p.Logger.Warn("object container handler not provided")
}
return &objectStorage{
ls: p.Localstore,
executor: p.SelectiveContainerExecutor,
log: p.Logger,
}, nil
}
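A minimal sketch of the read path in objectStorage.Get above: the local store is consulted first and the network executor is used only as a fallback. local and remote are assumed callbacks standing in for Localstore and SelectiveContainerExecutor.
package main
import (
	"errors"
	"fmt"
)
// getObject tries a local lookup first and falls back to a remote fetch.
func getObject(addr string, local func(string) ([]byte, bool), remote func(string) ([]byte, error)) ([]byte, error) {
	if local != nil {
		if data, ok := local(addr); ok {
			return data, nil
		}
	}
	if remote == nil {
		return nil, errors.New("could not get object from any node")
	}
	return remote(addr)
}
func main() {
	local := func(string) ([]byte, bool) { return nil, false } // cache miss
	remote := func(string) ([]byte, error) { return []byte("payload"), nil }
	data, err := getObject("cid/oid", local, remote)
	fmt.Println(string(data), err) // payload <nil>
}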

View file

@ -1,75 +0,0 @@
package storage
import (
"crypto/ecdsa"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-node/pkg/network/peers"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
// AddressStoreComponent is an interface of encapsulated AddressStore and NodePublicKeyReceiver pair.
AddressStoreComponent interface {
AddressStore
NodePublicKeyReceiver
}
// AddressStore is an interface of the container of local Multiaddr.
AddressStore interface {
SelfAddr() (multiaddr.Multiaddr, error)
}
// NodePublicKeyReceiver is an interface of Multiaddr to PublicKey converter.
NodePublicKeyReceiver interface {
PublicKey(multiaddr.Multiaddr) *ecdsa.PublicKey
}
addressStore struct {
ps peers.Store
log *zap.Logger
}
)
const addressStoreInstanceFailMsg = "could not create address store"
var (
errEmptyPeerStore = errors.New("empty peer store")
errEmptyAddressStore = errors.New("empty address store")
)
func (s addressStore) SelfAddr() (multiaddr.Multiaddr, error) { return s.ps.GetAddr(s.ps.SelfID()) }
func (s addressStore) PublicKey(mAddr multiaddr.Multiaddr) (res *ecdsa.PublicKey) {
if peerID, err := s.ps.AddressID(mAddr); err != nil {
s.log.Error("could not peer ID",
zap.Stringer("node", mAddr),
zap.Error(err),
)
} else if res, err = s.ps.GetPublicKey(peerID); err != nil {
s.log.Error("could not receive public key",
zap.Stringer("peer", peerID),
zap.Error(err),
)
}
return res
}
// NewAddressStore wraps peer store and returns AddressStoreComponent.
func NewAddressStore(ps peers.Store, log *zap.Logger) (AddressStoreComponent, error) {
if ps == nil {
return nil, errors.Wrap(errEmptyPeerStore, addressStoreInstanceFailMsg)
} else if log == nil {
return nil, errors.Wrap(logger.ErrNilLogger, addressStoreInstanceFailMsg)
}
return &addressStore{
ps: ps,
log: log,
}, nil
}

View file

@ -1,396 +0,0 @@
package storage
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/sha256"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore"
"github.com/nspcc-dev/neofs-node/pkg/services/id"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/nspcc-dev/neofs-node/pkg/util/rand"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
objectValidator struct {
as AddressStore
ls localstore.Localstore
executor transport.SelectiveContainerExecutor
log *zap.Logger
saltSize int
maxRngSize uint64
rangeCount int
sltr Salitor
verifier verifier.Verifier
}
// Salitor is a data salting function.
Salitor func(data, salt []byte) []byte
// ObjectValidatorParams groups the parameters of the object validator's constructor.
ObjectValidatorParams struct {
AddressStore AddressStore
Localstore localstore.Localstore
SelectiveContainerExecutor transport.SelectiveContainerExecutor
Logger *zap.Logger
Salitor Salitor
SaltSize int
MaxPayloadRangeSize uint64
PayloadRangeCount int
Verifier verifier.Verifier
}
localHeadIntegrityVerifier struct {
}
payloadVerifier struct {
}
localIntegrityVerifier struct {
headVerifier verifier.Verifier
payloadVerifier verifier.Verifier
}
)
const (
objectValidatorInstanceFailMsg = "could not create object validator"
defaultSaltSize = 64 // bytes
defaultPayloadRangeCount = 3
defaultMaxPayloadRangeSize = 64
)
var (
errEmptyLocalstore = errors.New("empty local storage")
errEmptyObjectVerifier = errors.New("empty object verifier")
)
var (
errBrokenHeaderStructure = errors.New("broken header structure")
errMissingPayloadChecksumHeader = errors.New("missing payload checksum header")
errWrongPayloadChecksum = errors.New("wrong payload checksum")
)
func (s *objectValidator) Verify(ctx context.Context, params *replication.ObjectVerificationParams) bool {
selfAddr, err := s.as.SelfAddr()
if err != nil {
s.log.Debug("receive self address failure", zap.Error(err))
return false
}
if params.Node == nil || params.Node.Equal(selfAddr) {
return s.verifyLocal(ctx, params.Address)
}
return s.verifyRemote(ctx, params)
}
func (s *objectValidator) verifyLocal(ctx context.Context, addr refs.Address) bool {
var (
err error
obj *object.Object
)
if obj, err = s.ls.Get(addr); err != nil {
s.log.Debug("get local meta information failure", zap.Error(err))
return false
} else if err = s.verifier.Verify(ctx, obj); err != nil {
s.log.Debug("integrity check failure", zap.Error(err))
}
return err == nil
}
func (s *objectValidator) verifyRemote(ctx context.Context, params *replication.ObjectVerificationParams) bool {
var (
receivedObj *object.Object
valid bool
)
defer func() {
if params.Handler != nil && receivedObj != nil {
params.Handler(valid, receivedObj)
}
}()
p := &transport.HeadParams{
GetParams: transport.GetParams{
SelectiveParams: transport.SelectiveParams{
CID: params.CID,
Nodes: []multiaddr.Multiaddr{params.Node},
TTL: service.NonForwardingTTL,
IDList: []object.ID{params.ObjectID},
Raw: true,
},
Handler: func(_ multiaddr.Multiaddr, obj *object.Object) {
receivedObj = obj
valid = s.verifier.Verify(ctx, obj) == nil
},
},
FullHeaders: true,
}
if err := s.executor.Head(ctx, p); err != nil || !valid {
return false
} else if receivedObj.SystemHeader.PayloadLength <= 0 || receivedObj.IsLinking() {
return true
}
if !params.LocalInvalid {
has, err := s.ls.Has(params.Address)
if err == nil && has {
obj, err := s.ls.Get(params.Address)
if err == nil {
return s.verifyThroughHashes(ctx, obj, params.Node)
}
}
}
valid = false
_ = s.executor.Get(ctx, &p.GetParams)
return valid
}
func (s *objectValidator) verifyThroughHashes(ctx context.Context, obj *object.Object, node multiaddr.Multiaddr) (valid bool) {
var (
salt = generateSalt(s.saltSize)
rngs = generateRanges(obj.SystemHeader.PayloadLength, s.maxRngSize, s.rangeCount)
)
_ = s.executor.RangeHash(ctx, &transport.RangeHashParams{
SelectiveParams: transport.SelectiveParams{
CID: obj.SystemHeader.CID,
Nodes: []multiaddr.Multiaddr{node},
TTL: service.NonForwardingTTL,
IDList: []object.ID{obj.SystemHeader.ID},
},
Ranges: rngs,
Salt: salt,
Handler: func(node multiaddr.Multiaddr, hashes []hash.Hash) {
valid = compareHashes(s.sltr, obj.Payload, salt, rngs, hashes)
},
})
return
}
func compareHashes(sltr Salitor, payload, salt []byte, rngs []object.Range, hashes []hash.Hash) bool {
if len(rngs) != len(hashes) {
return false
}
for i := range rngs {
saltPayloadPart := sltr(payload[rngs[i].Offset:rngs[i].Offset+rngs[i].Length], salt)
if !hashes[i].Equal(hash.Sum(saltPayloadPart)) {
return false
}
}
return true
}
func generateRanges(payloadSize, maxRangeSize uint64, count int) []object.Range {
res := make([]object.Range, count)
l := min(payloadSize, maxRangeSize)
for i := 0; i < count; i++ {
res[i].Length = l
res[i].Offset = rand.Uint64(rand.New(), int64(payloadSize-l))
}
return res
}
func min(a, b uint64) uint64 {
if a < b {
return a
}
return b
}
func generateSalt(saltSize int) []byte {
salt := make([]byte, saltSize)
if _, err := rand.Read(salt); err != nil {
return nil
}
return salt
}
// NewObjectValidator constructs universal replication.ObjectVerifier.
func NewObjectValidator(p *ObjectValidatorParams) (replication.ObjectVerifier, error) {
switch {
case p.Logger == nil:
return nil, errors.Wrap(logger.ErrNilLogger, objectValidatorInstanceFailMsg)
case p.AddressStore == nil:
return nil, errors.Wrap(errEmptyAddressStore, objectValidatorInstanceFailMsg)
case p.Localstore == nil:
return nil, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg)
case p.Verifier == nil:
return nil, errors.Wrap(errEmptyObjectVerifier, objectValidatorInstanceFailMsg)
}
if p.SaltSize <= 0 {
p.SaltSize = defaultSaltSize
}
if p.PayloadRangeCount <= 0 {
p.PayloadRangeCount = defaultPayloadRangeCount
}
if p.MaxPayloadRangeSize <= 0 {
p.MaxPayloadRangeSize = defaultMaxPayloadRangeSize
}
return &objectValidator{
as: p.AddressStore,
ls: p.Localstore,
executor: p.SelectiveContainerExecutor,
log: p.Logger,
saltSize: p.SaltSize,
maxRngSize: p.MaxPayloadRangeSize,
rangeCount: p.PayloadRangeCount,
sltr: p.Salitor,
verifier: p.Verifier,
}, nil
}
// NewLocalHeadIntegrityVerifier constructs local object head verifier and returns verifier.Verifier interface.
func NewLocalHeadIntegrityVerifier() (verifier.Verifier, error) {
return new(localHeadIntegrityVerifier), nil
}
// NewLocalIntegrityVerifier constructs local object verifier and returns verifier.Verifier interface.
func NewLocalIntegrityVerifier() (verifier.Verifier, error) {
return &localIntegrityVerifier{
headVerifier: new(localHeadIntegrityVerifier),
payloadVerifier: new(payloadVerifier),
}, nil
}
// NewPayloadVerifier constructs object payload verifier and returns verifier.Verifier.
func NewPayloadVerifier() verifier.Verifier {
return new(payloadVerifier)
}
type hdrOwnerKeyContainer struct {
owner refs.OwnerID
key []byte
}
func (s hdrOwnerKeyContainer) GetOwnerID() refs.OwnerID {
return s.owner
}
func (s hdrOwnerKeyContainer) GetOwnerKey() []byte {
return s.key
}
func (s *localHeadIntegrityVerifier) Verify(ctx context.Context, obj *object.Object) error {
var (
checkKey *ecdsa.PublicKey
ownerKeyCnr id.OwnerKeyContainer
)
if _, h := obj.LastHeader(object.HeaderType(object.TokenHdr)); h != nil {
token := h.GetValue().(*object.Header_Token).Token
if err := service.VerifySignatureWithKey(
crypto.UnmarshalPublicKey(token.GetOwnerKey()),
service.NewVerifiedSessionToken(token),
); err != nil {
return err
}
ownerKeyCnr = token
checkKey = crypto.UnmarshalPublicKey(token.GetSessionKey())
} else if _, h := obj.LastHeader(object.HeaderType(object.PublicKeyHdr)); h != nil {
pkHdr := h.GetValue().(*object.Header_PublicKey)
if pkHdr != nil && pkHdr.PublicKey != nil {
val := pkHdr.PublicKey.GetValue()
ownerKeyCnr = &hdrOwnerKeyContainer{
owner: obj.GetSystemHeader().OwnerID,
key: val,
}
checkKey = crypto.UnmarshalPublicKey(val)
}
}
if ownerKeyCnr == nil {
return id.ErrNilOwnerKeyContainer
} else if err := id.VerifyKey(ownerKeyCnr); err != nil {
return err
}
return verifyObjectIntegrity(obj, checkKey)
}
// verifyObjectIntegrity verifies the integrity of the object header.
// It returns an error if the object
// - does not contain an integrity header;
// - the integrity header is not the last header in the object;
// - the integrity header signature is broken.
func verifyObjectIntegrity(obj *object.Object, key *ecdsa.PublicKey) error {
n, h := obj.LastHeader(object.HeaderType(object.IntegrityHdr))
if l := len(obj.Headers); l <= 0 || n != l-1 {
return errBrokenHeaderStructure
}
integrityHdr := h.Value.(*object.Header_Integrity).Integrity
if integrityHdr == nil {
return errBrokenHeaderStructure
}
data, err := verifier.MarshalHeaders(obj, n)
if err != nil {
return err
}
hdrChecksum := sha256.Sum256(data)
return crypto.Verify(key, hdrChecksum[:], integrityHdr.ChecksumSignature)
}
func (s *payloadVerifier) Verify(_ context.Context, obj *object.Object) error {
if _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); h == nil {
return errMissingPayloadChecksumHeader
} else if checksum := sha256.Sum256(obj.Payload); !bytes.Equal(
checksum[:],
h.Value.(*object.Header_PayloadChecksum).PayloadChecksum,
) {
return errWrongPayloadChecksum
}
return nil
}
func (s *localIntegrityVerifier) Verify(ctx context.Context, obj *object.Object) error {
if err := s.headVerifier.Verify(ctx, obj); err != nil {
return err
}
return s.payloadVerifier.Verify(ctx, obj)
}
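A compact, self-contained sketch of the salted range spot-check implemented by verifyThroughHashes, generateRanges and compareHashes above: the checker picks random ranges and a salt, the remote side hashes the salted range contents, and the checker compares the result against hashes over its own copy. The homomorphic hash and salting function of the original are replaced here by sha256 over salt plus range bytes, purely for illustration.
package main
import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)
type payloadRange struct{ offset, length uint64 }
// saltedHash hashes salt||payload[offset:offset+length].
func saltedHash(payload, salt []byte, r payloadRange) []byte {
	h := sha256.New()
	h.Write(salt)
	h.Write(payload[r.offset : r.offset+r.length])
	return h.Sum(nil)
}
func main() {
	payload := []byte("object payload that a remote node claims to store")
	salt := make([]byte, 16)
	_, _ = rand.Read(salt)
	// checker: pick 3 random ranges of 8 bytes each
	ranges := make([]payloadRange, 3)
	for i := range ranges {
		off, _ := rand.Int(rand.Reader, big.NewInt(int64(len(payload)-8)))
		ranges[i] = payloadRange{offset: off.Uint64(), length: 8}
	}
	// remote side: hash the requested salted ranges of its (honest) copy
	proof := make([][]byte, len(ranges))
	for i, r := range ranges {
		proof[i] = saltedHash(payload, salt, r)
	}
	// checker: recompute over the local copy and compare
	valid := true
	for i, r := range ranges {
		if !bytes.Equal(saltedHash(payload, salt, r), proof[i]) {
			valid = false
		}
	}
	fmt.Println("payload spot-check passed:", valid)
}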

View file

@ -1,261 +0,0 @@
package storage
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"math/rand"
"testing"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore"
verifier2 "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/nspcc-dev/neofs-node/pkg/util/test"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
type testEntity struct {
err error
}
func (s *testEntity) Verify(context.Context, *object.Object) error { return s.err }
func (s *testEntity) SelfAddr() (multiaddr.Multiaddr, error) { panic("implement me") }
func (s *testEntity) Put(context.Context, *localstore.Object) error { panic("implement me") }
func (s *testEntity) Get(localstore.Address) (*localstore.Object, error) { panic("implement me") }
func (s *testEntity) Del(localstore.Address) error { panic("implement me") }
func (s *testEntity) Meta(localstore.Address) (*localstore.ObjectMeta, error) { panic("implement me") }
func (s *testEntity) Has(localstore.Address) (bool, error) { panic("implement me") }
func (s *testEntity) ObjectsCount() (uint64, error) { panic("implement me") }
func (s *testEntity) Size() int64 { panic("implement me") }
func (s *testEntity) Iterate(localstore.FilterPipeline, localstore.MetaHandler) error {
panic("implement me")
}
func (s *testEntity) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) {
panic("implement me")
}
func TestNewObjectValidator(t *testing.T) {
validParams := ObjectValidatorParams{
Logger: zap.L(),
AddressStore: new(testEntity),
Localstore: new(testEntity),
Verifier: new(testEntity),
}
t.Run("valid params", func(t *testing.T) {
s, err := NewObjectValidator(&validParams)
require.NoError(t, err)
require.NotNil(t, s)
})
t.Run("fail on empty local storage", func(t *testing.T) {
p := validParams
p.Localstore = nil
_, err := NewObjectValidator(&p)
require.EqualError(t, err, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg).Error())
})
t.Run("fail on empty logger", func(t *testing.T) {
p := validParams
p.Logger = nil
_, err := NewObjectValidator(&p)
require.EqualError(t, err, errors.Wrap(logger.ErrNilLogger, objectValidatorInstanceFailMsg).Error())
})
}
func TestNewLocalIntegrityVerifier(t *testing.T) {
var (
err error
verifier verifier2.Verifier
)
verifier, err = NewLocalHeadIntegrityVerifier()
require.NoError(t, err)
require.NotNil(t, verifier)
verifier, err = NewLocalIntegrityVerifier()
require.NoError(t, err)
require.NotNil(t, verifier)
}
func TestLocalHeadIntegrityVerifier_Verify(t *testing.T) {
var (
ctx = context.TODO()
ownerPrivateKey = test.DecodeKey(0)
ownerPublicKey = &ownerPrivateKey.PublicKey
sessionPrivateKey = test.DecodeKey(1)
sessionPublicKey = &sessionPrivateKey.PublicKey
)
ownerID, err := refs.NewOwnerID(ownerPublicKey)
require.NoError(t, err)
s, err := NewLocalIntegrityVerifier()
require.NoError(t, err)
okItems := []func() *object.Object{
// correct object w/ session token
func() *object.Object {
token := new(service.Token)
token.SetOwnerID(ownerID)
token.SetSessionKey(crypto.MarshalPublicKey(sessionPublicKey))
require.NoError(t,
service.AddSignatureWithKey(
ownerPrivateKey,
service.NewSignedSessionToken(token),
),
)
obj := new(object.Object)
obj.AddHeader(&object.Header{
Value: &object.Header_Token{
Token: token,
},
})
obj.SetPayload([]byte{1, 2, 3})
addPayloadChecksum(obj)
addHeadersChecksum(t, obj, sessionPrivateKey)
return obj
},
// correct object w/o session token
func() *object.Object {
obj := new(object.Object)
obj.SystemHeader.OwnerID = ownerID
obj.SetPayload([]byte{1, 2, 3})
addPayloadChecksum(obj)
obj.AddHeader(&object.Header{
Value: &object.Header_PublicKey{
PublicKey: &object.PublicKey{
Value: crypto.MarshalPublicKey(ownerPublicKey),
},
},
})
addHeadersChecksum(t, obj, ownerPrivateKey)
return obj
},
}
failItems := []func() *object.Object{}
for _, item := range okItems {
require.NoError(t, s.Verify(ctx, item()))
}
for _, item := range failItems {
require.Error(t, s.Verify(ctx, item()))
}
}
func addPayloadChecksum(obj *object.Object) {
payloadChecksum := sha256.Sum256(obj.GetPayload())
obj.AddHeader(&object.Header{
Value: &object.Header_PayloadChecksum{
PayloadChecksum: payloadChecksum[:],
},
})
}
func addHeadersChecksum(t *testing.T, obj *object.Object, key *ecdsa.PrivateKey) {
headersData, err := verifier2.MarshalHeaders(obj, len(obj.Headers))
require.NoError(t, err)
headersChecksum := sha256.Sum256(headersData)
integrityHdr := new(object.IntegrityHeader)
integrityHdr.SetHeadersChecksum(headersChecksum[:])
require.NoError(t, service.AddSignatureWithKey(key, integrityHdr))
obj.AddHeader(&object.Header{
Value: &object.Header_Integrity{
Integrity: integrityHdr,
},
})
}
func TestPayloadVerifier_Verify(t *testing.T) {
ctx := context.TODO()
verifier := new(payloadVerifier)
t.Run("missing header", func(t *testing.T) {
obj := new(object.Object)
require.EqualError(t, verifier.Verify(ctx, obj), errMissingPayloadChecksumHeader.Error())
})
t.Run("correct result", func(t *testing.T) {
payload := testData(t, 10)
cs := sha256.Sum256(payload)
hdr := &object.Header_PayloadChecksum{PayloadChecksum: cs[:]}
obj := &object.Object{
Headers: []object.Header{{Value: hdr}},
Payload: payload,
}
require.NoError(t, verifier.Verify(ctx, obj))
hdr.PayloadChecksum[0]++
require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error())
hdr.PayloadChecksum[0]--
obj.Payload[0]++
require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error())
})
}
func TestLocalIntegrityVerifier_Verify(t *testing.T) {
ctx := context.TODO()
obj := new(object.Object)
t.Run("head verification failure", func(t *testing.T) {
hErr := errors.New("test error for head verifier")
s := &localIntegrityVerifier{
headVerifier: &testEntity{
err: hErr, // force head verifier to return hErr
},
}
require.EqualError(t, s.Verify(ctx, obj), hErr.Error())
})
t.Run("correct result", func(t *testing.T) {
pErr := errors.New("test error for payload verifier")
s := &localIntegrityVerifier{
headVerifier: new(testEntity),
payloadVerifier: &testEntity{
err: pErr, // force payload verifier to return pErr
},
}
require.EqualError(t, s.Verify(ctx, obj), pErr.Error())
})
}
// testData returns size bytes of random data.
func testData(t *testing.T, size int) []byte {
res := make([]byte, size)
_, err := rand.Read(res)
require.NoError(t, err)
return res
}
// TODO: write functionality tests
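The integrity scheme exercised by the tests above can be reduced to a few standard-library calls: sign the sha256 checksum of the marshaled headers with the owner (or session) key and verify it against the corresponding public key. A minimal sketch, with the header marshaling simplified to a plain byte slice:
package main
import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)
func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	headers := []byte("marshaled object headers") // stand-in for verifier.MarshalHeaders output
	sum := sha256.Sum256(headers)
	// sender: sign the headers checksum and attach it as the integrity header
	sig, err := ecdsa.SignASN1(rand.Reader, key, sum[:])
	if err != nil {
		panic(err)
	}
	// receiver: recompute the checksum and verify the signature
	fmt.Println("integrity ok:", ecdsa.VerifyASN1(&key.PublicKey, sum[:], sig))
}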

View file

@ -1,194 +0,0 @@
package replication
import (
"context"
"time"
"github.com/multiformats/go-multiaddr"
"go.uber.org/zap"
)
type (
// StorageValidator is an interface of entity
// that listens for and performs storage validation tasks on remote nodes.
// Validation can result in the need to replicate or clean up the object.
StorageValidator interface {
Process(ctx context.Context) chan<- *ObjectLocationRecord
SubscribeReplication(ch chan<- *ReplicateTask)
SubscribeGarbage(ch chan<- Address)
}
storageValidator struct {
objectVerifier ObjectVerifier
log *zap.Logger
presenceChecker PresenceChecker
addrstore AddressStore
taskChanCap int
resultTimeout time.Duration
replicateResultChan chan<- *ReplicateTask
garbageChan chan<- Address
}
// StorageValidatorParams groups the parameters of storage validator's constructor.
StorageValidatorParams struct {
ObjectVerifier
PresenceChecker
*zap.Logger
TaskChanCap int
ResultTimeout time.Duration
AddrStore AddressStore
}
)
const (
defaultStorageValidatorChanCap = 10
defaultStorageValidatorResultTimeout = time.Second
storageValidatorEntity = "storage validator"
)
func (s *storageValidator) SubscribeReplication(ch chan<- *ReplicateTask) {
s.replicateResultChan = ch
}
func (s *storageValidator) SubscribeGarbage(ch chan<- Address) { s.garbageChan = ch }
func (s *storageValidator) Process(ctx context.Context) chan<- *ObjectLocationRecord {
ch := make(chan *ObjectLocationRecord, s.taskChanCap)
go s.processRoutine(ctx, ch)
return ch
}
func (s *storageValidator) writeReplicateResult(replicateTask *ReplicateTask) {
if s.replicateResultChan == nil {
return
}
select {
case s.replicateResultChan <- replicateTask:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *storageValidator) writeGarbage(addr Address) {
if s.garbageChan == nil {
return
}
select {
case s.garbageChan <- addr:
case <-time.After(s.resultTimeout):
s.log.Warn(writeResultTimeout)
}
}
func (s *storageValidator) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) {
loop:
for {
select {
case <-ctx.Done():
s.log.Warn(storageValidatorEntity+ctxDoneMsg, zap.Error(ctx.Err()))
break loop
case locationRecord, ok := <-taskChan:
if !ok {
s.log.Warn(storageValidatorEntity + taskChanClosed)
break loop
} else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has {
continue loop
}
s.handleTask(ctx, locationRecord)
}
}
close(s.replicateResultChan)
close(s.garbageChan)
}
func (s *storageValidator) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) {
selfAddr, err := s.addrstore.SelfAddr()
if err != nil {
s.log.Error("storage validator can't obtain self address")
return
}
var (
weightierCounter int
replicateTask = &ReplicateTask{
Address: locationRecord.Address,
Shortage: locationRecord.ReservationRatio - 1, // taking account of object correctly stored in local store
ExcludeNodes: nodesFromLocations(locationRecord.Locations, selfAddr),
}
)
for i := range locationRecord.Locations {
loc := locationRecord.Locations[i]
if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{
Address: locationRecord.Address,
Node: locationRecord.Locations[i].Node,
Handler: func(valid bool, _ *Object) {
if valid {
replicateTask.Shortage--
if loc.WeightGreater {
weightierCounter++
}
}
},
}); weightierCounter >= locationRecord.ReservationRatio {
s.writeGarbage(locationRecord.Address)
return
}
}
if replicateTask.Shortage > 0 {
s.writeReplicateResult(replicateTask)
}
}
// nodesFromLocations must ignore the self address because it is used by the
// storage validator during replication: locally stored objects are skipped,
// and only remote hosts are queried for their verification info.
func nodesFromLocations(locations []ObjectLocation, selfaddr multiaddr.Multiaddr) []multiaddr.Multiaddr {
res := make([]multiaddr.Multiaddr, 0, len(locations))
for i := range locations {
if !locations[i].Node.Equal(selfaddr) {
res = append(res, locations[i].Node)
}
}
return res
}
// NewStorageValidator is a storage validator's constructor.
func NewStorageValidator(p StorageValidatorParams) (StorageValidator, error) {
switch {
case p.Logger == nil:
return nil, instanceError(storageValidatorEntity, loggerPart)
case p.ObjectVerifier == nil:
return nil, instanceError(storageValidatorEntity, objectVerifierPart)
case p.PresenceChecker == nil:
return nil, instanceError(storageValidatorEntity, presenceCheckerPart)
case p.AddrStore == nil:
return nil, instanceError(storageValidatorEntity, addrStorePart)
}
if p.TaskChanCap <= 0 {
p.TaskChanCap = defaultStorageValidatorChanCap
}
if p.ResultTimeout <= 0 {
p.ResultTimeout = defaultStorageValidatorResultTimeout
}
return &storageValidator{
objectVerifier: p.ObjectVerifier,
log: p.Logger,
presenceChecker: p.PresenceChecker,
taskChanCap: p.TaskChanCap,
resultTimeout: p.ResultTimeout,
addrstore: p.AddrStore,
}, nil
}
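A small sketch of the decision made in storageValidator.handleTask above: every remote location that still holds a valid copy reduces the replication shortage, and once enough higher-weight nodes hold the object the local copy can be treated as garbage. The location struct and evaluate function are illustrative names, not part of the original package.
package main
import "fmt"
type location struct {
	valid         bool // remote copy passed verification
	weightGreater bool // remote node has higher placement weight than this one
}
// evaluate returns how many extra replicas are still needed and whether the
// locally stored copy may be handed to the garbage collector.
func evaluate(reservationRatio int, locations []location) (shortage int, garbage bool) {
	shortage = reservationRatio - 1 // the local copy already counts
	weightier := 0
	for _, loc := range locations {
		if loc.valid {
			shortage--
			if loc.weightGreater {
				weightier++
			}
		}
		if weightier >= reservationRatio {
			return 0, true
		}
	}
	if shortage < 0 {
		shortage = 0
	}
	return shortage, false
}
func main() {
	locs := []location{
		{valid: true, weightGreater: true},
		{valid: false},
	}
	fmt.Println(evaluate(3, locs)) // 1 false: one more replica is needed
}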

View file

@ -1,25 +0,0 @@
package transformer
import (
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
)
type (
// Object is a type alias of
// Object from object package of neofs-api-go.
Object = object.Object
// ObjectID is a type alias of
// ObjectID from refs package of neofs-api-go.
ObjectID = refs.ObjectID
// CID is a type alias of
// CID from refs package of neofs-api-go.
CID = refs.CID
// StorageGroup is a type alias of
// StorageGroup from storagegroup package of neofs-api-go.
StorageGroup = storagegroup.StorageGroup
)

View file

@ -1,762 +0,0 @@
package transformer
import (
"bytes"
"context"
"crypto/sha256"
"io"
"math/rand"
"sort"
"testing"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/session"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier"
"github.com/nspcc-dev/neofs-node/pkg/util/test"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
type (
// Entity for mocking interfaces.
// Implementation of any interface intercepts arguments via f (if not nil).
// If err is not nil, it is returned as is. Otherwise, res cast to the needed type is returned without error.
testPutEntity struct {
// Set of interfaces which the entity must implement; some of their methods are never called.
// Argument interceptor. Used to ascertain correct parameter passage between components.
f func(...interface{})
// Mocked result of any interface.
res interface{}
// Mocked error of any interface.
err error
}
)
var (
_ io.Writer = (*testPutEntity)(nil)
_ EpochReceiver = (*testPutEntity)(nil)
_ Transformer = (*testPutEntity)(nil)
_ storagegroup.InfoReceiver = (*testPutEntity)(nil)
_ verifier.Verifier = (*testPutEntity)(nil)
)
func (s *testPutEntity) Verify(_ context.Context, obj *Object) error {
if s.f != nil {
s.f(obj)
}
return s.err
}
func (s *testPutEntity) Write(p []byte) (int, error) {
if s.f != nil {
s.f(p)
}
return 0, s.err
}
func (s *testPutEntity) Transform(_ context.Context, u ProcUnit, h ...ProcUnitHandler) error {
if s.f != nil {
s.f(u, h)
}
return s.err
}
func (s *testPutEntity) GetSGInfo(_ context.Context, cid CID, group []ObjectID) (*StorageGroup, error) {
if s.f != nil {
s.f(cid, group)
}
if s.err != nil {
return nil, s.err
}
return s.res.(*StorageGroup), nil
}
func (s *testPutEntity) Epoch() uint64 { return s.res.(uint64) }
func TestNewTransformer(t *testing.T) {
validParams := Params{
SGInfoReceiver: new(testPutEntity),
EpochReceiver: new(testPutEntity),
SizeLimit: 1,
Verifier: new(testPutEntity),
}
t.Run("valid params", func(t *testing.T) {
res, err := NewTransformer(validParams)
require.NoError(t, err)
require.NotNil(t, res)
})
t.Run("non-positive size", func(t *testing.T) {
p := validParams
p.SizeLimit = 0
_, err := NewTransformer(p)
require.EqualError(t, err, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg).Error())
})
t.Run("empty SG info receiver", func(t *testing.T) {
p := validParams
p.SGInfoReceiver = nil
_, err := NewTransformer(p)
require.EqualError(t, err, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg).Error())
})
t.Run("empty epoch receiver", func(t *testing.T) {
p := validParams
p.EpochReceiver = nil
_, err := NewTransformer(p)
require.EqualError(t, err, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg).Error())
})
t.Run("empty object verifier", func(t *testing.T) {
p := validParams
p.Verifier = nil
_, err := NewTransformer(p)
require.EqualError(t, err, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg).Error())
})
}
func Test_transformer(t *testing.T) {
ctx := context.TODO()
u := ProcUnit{
Head: &Object{
Payload: testData(t, 10),
},
Payload: new(emptyReader),
}
handlers := []ProcUnitHandler{func(context.Context, ProcUnit) error { return nil }}
t.Run("preliminary transformation failure", func(t *testing.T) {
// create custom error for test
pErr := errors.New("test error for prelim transformer")
s := &transformer{
tPrelim: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct prelim transformer params", func(t *testing.T) {
require.Equal(t, u, items[0])
require.Empty(t, items[1])
})
},
err: pErr, // force Transformer to return pErr
},
}
// ascertain that error returns as expected
require.EqualError(t, s.Transform(ctx, u, handlers...), pErr.Error())
})
t.Run("size limiter error/correct sign processing", func(t *testing.T) {
// create custom error for test
sErr := errors.New("test error for signer")
lErr := errors.New("test error for size limiter")
s := &transformer{
tPrelim: new(testPutEntity),
tSizeLim: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct size limiter params", func(t *testing.T) {
require.Equal(t, u, items[0])
hs := items[1].([]ProcUnitHandler)
require.Len(t, hs, 1)
require.EqualError(t, hs[0](ctx, u), sErr.Error())
})
},
err: lErr, // force Transformer to return lErr
},
tSign: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct signer params", func(t *testing.T) {
require.Equal(t, u, items[0])
require.Equal(t, handlers, items[1])
})
},
err: sErr, // force Transformer to return sErr
},
}
// ascertain that error returns as expected
require.EqualError(t, s.Transform(ctx, u, handlers...), lErr.Error())
})
}
func Test_preliminaryTransformer(t *testing.T) {
ctx := context.TODO()
u := ProcUnit{
Head: &Object{
Payload: testData(t, 10),
},
Payload: new(emptyReader),
}
t.Run("field moulder failure", func(t *testing.T) {
// create custom error for test
mErr := errors.New("test error for field moulder")
s := &preliminaryTransformer{
fMoulder: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct field moulder params", func(t *testing.T) {
require.Equal(t, u, items[0])
require.Empty(t, items[1])
})
},
err: mErr, // force Transformer to return mErr
},
}
// ascertain that error returns as expected
require.EqualError(t, s.Transform(ctx, u), mErr.Error())
})
t.Run("correct result", func(t *testing.T) {
// create custom error for test
sgErr := errors.New("test error for SG moulder")
s := &preliminaryTransformer{
fMoulder: new(testPutEntity),
sgMoulder: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct field moulder params", func(t *testing.T) {
require.Equal(t, u, items[0])
require.Empty(t, items[1])
})
},
err: sgErr, // force Transformer to return sgErr
},
}
// ascertain that error returns as expected
require.EqualError(t, s.Transform(ctx, u), sgErr.Error())
})
}
func Test_readChunk(t *testing.T) {
t.Run("empty slice", func(t *testing.T) {
t.Run("missing checksum header", func(t *testing.T) {
obj := new(Object)
_, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))
require.Nil(t, h)
require.NoError(t, readChunk(ProcUnit{
Head: obj,
Payload: bytes.NewBuffer(testData(t, 10)),
}, nil, nil, nil))
_, h = obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))
require.NotNil(t, h)
require.Equal(t, sha256.New().Sum(nil), h.Value.(*object.Header_PayloadChecksum).PayloadChecksum)
})
t.Run("existing checksum header", func(t *testing.T) {
h := &object.Header_PayloadChecksum{PayloadChecksum: testData(t, 10)}
obj := &Object{Headers: []object.Header{{Value: h}}}
require.NoError(t, readChunk(ProcUnit{
Head: obj,
Payload: bytes.NewBuffer(testData(t, 10)),
}, nil, nil, nil))
require.NotNil(t, h)
require.Equal(t, sha256.New().Sum(nil), h.PayloadChecksum)
})
})
t.Run("non-empty slice", func(t *testing.T) {
t.Run("non-full data", func(t *testing.T) {
var (
size = 10
buf = testData(t, size)
r = bytes.NewBuffer(buf[:size-1])
)
require.EqualError(t,
readChunk(ProcUnit{Head: new(Object), Payload: r}, buf, nil, nil),
ErrPayloadEOF.Error(),
)
})
t.Run("hash accumulator write", func(t *testing.T) {
var (
d = testData(t, 10)
srcHash = sha256.Sum256(d)
hAcc = sha256.New()
buf = bytes.NewBuffer(d)
b = make([]byte, len(d))
obj = new(Object)
srcHomoHash = hash.Sum(d)
homoHashHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}
)
t.Run("failure", func(t *testing.T) {
hErr := errors.New("test error for hash writer")
b := testData(t, len(d))
require.EqualError(t, readChunk(EmptyPayloadUnit(new(Object)), b, &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct accumulator params", func(t *testing.T) {
require.Equal(t, b, items[0])
})
},
err: hErr,
}, nil), hErr.Error())
})
require.NoError(t, readChunk(ProcUnit{Head: obj, Payload: buf}, b, hAcc, homoHashHdr))
_, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))
require.NotNil(t, h)
require.Equal(t, srcHash[:], h.Value.(*object.Header_PayloadChecksum).PayloadChecksum)
require.Equal(t, srcHash[:], hAcc.Sum(nil))
require.Equal(t, srcHomoHash, homoHashHdr.HomoHash)
})
})
}
func Test_headSigner(t *testing.T) {
ctx := context.TODO()
t.Run("invalid input", func(t *testing.T) {
t.Run("missing token", func(t *testing.T) {
u := ProcUnit{Head: new(Object)}
require.Error(t, u.Head.Verify())
s := &headSigner{verifier: &testPutEntity{err: errors.New("")}}
require.EqualError(t, s.Transform(ctx, u), errNoToken.Error())
})
t.Run("with token", func(t *testing.T) {
u := ProcUnit{Head: new(Object)}
v, err := storage.NewLocalHeadIntegrityVerifier()
require.NoError(t, err)
require.Error(t, u.Head.Verify())
privateToken, err := session.NewPrivateToken(0)
require.NoError(t, err)
ctx := context.WithValue(ctx, PrivateSessionToken, privateToken)
s := &headSigner{
verifier: &testPutEntity{
err: errors.New(""),
},
}
key := &privateToken.PrivateKey().PublicKey
u.Head.SystemHeader.OwnerID, err = refs.NewOwnerID(key)
require.NoError(t, err)
u.Head.AddHeader(&object.Header{
Value: &object.Header_PublicKey{
PublicKey: &object.PublicKey{
Value: crypto.MarshalPublicKey(key),
},
},
})
require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
require.NoError(t, v.Verify(ctx, unit.Head))
_, h := unit.Head.LastHeader(object.HeaderType(object.IntegrityHdr))
require.NotNil(t, h)
d, err := verifier.MarshalHeaders(unit.Head, len(unit.Head.Headers)-1)
require.NoError(t, err)
cs := sha256.Sum256(d)
require.Equal(t, cs[:], h.Value.(*object.Header_Integrity).Integrity.GetHeadersChecksum())
return nil
}))
t.Run("valid input", func(t *testing.T) {
s := &headSigner{verifier: new(testPutEntity)}
require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
require.Equal(t, u, unit)
return nil
}))
})
})
})
}
func Test_fieldMoulder(t *testing.T) {
ctx := context.TODO()
epoch := uint64(100)
fMoulder := &fieldMoulder{epochRecv: &testPutEntity{res: epoch}}
t.Run("no token", func(t *testing.T) {
require.EqualError(t, new(fieldMoulder).Transform(ctx, ProcUnit{}), errNoToken.Error())
})
t.Run("with token", func(t *testing.T) {
token := new(service.Token)
token.SetID(service.TokenID{1, 2, 3})
ctx := context.WithValue(ctx, PublicSessionToken, token)
u := ProcUnit{Head: new(Object)}
_, h := u.Head.LastHeader(object.HeaderType(object.TokenHdr))
require.Nil(t, h)
require.NoError(t, fMoulder.Transform(ctx, u))
_, h = u.Head.LastHeader(object.HeaderType(object.TokenHdr))
require.Equal(t, token, h.Value.(*object.Header_Token).Token)
require.False(t, u.Head.SystemHeader.ID.Empty())
require.NotZero(t, u.Head.SystemHeader.CreatedAt.UnixTime)
require.Equal(t, epoch, u.Head.SystemHeader.CreatedAt.Epoch)
require.Equal(t, uint64(1), u.Head.SystemHeader.Version)
})
}
func Test_sgMoulder(t *testing.T) {
ctx := context.TODO()
t.Run("invalid SG linking", func(t *testing.T) {
t.Run("w/ header and w/o links", func(t *testing.T) {
obj := new(Object)
obj.SetStorageGroup(new(storagegroup.StorageGroup))
require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error())
})
t.Run("w/o header and w/ links", func(t *testing.T) {
obj := new(Object)
addLink(obj, object.Link_StorageGroup, ObjectID{})
require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error())
})
})
t.Run("non-SG", func(t *testing.T) {
obj := new(Object)
require.NoError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}))
})
t.Run("receive SG info", func(t *testing.T) {
cid := testObjectAddress(t).CID
group := make([]ObjectID, 5)
for i := range group {
group[i] = testObjectAddress(t).ObjectID
}
t.Run("failure", func(t *testing.T) {
obj := &Object{SystemHeader: object.SystemHeader{CID: cid}}
obj.SetStorageGroup(new(storagegroup.StorageGroup))
for i := range group {
addLink(obj, object.Link_StorageGroup, group[i])
}
sgErr := errors.New("test error for SG info receiver")
mSG := &sgMoulder{
sgInfoRecv: &testPutEntity{
f: func(items ...interface{}) {
t.Run("correct SG info receiver params", func(t *testing.T) {
cp := make([]ObjectID, len(group))
copy(cp, group)
sort.Sort(storagegroup.IDList(cp))
require.Equal(t, cid, items[0])
require.Equal(t, cp, items[1])
})
},
err: sgErr,
},
}
require.EqualError(t, mSG.Transform(ctx, ProcUnit{Head: obj}), sgErr.Error())
})
})
t.Run("correct result", func(t *testing.T) {
obj := new(Object)
obj.SetStorageGroup(new(storagegroup.StorageGroup))
addLink(obj, object.Link_StorageGroup, ObjectID{})
sgInfo := &storagegroup.StorageGroup{
ValidationDataSize: 19,
ValidationHash: hash.Sum(testData(t, 10)),
}
mSG := &sgMoulder{
sgInfoRecv: &testPutEntity{
res: sgInfo,
},
}
require.NoError(t, mSG.Transform(ctx, ProcUnit{Head: obj}))
_, h := obj.LastHeader(object.HeaderType(object.StorageGroupHdr))
require.NotNil(t, h)
require.Equal(t, sgInfo, h.Value.(*object.Header_StorageGroup).StorageGroup)
})
}
func Test_sizeLimiter(t *testing.T) {
ctx := context.TODO()
t.Run("limit entry", func(t *testing.T) {
payload := testData(t, 10)
payloadSize := uint64(len(payload) - 1)
u := ProcUnit{
Head: &Object{SystemHeader: object.SystemHeader{
PayloadLength: payloadSize,
}},
Payload: bytes.NewBuffer(payload[:payloadSize]),
}
sl := &sizeLimiter{limit: payloadSize}
t.Run("cut payload", func(t *testing.T) {
require.Error(t, sl.Transform(ctx, ProcUnit{
Head: &Object{SystemHeader: object.SystemHeader{PayloadLength: payloadSize}},
Payload: bytes.NewBuffer(payload[:payloadSize-1]),
}))
})
require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
_, err := unit.Payload.Read(make([]byte, 1))
require.EqualError(t, err, io.EOF.Error())
require.Equal(t, payload[:payloadSize], unit.Head.Payload)
_, h := unit.Head.LastHeader(object.HeaderType(object.HomoHashHdr))
require.NotNil(t, h)
require.Equal(t, hash.Sum(payload[:payloadSize]), h.Value.(*object.Header_HomoHash).HomoHash)
return nil
}))
})
t.Run("limit exceed", func(t *testing.T) {
payload := testData(t, 100)
sizeLimit := uint64(len(payload)) / 13
pToken, err := session.NewPrivateToken(0)
require.NoError(t, err)
srcObj := &object.Object{
SystemHeader: object.SystemHeader{
Version: 12,
PayloadLength: uint64(len(payload)),
ID: testObjectAddress(t).ObjectID,
OwnerID: object.OwnerID{1, 2, 3},
CID: testObjectAddress(t).CID,
},
Headers: []object.Header{
{Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}},
},
}
u := ProcUnit{
Head: srcObj,
Payload: bytes.NewBuffer(payload),
}
epoch := uint64(77)
sl := &sizeLimiter{
limit: sizeLimit,
epochRecv: &testPutEntity{res: epoch},
}
t.Run("no token", func(t *testing.T) {
require.EqualError(t, sl.Transform(ctx, ProcUnit{
Head: &Object{
SystemHeader: object.SystemHeader{
PayloadLength: uint64(len(payload)),
},
},
Payload: bytes.NewBuffer(payload),
}), errNoToken.Error())
})
ctx := context.WithValue(ctx, PrivateSessionToken, pToken)
t.Run("cut payload", func(t *testing.T) {
require.Error(t, sl.Transform(ctx, ProcUnit{
Head: &Object{
SystemHeader: object.SystemHeader{
PayloadLength: uint64(len(payload)) + 1,
},
},
Payload: bytes.NewBuffer(payload),
}))
})
objs := make([]Object, 0)
t.Run("handler error", func(t *testing.T) {
hErr := errors.New("test error for handler")
require.EqualError(t, sl.Transform(ctx, ProcUnit{
Head: &Object{
SystemHeader: object.SystemHeader{PayloadLength: uint64(len(payload))},
Headers: make([]object.Header, 0),
},
Payload: bytes.NewBuffer(payload),
}, func(context.Context, ProcUnit) error { return hErr }), hErr.Error())
})
require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
_, err := unit.Payload.Read(make([]byte, 1))
require.EqualError(t, err, io.EOF.Error())
objs = append(objs, *unit.Head.Copy())
return nil
}))
ln := len(objs)
res := make([]byte, 0, len(payload))
zObj := objs[ln-1]
require.Zero(t, zObj.SystemHeader.PayloadLength)
require.Empty(t, zObj.Payload)
require.Empty(t, zObj.Links(object.Link_Next))
require.Empty(t, zObj.Links(object.Link_Previous))
require.Empty(t, zObj.Links(object.Link_Parent))
children := zObj.Links(object.Link_Child)
require.Len(t, children, ln-1)
for i := range objs[:ln-1] {
require.Equal(t, objs[i].SystemHeader.ID, children[i])
}
for i := range objs[:ln-1] {
res = append(res, objs[i].Payload...)
if i == 0 {
require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID)
require.True(t, objs[i].Links(object.Link_Previous)[0].Empty())
} else if i < ln-2 {
require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID)
require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID)
} else {
_, h := objs[i].LastHeader(object.HeaderType(object.HomoHashHdr))
require.NotNil(t, h)
require.Equal(t, hash.Sum(payload), h.Value.(*object.Header_HomoHash).HomoHash)
require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID)
require.True(t, objs[i].Links(object.Link_Next)[0].Empty())
}
}
require.Equal(t, payload, res)
})
}
// testData returns size bytes of random data.
func testData(t *testing.T, size int) []byte {
res := make([]byte, size)
_, err := rand.Read(res)
require.NoError(t, err)
return res
}
// testObjectAddress returns a new random object address.
func testObjectAddress(t *testing.T) refs.Address {
oid, err := refs.NewObjectID()
require.NoError(t, err)
return refs.Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid}
}
func TestIntegration(t *testing.T) {
ownerKey := test.DecodeKey(1)
ownerID, err := refs.NewOwnerID(&ownerKey.PublicKey)
require.NoError(t, err)
privToken, err := session.NewPrivateToken(0)
require.NoError(t, err)
pkBytes, err := session.PublicSessionToken(privToken)
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), PrivateSessionToken, privToken)
pubToken := new(service.Token)
pubToken.SetID(service.TokenID{1, 2, 3})
pubToken.SetSessionKey(pkBytes)
pubToken.SetOwnerID(ownerID)
pubToken.SetOwnerKey(crypto.MarshalPublicKey(&ownerKey.PublicKey))
require.NoError(t, service.AddSignatureWithKey(ownerKey, service.NewSignedSessionToken(pubToken)))
ctx = context.WithValue(ctx, PublicSessionToken, pubToken)
t.Run("non-SG object", func(t *testing.T) {
t.Run("with split", func(t *testing.T) {
tr, err := NewTransformer(Params{
SGInfoReceiver: new(testPutEntity),
EpochReceiver: &testPutEntity{res: uint64(1)},
SizeLimit: 13,
Verifier: &testPutEntity{
err: errors.New(""), // force verifier to return non-nil error
},
})
require.NoError(t, err)
payload := make([]byte, 20)
_, err = rand.Read(payload)
require.NoError(t, err)
obj := &Object{
SystemHeader: object.SystemHeader{
PayloadLength: uint64(len(payload)),
CID: CID{3},
},
Headers: []object.Header{
{Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}},
},
}
obj.SystemHeader.OwnerID = ownerID
obj.SetHeader(&object.Header{
Value: &object.Header_Token{
Token: pubToken,
},
})
testTransformer(t, ctx, ProcUnit{
Head: obj,
Payload: bytes.NewBuffer(payload),
}, tr, payload)
})
})
}
func testTransformer(t *testing.T, ctx context.Context, u ProcUnit, tr Transformer, src []byte) {
objList := make([]Object, 0)
verifier, err := storage.NewLocalHeadIntegrityVerifier()
require.NoError(t, err)
require.NoError(t, tr.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
require.NoError(t, verifier.Verify(ctx, unit.Head))
objList = append(objList, *unit.Head.Copy())
return nil
}))
reverse := NewRestorePipeline(SplitRestorer())
res, err := reverse.Restore(ctx, objList...)
require.NoError(t, err)
integrityVerifier, err := storage.NewLocalIntegrityVerifier()
require.NoError(t, err)
require.NoError(t, integrityVerifier.Verify(ctx, &res[0]))
require.Equal(t, src, res[0].Payload)
_, h := res[0].LastHeader(object.HeaderType(object.HomoHashHdr))
require.True(t, hash.Sum(src).Equal(h.Value.(*object.Header_HomoHash).HomoHash))
}
func addLink(o *Object, t object.Link_Type, id ObjectID) {
o.AddHeader(&object.Header{Value: &object.Header_Link{
Link: &object.Link{Type: t, ID: id},
}})
}


@ -1,126 +0,0 @@
package transformer
import (
"context"
"sync"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/pkg/errors"
)
type (
// ObjectRestorer is an interface of object restorer.
ObjectRestorer interface {
Type() object.Transform_Type
Restore(context.Context, ...Object) ([]Object, error)
}
restorePipeline struct {
ObjectRestorer
*sync.RWMutex
items map[object.Transform_Type]ObjectRestorer
}
splitRestorer struct{}
)
var errEmptyObjList = errors.New("object list is empty")
var errMissingParentLink = errors.New("missing parent link")
func (s *restorePipeline) Restore(ctx context.Context, srcObjs ...Object) ([]Object, error) {
if len(srcObjs) == 0 {
return nil, errEmptyInput
}
s.RLock()
defer s.RUnlock()
var (
objs = srcObjs
err error
)
for {
_, th := objs[0].LastHeader(object.HeaderType(object.TransformHdr))
if th == nil {
break
}
transform := th.Value.(*object.Header_Transform).Transform
tr, ok := s.items[transform.Type]
if !ok {
return nil, errors.Errorf("missing restorer (%s)", transform.Type)
}
if objs, err = tr.Restore(ctx, objs...); err != nil {
return nil, errors.Wrapf(err, "restoration failed (%s)", transform.Type)
}
}
return objs, nil
}
// NewRestorePipeline is a constructor of the pipeline of object restorers.
func NewRestorePipeline(t ...ObjectRestorer) ObjectRestorer {
m := make(map[object.Transform_Type]ObjectRestorer, len(t))
for i := range t {
m[t[i].Type()] = t[i]
}
return &restorePipeline{
RWMutex: new(sync.RWMutex),
items: m,
}
}
func (*splitRestorer) Type() object.Transform_Type {
return object.Transform_Split
}
func (*splitRestorer) Restore(ctx context.Context, objs ...Object) ([]Object, error) {
if len(objs) == 0 {
return nil, errEmptyObjList
}
chain, err := GetChain(objs...)
if err != nil {
return nil, errors.Wrap(err, "could not get chain of objects")
}
obj := chain[len(chain)-1]
var (
size uint64
p = make([]byte, 0, len(chain[0].Payload)*len(chain))
)
for j := 0; j < len(chain); j++ {
p = append(p, chain[j].Payload...)
size += chain[j].SystemHeader.PayloadLength
}
obj.SystemHeader.PayloadLength = size
obj.Payload = p
parent, err := lastLink(&obj, object.Link_Parent)
if err != nil {
return nil, errMissingParentLink
}
obj.SystemHeader.ID = parent
err = deleteTransformer(&obj, object.Transform_Split)
if err != nil {
return nil, err
}
return []Object{obj}, nil
}
// SplitRestorer is a split object restorer's constructor.
func SplitRestorer() ObjectRestorer {
return new(splitRestorer)
}
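// A minimal restore sketch (restoreSplitObject is an assumed helper, not part of
// the original API): parts is assumed to hold the child objects produced by the
// Split transformation, each carrying a Transform_Split header, so the pipeline
// dispatches them to the split restorer and glues the payload back together.
func restoreSplitObject(ctx context.Context, parts []Object) (*Object, error) {
	pipeline := NewRestorePipeline(SplitRestorer())

	res, err := pipeline.Restore(ctx, parts...)
	if err != nil {
		return nil, err
	}

	// after a successful restore the list contains the single source object
	return &res[0], nil
}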


@ -1,525 +0,0 @@
package transformer
import (
"context"
"crypto/sha256"
"io"
"sort"
"time"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/session"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier"
"github.com/pkg/errors"
)
type (
// Type is a type alias of
// Type from object package of neofs-api-go.
Type = object.Transform_Type
// ProcUnit groups the information about the transforming unit.
ProcUnit struct {
Head *Object
Payload io.Reader
}
// ProcUnitHandler is a handling ProcUnit function.
ProcUnitHandler func(context.Context, ProcUnit) error
// Transformer is an interface of object transformer.
Transformer interface {
Transform(context.Context, ProcUnit, ...ProcUnitHandler) error
}
// EpochReceiver is an interface of epoch number container with read access.
EpochReceiver interface {
Epoch() uint64
}
transformer struct {
tPrelim Transformer
tSizeLim Transformer
tSign Transformer
}
preliminaryTransformer struct {
fMoulder Transformer
sgMoulder Transformer
}
fieldMoulder struct {
epochRecv EpochReceiver
}
sgMoulder struct {
sgInfoRecv storagegroup.InfoReceiver
}
sizeLimiter struct {
limit uint64
epochRecv EpochReceiver
}
headSigner struct {
verifier verifier.Verifier
}
emptyReader struct{}
// Params groups the parameters of object transformer's constructor.
Params struct {
SGInfoReceiver storagegroup.InfoReceiver
EpochReceiver EpochReceiver
SizeLimit uint64
Verifier verifier.Verifier
}
)
// ErrPayloadEOF is returned by a Transformer that
// received an unexpected end of the object payload.
var ErrPayloadEOF = errors.New("payload EOF")
const (
verifyHeadersCount = 2 // payload checksum, integrity
splitHeadersCount = 4 // flag, parent, left, right
transformerInstanceFailMsg = "could not create transformer instance"
// PrivateSessionToken is a context key for session.PrivateToken.
PrivateSessionToken = "private token"
// PublicSessionToken is a context key for service.SessionToken.
PublicSessionToken = "public token"
)
// ErrInvalidSGLinking is returned by a Transformer that received
// an object with broken storage group links.
var ErrInvalidSGLinking = errors.New("invalid storage group linking")
var (
errNoToken = errors.New("no token provided")
errEmptyInput = errors.New("empty input")
errChainNotFound = errors.New("chain not found")
errCutChain = errors.New("GetChain failed: chain is not full")
errMissingTransformHdr = errors.New("cannot find transformer header")
errEmptySGInfoRecv = errors.New("empty storage group info receivers")
errInvalidSizeLimit = errors.New("non-positive object size limit")
errEmptyEpochReceiver = errors.New("empty epoch receiver")
errEmptyVerifier = errors.New("empty object verifier")
)
// NewTransformer is an object transformer's constructor.
func NewTransformer(p Params) (Transformer, error) {
switch {
case p.SizeLimit <= 0:
return nil, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg)
case p.EpochReceiver == nil:
return nil, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg)
case p.SGInfoReceiver == nil:
return nil, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg)
case p.Verifier == nil:
return nil, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg)
}
return &transformer{
tPrelim: &preliminaryTransformer{
fMoulder: &fieldMoulder{
epochRecv: p.EpochReceiver,
},
sgMoulder: &sgMoulder{
sgInfoRecv: p.SGInfoReceiver,
},
},
tSizeLim: &sizeLimiter{
limit: p.SizeLimit,
epochRecv: p.EpochReceiver,
},
tSign: &headSigner{
verifier: p.Verifier,
},
}, nil
}
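// A construction sketch (newSignedTransformer is an assumed helper): sgRecv,
// epochRecv and objVerifier are assumed to be ready-made implementations of
// storagegroup.InfoReceiver, EpochReceiver and verifier.Verifier; the 4096-byte
// size limit is an arbitrary example value. The tokens are attached to the
// context under the keys consumed by fieldMoulder and signWithToken.
func newSignedTransformer(
	sgRecv storagegroup.InfoReceiver,
	epochRecv EpochReceiver,
	objVerifier verifier.Verifier,
	pToken session.PrivateToken,
	pubToken *service.Token,
) (Transformer, context.Context, error) {
	tr, err := NewTransformer(Params{
		SGInfoReceiver: sgRecv,
		EpochReceiver:  epochRecv,
		SizeLimit:      4096, // assumed example limit, in bytes
		Verifier:       objVerifier,
	})
	if err != nil {
		return nil, nil, err
	}

	ctx := context.WithValue(context.Background(), PrivateSessionToken, pToken)
	ctx = context.WithValue(ctx, PublicSessionToken, pubToken)

	return tr, ctx, nil
}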
func (s *transformer) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
if err := s.tPrelim.Transform(ctx, unit); err != nil {
return err
}
return s.tSizeLim.Transform(ctx, unit, func(ctx context.Context, unit ProcUnit) error {
return s.tSign.Transform(ctx, unit, handlers...)
})
}
func (s *preliminaryTransformer) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error {
if err := s.fMoulder.Transform(ctx, unit); err != nil {
return err
}
return s.sgMoulder.Transform(ctx, unit)
}
// TODO: simplify huge function.
func (s *sizeLimiter) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
if unit.Head.SystemHeader.PayloadLength <= s.limit {
homoHashHdr := &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}
unit.Head.AddHeader(&object.Header{Value: homoHashHdr})
buf := make([]byte, unit.Head.SystemHeader.PayloadLength)
if err := readChunk(unit, buf, nil, homoHashHdr); err != nil {
return err
}
unit.Head.Payload = buf
return procHandlers(ctx, EmptyPayloadUnit(unit.Head), handlers...)
}
var (
err error
buf = make([]byte, s.limit)
hAcc = sha256.New()
srcHdrLen = len(unit.Head.Headers)
pObj = unit.Head
resObj = ProcUnit{
Head: &Object{
SystemHeader: object.SystemHeader{
Version: pObj.SystemHeader.Version,
OwnerID: pObj.SystemHeader.OwnerID,
CID: pObj.SystemHeader.CID,
CreatedAt: object.CreationPoint{
UnixTime: time.Now().Unix(),
Epoch: s.epochRecv.Epoch(),
},
},
},
Payload: unit.Payload,
}
left, right = &object.Link{Type: object.Link_Previous}, &object.Link{Type: object.Link_Next}
hashAccHdr, hashHdr = new(object.Header_PayloadChecksum), new(object.Header_PayloadChecksum)
homoHashAccHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}
childCount = pObj.SystemHeader.PayloadLength/s.limit + 1
)
if right.ID, err = refs.NewObjectID(); err != nil {
return err
}
splitHeaders := make([]object.Header, 0, 3*verifyHeadersCount+splitHeadersCount+childCount)
splitHeaders = append(splitHeaders, pObj.Headers...)
splitHeaders = append(splitHeaders, []object.Header{
{Value: &object.Header_Transform{Transform: &object.Transform{Type: object.Transform_Split}}},
{Value: &object.Header_Link{Link: &object.Link{
Type: object.Link_Parent,
ID: unit.Head.SystemHeader.ID,
}}},
{Value: &object.Header_Link{Link: left}},
{Value: &object.Header_Link{Link: right}},
{Value: hashHdr},
{Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}},
{Value: homoHashAccHdr},
{Value: hashAccHdr},
{Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}},
}...)
children := splitHeaders[srcHdrLen+2*verifyHeadersCount+splitHeadersCount+1:]
pObj.Headers = splitHeaders[:srcHdrLen+2*verifyHeadersCount+splitHeadersCount]
for tail := pObj.SystemHeader.PayloadLength; tail > 0; tail -= min(tail, s.limit) {
size := min(tail, s.limit)
resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)-verifyHeadersCount-1]
if err = readChunk(resObj, buf[:size], hAcc, homoHashAccHdr); err != nil {
return err
}
resObj.Head.SystemHeader.PayloadLength = size
resObj.Head.Payload = buf[:size]
left.ID, resObj.Head.SystemHeader.ID = resObj.Head.SystemHeader.ID, right.ID
if tail <= s.limit {
right.ID = ObjectID{}
temp := make([]object.Header, verifyHeadersCount+1) // +1 for homomorphic hash
copy(temp, pObj.Headers[srcHdrLen:])
hashAccHdr.PayloadChecksum = hAcc.Sum(nil)
copy(pObj.Headers[srcHdrLen:srcHdrLen+verifyHeadersCount+1],
pObj.Headers[len(pObj.Headers)-verifyHeadersCount:])
resObj.Head.Headers = pObj.Headers[:srcHdrLen+verifyHeadersCount]
if err = signWithToken(ctx, &Object{
SystemHeader: pObj.SystemHeader,
Headers: resObj.Head.Headers,
}); err != nil {
return err
}
copy(pObj.Headers[srcHdrLen+2*(verifyHeadersCount+1):],
pObj.Headers[srcHdrLen+verifyHeadersCount+1:srcHdrLen+verifyHeadersCount+splitHeadersCount])
copy(pObj.Headers[srcHdrLen+verifyHeadersCount+1:], temp)
resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)]
} else if right.ID, err = refs.NewObjectID(); err != nil {
return err
}
if err := procHandlers(ctx, EmptyPayloadUnit(resObj.Head), handlers...); err != nil {
return err
}
children = append(children, object.Header{Value: &object.Header_Link{Link: &object.Link{
Type: object.Link_Child,
ID: resObj.Head.SystemHeader.ID,
}}})
}
pObj.SystemHeader.PayloadLength = 0
pObj.Headers = append(pObj.Headers[:srcHdrLen], children...)
if err := readChunk(unit, nil, nil, nil); err != nil {
return err
}
return procHandlers(ctx, EmptyPayloadUnit(pObj), handlers...)
}
func readChunk(unit ProcUnit, buf []byte, hAcc io.Writer, homoHashAcc *object.Header_HomoHash) (err error) {
var csHdr *object.Header_PayloadChecksum
if _, v := unit.Head.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); v == nil {
csHdr = new(object.Header_PayloadChecksum)
unit.Head.Headers = append(unit.Head.Headers, object.Header{Value: csHdr})
} else {
csHdr = v.Value.(*object.Header_PayloadChecksum)
}
if _, err = io.ReadFull(unit.Payload, buf); err != nil && err != io.EOF {
if errors.Is(err, io.ErrUnexpectedEOF) {
err = ErrPayloadEOF
}
return
} else if hAcc != nil {
if _, err = hAcc.Write(buf); err != nil {
return
}
}
if homoHashAcc != nil {
if homoHashAcc.HomoHash, err = hash.Concat([]hash.Hash{homoHashAcc.HomoHash, hash.Sum(buf)}); err != nil {
return
}
}
h := sha256.Sum256(buf)
csHdr.PayloadChecksum = h[:]
return nil
}
func (s *headSigner) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
if s.verifier.Verify(ctx, unit.Head) != nil {
if err := signWithToken(ctx, unit.Head); err != nil {
return err
}
}
return procHandlers(ctx, unit, handlers...)
}
func signWithToken(ctx context.Context, obj *Object) error {
integrityHdr := new(object.IntegrityHeader)
if pToken, ok := ctx.Value(PrivateSessionToken).(session.PrivateToken); !ok {
return errNoToken
} else if hdrData, err := verifier.MarshalHeaders(obj, len(obj.Headers)); err != nil {
return err
} else {
cs := sha256.Sum256(hdrData)
integrityHdr.SetHeadersChecksum(cs[:])
if err = service.AddSignatureWithKey(pToken.PrivateKey(), integrityHdr); err != nil {
return err
}
}
obj.AddHeader(&object.Header{Value: &object.Header_Integrity{Integrity: integrityHdr}})
return nil
}
func (s *fieldMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) (err error) {
token, ok := ctx.Value(PublicSessionToken).(*service.Token)
if !ok {
return errNoToken
}
unit.Head.AddHeader(&object.Header{
Value: &object.Header_Token{
Token: token,
},
})
if unit.Head.SystemHeader.ID.Empty() {
if unit.Head.SystemHeader.ID, err = refs.NewObjectID(); err != nil {
return
}
}
if unit.Head.SystemHeader.CreatedAt.UnixTime == 0 {
unit.Head.SystemHeader.CreatedAt.UnixTime = time.Now().Unix()
}
if unit.Head.SystemHeader.CreatedAt.Epoch == 0 {
unit.Head.SystemHeader.CreatedAt.Epoch = s.epochRecv.Epoch()
}
if unit.Head.SystemHeader.Version == 0 {
unit.Head.SystemHeader.Version = 1
}
return nil
}
func (s *sgMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error {
sgLinks := unit.Head.Links(object.Link_StorageGroup)
group, err := unit.Head.StorageGroup()
if nonEmptyList := len(sgLinks) > 0; (err == nil) != nonEmptyList {
return ErrInvalidSGLinking
} else if err != nil || !group.Empty() {
return nil
}
sort.Sort(storagegroup.IDList(sgLinks))
sgInfo, err := s.sgInfoRecv.GetSGInfo(ctx, unit.Head.SystemHeader.CID, sgLinks)
if err != nil {
return err
}
unit.Head.SetStorageGroup(sgInfo)
return nil
}
func procHandlers(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
for i := range handlers {
if err := handlers[i](ctx, unit); err != nil {
return err
}
}
return nil
}
func (*emptyReader) Read([]byte) (n int, err error) { return 0, io.EOF }
// EmptyPayloadUnit returns a ProcUnit with the Object from the argument and an empty payload reader
// that always returns (0, io.EOF).
func EmptyPayloadUnit(head *Object) ProcUnit { return ProcUnit{Head: head, Payload: new(emptyReader)} }
func min(a, b uint64) uint64 {
if a < b {
return a
}
return b
}
// GetChain builds the list of objects in the hereditary chain.
// If the chain cannot be built, an error is returned.
func GetChain(srcObjs ...Object) ([]Object, error) {
var (
err error
first, id ObjectID
res = make([]Object, 0, len(srcObjs))
m = make(map[ObjectID]*Object, len(srcObjs))
)
// Fill map with all objects
for i := range srcObjs {
m[srcObjs[i].SystemHeader.ID] = &srcObjs[i]
prev, err := lastLink(&srcObjs[i], object.Link_Previous)
if err == nil && prev.Empty() { // then it is first
id, err = lastLink(&srcObjs[i], object.Link_Next)
if err != nil {
return nil, errors.Wrap(err, "GetChain failed: missing first object next links")
}
first = srcObjs[i].SystemHeader.ID
}
}
// Check first presence
if first.Empty() {
return nil, errChainNotFound
}
res = append(res, *m[first])
// Iterate chain
for count := 0; !id.Empty() && count < len(srcObjs); count++ {
nextObj, ok := m[id]
if !ok {
return nil, errors.Errorf("GetChain failed: missing next object %s", id)
}
id, err = lastLink(nextObj, object.Link_Next)
if err != nil {
return nil, errors.Wrap(err, "GetChain failed: missing object next links")
}
res = append(res, *nextObj)
}
// Check last chain element has empty next (prevent cut chain)
id, err = lastLink(&res[len(res)-1], object.Link_Next)
if err != nil {
return nil, errors.Wrap(err, "GetChain failed: missing object next links")
} else if !id.Empty() {
return nil, errCutChain
}
return res, nil
}
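// buildSplitChain is a sketch (an assumed helper, not part of the original API)
// that wires freshly created child objects into the Previous/Next layout that
// GetChain expects: the first child carries an empty Previous link, the last one
// an empty Next link, and neighbours point at each other.
func buildSplitChain(children []Object) []Object {
	for i := range children {
		var prev, next ObjectID
		if i > 0 {
			prev = children[i-1].SystemHeader.ID
		}
		if i < len(children)-1 {
			next = children[i+1].SystemHeader.ID
		}
		children[i].AddHeader(&object.Header{Value: &object.Header_Link{
			Link: &object.Link{Type: object.Link_Previous, ID: prev},
		}})
		children[i].AddHeader(&object.Header{Value: &object.Header_Link{
			Link: &object.Link{Type: object.Link_Next, ID: next},
		}})
	}
	return children
}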
func deleteTransformer(o *Object, t object.Transform_Type) error {
n, th := o.LastHeader(object.HeaderType(object.TransformHdr))
if th == nil || th.Value.(*object.Header_Transform).Transform.Type != t {
return errMissingTransformHdr
}
o.Headers = o.Headers[:n]
return nil
}
func lastLink(o *Object, t object.Link_Type) (res ObjectID, err error) {
for i := len(o.Headers) - 1; i >= 0; i-- {
if v, ok := o.Headers[i].Value.(*object.Header_Link); ok {
if v.Link.GetType() == t {
res = v.Link.ID
return
}
}
}
err = errors.Errorf("object.lastLink: links of type %s not found", t)
return
}


@ -1,107 +0,0 @@
package transport
import (
"context"
"io"
"time"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
)
type (
// ObjectTransport is an interface of the executor of object remote operations.
ObjectTransport interface {
Transport(context.Context, ObjectTransportParams)
}
// ObjectTransportParams groups the parameters of remote object operation.
ObjectTransportParams struct {
TransportInfo MetaInfo
TargetNode multiaddr.Multiaddr
ResultHandler ResultHandler
}
// ResultHandler is an interface of remote object operation's result handler.
ResultHandler interface {
HandleResult(context.Context, multiaddr.Multiaddr, interface{}, error)
}
// MetaInfo is an interface of the container of cross-operation values.
MetaInfo interface {
GetTTL() uint32
GetTimeout() time.Duration
service.SessionTokenSource
GetRaw() bool
Type() object.RequestType
service.BearerTokenSource
service.ExtendedHeadersSource
}
// SearchInfo is an interface of the container of object Search operation parameters.
SearchInfo interface {
MetaInfo
GetCID() refs.CID
GetQuery() []byte
}
// PutInfo is an interface of the container of object Put operation parameters.
PutInfo interface {
MetaInfo
GetHead() *object.Object
Payload() io.Reader
CopiesNumber() uint32
}
// AddressInfo is an interface of the container of object request by Address.
AddressInfo interface {
MetaInfo
GetAddress() refs.Address
}
// GetInfo is an interface of the container of object Get operation parameters.
GetInfo interface {
AddressInfo
}
// HeadInfo is an interface of the container of object Head operation parameters.
HeadInfo interface {
GetInfo
GetFullHeaders() bool
}
// RangeInfo is an interface of the container of object GetRange operation parameters.
RangeInfo interface {
AddressInfo
GetRange() object.Range
}
// RangeHashInfo is an interface of the container of object GetRangeHash operation parameters.
RangeHashInfo interface {
AddressInfo
GetRanges() []object.Range
GetSalt() []byte
}
)
const (
// KeyID is a filter key to object ID field.
KeyID = "ID"
// KeyTombstone is a filter key to tombstone header.
KeyTombstone = "TOMBSTONE"
// KeyStorageGroup is a filter key to storage group link.
KeyStorageGroup = "STORAGE_GROUP"
// KeyNoChildren is a filter key to objects w/o child links.
KeyNoChildren = "LEAF"
// KeyParent is a filter key to parent link.
KeyParent = "PARENT"
// KeyHasParent is a filter key to objects with parent link.
KeyHasParent = "HAS_PAR"
)


@ -1,138 +0,0 @@
package storagegroup
import (
"context"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/storagegroup"
"github.com/nspcc-dev/neofs-node/pkg/core/container"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport"
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
// StorageGroupInfoReceiverParams groups the parameters of
// storage group information receiver.
StorageGroupInfoReceiverParams struct {
SelectiveContainerExecutor transport.SelectiveContainerExecutor
Logger *zap.Logger
}
sgInfoRecv struct {
executor transport.SelectiveContainerExecutor
log *zap.Logger
}
)
const locationFinderInstanceFailMsg = "could not create object location finder"
// ErrIncompleteSGInfo is returned by the storage group information receiver
// that could not receive the full information.
var ErrIncompleteSGInfo = errors.New("could not receive full storage group info")
// PublicSessionToken is a context key for SessionToken.
// FIXME: temp solution for cycle import fix.
// Unify with same const from transformer pkg.
const PublicSessionToken = "public token"
// BearerToken is a context key for BearerToken.
const BearerToken = "bearer token"
// ExtendedHeaders is a context key for X-headers.
const ExtendedHeaders = "extended headers"
func (s *sgInfoRecv) GetSGInfo(ctx context.Context, cid container.ID, group []object.ID) (*storagegroup.StorageGroup, error) {
var (
err error
res = new(storagegroup.StorageGroup)
hashList = make([]hash.Hash, 0, len(group))
)
m := make(map[string]struct{}, len(group))
for i := range group {
m[group[i].String()] = struct{}{}
}
// FIXME: hardcoded for simplicity.
// The function is called in the following cases:
// - SG transformation on trusted node side (only in this case session token is needed);
// - SG info check on container nodes (token is not needed since system group has extra access);
// - data audit on inner ring nodes (same as previous).
var token service.SessionToken
if v, ok := ctx.Value(PublicSessionToken).(service.SessionToken); ok {
token = v
}
var bearer service.BearerToken
if v, ok := ctx.Value(BearerToken).(service.BearerToken); ok {
bearer = v
}
var extHdrs []service.ExtendedHeader
if v, ok := ctx.Value(ExtendedHeaders).([]service.ExtendedHeader); ok {
extHdrs = v
}
if err = s.executor.Head(ctx, &transport.HeadParams{
GetParams: transport.GetParams{
SelectiveParams: transport.SelectiveParams{
CID: cid,
TTL: service.SingleForwardingTTL,
IDList: group,
Breaker: func(addr refs.Address) (cFlag transport.ProgressControlFlag) {
if len(m) == 0 {
cFlag = transport.BreakProgress
} else if _, ok := m[addr.ObjectID.String()]; !ok {
cFlag = transport.NextAddress
}
return
},
Token: token,
Bearer: bearer,
ExtendedHeaders: extHdrs,
},
Handler: func(_ multiaddr.Multiaddr, obj *object.Object) {
_, hashHeader := obj.LastHeader(object.HeaderType(object.HomoHashHdr))
if hashHeader == nil {
return
}
hashList = append(hashList, hashHeader.Value.(*object.Header_HomoHash).HomoHash)
res.ValidationDataSize += obj.SystemHeader.PayloadLength
delete(m, obj.SystemHeader.ID.String())
},
},
FullHeaders: true,
}); err != nil {
return nil, err
} else if len(m) > 0 {
return nil, ErrIncompleteSGInfo
}
res.ValidationHash, err = hash.Concat(hashList)
return res, err
}
// NewStorageGroupInfoReceiver constructs storagegroup.InfoReceiver from SelectiveContainerExecutor.
func NewStorageGroupInfoReceiver(p StorageGroupInfoReceiverParams) (storagegroup.InfoReceiver, error) {
switch {
case p.Logger == nil:
return nil, errors.Wrap(logger.ErrNilLogger, locationFinderInstanceFailMsg)
case p.SelectiveContainerExecutor == nil:
return nil, errors.Wrap(errors.New("empty container handler"), locationFinderInstanceFailMsg)
}
return &sgInfoRecv{
executor: p.SelectiveContainerExecutor,
log: p.Logger,
}, nil
}
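// A minimal usage sketch (collectSGInfo is an assumed helper): executor is
// assumed to be a ready transport.SelectiveContainerExecutor (see
// NewObjectContainerHandler), and the session token, if any, is attached under
// the PublicSessionToken key before GetSGInfo collects the homomorphic hashes
// of the group members.
func collectSGInfo(
	ctx context.Context,
	executor transport.SelectiveContainerExecutor,
	log *zap.Logger,
	cid container.ID,
	group []object.ID,
	token service.SessionToken,
) (*storagegroup.StorageGroup, error) {
	recv, err := NewStorageGroupInfoReceiver(StorageGroupInfoReceiverParams{
		SelectiveContainerExecutor: executor,
		Logger:                     log,
	})
	if err != nil {
		return nil, err
	}

	if token != nil {
		ctx = context.WithValue(ctx, PublicSessionToken, token)
	}

	return recv.GetSGInfo(ctx, cid, group)
}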


@ -1,658 +0,0 @@
package transport
import (
"context"
"io"
"sync"
"time"
"github.com/multiformats/go-multiaddr"
"github.com/nspcc-dev/neofs-api-go/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-node/pkg/core/container"
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
"github.com/pkg/errors"
"go.uber.org/zap"
)
/*
File source code includes the implementation of the unified objects container handler.
The implementation makes it possible to perform arbitrary logic over an object container distributed in the network.
The implementation holds the placement and object transport implementations in a black box.
Any special logic can be tuned through the passed handle parameters.
NOTE: although the implementation of the other interfaces via OCH would be the same, they are kept separate in order to avoid a mess.
*/
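// A sketch of the tuning described above (headSelected is an assumed helper):
// exec is assumed to be a ready SelectiveContainerExecutor, headers of the
// objects listed in ids are collected from the container nodes, and the Breaker
// interrupts the traversal once every requested object has been seen.
func headSelected(ctx context.Context, exec SelectiveContainerExecutor, cid container.ID, ids []refs.ObjectID) ([]*object.Object, error) {
	pending := make(map[string]struct{}, len(ids))
	for i := range ids {
		pending[ids[i].String()] = struct{}{}
	}

	res := make([]*object.Object, 0, len(ids))

	err := exec.Head(ctx, &HeadParams{
		GetParams: GetParams{
			SelectiveParams: SelectiveParams{
				CID:    cid,
				TTL:    service.SingleForwardingTTL,
				IDList: ids,
				Breaker: func(refs.Address) (flag ProgressControlFlag) {
					if len(pending) == 0 {
						flag = BreakProgress
					}
					return
				},
			},
			Handler: func(_ multiaddr.Multiaddr, obj *object.Object) {
				delete(pending, obj.SystemHeader.ID.String())
				res = append(res, obj)
			},
		},
		FullHeaders: true,
	})

	return res, err
}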
type (
// SelectiveContainerExecutor is an interface of the tool that performs
// object operations in a container with preconditions.
SelectiveContainerExecutor interface {
Put(context.Context, *PutParams) error
Get(context.Context, *GetParams) error
Head(context.Context, *HeadParams) error
Search(context.Context, *SearchParams) error
RangeHash(context.Context, *RangeHashParams) error
}
// PutParams groups the parameters
// of selective object Put.
PutParams struct {
SelectiveParams
Object *object.Object
Handler func(multiaddr.Multiaddr, bool)
CopiesNumber uint32
}
// GetParams groups the parameters
// of selective object Get.
GetParams struct {
SelectiveParams
Handler func(multiaddr.Multiaddr, *object.Object)
}
// HeadParams groups the parameters
// of selective object Head.
HeadParams struct {
GetParams
FullHeaders bool
}
// SearchParams groups the parameters
// of selective object Search.
SearchParams struct {
SelectiveParams
SearchCID refs.CID
SearchQuery []byte
Handler func(multiaddr.Multiaddr, []refs.Address)
}
// RangeHashParams groups the parameters
// of selective object GetRangeHash.
RangeHashParams struct {
SelectiveParams
Ranges []object.Range
Salt []byte
Handler func(multiaddr.Multiaddr, []hash.Hash)
}
// SelectiveParams groups the parameters of
// the execution of selective container operation.
SelectiveParams struct {
/* Should be set to true only if the service behind the object transport implementations is served on localhost. */
ServeLocal bool
/* Raw option of the request */
Raw bool
/* TTL for object transport. All transport operations inherit the same value. */
TTL uint32
/* Required ID of the processed container. If empty or not set, an error is returned. */
CID container.ID
/* List of nodes selected for processing. If not specified, the nodes are selected during execution. */
Nodes []multiaddr.Multiaddr
/*
The next two parameters make it possible to process selected objects in the container.
At least one of IDList or Query must be non-empty; otherwise an error is returned.
*/
/* List of objects to process (takes precedence over the query). */
IDList []refs.ObjectID
/* If no objects are indicated, the query is used for selection. */
Query []byte
/*
If the function is provided, it is called for every address before the operation.
A BreakProgress result interrupts the execution.
*/
Breaker func(refs.Address) ProgressControlFlag
/* Public session token */
Token service.SessionToken
/* Bearer token */
Bearer service.BearerToken
/* Extended headers */
ExtendedHeaders []service.ExtendedHeader
}
// ProgressControlFlag is an enumeration of progress control flags.
ProgressControlFlag int
// ObjectContainerHandlerParams groups the parameters of the SelectiveContainerExecutor constructor.
ObjectContainerHandlerParams struct {
NodeLister *placement.PlacementWrapper
Executor ContainerTraverseExecutor
*zap.Logger
}
simpleTraverser struct {
*sync.Once
list []multiaddr.Multiaddr
}
selectiveCnrExec struct {
cnl *placement.PlacementWrapper
Executor ContainerTraverseExecutor
log *zap.Logger
}
metaInfo struct {
ttl uint32
raw bool
rt object.RequestType
token service.SessionToken
bearer service.BearerToken
extHdrs []service.ExtendedHeader
}
putInfo struct {
metaInfo
obj *object.Object
cn uint32
}
getInfo struct {
metaInfo
addr object.Address
raw bool
}
headInfo struct {
getInfo
fullHdr bool
}
searchInfo struct {
metaInfo
cid container.ID
query []byte
}
rangeHashInfo struct {
metaInfo
addr object.Address
ranges []object.Range
salt []byte
}
execItems struct {
params SelectiveParams
metaConstructor func(addr object.Address) MetaInfo
handler ResultHandler
}
searchTarget struct {
list []refs.Address
}
// ContainerTraverseExecutor is an interface of
// object operation executor with container traversing.
ContainerTraverseExecutor interface {
Execute(context.Context, TraverseParams)
}
// TraverseParams groups the parameters of container traversing.
TraverseParams struct {
TransportInfo MetaInfo
Handler ResultHandler
Traverser Traverser
WorkerPool WorkerPool
ExecutionInterceptor func(context.Context, multiaddr.Multiaddr) bool
}
// WorkerPool is an interface of a goroutine pool.
WorkerPool interface {
Submit(func()) error
}
// Traverser is an interface of container traverser.
Traverser interface {
Next(context.Context) []multiaddr.Multiaddr
}
cnrTraverseExec struct {
transport ObjectTransport
}
singleRoutinePool struct{}
emptyReader struct{}
)
const (
_ ProgressControlFlag = iota
// NextAddress is a ProgressControlFlag to proceed to the next address of the object.
NextAddress
// NextNode is a ProgressControlFlag to proceed to the next node.
NextNode
// BreakProgress is a ProgressControlFlag to interrupt the execution.
BreakProgress
)
const (
instanceFailMsg = "could not create container objects collector"
)
var (
errNilObjectTransport = errors.New("object transport is nil")
errEmptyLogger = errors.New("empty logger")
errEmptyNodeLister = errors.New("empty container node lister")
errEmptyTraverseExecutor = errors.New("empty container traverse executor")
errSelectiveParams = errors.New("neither ID list nor query provided")
)
func (s *selectiveCnrExec) Put(ctx context.Context, p *PutParams) error {
meta := &putInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestPut,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
obj: p.Object,
cn: p.CopiesNumber,
}
return s.exec(ctx, &execItems{
params: p.SelectiveParams,
metaConstructor: func(object.Address) MetaInfo { return meta },
handler: p,
})
}
func (s *selectiveCnrExec) Get(ctx context.Context, p *GetParams) error {
return s.exec(ctx, &execItems{
params: p.SelectiveParams,
metaConstructor: func(addr object.Address) MetaInfo {
return &getInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestGet,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
addr: addr,
raw: p.Raw,
}
},
handler: p,
})
}
func (s *selectiveCnrExec) Head(ctx context.Context, p *HeadParams) error {
return s.exec(ctx, &execItems{
params: p.SelectiveParams,
metaConstructor: func(addr object.Address) MetaInfo {
return &headInfo{
getInfo: getInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestHead,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
addr: addr,
raw: p.Raw,
},
fullHdr: p.FullHeaders,
}
},
handler: p,
})
}
func (s *selectiveCnrExec) Search(ctx context.Context, p *SearchParams) error {
return s.exec(ctx, &execItems{
params: p.SelectiveParams,
metaConstructor: func(object.Address) MetaInfo {
return &searchInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestSearch,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
cid: p.SearchCID,
query: p.SearchQuery,
}
},
handler: p,
})
}
func (s *selectiveCnrExec) RangeHash(ctx context.Context, p *RangeHashParams) error {
return s.exec(ctx, &execItems{
params: p.SelectiveParams,
metaConstructor: func(addr object.Address) MetaInfo {
return &rangeHashInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestRangeHash,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
addr: addr,
ranges: p.Ranges,
salt: p.Salt,
}
},
handler: p,
})
}
func (s *selectiveCnrExec) exec(ctx context.Context, p *execItems) error {
if err := p.params.validate(); err != nil {
return err
}
nodes, err := s.prepareNodes(ctx, &p.params)
if err != nil {
return err
}
loop:
for i := range nodes {
addrList := s.prepareAddrList(ctx, &p.params, nodes[i])
if len(addrList) == 0 {
continue
}
for j := range addrList {
if p.params.Breaker != nil {
switch cFlag := p.params.Breaker(addrList[j]); cFlag {
case NextAddress:
continue
case NextNode:
continue loop
case BreakProgress:
break loop
}
}
s.Executor.Execute(ctx, TraverseParams{
TransportInfo: p.metaConstructor(addrList[j]),
Handler: p.handler,
Traverser: newSimpleTraverser(nodes[i]),
})
}
}
return nil
}
func (s *SelectiveParams) validate() error {
switch {
case len(s.IDList) == 0 && len(s.Query) == 0:
return errSelectiveParams
default:
return nil
}
}
func (s *selectiveCnrExec) prepareNodes(ctx context.Context, p *SelectiveParams) ([]multiaddr.Multiaddr, error) {
if len(p.Nodes) > 0 {
return p.Nodes, nil
}
// If the node serves the Object transport service on localhost, pass a single empty node
if p.ServeLocal {
// all transport implementations will use localhost by default
return []multiaddr.Multiaddr{nil}, nil
}
// Otherwise use container nodes
return s.cnl.ContainerNodes(ctx, p.CID)
}
func (s *selectiveCnrExec) prepareAddrList(ctx context.Context, p *SelectiveParams, node multiaddr.Multiaddr) []refs.Address {
var (
addrList []object.Address
l = len(p.IDList)
)
if l > 0 {
addrList = make([]object.Address, 0, l)
for i := range p.IDList {
addrList = append(addrList, object.Address{CID: p.CID, ObjectID: p.IDList[i]})
}
return addrList
}
handler := new(searchTarget)
s.Executor.Execute(ctx, TraverseParams{
TransportInfo: &searchInfo{
metaInfo: metaInfo{
ttl: p.TTL,
rt: object.RequestSearch,
raw: p.Raw,
token: p.Token,
bearer: p.Bearer,
extHdrs: p.ExtendedHeaders,
},
cid: p.CID,
query: p.Query,
},
Handler: handler,
Traverser: newSimpleTraverser(node),
})
return handler.list
}
func newSimpleTraverser(list ...multiaddr.Multiaddr) Traverser {
return &simpleTraverser{
Once: new(sync.Once),
list: list,
}
}
func (s *simpleTraverser) Next(context.Context) (res []multiaddr.Multiaddr) {
s.Do(func() {
res = s.list
})
return
}
func (s metaInfo) GetTTL() uint32 { return s.ttl }
func (s metaInfo) GetTimeout() time.Duration { return 0 }
func (s metaInfo) GetRaw() bool { return s.raw }
func (s metaInfo) Type() object.RequestType { return s.rt }
func (s metaInfo) GetSessionToken() service.SessionToken { return s.token }
func (s metaInfo) GetBearerToken() service.BearerToken { return s.bearer }
func (s metaInfo) ExtendedHeaders() []service.ExtendedHeader { return s.extHdrs }
func (s *putInfo) GetHead() *object.Object { return s.obj }
func (s *putInfo) Payload() io.Reader { return new(emptyReader) }
func (*emptyReader) Read(p []byte) (int, error) { return 0, io.EOF }
func (s *putInfo) CopiesNumber() uint32 {
return s.cn
}
func (s *getInfo) GetAddress() refs.Address { return s.addr }
func (s *getInfo) Raw() bool { return s.raw }
func (s *headInfo) GetFullHeaders() bool { return s.fullHdr }
func (s *searchInfo) GetCID() refs.CID { return s.cid }
func (s *searchInfo) GetQuery() []byte { return s.query }
func (s *rangeHashInfo) GetAddress() refs.Address { return s.addr }
func (s *rangeHashInfo) GetRanges() []object.Range { return s.ranges }
func (s *rangeHashInfo) GetSalt() []byte { return s.salt }
func (s *searchTarget) HandleResult(_ context.Context, _ multiaddr.Multiaddr, r interface{}, e error) {
if e == nil {
s.list = append(s.list, r.([]refs.Address)...)
}
}
// HandleResult calls Handler with:
// - the node Multiaddr from the argument;
// - whether the error is nil.
func (s *PutParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, _ interface{}, e error) {
s.Handler(node, e == nil)
}
// HandleResult calls Handler if the error argument is nil with:
// - the node Multiaddr from the argument;
// - the result cast to an Object pointer.
func (s *GetParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
if e == nil {
s.Handler(node, r.(*object.Object))
}
}
// HandleResult calls Handler if the error argument is nil with:
// - the node Multiaddr from the argument;
// - the result cast to an Address slice.
func (s *SearchParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
if e == nil {
s.Handler(node, r.([]refs.Address))
}
}
// HandleResult calls Handler if the error argument is nil with:
// - the node Multiaddr from the argument;
// - the result cast to a Hash slice.
func (s *RangeHashParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) {
if e == nil {
s.Handler(node, r.([]hash.Hash))
}
}
func (s *cnrTraverseExec) Execute(ctx context.Context, p TraverseParams) {
if p.WorkerPool == nil {
p.WorkerPool = new(singleRoutinePool)
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
wg := new(sync.WaitGroup)
for {
select {
case <-ctx.Done():
return
default:
}
nodes := p.Traverser.Next(ctx)
if len(nodes) == 0 {
break
}
for i := range nodes {
node := nodes[i]
wg.Add(1)
if err := p.WorkerPool.Submit(func() {
defer wg.Done()
if p.ExecutionInterceptor != nil && p.ExecutionInterceptor(ctx, node) {
return
}
s.transport.Transport(ctx, ObjectTransportParams{
TransportInfo: p.TransportInfo,
TargetNode: node,
ResultHandler: p.Handler,
})
}); err != nil {
wg.Done()
}
}
wg.Wait()
}
}
func (*singleRoutinePool) Submit(fn func()) error {
fn()
return nil
}
// NewObjectContainerHandler is a SelectiveContainerExecutor constructor.
func NewObjectContainerHandler(p ObjectContainerHandlerParams) (SelectiveContainerExecutor, error) {
switch {
case p.Executor == nil:
return nil, errors.Wrap(errEmptyTraverseExecutor, instanceFailMsg)
case p.Logger == nil:
return nil, errors.Wrap(errEmptyLogger, instanceFailMsg)
case p.NodeLister == nil:
return nil, errors.Wrap(errEmptyNodeLister, instanceFailMsg)
}
return &selectiveCnrExec{
cnl: p.NodeLister,
Executor: p.Executor,
log: p.Logger,
}, nil
}
// NewContainerTraverseExecutor is a ContainerTraverseExecutor constructor.
func NewContainerTraverseExecutor(t ObjectTransport) (ContainerTraverseExecutor, error) {
if t == nil {
return nil, errNilObjectTransport
}
return &cnrTraverseExec{transport: t}, nil
}
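// A wiring sketch (newSelectiveExecutor is an assumed helper): tr is assumed to
// be a ready ObjectTransport, nodes the placement wrapper and log a configured
// logger. The traverse executor is built first and then handed to the selective
// container executor constructor.
func newSelectiveExecutor(tr ObjectTransport, nodes *placement.PlacementWrapper, log *zap.Logger) (SelectiveContainerExecutor, error) {
	traverseExec, err := NewContainerTraverseExecutor(tr)
	if err != nil {
		return nil, err
	}

	return NewObjectContainerHandler(ObjectContainerHandlerParams{
		NodeLister: nodes,
		Executor:   traverseExec,
		Logger:     log,
	})
}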