forked from TrueCloudLab/frostfs-node
[#1513] Upgrade NeoFS SDK Go with changed netmap package

Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>

parent 24b4c1ecf4
commit 21d2f8f861

70 changed files with 876 additions and 990 deletions
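Most hunks below repeat two mechanical migrations to the reworked SDK netmap package: placement policies become value types that decode themselves, and the node-state enum is replaced by IsOnline/IsOffline checks. The following minimal Go sketch illustrates those two patterns; it is inferred only from the hunks in this commit, and helper names ("parsePolicy", "stateWord") and the sample policy string are illustrative assumptions, not code from the repository.

package main

import (
	"errors"
	"fmt"

	"github.com/nspcc-dev/neofs-sdk-go/netmap"
)

// parsePolicy mirrors the reworked parseContainerPolicy below: the policy is
// now a value type that decodes itself (QL string first, JSON as a fallback).
func parsePolicy(s string) (*netmap.PlacementPolicy, error) {
	var pp netmap.PlacementPolicy

	if err := pp.DecodeString(s); err == nil {
		return &pp, nil
	}

	if err := pp.UnmarshalJSON([]byte(s)); err == nil {
		return &pp, nil
	}

	return nil, errors.New("can't parse placement policy")
}

// stateWord mirrors the IsOnline/IsOffline checks that replace the removed
// NodeInfo.State() enum throughout this commit.
func stateWord(ni netmap.NodeInfo) string {
	switch {
	case ni.IsOnline():
		return "online"
	case ni.IsOffline():
		return "offline"
	default:
		return "<undefined>"
	}
}

func main() {
	// "REP 3" is an illustrative policy string, not taken from this commit.
	pp, err := parsePolicy("REP 3")
	fmt.Println(pp != nil, err)

	var ni netmap.NodeInfo
	ni.SetOffline()
	fmt.Println(stateWord(ni)) // offline
}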
@@ -244,8 +244,8 @@ type NodeInfoRes struct {
}

// NodeInfo returns information about the node from netmap.
func (x NodeInfoRes) NodeInfo() *netmap.NodeInfo {
    return x.cliRes.NodeInfo()
func (x NodeInfoRes) NodeInfo() netmap.NodeInfo {
    return *x.cliRes.NodeInfo()
}

// LatestVersion returns the latest NeoFS API version in use.
@@ -16,7 +16,6 @@ import (
    "github.com/nspcc-dev/neofs-sdk-go/acl"
    "github.com/nspcc-dev/neofs-sdk-go/container"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    "github.com/nspcc-dev/neofs-sdk-go/policy"
    "github.com/nspcc-dev/neofs-sdk-go/session"
    subnetid "github.com/nspcc-dev/neofs-sdk-go/subnet/id"
    "github.com/nspcc-dev/neofs-sdk-go/user"

@@ -69,12 +68,14 @@ It will be stored in sidechain when inner ring will accepts it.`,
    placementPolicy, err := parseContainerPolicy(containerPolicy)
    common.ExitOnErr(cmd, "", err)

    if containerSubnet != "" {
        var subnetID subnetid.ID

        err = subnetID.DecodeString(containerSubnet)
        common.ExitOnErr(cmd, "could not parse subnetID: %w", err)

        placementPolicy.SetSubnetID(&subnetID)
        placementPolicy.RestrictSubnet(subnetID)
    }

    attributes, err := parseAttributes(containerAttributes)
    common.ExitOnErr(cmd, "", err)

@@ -177,16 +178,17 @@ func parseContainerPolicy(policyString string) (*netmap.PlacementPolicy, error)
        policyString = string(data)
    }

    result, err := policy.Parse(policyString)
    var result netmap.PlacementPolicy

    err = result.DecodeString(policyString)
    if err == nil {
        common.PrintVerbose("Parsed QL encoded policy")
        return result, nil
        return &result, nil
    }

    result = netmap.NewPlacementPolicy()
    if err = result.UnmarshalJSON([]byte(policyString)); err == nil {
        common.PrintVerbose("Parsed JSON encoded policy")
        return result, nil
        return &result, nil
    }

    return nil, errors.New("can't parse placement policy")
@@ -4,7 +4,6 @@ import (
    "bytes"
    "encoding/json"
    "os"
    "strings"

    internalclient "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/client"
    "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/common"

@@ -12,7 +11,6 @@ import (
    "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/key"
    "github.com/nspcc-dev/neofs-sdk-go/acl"
    "github.com/nspcc-dev/neofs-sdk-go/container"
    "github.com/nspcc-dev/neofs-sdk-go/policy"
    "github.com/spf13/cobra"
)

@@ -85,6 +83,13 @@ func initContainerInfoCmd() {
    flags.BoolVar(&containerJSON, "json", false, "print or dump container in JSON format")
}

type stringWriter cobra.Command

func (x *stringWriter) WriteString(s string) (n int, err error) {
    (*cobra.Command)(x).Print(s)
    return len(s), nil
}

func prettyPrintContainer(cmd *cobra.Command, cnr *container.Container, jsonEncoding bool) {
    if cnr == nil {
        return

@@ -137,8 +142,14 @@ func prettyPrintContainer(cmd *cobra.Command, cnr *container.Container, jsonEnco
        cmd.Println("invalid nonce:", err)
    }

    pp := cnr.PlacementPolicy()
    if pp == nil {
        cmd.Println("missing placement policy")
    } else {
        cmd.Println("placement policy:")
        cmd.Println(strings.Join(policy.Encode(cnr.PlacementPolicy()), "\n"))
        common.ExitOnErr(cmd, "write policy: %w", pp.WriteStringTo((*stringWriter)(cmd)))
        cmd.Println()
    }
}

func prettyPrintBasicACL(cmd *cobra.Command, basicACL acl.BasicACL) {
@@ -9,13 +9,9 @@ import (
    "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/common"
    "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/commonflags"
    "github.com/nspcc-dev/neofs-node/cmd/neofs-cli/internal/key"
    nmClient "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    "github.com/spf13/cobra"
)

type netCfgWriter cobra.Command

var netInfoCmd = &cobra.Command{
    Use: "netinfo",
    Short: "Get information about NeoFS network",

@@ -39,21 +35,23 @@ var netInfoCmd = &cobra.Command{

        cmd.Printf("Time per block: %s\n", time.Duration(netInfo.MsPerBlock())*time.Millisecond)

        netCfg := netInfo.NetworkConfig()
        const format = " %s: %v\n"

        cmd.Println("NeoFS network configuration")
        cmd.Println("NeoFS network configuration (system)")
        cmd.Printf(format, "Audit fee", netInfo.AuditFee())
        cmd.Printf(format, "Storage price", netInfo.StoragePrice())
        cmd.Printf(format, "Container fee", netInfo.ContainerFee())
        cmd.Printf(format, "EigenTrust alpha", netInfo.EigenTrustAlpha())
        cmd.Printf(format, "Number of EigenTrust iterations", netInfo.NumberOfEigenTrustIterations())
        cmd.Printf(format, "Epoch duration", netInfo.EpochDuration())
        cmd.Printf(format, "Inner Ring candidate fee", netInfo.IRCandidateFee())
        cmd.Printf(format, "Maximum object size", netInfo.MaxObjectSize())
        cmd.Printf(format, "Withdrawal fee", netInfo.WithdrawalFee())

        err = nmClient.WriteConfig((*netCfgWriter)(cmd), func(f func(key []byte, val []byte) error) error {
            var err error

            netCfg.IterateParameters(func(prm *netmap.NetworkParameter) bool {
                err = f(prm.Key(), prm.Value())
                return err != nil
        cmd.Println("NeoFS network configuration (other)")
        netInfo.IterateRawNetworkParameters(func(name string, value []byte) {
            cmd.Printf(format, name, hex.EncodeToString(value))
        })

            return err
        })
        common.ExitOnErr(cmd, "read config: %w", err)
    },
}

@@ -61,57 +59,3 @@ func initNetInfoCmd() {
    commonflags.Init(netInfoCmd)
    commonflags.InitAPI(netInfoCmd)
}

func (x *netCfgWriter) print(name string, v interface{}, unknown bool) {
    var sUnknown string

    if unknown {
        sUnknown = " (unknown)"
    }

    (*cobra.Command)(x).Printf(" %s%s: %v\n", name, sUnknown, v)
}

func (x *netCfgWriter) UnknownParameter(k string, v []byte) {
    x.print(k, hex.EncodeToString(v), true)
}

func (x *netCfgWriter) MaxObjectSize(v uint64) {
    x.print("Maximum object size", v, false)
}

func (x *netCfgWriter) BasicIncomeRate(v uint64) {
    x.print("Basic income rate", v, false)
}

func (x *netCfgWriter) AuditFee(v uint64) {
    x.print("Audit fee", v, false)
}

func (x *netCfgWriter) EpochDuration(v uint64) {
    x.print("Epoch duration", v, false)
}

func (x *netCfgWriter) ContainerFee(v uint64) {
    x.print("Container fee", v, false)
}

func (x *netCfgWriter) ContainerAliasFee(v uint64) {
    x.print("Container alias fee", v, false)
}

func (x *netCfgWriter) EigenTrustIterations(v uint64) {
    x.print("Number EigenTrust of iterations", v, false)
}

func (x *netCfgWriter) EigenTrustAlpha(v float64) {
    x.print("EigenTrust α", v, false)
}

func (x *netCfgWriter) InnerRingCandidateFee(v uint64) {
    x.print("Inner Ring candidate fee", v, false)
}

func (x *netCfgWriter) WithdrawFee(v uint64) {
    x.print("Withdraw fee", v, false)
}
@@ -37,7 +37,7 @@ func initNodeInfoCmd() {
    nodeInfoCmd.Flags().Bool(nodeInfoJSONFlag, false, "print node info in JSON format")
}

func prettyPrintNodeInfo(cmd *cobra.Command, i *netmap.NodeInfo) {
func prettyPrintNodeInfo(cmd *cobra.Command, i netmap.NodeInfo) {
    isJSON, _ := cmd.Flags().GetBool(nodeInfoJSONFlag)
    if isJSON {
        common.PrettyPrintJSON(cmd, i, "node info")

@@ -45,12 +45,24 @@ func prettyPrintNodeInfo(cmd *cobra.Command, i *netmap.NodeInfo) {
    }

    cmd.Println("key:", hex.EncodeToString(i.PublicKey()))
    cmd.Println("state:", i.State())
    netmap.IterateAllAddresses(i, func(s string) {

    var stateWord string
    switch {
    default:
        stateWord = "<undefined>"
    case i.IsOnline():
        stateWord = "online"
    case i.IsOffline():
        stateWord = "offline"
    }

    cmd.Println("state:", stateWord)

    netmap.IterateNetworkEndpoints(i, func(s string) {
        cmd.Println("address:", s)
    })

    for _, attribute := range i.Attributes() {
        cmd.Printf("attribute: %s=%s\n", attribute.Key(), attribute.Value())
    }
    i.IterateAttributes(func(key, value string) {
        cmd.Printf("attribute: %s=%s\n", key, value)
    })
}
@@ -1,69 +1,14 @@
package main

import (
    "fmt"
    "strconv"

    "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config"
    nodeconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/node"
    "github.com/nspcc-dev/neofs-node/pkg/util/attributes"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
)

const (
    // list of default values for well-known attributes
    defaultCapacity = 0
    defaultPrice = 0
)

func parseAttributes(c *config.Config) []netmap.NodeAttribute {
    if nodeconfig.Relay(c) {
        return nil
func parseAttributes(c *cfg) {
    if nodeconfig.Relay(c.appCfg) {
        return
    }

    stringAttributes := nodeconfig.Attributes(c)

    attrs, err := attributes.ParseV2Attributes(stringAttributes, nil)
    if err != nil {
        fatalOnErr(err)
    }

    return addWellKnownAttributes(attrs)
}

type wellKnownNodeAttrDesc struct {
    explicit bool
    defaultVal string
}

func listWellKnownAttrDesc() map[string]wellKnownNodeAttrDesc {
    return map[string]wellKnownNodeAttrDesc{
        netmap.AttrPrice: {defaultVal: strconv.FormatUint(defaultPrice, 10)},
        netmap.AttrCapacity: {defaultVal: strconv.FormatUint(defaultCapacity, 10)},
        netmap.AttrUNLOCODE: {explicit: true},
    }
}

func addWellKnownAttributes(attrs []netmap.NodeAttribute) []netmap.NodeAttribute {
    mWellKnown := listWellKnownAttrDesc()

    // check how user defined well-known attributes
    for i := range attrs {
        delete(mWellKnown, attrs[i].Key())
    }

    for key, desc := range mWellKnown {
        // check if required attribute is set
        if desc.explicit {
            fatalOnErr(fmt.Errorf("missing explicit value of required node attribute %s", key))
        }

        // set default value of the attribute
        index := len(attrs)
        attrs = append(attrs, netmap.NodeAttribute{})
        attrs[index].SetKey(key)
        attrs[index].SetValue(desc.defaultVal)
    }

    return attrs
    fatalOnErr(attributes.ReadNodeAttributes(&c.cfgNodeInfo.localInfo, nodeconfig.Attributes(c.appCfg)))
}
@@ -213,21 +213,21 @@ func newCachedNetmapStorage(s netmap.State, v netmap.Source) netmap.Source {
    }
}

func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.Netmap, error) {
func (s *lruNetmapSource) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
    return s.getNetMapByEpoch(s.netState.CurrentEpoch() - diff)
}

func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.Netmap, error) {
func (s *lruNetmapSource) GetNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
    return s.getNetMapByEpoch(epoch)
}

func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.Netmap, error) {
func (s *lruNetmapSource) getNetMapByEpoch(epoch uint64) (*netmapSDK.NetMap, error) {
    val, err := s.cache.get(epoch)
    if err != nil {
        return nil, err
    }

    return val.(*netmapSDK.Netmap), nil
    return val.(*netmapSDK.NetMap), nil
}

func (s *lruNetmapSource) Epoch() (uint64, error) {
@@ -480,15 +480,21 @@ func initObjectPool(cfg *config.Config) (pool cfgObjectRoutines) {
}

func (c *cfg) LocalNodeInfo() (*netmapV2.NodeInfo, error) {
    ni := c.cfgNetmap.state.getNodeInfo()
    if ni != nil {
        return ni.ToV2(), nil
    var res netmapV2.NodeInfo

    ni, ok := c.cfgNetmap.state.getNodeInfo()
    if ok {
        ni.WriteToV2(&res)
    } else {
        c.cfgNodeInfo.localInfo.WriteToV2(&res)
    }

    return c.cfgNodeInfo.localInfo.ToV2(), nil
    return &res, nil
}

// handleLocalNodeInfo rewrites local node info from netmap
// handleLocalNodeInfo rewrites local node info from the NeoFS network map.
// Called with nil when storage node is outside the NeoFS network map
// (before entering the network and after leaving it).
func (c *cfg) handleLocalNodeInfo(ni *netmap.NodeInfo) {
    c.cfgNetmap.state.setNodeInfo(ni)
}

@@ -496,10 +502,10 @@ func (c *cfg) handleLocalNodeInfo(ni *netmap.NodeInfo) {
// bootstrap sets local node's netmap status to "online".
func (c *cfg) bootstrap() error {
    ni := c.cfgNodeInfo.localInfo
    ni.SetState(netmap.NodeStateOnline)
    ni.SetOnline()

    prm := nmClient.AddPeerPrm{}
    prm.SetNodeInfo(&ni)
    prm.SetNodeInfo(ni)

    return c.cfgNetmap.wrapper.AddPeer(prm)
}
@@ -329,7 +329,7 @@ type loadPlacementBuilder struct {
    cnrSrc containerCore.Source
}

func (l *loadPlacementBuilder) BuildPlacement(epoch uint64, cnr cid.ID) ([]netmap.Nodes, error) {
func (l *loadPlacementBuilder) BuildPlacement(epoch uint64, cnr cid.ID) ([][]netmap.NodeInfo, error) {
    cnrNodes, nm, err := l.buildPlacement(epoch, cnr)
    if err != nil {
        return nil, err

@@ -341,7 +341,7 @@ func (l *loadPlacementBuilder) BuildPlacement(epoch uint64, cnr cid.ID) ([]netma
        pivotPrefix + strconv.FormatUint(epoch, 10),
    )

    placement, err := nm.GetPlacementVectors(cnrNodes, pivot)
    placement, err := nm.PlacementVectors(cnrNodes, pivot)
    if err != nil {
        return nil, fmt.Errorf("could not build placement vectors: %w", err)
    }

@@ -349,12 +349,17 @@ func (l *loadPlacementBuilder) BuildPlacement(epoch uint64, cnr cid.ID) ([]netma
    return placement, nil
}

func (l *loadPlacementBuilder) buildPlacement(epoch uint64, idCnr cid.ID) (netmap.ContainerNodes, *netmap.Netmap, error) {
func (l *loadPlacementBuilder) buildPlacement(epoch uint64, idCnr cid.ID) ([][]netmap.NodeInfo, *netmap.NetMap, error) {
    cnr, err := l.cnrSrc.Get(idCnr)
    if err != nil {
        return nil, nil, err
    }

    policy := cnr.PlacementPolicy()
    if policy == nil {
        return nil, nil, errors.New("missing placement policy in container")
    }

    nm, err := l.nmSrc.GetNetMapByEpoch(epoch)
    if err != nil {
        return nil, nil, fmt.Errorf("could not get network map: %w", err)

@@ -363,7 +368,7 @@ func (l *loadPlacementBuilder) buildPlacement(epoch uint64, idCnr cid.ID) (netma
    binCnr := make([]byte, sha256.Size)
    idCnr.Encode(binCnr)

    cnrNodes, err := nm.GetContainerNodes(cnr.PlacementPolicy(), binCnr)
    cnrNodes, err := nm.ContainerNodes(*policy, binCnr)
    if err != nil {
        return nil, nil, fmt.Errorf("could not build container nodes: %w", err)
    }

@@ -512,9 +517,9 @@ func (l *loadPlacementBuilder) isNodeFromContainerKey(epoch uint64, cnr cid.ID,
        return false, err
    }

    for _, vector := range cnrNodes.Replicas() {
        for _, node := range vector {
            if bytes.Equal(node.PublicKey(), key) {
    for i := range cnrNodes {
        for j := range cnrNodes[i] {
            if bytes.Equal(cnrNodes[i][j].PublicKey(), key) {
                return true, nil
            }
        }
@@ -5,9 +5,7 @@ import (
    "errors"
    "fmt"

    netmapV2 "github.com/nspcc-dev/neofs-api-go/v2/netmap"
    netmapGRPC "github.com/nspcc-dev/neofs-api-go/v2/netmap/grpc"
    "github.com/nspcc-dev/neofs-api-go/v2/refs"
    nodeconfig "github.com/nspcc-dev/neofs-node/cmd/neofs-node/config/node"
    "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
    "github.com/nspcc-dev/neofs-node/pkg/metrics"

@@ -20,6 +18,7 @@ import (
    netmapService "github.com/nspcc-dev/neofs-node/pkg/services/netmap"
    netmapSDK "github.com/nspcc-dev/neofs-sdk-go/netmap"
    subnetid "github.com/nspcc-dev/neofs-sdk-go/subnet/id"
    "github.com/nspcc-dev/neofs-sdk-go/version"
    "go.uber.org/atomic"
    "go.uber.org/zap"
)

@@ -53,16 +52,18 @@ func (s *networkState) setCurrentEpoch(v uint64) {
}

func (s *networkState) setNodeInfo(ni *netmapSDK.NodeInfo) {
    s.nodeInfo.Store(ni)
    ctrlNetSt := control.NetmapStatus_STATUS_UNDEFINED

    var ctrlNetSt control.NetmapStatus
    if ni != nil {
        s.nodeInfo.Store(*ni)

        switch ni.State() {
        default:
            ctrlNetSt = control.NetmapStatus_STATUS_UNDEFINED
        case netmapSDK.NodeStateOnline:
        switch {
        case ni.IsOnline():
            ctrlNetSt = control.NetmapStatus_ONLINE
        case netmapSDK.NodeStateOffline:
        case ni.IsOffline():
            ctrlNetSt = control.NetmapStatus_OFFLINE
        }
    } else {
        ctrlNetSt = control.NetmapStatus_OFFLINE
    }

@@ -73,27 +74,48 @@ func (s *networkState) controlNetmapStatus() control.NetmapStatus {
    return s.controlNetStatus.Load().(control.NetmapStatus)
}

func (s *networkState) getNodeInfo() *netmapSDK.NodeInfo {
    return s.nodeInfo.Load().(*netmapSDK.NodeInfo)
func (s *networkState) getNodeInfo() (res netmapSDK.NodeInfo, ok bool) {
    v := s.nodeInfo.Load()
    if v != nil {
        res, ok = v.(netmapSDK.NodeInfo)
        if !ok {
            panic(fmt.Sprintf("unexpected value in atomic node info state: %T", v))
        }
    }

    return
}

func nodeKeyFromNetmap(c *cfg) []byte {
    return c.cfgNetmap.state.getNodeInfo().PublicKey()
    ni, ok := c.cfgNetmap.state.getNodeInfo()
    if ok {
        return ni.PublicKey()
    }

    return nil
}

func (c *cfg) iterateNetworkAddresses(f func(string) bool) {
    c.cfgNetmap.state.getNodeInfo().IterateAddresses(f)
    ni, ok := c.cfgNetmap.state.getNodeInfo()
    if ok {
        ni.IterateNetworkEndpoints(f)
    }
}

func (c *cfg) addressNum() int {
    return c.cfgNetmap.state.getNodeInfo().NumberOfAddresses()
    ni, ok := c.cfgNetmap.state.getNodeInfo()
    if ok {
        return ni.NumberOfNetworkEndpoints()
    }

    return 0
}

func initNetmapService(c *cfg) {
    network.WriteToNodeInfo(c.localAddr, &c.cfgNodeInfo.localInfo)
    c.cfgNodeInfo.localInfo.SetPublicKey(c.key.PublicKey().Bytes())
    c.cfgNodeInfo.localInfo.SetAttributes(parseAttributes(c.appCfg)...)
    c.cfgNodeInfo.localInfo.SetState(netmapSDK.NodeStateOffline)
    parseAttributes(c)
    c.cfgNodeInfo.localInfo.SetOffline()

    readSubnetCfg(c)
@@ -113,7 +135,7 @@ func initNetmapService(c *cfg) {
        &netInfo{
            netState: c.cfgNetmap.state,
            magic: c.cfgMorph.client,
            netCfg: c.cfgNetmap.wrapper.IterateConfigParameters,
            morphClientNetMap: c.cfgNetmap.wrapper,
            msPerBlockRdr: c.cfgMorph.client.MsPerBlock,
        },
    ),

@@ -236,14 +258,25 @@ func initNetmapState(c *cfg) {
    ni, err := c.netmapLocalNodeState(epoch)
    fatalOnErrDetails("could not init network state", err)

    stateWord := "undefined"

    if ni != nil {
        switch {
        case ni.IsOnline():
            stateWord = "online"
        case ni.IsOffline():
            stateWord = "offline"
        }
    }

    c.log.Info("initial network state",
        zap.Uint64("epoch", epoch),
        zap.Stringer("state", ni.State()),
        zap.String("state", stateWord),
    )

    c.cfgNetmap.state.setCurrentEpoch(epoch)
    c.cfgNetmap.startEpoch = epoch
    c.cfgNetmap.state.setNodeInfo(ni)
    c.handleLocalNodeInfo(ni)
}

func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {

@@ -253,17 +286,14 @@ func (c *cfg) netmapLocalNodeState(epoch uint64) (*netmapSDK.NodeInfo, error) {
        return nil, err
    }

    return c.localNodeInfoFromNetmap(nm), nil
}

func (c *cfg) localNodeInfoFromNetmap(nm *netmapSDK.Netmap) *netmapSDK.NodeInfo {
    for _, n := range nm.Nodes {
        if bytes.Equal(n.PublicKey(), c.key.PublicKey().Bytes()) {
            return n.NodeInfo
    nmNodes := nm.Nodes()
    for i := range nmNodes {
        if bytes.Equal(nmNodes[i].PublicKey(), c.key.PublicKey().Bytes()) {
            return &nmNodes[i], nil
        }
    }

    return nil
    return nil, nil
}

// addNewEpochNotificationHandler adds handler that will be executed synchronously

@@ -309,18 +339,11 @@ func (c *cfg) SetNetmapStatus(st control.NetmapStatus) error {
        return c.bootstrap()
    }

    var apiState netmapSDK.NodeState

    if st == control.NetmapStatus_OFFLINE {
        apiState = netmapSDK.NodeStateOffline
    }

    c.cfgNetmap.reBoostrapTurnedOff.Store(true)

    prm := nmClient.UpdatePeerPrm{}

    prm.SetKey(c.key.PublicKey().Bytes())
    prm.SetState(apiState)

    return c.cfgNetmap.wrapper.UpdatePeerState(prm)
}

@@ -332,50 +355,49 @@ type netInfo struct {
        MagicNumber() (uint64, error)
    }

    netCfg func(func(key, value []byte) error) error
    morphClientNetMap *nmClient.Client

    msPerBlockRdr func() (int64, error)
}

func (n *netInfo) Dump(ver *refs.Version) (*netmapV2.NetworkInfo, error) {
func (n *netInfo) Dump(ver version.Version) (*netmapSDK.NetworkInfo, error) {
    magic, err := n.magic.MagicNumber()
    if err != nil {
        return nil, err
    }

    ni := new(netmapV2.NetworkInfo)
    var ni netmapSDK.NetworkInfo
    ni.SetCurrentEpoch(n.netState.CurrentEpoch())
    ni.SetMagicNumber(magic)

    if mjr := ver.GetMajor(); mjr > 2 || mjr == 2 && ver.GetMinor() > 9 {
        netInfoMorph, err := n.morphClientNetMap.ReadNetworkConfiguration()
        if err != nil {
            return nil, fmt.Errorf("read network configuration using netmap contract client: %w", err)
        }

    if mjr := ver.Major(); mjr > 2 || mjr == 2 && ver.Minor() > 9 {
        msPerBlock, err := n.msPerBlockRdr()
        if err != nil {
            return nil, fmt.Errorf("ms per block: %w", err)
        }

        var (
            ps []netmapV2.NetworkParameter
            netCfg netmapV2.NetworkConfig
        )

        if err := n.netCfg(func(key, value []byte) error {
            var p netmapV2.NetworkParameter

            p.SetKey(key)
            p.SetValue(value)

            ps = append(ps, p)

            return nil
        }); err != nil {
            return nil, fmt.Errorf("network config: %w", err)
        }

        netCfg.SetParameters(ps...)

        ni.SetNetworkConfig(&netCfg)
        ni.SetMsPerBlock(msPerBlock)

        ni.SetMaxObjectSize(netInfoMorph.MaxObjectSize)
        ni.SetStoragePrice(netInfoMorph.StoragePrice)
        ni.SetAuditFee(netInfoMorph.AuditFee)
        ni.SetEpochDuration(netInfoMorph.EpochDuration)
        ni.SetContainerFee(netInfoMorph.ContainerFee)
        ni.SetNamedContainerFee(netInfoMorph.ContainerAliasFee)
        ni.SetNumberOfEigenTrustIterations(netInfoMorph.EigenTrustIterations)
        ni.SetEigenTrustAlpha(netInfoMorph.EigenTrustAlpha)
        ni.SetIRCandidateFee(netInfoMorph.IRCandidateFee)
        ni.SetWithdrawalFee(netInfoMorph.WithdrawalFee)

        for i := range netInfoMorph.Raw {
            ni.SetRawNetworkParameter(netInfoMorph.Raw[i].Name, netInfoMorph.Raw[i].Value)
        }
    }

    return ni, nil
    return &ni, nil
}
@@ -530,10 +530,12 @@ func (c *reputationClientConstructor) Get(info coreclient.NodeInfo) (coreclient.
    if err == nil {
        key := info.PublicKey()

        for i := range nm.Nodes {
            if bytes.Equal(nm.Nodes[i].PublicKey(), key) {
        nmNodes := nm.Nodes()

        for i := range nmNodes {
            if bytes.Equal(nmNodes[i].PublicKey(), key) {
                prm := truststorage.UpdatePrm{}
                prm.SetPeer(reputation.PeerIDFromBytes(nm.Nodes[i].PublicKey()))
                prm.SetPeer(reputation.PeerIDFromBytes(nmNodes[i].PublicKey()))

                return &reputationClient{
                    MultiAddressClient: cl.(coreclient.MultiAddressClient),
@@ -26,7 +26,7 @@ func (i InitialTrustSource) InitialTrust(reputation.PeerID) (reputation.TrustVal
        return reputation.TrustZero, fmt.Errorf("failed to get NetMap: %w", err)
    }

    nodeCount := reputation.TrustValueFromFloat64(float64(len(nm.Nodes)))
    nodeCount := reputation.TrustValueFromFloat64(float64(len(nm.Nodes())))
    if nodeCount == 0 {
        return reputation.TrustZero, ErrEmptyNetMap
    }
@@ -66,14 +66,15 @@ func (it *TrustIterator) Iterate(h reputation.TrustHandler) error {
    // find out if local node is presented in netmap
    localIndex := -1

    for i := range nm.Nodes {
        if bytes.Equal(nm.Nodes[i].PublicKey(), it.storage.LocalKey) {
    nmNodes := nm.Nodes()
    for i := range nmNodes {
        if bytes.Equal(nmNodes[i].PublicKey(), it.storage.LocalKey) {
            localIndex = i
            break
        }
    }

    ln := len(nm.Nodes)
    ln := len(nmNodes)
    if localIndex >= 0 && ln > 0 {
        ln--
    }

@@ -81,13 +82,13 @@ func (it *TrustIterator) Iterate(h reputation.TrustHandler) error {
    // calculate Pj http://ilpubs.stanford.edu:8090/562/1/2002-56.pdf Chapter 4.5.
    p := reputation.TrustOne.Div(reputation.TrustValueFromInt(ln))

    for i := range nm.Nodes {
    for i := range nmNodes {
        if i == localIndex {
            continue
        }

        trust := reputation.Trust{}
        trust.SetPeer(reputation.PeerIDFromBytes(nm.Nodes[i].PublicKey()))
        trust.SetPeer(reputation.PeerIDFromBytes(nmNodes[i].PublicKey()))
        trust.SetValue(p)
        trust.SetTrustingPeer(reputation.PeerIDFromBytes(it.storage.LocalKey))
go.mod (2 changes)

@@ -19,7 +19,7 @@ require (
    github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220601120906-3bec6657f5c5 // indirect
    github.com/nspcc-dev/neofs-api-go/v2 v2.12.2
    github.com/nspcc-dev/neofs-contract v0.15.1
    github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.4.0.20220615085207-eb3b99081235
    github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.4.0.20220616082321-e986f4780721
    github.com/nspcc-dev/tzhash v1.5.2
    github.com/panjf2000/ants/v2 v2.4.0
    github.com/paulmach/orb v0.2.2

go.sum (binary file not shown)
@@ -6,7 +6,7 @@ import (
    "github.com/google/uuid"
    "github.com/nspcc-dev/neofs-node/pkg/util/test"
    "github.com/nspcc-dev/neofs-sdk-go/container"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test"
    "github.com/nspcc-dev/neofs-sdk-go/user"
    "github.com/nspcc-dev/neofs-sdk-go/version"
    "github.com/stretchr/testify/require"

@@ -17,8 +17,8 @@ func TestCheckFormat(t *testing.T) {

    require.Error(t, CheckFormat(c))

    policy := netmap.NewPlacementPolicy()
    c.SetPlacementPolicy(policy)
    policy := netmaptest.PlacementPolicy()
    c.SetPlacementPolicy(&policy)

    require.Error(t, CheckFormat(c))
pkg/core/netmap/nodes.go (new file, 31 lines)

@@ -0,0 +1,31 @@
package netmap

import "github.com/nspcc-dev/neofs-sdk-go/netmap"

// Node is a named type of netmap.NodeInfo which provides interface needed
// in the current repository. Node is expected to be used everywhere instead
// of direct usage of netmap.NodeInfo, so it represents a type mediator.
type Node netmap.NodeInfo

// PublicKey returns public key bound to the storage node.
//
// Return value MUST NOT be mutated, make a copy first.
func (x Node) PublicKey() []byte {
    return (netmap.NodeInfo)(x).PublicKey()
}

// IterateAddresses iterates over all announced network addresses
// and passes them into f. Handler MUST NOT be nil.
func (x Node) IterateAddresses(f func(string) bool) {
    (netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}

// NumberOfAddresses returns number of announced network addresses.
func (x Node) NumberOfAddresses() int {
    return (netmap.NodeInfo)(x).NumberOfNetworkEndpoints()
}

// Nodes is a named type of []netmap.NodeInfo which provides interface needed
// in the current repository. Nodes is expected to be used everywhere instead
// of direct usage of []netmap.NodeInfo, so it represents a type mediator.
type Nodes []netmap.NodeInfo
@@ -16,7 +16,7 @@ type Source interface {
    //
    // Implementations must not retain the network map pointer and modify
    // the network map through it.
    GetNetMap(diff uint64) (*netmap.Netmap, error)
    GetNetMap(diff uint64) (*netmap.NetMap, error)

    // GetNetMapByEpoch reads network map by the epoch number from the storage.
    // It returns the pointer to the requested network map and any error encountered.

@@ -25,7 +25,7 @@ type Source interface {
    //
    // Implementations must not retain the network map pointer and modify
    // the network map through it.
    GetNetMapByEpoch(epoch uint64) (*netmap.Netmap, error)
    GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error)

    // Epoch reads the current epoch from the storage.
    // It returns thw number of the current epoch and any error encountered.

@@ -35,11 +35,11 @@ type Source interface {
}

// GetLatestNetworkMap requests and returns the latest network map from the storage.
func GetLatestNetworkMap(src Source) (*netmap.Netmap, error) {
func GetLatestNetworkMap(src Source) (*netmap.NetMap, error) {
    return src.GetNetMap(0)
}

// GetPreviousNetworkMap requests and returns previous from the latest network map from the storage.
func GetPreviousNetworkMap(src Source) (*netmap.Netmap, error) {
func GetPreviousNetworkMap(src Source) (*netmap.NetMap, error) {
    return src.GetNetMap(1)
}
@@ -48,7 +48,9 @@ func (ap *Processor) processEmit() {
        return
    }

    ln := len(networkMap.Nodes)
    nmNodes := networkMap.Nodes()

    ln := len(nmNodes)
    if ln == 0 {
        ap.log.Debug("empty network map, do not emit gas")

@@ -57,8 +59,8 @@ func (ap *Processor) processEmit() {

    gasPerNode := fixedn.Fixed8(ap.storageEmission / uint64(ln))

    for i := range networkMap.Nodes {
        keyBytes := networkMap.Nodes[i].PublicKey()
    for i := range nmNodes {
        keyBytes := nmNodes[i].PublicKey()

        key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256())
        if err != nil {
@@ -6,11 +6,12 @@ import (
    "encoding/hex"

    clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client"
    netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
    cntClient "github.com/nspcc-dev/neofs-node/pkg/morph/client/container"
    "github.com/nspcc-dev/neofs-node/pkg/services/audit"
    "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
    "github.com/nspcc-dev/neofs-node/pkg/util/rand"
    cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
    "go.uber.org/zap"
)

@@ -59,10 +60,19 @@ func (ap *Processor) processStartAudit(epoch uint64) {
            continue
        }

        policy := cnr.PlacementPolicy()
        if policy == nil {
            log.Error("missing placement policy in container, ignore",
                zap.Stringer("cid", containers[i]),
            )

            continue
        }

        containers[i].Encode(pivot)

        // find all container nodes for current epoch
        nodes, err := nm.GetContainerNodes(cnr.PlacementPolicy(), pivot)
        nodes, err := nm.ContainerNodes(*policy, pivot)
        if err != nil {
            log.Info("can't build placement for container, ignore",
                zap.Stringer("cid", containers[i]),

@@ -71,7 +81,7 @@ func (ap *Processor) processStartAudit(epoch uint64) {
            continue
        }

        n := nodes.Flatten()
        n := placement.FlattenNodes(nodes)

        // shuffle nodes to ask a random one
        rand.Shuffle(len(n), func(i, j int) {

@@ -110,7 +120,7 @@ func (ap *Processor) processStartAudit(epoch uint64) {
    }
}

func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmap.Nodes) []oid.ID {
func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) []oid.ID {
    var sg []oid.ID

    ln := len(shuffled)

@@ -130,7 +140,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmap.Nodes) []oid.
            zap.Int("total_tries", ln),
        )

        err := clientcore.NodeInfoFromRawNetmapElement(&info, shuffled[i])
        err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i]))
        if err != nil {
            log.Warn("parse client node info", zap.String("error", err.Error()))
@@ -240,10 +240,15 @@ func checkSubnet(subCli *morphsubnet.Client, cnr *containerSDK.Container) error
        return errors.New("missing owner")
    }

    policy := cnr.PlacementPolicy()
    if policy == nil {
        return errors.New("missing placement policy")
    }

    prm := morphsubnet.UserAllowedPrm{}

    subID := cnr.PlacementPolicy().SubnetID()
    if subID == nil || subnetid.IsZero(*subID) {
    subID := policy.Subnet()
    if subnetid.IsZero(subID) {
        return nil
    }

@@ -256,7 +261,7 @@ func checkSubnet(subCli *morphsubnet.Client, cnr *containerSDK.Container) error
    }

    if !res.Allowed() {
        return fmt.Errorf("user is not allowed to create containers in %s subnetwork", subID)
        return fmt.Errorf("user is not allowed to create containers in %v subnetwork", subID)
    }

    return nil
@@ -3,7 +3,6 @@ package netmap
import (
    "bytes"
    "encoding/hex"
    "fmt"
    "sync"

    "github.com/nspcc-dev/neofs-sdk-go/netmap"

@@ -39,20 +38,19 @@ func newCleanupTable(enabled bool, threshold uint64) cleanupTable {
}

// Update cleanup table based on on-chain information about netmap.
func (c *cleanupTable) update(snapshot *netmap.Netmap, now uint64) {
func (c *cleanupTable) update(snapshot netmap.NetMap, now uint64) {
    c.Lock()
    defer c.Unlock()

    nmNodes := snapshot.Nodes()

    // replacing map is less memory efficient but faster
    newMap := make(map[string]epochStampWithNodeInfo, len(snapshot.Nodes))
    newMap := make(map[string]epochStampWithNodeInfo, len(nmNodes))

    for i := range snapshot.Nodes {
        binNodeInfo, err := snapshot.Nodes[i].Marshal()
        if err != nil {
            panic(fmt.Errorf("could not marshal node info: %w", err)) // seems better than ignore
        }
    for i := range nmNodes {
        binNodeInfo := nmNodes[i].Marshal()

        keyString := hex.EncodeToString(snapshot.Nodes[i].PublicKey())
        keyString := hex.EncodeToString(nmNodes[i].PublicKey())

        access, ok := c.lastAccess[keyString]
        if ok {
@@ -22,14 +22,13 @@ func TestCleanupTable(t *testing.T) {
        newNodeInfo(genKey(t).PublicKey()),
    }

    networkMap, err := netmap.NewNetmap(netmap.NodesFromInfo(infos))
    require.NoError(t, err)
    var networkMap netmap.NetMap
    networkMap.SetNodes(infos)

    mapInfos := make(map[string][]byte)

    for i := range infos {
        binNodeInfo, err := infos[i].Marshal()
        require.NoError(t, err)
        binNodeInfo := infos[i].Marshal()

        mapInfos[hex.EncodeToString(infos[i].PublicKey())] = binNodeInfo
    }
@@ -30,24 +30,12 @@ var errMissingRequiredAttr = errors.New("missing required attribute in DB record
//
// UN-LOCODE attribute remains untouched.
func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
    mAttr := uniqueAttributes(n.Attributes())

    // check if the derived attributes are presented
    for attrKey := range v.mAttr {
        if _, ok := mAttr[attrKey]; ok {
            return fmt.Errorf("attribute derived from %s is presented: %s",
                netmap.AttrUNLOCODE,
                attrKey,
            )
        }
    }

    attrLocode, ok := mAttr[netmap.AttrUNLOCODE]
    if !ok {
    attrLocode := n.LOCODE()
    if attrLocode == "" {
        return nil
    }

    lc, err := locode.FromString(attrLocode.Value())
    lc, err := locode.FromString(attrLocode)
    if err != nil {
        return fmt.Errorf("invalid locode value: %w", err)
    }

@@ -57,41 +45,48 @@ func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
        return fmt.Errorf("could not get locode record from DB: %w", err)
    }

    for attrKey, attrDesc := range v.mAttr {
        attrVal := attrDesc.converter(record)
        if attrVal == "" {
            if !attrDesc.optional {
    countryCode := record.CountryCode()
    if countryCode == nil {
        return errMissingRequiredAttr
    }

            continue
    strCountryCode := countryCode.String()
    if strCountryCode == "" {
        return errMissingRequiredAttr
    }

        var a netmap.NodeAttribute
        a.SetKey(attrKey)
        a.SetValue(attrVal)

        mAttr[attrKey] = a
    countryName := record.CountryName()
    if countryName == "" {
        return errMissingRequiredAttr
    }

    as := n.Attributes()
    as = as[:0]

    for _, attr := range mAttr {
        as = append(as, attr)
    locationName := record.LocationName()
    if locationName == "" {
        return errMissingRequiredAttr
    }

    n.SetAttributes(as...)
    continent := record.Continent()
    if continent == nil {
        return errMissingRequiredAttr
    }

    continentName := continent.String()
    if continentName == "" {
        return errMissingRequiredAttr
    }

    n.SetCountryCode(strCountryCode)
    n.SetCountryName(countryName)
    n.SetLocationName(locationName)
    n.SetContinentName(continentName)

    if subDivCode := record.SubDivCode(); subDivCode != "" {
        n.SetSubdivisionCode(subDivCode)
    }

    if subDivName := record.SubDivName(); subDivName != "" {
        n.SetSubdivisionName(subDivName)
    }

    return nil
}

func uniqueAttributes(as []netmap.NodeAttribute) map[string]netmap.NodeAttribute {
    mAttr := make(map[string]netmap.NodeAttribute, len(as))

    for _, attr := range as {
        mAttr[attr.Key()] = attr
    }

    return mAttr
}
@@ -34,40 +34,21 @@ func (x db) Get(lc *locodestd.LOCODE) (locode.Record, error) {
    return r, nil
}

func addAttrKV(n *netmap.NodeInfo, key, val string) {
    var a netmap.NodeAttribute

    a.SetKey(key)
    a.SetValue(val)

    n.SetAttributes(append(n.Attributes(), a)...)
}

func addLocodeAttrValue(n *netmap.NodeInfo, val string) {
    addAttrKV(n, netmap.AttrUNLOCODE, val)
    n.SetLOCODE(val)
}

func addLocodeAttr(n *netmap.NodeInfo, lc locodestd.LOCODE) {
    addLocodeAttrValue(n, fmt.Sprintf("%s %s", lc[0], lc[1]))
    n.SetLOCODE(fmt.Sprintf("%s %s", lc[0], lc[1]))
}

func nodeInfoWithSomeAttrs() *netmap.NodeInfo {
    n := netmap.NewNodeInfo()
    var n netmap.NodeInfo

    addAttrKV(n, "key1", "val1")
    addAttrKV(n, "key2", "val2")
    n.SetAttribute("key1", "val1")
    n.SetAttribute("key2", "val2")

    return n
}

func containsAttr(n *netmap.NodeInfo, key, val string) bool {
    for _, a := range n.Attributes() {
        if a.Key() == key && a.Value() == val {
            return true
        }
    }

    return false
    return &n
}

func TestValidator_VerifyAndUpdate(t *testing.T) {

@@ -108,41 +89,11 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {

    validator := locode.New(p)

    t.Run("w/ derived attributes", func(t *testing.T) {
        fn := func(withLocode bool) {
            for _, derivedAttr := range []string{
                netmap.AttrCountryCode,
                netmap.AttrCountry,
                netmap.AttrLocation,
                netmap.AttrSubDivCode,
                netmap.AttrSubDiv,
                netmap.AttrContinent,
            } {
                n := nodeInfoWithSomeAttrs()

                addAttrKV(n, derivedAttr, "some value")

                if withLocode {
                    addLocodeAttr(n, r.LOCODE)
                }

                err := validator.VerifyAndUpdate(n)
                require.Error(t, err)
            }
        }

        fn(true)
        fn(false)
    })

    t.Run("w/o locode", func(t *testing.T) {
        n := nodeInfoWithSomeAttrs()

        attrs := n.Attributes()

        err := validator.VerifyAndUpdate(n)
        require.NoError(t, err)
        require.Equal(t, attrs, n.Attributes())
    })

    t.Run("w/ locode", func(t *testing.T) {

@@ -168,22 +119,14 @@ func TestValidator_VerifyAndUpdate(t *testing.T) {

        addLocodeAttr(n, r.LOCODE)

        attrs := n.Attributes()

        err := validator.VerifyAndUpdate(n)
        require.NoError(t, err)

        outAttrs := n.Attributes()

        for _, a := range attrs {
            require.Contains(t, outAttrs, a)
        }

        require.True(t, containsAttr(n, netmap.AttrCountryCode, rec.CountryCode().String()))
        require.True(t, containsAttr(n, netmap.AttrCountry, rec.CountryName()))
        require.True(t, containsAttr(n, netmap.AttrLocation, rec.LocationName()))
        require.True(t, containsAttr(n, netmap.AttrSubDivCode, rec.SubDivCode()))
        require.True(t, containsAttr(n, netmap.AttrSubDiv, rec.SubDivName()))
        require.True(t, containsAttr(n, netmap.AttrContinent, rec.Continent().String()))
        require.Equal(t, rec.CountryCode().String(), n.Attribute("CountryCode"))
        require.Equal(t, rec.CountryName(), n.Attribute("Country"))
        require.Equal(t, rec.LocationName(), n.Attribute("Location"))
        require.Equal(t, rec.SubDivCode(), n.Attribute("SubDivCode"))
        require.Equal(t, rec.SubDivName(), n.Attribute("SubDiv"))
        require.Equal(t, rec.Continent().String(), n.Attribute("Continent"))
    })
}
@@ -1,30 +0,0 @@
package locode

type attrDescriptor struct {
    optional bool
    converter func(Record) string
}

func countryCodeValue(r Record) (val string) {
    return r.CountryCode().String()
}

func countryValue(r Record) string {
    return r.CountryName()
}

func locationValue(r Record) string {
    return r.LocationName()
}

func subDivCodeValue(r Record) string {
    return r.SubDivCode()
}

func subDivValue(r Record) string {
    return r.SubDivName()
}

func continentValue(r Record) string {
    return r.Continent().String()
}
@@ -1,9 +1,5 @@
package locode

import (
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
)

// Prm groups the required parameters of the Validator's constructor.
//
// All values must comply with the requirements imposed on them.

@@ -26,8 +22,6 @@ type Prm struct {
// the Validator is immediately ready to work through API.
type Validator struct {
    db DB

    mAttr map[string]attrDescriptor
}

// New creates a new instance of the Validator.

@@ -39,16 +33,5 @@ type Validator struct {
func New(prm Prm) *Validator {
    return &Validator{
        db: prm.DB,
        mAttr: map[string]attrDescriptor{
            netmap.AttrCountryCode: {converter: countryCodeValue},
            netmap.AttrCountry: {converter: countryValue},

            netmap.AttrLocation: {converter: locationValue},

            netmap.AttrSubDivCode: {converter: subDivCodeValue, optional: true},
            netmap.AttrSubDiv: {converter: subDivValue, optional: true},

            netmap.AttrContinent: {converter: continentValue},
        },
    }
}
@@ -9,7 +9,7 @@ import (

// VerifyAndUpdate calls network.VerifyAddress.
func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
    err := network.VerifyMultiAddress(n)
    err := network.VerifyMultiAddress(*n)
    if err != nil {
        return fmt.Errorf("could not verify multiaddress: %w", err)
    }
@@ -2,8 +2,8 @@ package netmap

import (
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap"
    netmapclient "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap"
    "github.com/nspcc-dev/neofs-sdk-go/netmap"
    "go.uber.org/zap"
)

@@ -34,7 +34,6 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
        prm := netmapclient.UpdatePeerPrm{}

        prm.SetKey(key.Bytes())
        prm.SetState(netmap.NodeStateOffline)
        prm.SetHash(ev.TxHash())

        err = np.netmapClient.UpdatePeerState(prm)

@@ -45,7 +44,7 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) {
            uint32(ev.epoch),
            nil,
            methodUpdateStateNotary,
            int64(netmap.NodeStateOffline.ToV2()), key.Bytes(),
            int64(v2netmap.Offline), key.Bytes(),
        )
    }
    if err != nil {
|
@ -60,7 +60,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) {
|
|||
}
|
||||
}
|
||||
|
||||
np.netmapSnapshot.update(networkMap, epoch)
|
||||
np.netmapSnapshot.update(*networkMap, epoch)
|
||||
np.handleCleanupTick(netmapCleanupTick{epoch: epoch, txHash: ev.TxHash()})
|
||||
np.handleNewAudit(audit.NewAuditStartEvent(epoch))
|
||||
np.handleAuditSettlements(settlement.NewAuditEvent(epoch))
|
||||
|
|
|
@@ -3,8 +3,6 @@ package netmap
import (
    "bytes"
    "encoding/hex"
    "sort"
    "strings"

    netmapclient "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap"
    netmapEvent "github.com/nspcc-dev/neofs-node/pkg/morph/event/netmap"

@@ -36,7 +34,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
    }

    // unmarshal node info
    nodeInfo := netmap.NewNodeInfo()
    var nodeInfo netmap.NodeInfo
    if err := nodeInfo.Unmarshal(ev.Node()); err != nil {
        // it will be nice to have tx id at event structure to log it
        np.log.Warn("can't parse network map candidate")

@@ -44,7 +42,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
    }

    // validate and update node info
    err := np.nodeValidator.VerifyAndUpdate(nodeInfo)
    err := np.nodeValidator.VerifyAndUpdate(&nodeInfo)
    if err != nil {
        np.log.Warn("could not verify and update information about network map candidate",
            zap.String("error", err.Error()),

@@ -54,28 +52,10 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) {
    }

    // sort attributes to make it consistent
    a := nodeInfo.Attributes()
    sort.Slice(a, func(i, j int) bool {
        switch strings.Compare(a[i].Key(), a[j].Key()) {
        case -1:
            return true
        case 1:
            return false
        default:
            return a[i].Value() < a[j].Value()
        }
    })
    nodeInfo.SetAttributes(a...)
    nodeInfo.SortAttributes()

    // marshal updated node info structure
    nodeInfoBinary, err := nodeInfo.Marshal()
    if err != nil {
        np.log.Warn("could not marshal updated network map candidate",
            zap.String("error", err.Error()),
        )

        return
    }
    nodeInfoBinary := nodeInfo.Marshal()

    keyString := hex.EncodeToString(nodeInfo.PublicKey())
@@ -121,15 +101,6 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
        return
    }

    // better use unified enum from neofs-api-go/v2/netmap package
    if ev.Status() != netmap.NodeStateOffline {
        np.log.Warn("node proposes unknown state",
            zap.String("key", hex.EncodeToString(ev.PublicKey().Bytes())),
            zap.Stringer("status", ev.Status()),
        )
        return
    }

    // flag node to remove from local view, so it can be re-bootstrapped
    // again before new epoch will tick
    np.netmapSnapshot.flag(hex.EncodeToString(ev.PublicKey().Bytes()))

@@ -141,7 +112,9 @@ func (np *Processor) processUpdatePeer(ev netmapEvent.UpdatePeer) {
    } else {
        prm := netmapclient.UpdatePeerPrm{}

        prm.SetState(ev.Status())
        if ev.Online() {
            prm.SetOnline()
        }
        prm.SetKey(ev.PublicKey().Bytes())

        err = np.netmapClient.UpdatePeerState(prm)

@@ -181,12 +154,14 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
        return
    }

    for _, node := range candidates.Nodes {
        if !bytes.Equal(node.NodeInfo.PublicKey(), ev.Node()) {
    candidateNodes := candidates.Nodes()

    for i := range candidateNodes {
        if !bytes.Equal(candidateNodes[i].PublicKey(), ev.Node()) {
            continue
        }

        err = node.IterateSubnets(func(subNetID subnetid.ID) error {
        err = candidateNodes[i].IterateSubnets(func(subNetID subnetid.ID) error {
            if subNetID.Equals(subnetToRemoveFrom) {
                return netmap.ErrRemoveSubnet
            }

@@ -199,7 +174,6 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {

            prm := netmapclient.UpdatePeerPrm{}
            prm.SetKey(ev.Node())
            prm.SetState(netmap.NodeStateOffline)
            prm.SetHash(ev.TxHash())

            err = np.netmapClient.UpdatePeerState(prm)

@@ -209,7 +183,7 @@ func (np *Processor) processRemoveSubnetNode(ev subnetEvent.RemoveNode) {
            }
        } else {
            prm := netmapclient.AddPeerPrm{}
            prm.SetNodeInfo(node.NodeInfo)
            prm.SetNodeInfo(candidateNodes[i])
            prm.SetHash(ev.TxHash())

            err = np.netmapClient.AddPeer(prm)
@@ -7,6 +7,7 @@ import (
    "time"

    clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client"
    netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
    neofsapiclient "github.com/nspcc-dev/neofs-node/pkg/innerring/internal/client"
    auditproc "github.com/nspcc-dev/neofs-node/pkg/innerring/processors/audit"
    "github.com/nspcc-dev/neofs-node/pkg/network/cache"

@@ -69,7 +70,7 @@ func (c *ClientCache) GetSG(task *audit.Task, id oid.ID) (*storagegroup.StorageG
    return c.getSG(task.AuditContext(), sgAddress, task.NetworkMap(), task.ContainerNodes())
}

func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Netmap, cn netmap.ContainerNodes) (*storagegroup.StorageGroup, error) {
func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.NetMap, cn [][]netmap.NodeInfo) (*storagegroup.StorageGroup, error) {
    obj := addr.Object()

    nodes, err := placement.BuildObjectPlacement(nm, cn, &obj)

@@ -80,7 +81,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
    var info clientcore.NodeInfo

    for _, node := range placement.FlattenNodes(nodes) {
        err := clientcore.NodeInfoFromRawNetmapElement(&info, node)
        err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(node))
        if err != nil {
            return nil, fmt.Errorf("parse client node info: %w", err)
        }

@@ -124,14 +125,14 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne
}

// GetHeader requests node from the container under audit to return object header by id.
func (c *ClientCache) GetHeader(task *audit.Task, node *netmap.Node, id oid.ID, relay bool) (*object.Object, error) {
func (c *ClientCache) GetHeader(task *audit.Task, node netmap.NodeInfo, id oid.ID, relay bool) (*object.Object, error) {
    var objAddress oid.Address
    objAddress.SetContainer(task.ContainerID())
    objAddress.SetObject(id)

    var info clientcore.NodeInfo

    err := clientcore.NodeInfoFromRawNetmapElement(&info, node)
    err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(node))
    if err != nil {
        return nil, fmt.Errorf("parse client node info: %w", err)
    }

@@ -162,14 +163,14 @@ func (c *ClientCache) GetHeader(task *audit.Task, node *netmap.Node, id oid.ID,

// GetRangeHash requests node from the container under audit to return Tillich-Zemor hash of the
// payload range of the object with specified identifier.
func (c *ClientCache) GetRangeHash(task *audit.Task, node *netmap.Node, id oid.ID, rng *object.Range) ([]byte, error) {
func (c *ClientCache) GetRangeHash(task *audit.Task, node netmap.NodeInfo, id oid.ID, rng *object.Range) ([]byte, error) {
    var objAddress oid.Address
    objAddress.SetContainer(task.ContainerID())
    objAddress.SetObject(id)

    var info clientcore.NodeInfo

    err := clientcore.NodeInfoFromRawNetmapElement(&info, node)
    err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(node))
    if err != nil {
        return nil, fmt.Errorf("parse client node info: %w", err)
    }
@ -6,6 +6,7 @@ import (
|
|||
"crypto/elliptic"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
|
@ -18,6 +19,7 @@ import (
|
|||
auditClient "github.com/nspcc-dev/neofs-node/pkg/morph/client/audit"
|
||||
balanceClient "github.com/nspcc-dev/neofs-node/pkg/morph/client/balance"
|
||||
containerClient "github.com/nspcc-dev/neofs-node/pkg/morph/client/container"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/util/logger"
|
||||
auditAPI "github.com/nspcc-dev/neofs-sdk-go/audit"
|
||||
containerAPI "github.com/nspcc-dev/neofs-sdk-go/container"
|
||||
|
@ -75,7 +77,7 @@ type auditSettlementCalculator audit.Calculator
|
|||
type containerWrapper containerAPI.Container
|
||||
|
||||
type nodeInfoWrapper struct {
|
||||
ni *netmapAPI.Node
|
||||
ni netmapAPI.NodeInfo
|
||||
}
|
||||
|
||||
type sgWrapper storagegroup.StorageGroup
|
||||
|
@ -89,7 +91,7 @@ func (n nodeInfoWrapper) PublicKey() []byte {
|
|||
}
|
||||
|
||||
func (n nodeInfoWrapper) Price() *big.Int {
|
||||
return big.NewInt(int64(n.ni.Price))
|
||||
return big.NewInt(int64(n.ni.Price()))
|
||||
}
|
||||
|
||||
func (c *containerWrapper) Owner() user.ID {
|
||||
|
@ -125,9 +127,9 @@ func (s settlementDeps) ContainerInfo(cid cid.ID) (common.ContainerInfo, error)
|
|||
return (*containerWrapper)(cnr), nil
|
||||
}
|
||||
|
||||
func (s settlementDeps) buildContainer(e uint64, cid cid.ID) (netmapAPI.ContainerNodes, *netmapAPI.Netmap, error) {
|
||||
func (s settlementDeps) buildContainer(e uint64, cid cid.ID) ([][]netmapAPI.NodeInfo, *netmapAPI.NetMap, error) {
|
||||
var (
|
||||
nm *netmapAPI.Netmap
|
||||
nm *netmapAPI.NetMap
|
||||
err error
|
||||
)
|
||||
|
||||
|
@ -146,11 +148,16 @@ func (s settlementDeps) buildContainer(e uint64, cid cid.ID) (netmapAPI.Containe
|
|||
return nil, nil, fmt.Errorf("could not get container from sidechain: %w", err)
|
||||
}
|
||||
|
||||
policy := cnr.PlacementPolicy()
|
||||
if policy == nil {
|
||||
return nil, nil, errors.New("missing placement policy in container")
|
||||
}
|
||||
|
||||
binCnr := make([]byte, sha256.Size)
|
||||
cid.Encode(binCnr)
|
||||
|
||||
cn, err := nm.GetContainerNodes(
|
||||
cnr.PlacementPolicy(),
|
||||
cn, err := nm.ContainerNodes(
|
||||
*policy,
|
||||
binCnr, // maybe move pivot calculation to neofs-api-go
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -166,12 +173,12 @@ func (s settlementDeps) ContainerNodes(e uint64, cid cid.ID) ([]common.NodeInfo,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
ns := cn.Flatten()
|
||||
ns := placement.FlattenNodes(cn)
|
||||
res := make([]common.NodeInfo, 0, len(ns))
|
||||
|
||||
for i := range ns {
|
||||
res = append(res, &nodeInfoWrapper{
|
||||
ni: &ns[i],
|
||||
ni: ns[i],
|
||||
})
|
||||
}
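Note: with netmap.ContainerNodes gone from the SDK, the replica vectors are plain [][]netmap.NodeInfo and flattening moves to the placement helper. Roughly, assuming the imports used in this file and a placeholder handleNode callback:

// cn is the [][]netmapAPI.NodeInfo returned by NetMap.ContainerNodes above.
flat := placement.FlattenNodes(cn) // one []netmapAPI.NodeInfo, vector order preserved

for i := range flat {
	// handleNode is a placeholder for whatever the caller does per node.
	handleNode(flat[i].PublicKey(), flat[i].Price())
}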
|
||||
|
||||
|
|
|
@ -301,19 +301,21 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
|
|||
return
|
||||
}
|
||||
|
||||
for _, c := range candidates.Nodes {
|
||||
s.processCandidate(delEv.TxHash(), removedID, c)
|
||||
candidateNodes := candidates.Nodes()
|
||||
|
||||
for i := range candidateNodes {
|
||||
s.processCandidate(delEv.TxHash(), removedID, candidateNodes[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.ID, c netmap.Node) {
|
||||
func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.ID, c netmap.NodeInfo) {
|
||||
removeSubnet := false
|
||||
log := s.log.With(
|
||||
zap.String("public_key", hex.EncodeToString(c.NodeInfo.PublicKey())),
|
||||
zap.String("public_key", hex.EncodeToString(c.PublicKey())),
|
||||
zap.String("removed_subnet", removedID.String()),
|
||||
)
|
||||
|
||||
err := c.NodeInfo.IterateSubnets(func(id subnetid.ID) error {
|
||||
err := c.IterateSubnets(func(id subnetid.ID) error {
|
||||
if removedID.Equals(id) {
|
||||
removeSubnet = true
|
||||
return netmap.ErrRemoveSubnet
|
||||
|
@ -326,8 +328,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
|
|||
log.Debug("removing node from netmap candidates")
|
||||
|
||||
var updateStatePrm netmapclient.UpdatePeerPrm
|
||||
updateStatePrm.SetState(netmap.NodeStateOffline)
|
||||
updateStatePrm.SetKey(c.NodeInfo.PublicKey())
|
||||
updateStatePrm.SetKey(c.PublicKey())
|
||||
updateStatePrm.SetHash(txHash)
|
||||
|
||||
err = s.netmapClient.UpdatePeerState(updateStatePrm)
|
||||
|
@ -346,7 +347,7 @@ func (s *Server) processCandidate(txHash neogoutil.Uint256, removedID subnetid.I
|
|||
log.Debug("removing subnet from the node")
|
||||
|
||||
var addPeerPrm netmapclient.AddPeerPrm
|
||||
addPeerPrm.SetNodeInfo(c.NodeInfo)
|
||||
addPeerPrm.SetNodeInfo(c)
|
||||
addPeerPrm.SetHash(txHash)
|
||||
|
||||
err = s.netmapClient.AddPeer(addPeerPrm)
|
||||
|
|
|
@ -9,13 +9,13 @@ import (
|
|||
|
||||
// AddPeerPrm groups parameters of AddPeer operation.
|
||||
type AddPeerPrm struct {
|
||||
nodeInfo *netmap.NodeInfo
|
||||
nodeInfo netmap.NodeInfo
|
||||
|
||||
client.InvokePrmOptional
|
||||
}
|
||||
|
||||
// SetNodeInfo sets new peer NodeInfo.
|
||||
func (a *AddPeerPrm) SetNodeInfo(nodeInfo *netmap.NodeInfo) {
|
||||
func (a *AddPeerPrm) SetNodeInfo(nodeInfo netmap.NodeInfo) {
|
||||
a.nodeInfo = nodeInfo
|
||||
}
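Note: because NodeInfo is now held by value and its Marshal no longer returns an error (see the AddPeer body below), the caller side shrinks to roughly this sketch; cli stands for an already constructed morph netmap client:

var prm netmapclient.AddPeerPrm
prm.SetNodeInfo(nodeInfo) // netmap.NodeInfo by value, no nil state to check

if err := cli.AddPeer(prm); err != nil {
	return fmt.Errorf("add peer: %w", err)
}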
|
||||
|
||||
|
@ -31,18 +31,9 @@ func (c *Client) AddPeer(p AddPeerPrm) error {
|
|||
method += "IR"
|
||||
}
|
||||
|
||||
if p.nodeInfo == nil {
|
||||
return fmt.Errorf("nil node info (%s)", method)
|
||||
}
|
||||
|
||||
rawNodeInfo, err := p.nodeInfo.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal node info (%s): %w", method, err)
|
||||
}
|
||||
|
||||
prm := client.InvokePrm{}
|
||||
prm.SetMethod(method)
|
||||
prm.SetArgs(rawNodeInfo)
|
||||
prm.SetArgs(p.nodeInfo.Marshal())
|
||||
prm.InvokePrmOptional = p.InvokePrmOptional
|
||||
|
||||
if err := c.client.Invoke(prm); err != nil {
|
||||
|
|
|
@ -185,85 +185,112 @@ func (c *Client) SetConfig(p SetConfigPrm) error {
|
|||
return c.client.Invoke(prm)
|
||||
}
|
||||
|
||||
// IterateConfigParameters iterates over configuration parameters stored in Netmap contract and passes them to f.
|
||||
//
|
||||
// Returns f's errors directly.
|
||||
func (c *Client) IterateConfigParameters(f func(key, value []byte) error) error {
|
||||
// RawNetworkParameter is a NeoFS network parameter which is transmitted but
|
||||
// not interpreted by the NeoFS API protocol.
|
||||
type RawNetworkParameter struct {
|
||||
// Name of the parameter.
|
||||
Name string
|
||||
|
||||
// Raw parameter value.
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// NetworkConfiguration represents NeoFS network configuration stored
|
||||
// in the NeoFS Sidechain.
|
||||
type NetworkConfiguration struct {
|
||||
MaxObjectSize uint64
|
||||
|
||||
StoragePrice uint64
|
||||
|
||||
AuditFee uint64
|
||||
|
||||
EpochDuration uint64
|
||||
|
||||
ContainerFee uint64
|
||||
|
||||
ContainerAliasFee uint64
|
||||
|
||||
EigenTrustIterations uint64
|
||||
|
||||
EigenTrustAlpha float64
|
||||
|
||||
IRCandidateFee uint64
|
||||
|
||||
WithdrawalFee uint64
|
||||
|
||||
Raw []RawNetworkParameter
|
||||
}
|
||||
|
||||
// ReadNetworkConfiguration reads NetworkConfiguration from the NeoFS Sidechain.
|
||||
func (c *Client) ReadNetworkConfiguration() (*NetworkConfiguration, error) {
|
||||
prm := client.TestInvokePrm{}
|
||||
prm.SetMethod(configListMethod)
|
||||
|
||||
items, err := c.client.TestInvoke(prm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not perform test invocation (%s): %w",
|
||||
return nil, fmt.Errorf("could not perform test invocation (%s): %w",
|
||||
configListMethod, err)
|
||||
}
|
||||
|
||||
if ln := len(items); ln != 1 {
|
||||
return fmt.Errorf("unexpected stack item count (%s): %d", configListMethod, ln)
|
||||
return nil, fmt.Errorf("unexpected stack item count (%s): %d", configListMethod, ln)
|
||||
}
|
||||
|
||||
arr, err := client.ArrayFromStackItem(items[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("record list (%s): %w", configListMethod, err)
|
||||
return nil, fmt.Errorf("record list (%s): %w", configListMethod, err)
|
||||
}
|
||||
|
||||
return iterateRecords(arr, func(key, value []byte) error {
|
||||
return f(key, value)
|
||||
})
|
||||
}
|
||||
m := make(map[string]struct{}, len(arr))
|
||||
var res NetworkConfiguration
|
||||
res.Raw = make([]RawNetworkParameter, 0, len(arr))
|
||||
|
||||
// ConfigWriter is an interface of NeoFS network config writer.
|
||||
type ConfigWriter interface {
|
||||
UnknownParameter(string, []byte)
|
||||
MaxObjectSize(uint64)
|
||||
BasicIncomeRate(uint64)
|
||||
AuditFee(uint64)
|
||||
EpochDuration(uint64)
|
||||
ContainerFee(uint64)
|
||||
ContainerAliasFee(uint64)
|
||||
EigenTrustIterations(uint64)
|
||||
EigenTrustAlpha(float64)
|
||||
InnerRingCandidateFee(uint64)
|
||||
WithdrawFee(uint64)
|
||||
}
|
||||
err = iterateRecords(arr, func(name string, value []byte) error {
|
||||
_, ok := m[name]
|
||||
if ok {
|
||||
return fmt.Errorf("duplicated config name %s", name)
|
||||
}
|
||||
|
||||
// WriteConfig writes NeoFS network configuration received via iterator.
|
||||
//
|
||||
// Returns iterator's errors directly.
|
||||
func WriteConfig(dst ConfigWriter, iterator func(func(key, val []byte) error) error) error {
|
||||
return iterator(func(key, val []byte) error {
|
||||
switch k := string(key); k {
|
||||
m[name] = struct{}{}
|
||||
|
||||
switch name {
|
||||
default:
|
||||
dst.UnknownParameter(k, val)
|
||||
res.Raw = append(res.Raw, RawNetworkParameter{
|
||||
Name: name,
|
||||
Value: value,
|
||||
})
|
||||
case maxObjectSizeConfig:
|
||||
dst.MaxObjectSize(bytesToUint64(val))
|
||||
res.MaxObjectSize = bytesToUint64(value)
|
||||
case basicIncomeRateConfig:
|
||||
dst.BasicIncomeRate(bytesToUint64(val))
|
||||
res.StoragePrice = bytesToUint64(value)
|
||||
case auditFeeConfig:
|
||||
dst.AuditFee(bytesToUint64(val))
|
||||
res.AuditFee = bytesToUint64(value)
|
||||
case epochDurationConfig:
|
||||
dst.EpochDuration(bytesToUint64(val))
|
||||
res.EpochDuration = bytesToUint64(value)
|
||||
case containerFeeConfig:
|
||||
dst.ContainerFee(bytesToUint64(val))
|
||||
res.ContainerFee = bytesToUint64(value)
|
||||
case containerAliasFeeConfig:
|
||||
dst.ContainerAliasFee(bytesToUint64(val))
|
||||
res.ContainerAliasFee = bytesToUint64(value)
|
||||
case etIterationsConfig:
|
||||
dst.EigenTrustIterations(bytesToUint64(val))
|
||||
res.EigenTrustIterations = bytesToUint64(value)
|
||||
case etAlphaConfig:
|
||||
v, err := strconv.ParseFloat(string(val), 64)
|
||||
res.EigenTrustAlpha, err = strconv.ParseFloat(string(value), 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("prm %s: %v", etAlphaConfig, err)
|
||||
return fmt.Errorf("invalid prm %s: %v", etAlphaConfig, err)
|
||||
}
|
||||
|
||||
dst.EigenTrustAlpha(v)
|
||||
case irCandidateFeeConfig:
|
||||
dst.InnerRingCandidateFee(bytesToUint64(val))
|
||||
res.IRCandidateFee = bytesToUint64(value)
|
||||
case withdrawFeeConfig:
|
||||
dst.WithdrawFee(bytesToUint64(val))
|
||||
res.WithdrawalFee = bytesToUint64(value)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
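Note: the iterator/ConfigWriter pair is replaced by a single call that returns the decoded settings plus any uninterpreted parameters. A usage sketch, with c an assumed, already constructed *Client and log the standard library logger:

cfg, err := c.ReadNetworkConfiguration()
if err != nil {
	return fmt.Errorf("read network configuration: %w", err)
}

log.Printf("max object size: %d, epoch duration: %d", cfg.MaxObjectSize, cfg.EpochDuration)

// Parameters the node does not interpret stay available in raw form.
for _, p := range cfg.Raw {
	log.Printf("custom parameter %s = %x", p.Name, p.Value)
}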
|
||||
|
||||
func bytesToUint64(val []byte) uint64 {
|
||||
|
@ -307,7 +334,7 @@ func StringAssert(item stackitem.Item) (interface{}, error) {
|
|||
// iterateRecords iterates over all config records and passes them to f.
|
||||
//
|
||||
// Returns f's errors directly.
|
||||
func iterateRecords(arr []stackitem.Item, f func(key, value []byte) error) error {
|
||||
func iterateRecords(arr []stackitem.Item, f func(key string, value []byte) error) error {
|
||||
for i := range arr {
|
||||
fields, err := client.ArrayFromStackItem(arr[i])
|
||||
if err != nil {
|
||||
|
@ -328,7 +355,7 @@ func iterateRecords(arr []stackitem.Item, f func(key, value []byte) error) error
|
|||
return fmt.Errorf("record value: %w", err)
|
||||
}
|
||||
|
||||
if err := f(k, v); err != nil {
|
||||
if err := f(string(k), v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,20 +8,6 @@ import (
|
|||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
)
|
||||
|
||||
// State is an enumeration of various states of the NeoFS node.
|
||||
type State int64
|
||||
|
||||
const (
|
||||
// Undefined is unknown state.
|
||||
Undefined State = iota
|
||||
|
||||
// Online is network unavailable state.
|
||||
Online
|
||||
|
||||
// Offline is an active state in the network.
|
||||
Offline
|
||||
)
|
||||
|
||||
const (
|
||||
nodeInfoFixedPrmNumber = 1
|
||||
|
||||
|
@ -31,7 +17,7 @@ const (
|
|||
// GetNetMapByEpoch receives information list about storage nodes
|
||||
// through the Netmap contract call, composes network map
|
||||
// from them and returns it. Returns snapshot of the specified epoch number.
|
||||
func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.Netmap, error) {
|
||||
func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.NetMap, error) {
|
||||
invokePrm := client.TestInvokePrm{}
|
||||
invokePrm.SetMethod(epochSnapshotMethod)
|
||||
invokePrm.SetArgs(epoch)
|
||||
|
@ -48,7 +34,7 @@ func (c *Client) GetNetMapByEpoch(epoch uint64) (*netmap.Netmap, error) {
|
|||
// GetCandidates receives information list about candidates
|
||||
// for the next epoch network map through the Netmap contract
|
||||
// call, composes network map from them and returns it.
|
||||
func (c *Client) GetCandidates() (*netmap.Netmap, error) {
|
||||
func (c *Client) GetCandidates() (*netmap.NetMap, error) {
|
||||
invokePrm := client.TestInvokePrm{}
|
||||
invokePrm.SetMethod(netMapCandidatesMethod)
|
||||
|
||||
|
@ -62,7 +48,10 @@ func (c *Client) GetCandidates() (*netmap.Netmap, error) {
|
|||
return nil, fmt.Errorf("could not parse contract response: %w", err)
|
||||
}
|
||||
|
||||
return netmap.NewNetmap(netmap.NodesFromInfo(candVals))
|
||||
var nm netmap.NetMap
|
||||
nm.SetNodes(candVals)
|
||||
|
||||
return &nm, nil
|
||||
}
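Note: NetMap is composed in place now — a zero value plus SetNodes replaces the NewNetmap/NodesFromInfo constructors, and the node list is read back with Nodes(). Sketch, with nodeInfos assumed to be the []netmap.NodeInfo parsed from the contract:

var nm netmap.NetMap
nm.SetNodes(nodeInfos)

for _, n := range nm.Nodes() {
	fmt.Printf("candidate %x\n", n.PublicKey())
}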
|
||||
|
||||
// NetMap performs the test invoke of get network map
|
||||
|
@ -128,11 +117,9 @@ func stackItemToNodeInfo(prm stackitem.Item, res *netmap.NodeInfo) error {
|
|||
|
||||
switch state {
|
||||
case 1:
|
||||
res.SetState(netmap.NodeStateOnline)
|
||||
res.SetOnline()
|
||||
case 2:
|
||||
res.SetState(netmap.NodeStateOffline)
|
||||
default:
|
||||
res.SetState(0)
|
||||
res.SetOffline()
|
||||
}
|
||||
|
||||
return nil
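Note: the NodeState enum is gone from the SDK; the contract integer (1 = online, 2 = offline, the convention used elsewhere in this commit) maps onto flag setters, and readers query IsOnline/IsOffline. A compact sketch of both directions:

var nodeInfo netmap.NodeInfo

switch state { // integer state as stored in the Netmap contract
case 1:
	nodeInfo.SetOnline()
case 2:
	nodeInfo.SetOffline()
}

if nodeInfo.IsOnline() {
	// node announced itself as available
}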
|
||||
|
|
|
@ -17,23 +17,29 @@ func Test_stackItemsToNodeInfos(t *testing.T) {
|
|||
pub := make([]byte, 33)
|
||||
rand.Read(pub)
|
||||
|
||||
expected[i].SetState(netmap.NodeState(i % 3))
|
||||
switch i % 3 {
|
||||
case 1:
|
||||
expected[i].SetOffline()
|
||||
case 2:
|
||||
expected[i].SetOnline()
|
||||
}
|
||||
|
||||
expected[i].SetPublicKey(pub)
|
||||
|
||||
var attr netmap.NodeAttribute
|
||||
attr.SetKey("key")
|
||||
attr.SetValue(strconv.Itoa(i))
|
||||
expected[i].SetAttributes(attr)
|
||||
expected[i].SetAttribute("key", strconv.Itoa(i))
|
||||
}
|
||||
|
||||
items := make([]stackitem.Item, 4)
|
||||
for i := range items {
|
||||
data, err := expected[i].Marshal()
|
||||
require.NoError(t, err)
|
||||
data := expected[i].Marshal()
|
||||
|
||||
state := int64(expected[i].State())
|
||||
if state != 0 { // In contract online=1, offline=2, in API it is the other way.
|
||||
state = 3 - state
|
||||
var state int64
|
||||
|
||||
switch {
|
||||
case expected[i].IsOnline():
|
||||
state = 1
|
||||
case expected[i].IsOffline():
|
||||
state = 2
|
||||
}
|
||||
|
||||
items[i] = stackitem.NewStruct([]stackitem.Item{
|
||||
|
|
|
@ -12,17 +12,17 @@ import (
|
|||
// through the Netmap contract call, composes network map
|
||||
// from them and returns it. With diff == 0 returns current
|
||||
// network map, else return snapshot of previous network map.
|
||||
func (c *Client) GetNetMap(diff uint64) (*netmap.Netmap, error) {
|
||||
func (c *Client) GetNetMap(diff uint64) (*netmap.NetMap, error) {
|
||||
return c.getNetMap(diff)
|
||||
}
|
||||
|
||||
// Snapshot returns current netmap node infos.
|
||||
// Consider using pkg/morph/client/netmap for this.
|
||||
func (c *Client) Snapshot() (*netmap.Netmap, error) {
|
||||
func (c *Client) Snapshot() (*netmap.NetMap, error) {
|
||||
return c.getNetMap(0)
|
||||
}
|
||||
|
||||
func (c *Client) getNetMap(diff uint64) (*netmap.Netmap, error) {
|
||||
func (c *Client) getNetMap(diff uint64) (*netmap.NetMap, error) {
|
||||
prm := client.TestInvokePrm{}
|
||||
prm.SetMethod(snapshotMethod)
|
||||
prm.SetArgs(diff)
|
||||
|
@ -35,7 +35,7 @@ func (c *Client) getNetMap(diff uint64) (*netmap.Netmap, error) {
|
|||
return unmarshalNetmap(res, snapshotMethod)
|
||||
}
|
||||
|
||||
func unmarshalNetmap(items []stackitem.Item, method string) (*netmap.Netmap, error) {
|
||||
func unmarshalNetmap(items []stackitem.Item, method string) (*netmap.NetMap, error) {
|
||||
rawPeers, err := peersFromStackItems(items, method)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -48,5 +48,8 @@ func unmarshalNetmap(items []stackitem.Item, method string) (*netmap.Netmap, err
|
|||
}
|
||||
}
|
||||
|
||||
return netmap.NewNetmap(netmap.NodesFromInfo(result))
|
||||
var nm netmap.NetMap
|
||||
nm.SetNodes(result)
|
||||
|
||||
return &nm, nil
|
||||
}
|
||||
|
|
|
@ -4,13 +4,13 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
)
|
||||
|
||||
// UpdatePeerPrm groups parameters of UpdatePeerState operation.
|
||||
type UpdatePeerPrm struct {
|
||||
key []byte
|
||||
state netmap.NodeState
|
||||
|
||||
online bool
|
||||
|
||||
client.InvokePrmOptional
|
||||
}
|
||||
|
@ -21,8 +21,8 @@ func (u *UpdatePeerPrm) SetKey(key []byte) {
|
|||
}
|
||||
|
||||
// SetState sets node state.
|
||||
func (u *UpdatePeerPrm) SetState(state netmap.NodeState) {
|
||||
u.state = state
|
||||
func (u *UpdatePeerPrm) SetOnline() {
|
||||
u.online = true
|
||||
}
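Note: callers now opt into the online state explicitly and get offline by default. A minimal sketch, with cli an assumed morph netmap client and pubKey the peer's public key bytes:

var prm netmapclient.UpdatePeerPrm
prm.SetKey(pubKey)
// prm.SetOnline() would mark the peer online; leaving it unset means offline.

if err := cli.UpdatePeerState(prm); err != nil {
	return fmt.Errorf("update peer state: %w", err)
}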
|
||||
|
||||
// UpdatePeerState changes peer status through Netmap contract call.
|
||||
|
@ -36,9 +36,14 @@ func (c *Client) UpdatePeerState(p UpdatePeerPrm) error {
|
|||
method += "IR"
|
||||
}
|
||||
|
||||
state := 2
|
||||
if p.online {
|
||||
state = 1
|
||||
}
|
||||
|
||||
prm := client.InvokePrm{}
|
||||
prm.SetMethod(method)
|
||||
prm.SetArgs(int64(p.state.ToV2()), p.key)
|
||||
prm.SetArgs(int64(state), p.key)
|
||||
prm.InvokePrmOptional = p.InvokePrmOptional
|
||||
|
||||
if err := c.client.Invoke(prm); err != nil {
|
||||
|
|
|
@ -10,12 +10,12 @@ import (
|
|||
v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/morph/client"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/morph/event"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
)
|
||||
|
||||
type UpdatePeer struct {
|
||||
publicKey *keys.PublicKey
|
||||
status netmap.NodeState
|
||||
|
||||
online bool
|
||||
|
||||
// For notary notifications only.
|
||||
// Contains raw transactions of notary request.
|
||||
|
@ -25,8 +25,8 @@ type UpdatePeer struct {
|
|||
// MorphEvent implements Neo:Morph Event interface.
|
||||
func (UpdatePeer) MorphEvent() {}
|
||||
|
||||
func (s UpdatePeer) Status() netmap.NodeState {
|
||||
return s.status
|
||||
func (s UpdatePeer) Online() bool {
|
||||
return s.online
|
||||
}
|
||||
|
||||
func (s UpdatePeer) PublicKey() *keys.PublicKey {
|
||||
|
@ -73,7 +73,13 @@ func ParseUpdatePeer(e *subscriptions.NotificationEvent) (event.Event, error) {
|
|||
return nil, fmt.Errorf("could not get node status: %w", err)
|
||||
}
|
||||
|
||||
ev.status = netmap.NodeStateFromV2(v2netmap.NodeState(st))
|
||||
switch v2netmap.NodeState(st) {
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported node state %d", st)
|
||||
case v2netmap.Offline:
|
||||
case v2netmap.Online:
|
||||
ev.online = true
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
|
|
@ -7,9 +7,8 @@ import (
|
|||
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
|
||||
netmapv2 "github.com/nspcc-dev/neofs-api-go/v2/netmap"
|
||||
v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/morph/event"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
)
|
||||
|
||||
var errNilPubKey = errors.New("could not parse public key: public key is nil")
|
||||
|
@ -27,10 +26,6 @@ func (s *UpdatePeer) setPublicKey(v []byte) (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func (s *UpdatePeer) setStatus(v uint32) {
|
||||
s.status = netmap.NodeStateFromV2(netmapv2.NodeState(v))
|
||||
}
|
||||
|
||||
const (
|
||||
// UpdateStateNotaryEvent is method name for netmap state updating
|
||||
// operations in `Netmap` contract. It is used as an identifier for
|
||||
|
@ -66,7 +61,13 @@ func ParseUpdatePeerNotary(ne event.NotaryEvent) (event.Event, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
ev.setStatus(uint32(state))
|
||||
switch v2netmap.NodeState(state) {
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported node state %d", state)
|
||||
case v2netmap.Offline:
|
||||
case v2netmap.Online:
|
||||
ev.online = true
|
||||
}
|
||||
|
||||
fieldNum++
|
||||
case fieldNum == expectedItemNumUpdatePeer:
|
||||
|
|
|
@ -7,7 +7,6 @@ import (
|
|||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/morph/event"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -15,10 +14,7 @@ func TestParseUpdatePeer(t *testing.T) {
|
|||
priv, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
var (
|
||||
publicKey = priv.PublicKey()
|
||||
state = netmap.NodeStateOffline
|
||||
)
|
||||
publicKey := priv.PublicKey()
|
||||
|
||||
t.Run("wrong number of parameters", func(t *testing.T) {
|
||||
prms := []stackitem.Item{
|
||||
|
@ -48,14 +44,14 @@ func TestParseUpdatePeer(t *testing.T) {
|
|||
|
||||
t.Run("correct behavior", func(t *testing.T) {
|
||||
ev, err := ParseUpdatePeer(createNotifyEventFromItems([]stackitem.Item{
|
||||
stackitem.NewBigInteger(new(big.Int).SetInt64(int64(state.ToV2()))),
|
||||
stackitem.NewBigInteger(new(big.Int).SetInt64(1)),
|
||||
stackitem.NewByteArray(publicKey.Bytes()),
|
||||
}))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, UpdatePeer{
|
||||
publicKey: publicKey,
|
||||
status: state,
|
||||
online: true,
|
||||
}, ev)
|
||||
})
|
||||
}
|
||||
|
|
|
@ -132,10 +132,11 @@ func WriteToNodeInfo(g AddressGroup, ni *netmap.NodeInfo) {
|
|||
addrs := make([]string, 0, num)
|
||||
|
||||
iterateAllAddresses(g, func(addr Address) {
|
||||
ni.SetNetworkEndpoints()
|
||||
addrs = append(addrs, addr.String())
|
||||
})
|
||||
|
||||
ni.SetAddresses(addrs...)
|
||||
ni.SetNetworkEndpoints(addrs...)
|
||||
}
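Note: NodeInfo addresses are consistently renamed to network endpoints. A short sketch of the new accessors (the endpoint strings are illustrative):

var ni netmap.NodeInfo
ni.SetNetworkEndpoints("s01.neofs.example:8080", "s01.neofs.example:8081")

fmt.Println(ni.NumberOfNetworkEndpoints()) // 2

ni.IterateNetworkEndpoints(func(e string) bool {
	fmt.Println(e)
	return false // returning true is assumed to stop the iteration early
})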
|
||||
|
||||
// Intersects checks if two AddressGroup have
|
||||
|
|
|
@ -30,6 +30,18 @@ var (
|
|||
errUnsupportedPresentationProtocol = errors.New("unsupported presentation protocol in multiaddress")
|
||||
)
|
||||
|
||||
// NodeEndpointsIterator is a wrapper over netmap.NodeInfo which implements
|
||||
// MultiAddressIterator.
|
||||
type NodeEndpointsIterator netmap.NodeInfo
|
||||
|
||||
func (x NodeEndpointsIterator) IterateAddresses(f func(string) bool) {
|
||||
(netmap.NodeInfo)(x).IterateNetworkEndpoints(f)
|
||||
}
|
||||
|
||||
func (x NodeEndpointsIterator) NumberOfAddresses() int {
|
||||
return (netmap.NodeInfo)(x).NumberOfNetworkEndpoints()
|
||||
}
|
||||
|
||||
// VerifyMultiAddress validates multiaddress of n.
|
||||
//
|
||||
// If n's address contains more than 3 protocols
|
||||
|
@ -45,8 +57,8 @@ var (
|
|||
// 2. tcp
|
||||
// 3. tls(optional, may be absent)
|
||||
//
|
||||
func VerifyMultiAddress(ni *netmap.NodeInfo) error {
|
||||
return iterateParsedAddresses(ni, checkProtocols)
|
||||
func VerifyMultiAddress(ni netmap.NodeInfo) error {
|
||||
return iterateParsedAddresses(NodeEndpointsIterator(ni), checkProtocols)
|
||||
}
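Note: the wrapper keeps existing MultiAddressIterator consumers working with the value-type NodeInfo, for example when an AddressGroup is built from a node descriptor. A hedged sketch; ni is a netmap.NodeInfo:

var ag network.AddressGroup

// The conversion adapts the SDK value to MultiAddressIterator.
if err := ag.FromIterator(network.NodeEndpointsIterator(ni)); err != nil {
	return fmt.Errorf("parse node endpoints: %w", err)
}

if err := network.VerifyMultiAddress(ni); err != nil {
	return fmt.Errorf("invalid multiaddress: %w", err)
}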
|
||||
|
||||
func checkProtocols(a Address) error {
|
||||
|
|
|
@ -55,10 +55,7 @@ func TestVerifyMultiAddress_Order(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func constructNodeInfo(address string) *netmap.NodeInfo {
|
||||
ni := new(netmap.NodeInfo)
|
||||
|
||||
ni.SetAddresses(address)
|
||||
|
||||
func constructNodeInfo(address string) (ni netmap.NodeInfo) {
|
||||
ni.SetNetworkEndpoints(address)
|
||||
return ni
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ type Context struct {
|
|||
sgMembersCache map[int][]oid.ID
|
||||
|
||||
placementMtx sync.Mutex
|
||||
placementCache map[string][]netmap.Nodes
|
||||
placementCache map[string][][]netmap.NodeInfo
|
||||
|
||||
porRequests, porRetries atomic.Uint32
|
||||
|
||||
|
@ -51,11 +51,11 @@ type Context struct {
|
|||
type pairMemberInfo struct {
|
||||
failedPDP, passedPDP bool // at least one
|
||||
|
||||
node *netmap.Node
|
||||
node netmap.NodeInfo
|
||||
}
|
||||
|
||||
type gamePair struct {
|
||||
n1, n2 *netmap.Node
|
||||
n1, n2 netmap.NodeInfo
|
||||
|
||||
id oid.ID
|
||||
|
||||
|
@ -88,11 +88,11 @@ type ContainerCommunicator interface {
|
|||
GetSG(*audit.Task, oid.ID) (*storagegroup.StorageGroup, error)
|
||||
|
||||
// Must return object header from the container node.
|
||||
GetHeader(*audit.Task, *netmap.Node, oid.ID, bool) (*object.Object, error)
|
||||
GetHeader(*audit.Task, netmap.NodeInfo, oid.ID, bool) (*object.Object, error)
|
||||
|
||||
// Must return homomorphic Tillich-Zemor hash of payload range of the
|
||||
// object stored in container node.
|
||||
GetRangeHash(*audit.Task, *netmap.Node, oid.ID, *object.Range) ([]byte, error)
|
||||
GetRangeHash(*audit.Task, netmap.NodeInfo, oid.ID, *object.Range) ([]byte, error)
|
||||
}
|
||||
|
||||
// NewContext creates, initializes and returns Context.
|
||||
|
@ -160,9 +160,12 @@ func (c *Context) init() {
|
|||
|
||||
c.sgMembersCache = make(map[int][]oid.ID)
|
||||
|
||||
c.placementCache = make(map[string][]netmap.Nodes)
|
||||
c.placementCache = make(map[string][][]netmap.NodeInfo)
|
||||
|
||||
c.cnrNodesNum = len(c.task.ContainerNodes().Flatten())
|
||||
cnrVectors := c.task.ContainerNodes()
|
||||
for i := range cnrVectors {
|
||||
c.cnrNodesNum += len(cnrVectors[i])
|
||||
}
|
||||
|
||||
c.pairedNodes = make(map[uint64]*pairMemberInfo)
|
||||
|
||||
|
@ -200,7 +203,7 @@ func (c *Context) writeReport() {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Context) buildPlacement(id oid.ID) ([]netmap.Nodes, error) {
|
||||
func (c *Context) buildPlacement(id oid.ID) ([][]netmap.NodeInfo, error) {
|
||||
c.placementMtx.Lock()
|
||||
defer c.placementMtx.Unlock()
|
||||
|
||||
|
|
|
@ -108,7 +108,7 @@ func (c *Context) splitPayload(id oid.ID) []uint64 {
|
|||
}
|
||||
|
||||
func (c *Context) collectHashes(p *gamePair) {
|
||||
fn := func(n *netmap.Node, rngs []*object.Range) [][]byte {
|
||||
fn := func(n netmap.NodeInfo, rngs []*object.Range) [][]byte {
|
||||
// Here we randomize the order a bit: the hypothesis is that this
|
||||
// makes it harder for an unscrupulous node to come up with a
|
||||
// reliable cheating strategy.
|
||||
|
@ -176,7 +176,7 @@ func (c *Context) analyzeHashes(p *gamePair) {
|
|||
c.passNodesPDP(p.n1, p.n2)
|
||||
}
|
||||
|
||||
func (c *Context) failNodesPDP(ns ...*netmap.Node) {
|
||||
func (c *Context) failNodesPDP(ns ...netmap.NodeInfo) {
|
||||
c.pairedMtx.Lock()
|
||||
|
||||
for i := range ns {
|
||||
|
@ -186,7 +186,7 @@ func (c *Context) failNodesPDP(ns ...*netmap.Node) {
|
|||
c.pairedMtx.Unlock()
|
||||
}
|
||||
|
||||
func (c *Context) passNodesPDP(ns ...*netmap.Node) {
|
||||
func (c *Context) passNodesPDP(ns ...netmap.NodeInfo) {
|
||||
c.pairedMtx.Lock()
|
||||
|
||||
for i := range ns {
|
||||
|
@ -200,18 +200,18 @@ func (c *Context) writePairsResult() {
|
|||
var failCount, okCount int
|
||||
|
||||
c.iteratePairedNodes(
|
||||
func(*netmap.Node) { failCount++ },
|
||||
func(*netmap.Node) { okCount++ },
|
||||
func(netmap.NodeInfo) { failCount++ },
|
||||
func(netmap.NodeInfo) { okCount++ },
|
||||
)
|
||||
|
||||
failedNodes := make([][]byte, 0, failCount)
|
||||
passedNodes := make([][]byte, 0, okCount)
|
||||
|
||||
c.iteratePairedNodes(
|
||||
func(n *netmap.Node) {
|
||||
func(n netmap.NodeInfo) {
|
||||
failedNodes = append(failedNodes, n.PublicKey())
|
||||
},
|
||||
func(n *netmap.Node) {
|
||||
func(n netmap.NodeInfo) {
|
||||
passedNodes = append(passedNodes, n.PublicKey())
|
||||
},
|
||||
)
|
||||
|
@ -219,7 +219,7 @@ func (c *Context) writePairsResult() {
|
|||
c.report.SetPDPResults(passedNodes, failedNodes)
|
||||
}
|
||||
|
||||
func (c *Context) iteratePairedNodes(onFail, onPass func(*netmap.Node)) {
|
||||
func (c *Context) iteratePairedNodes(onFail, onPass func(netmap.NodeInfo)) {
|
||||
for _, pairedNode := range c.pairedNodes {
|
||||
if pairedNode.failedPDP {
|
||||
onFail(pairedNode.node)
|
||||
|
|
|
@ -23,12 +23,12 @@ func (c *Context) executePoP() {
|
|||
}
|
||||
|
||||
func (c *Context) buildCoverage() {
|
||||
replicas := c.task.ContainerStructure().PlacementPolicy().Replicas()
|
||||
policy := c.task.ContainerStructure().PlacementPolicy()
|
||||
|
||||
// select random member from another storage group
|
||||
// and process all placement vectors
|
||||
c.iterateSGMembersPlacementRand(func(id oid.ID, ind int, nodes netmap.Nodes) bool {
|
||||
c.processObjectPlacement(id, nodes, replicas[ind].Count())
|
||||
c.iterateSGMembersPlacementRand(func(id oid.ID, ind int, nodes []netmap.NodeInfo) bool {
|
||||
c.processObjectPlacement(id, nodes, policy.ReplicaNumberByIndex(ind))
|
||||
return c.containerCovered()
|
||||
})
|
||||
}
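Note: replica counts are now read from the policy by vector index instead of a Replicas() slice. A rough sketch of how the coverage loop consumes them, assuming task and policy are the audit task and its container policy:

vectors := task.ContainerNodes() // [][]netmap.NodeInfo, one entry per policy replica

for i := range vectors {
	fmt.Printf("vector %d: %d nodes, %d copies required\n",
		i, len(vectors[i]), policy.ReplicaNumberByIndex(i))
}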
|
||||
|
@ -38,7 +38,7 @@ func (c *Context) containerCovered() bool {
|
|||
return c.cnrNodesNum <= len(c.pairedNodes)
|
||||
}
|
||||
|
||||
func (c *Context) processObjectPlacement(id oid.ID, nodes netmap.Nodes, replicas uint32) {
|
||||
func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, replicas uint32) {
|
||||
var (
|
||||
ok uint32
|
||||
optimal bool
|
||||
|
@ -50,7 +50,7 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes netmap.Nodes, replicas
|
|||
|
||||
for i := 0; ok < replicas && i < len(nodes); i++ {
|
||||
// try to get object header from node
|
||||
hdr, err := c.cnrCom.GetHeader(c.task, &nodes[i], id, false)
|
||||
hdr, err := c.cnrCom.GetHeader(c.task, nodes[i], id, false)
|
||||
if err != nil {
|
||||
c.log.Debug("could not get object header from candidate",
|
||||
zap.Stringer("id", id),
|
||||
|
@ -95,14 +95,14 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes netmap.Nodes, replicas
|
|||
|
||||
if unpairedCandidate1 >= 0 {
|
||||
if unpairedCandidate2 >= 0 {
|
||||
c.composePair(id, &nodes[unpairedCandidate1], &nodes[unpairedCandidate2])
|
||||
c.composePair(id, nodes[unpairedCandidate1], nodes[unpairedCandidate2])
|
||||
} else if pairedCandidate >= 0 {
|
||||
c.composePair(id, &nodes[unpairedCandidate1], &nodes[pairedCandidate])
|
||||
c.composePair(id, nodes[unpairedCandidate1], nodes[pairedCandidate])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Context) composePair(id oid.ID, n1, n2 *netmap.Node) {
|
||||
func (c *Context) composePair(id oid.ID, n1, n2 netmap.NodeInfo) {
|
||||
c.pairs = append(c.pairs, gamePair{
|
||||
n1: n1,
|
||||
n2: n2,
|
||||
|
@ -117,7 +117,7 @@ func (c *Context) composePair(id oid.ID, n1, n2 *netmap.Node) {
|
|||
}
|
||||
}
|
||||
|
||||
func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, netmap.Nodes) bool) {
|
||||
func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.NodeInfo) bool) {
|
||||
// iterate over storage groups members for all storage groups (one by one)
|
||||
// with randomly shuffled members
|
||||
c.iterateSGMembersRand(func(id oid.ID) bool {
|
||||
|
|
|
@ -78,7 +78,7 @@ func (c *Context) checkStorageGroupPoR(ind int, sg oid.ID) {
|
|||
accRetries++
|
||||
}
|
||||
|
||||
hdr, err := c.cnrCom.GetHeader(c.task, &flat[j], members[i], true)
|
||||
hdr, err := c.cnrCom.GetHeader(c.task, flat[j], members[i], true)
|
||||
if err != nil {
|
||||
c.log.Debug("can't head object",
|
||||
zap.String("remote_node", hex.EncodeToString(flat[j].PublicKey())),
|
||||
|
|
|
@ -19,9 +19,9 @@ type Task struct {
|
|||
|
||||
cnr *container.Container
|
||||
|
||||
nm *netmap.Netmap
|
||||
nm *netmap.NetMap
|
||||
|
||||
cnrNodes netmap.ContainerNodes
|
||||
cnrNodes [][]netmap.NodeInfo
|
||||
|
||||
sgList []oid.ID
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ func (t *Task) ContainerStructure() *container.Container {
|
|||
}
|
||||
|
||||
// WithContainerNodes sets nodes in the container under audit.
|
||||
func (t *Task) WithContainerNodes(cnrNodes netmap.ContainerNodes) *Task {
|
||||
func (t *Task) WithContainerNodes(cnrNodes [][]netmap.NodeInfo) *Task {
|
||||
if t != nil {
|
||||
t.cnrNodes = cnrNodes
|
||||
}
|
||||
|
@ -92,12 +92,12 @@ func (t *Task) WithContainerNodes(cnrNodes netmap.ContainerNodes) *Task {
|
|||
}
|
||||
|
||||
// NetworkMap returns network map of audit epoch.
|
||||
func (t *Task) NetworkMap() *netmap.Netmap {
|
||||
func (t *Task) NetworkMap() *netmap.NetMap {
|
||||
return t.nm
|
||||
}
|
||||
|
||||
// WithNetworkMap sets network map of audit epoch.
|
||||
func (t *Task) WithNetworkMap(nm *netmap.Netmap) *Task {
|
||||
func (t *Task) WithNetworkMap(nm *netmap.NetMap) *Task {
|
||||
if t != nil {
|
||||
t.nm = nm
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ func (t *Task) WithNetworkMap(nm *netmap.Netmap) *Task {
|
|||
}
|
||||
|
||||
// ContainerNodes returns nodes in the container under audit.
|
||||
func (t *Task) ContainerNodes() netmap.ContainerNodes {
|
||||
func (t *Task) ContainerNodes() [][]netmap.NodeInfo {
|
||||
return t.cnrNodes
|
||||
}
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
|
||||
loadroute "github.com/nspcc-dev/neofs-node/pkg/services/container/announcement/load/route"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/container"
|
||||
)
|
||||
|
@ -37,14 +38,12 @@ func (b *Builder) NextStage(a container.UsedSpaceAnnouncement, passed []loadrout
|
|||
continue
|
||||
}
|
||||
|
||||
target := placement[i][0]
|
||||
|
||||
if len(passed) == 1 && bytes.Equal(passed[0].PublicKey(), target.PublicKey()) {
|
||||
if len(passed) == 1 && bytes.Equal(passed[0].PublicKey(), placement[i][0].PublicKey()) {
|
||||
// add nil element so the announcement will be saved in local memory
|
||||
res = append(res, nil)
|
||||
} else {
|
||||
// add element with remote node to send announcement to
|
||||
res = append(res, target)
|
||||
res = append(res, netmapcore.Node(placement[i][0]))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -10,5 +10,5 @@ type PlacementBuilder interface {
|
|||
// BuildPlacement must compose and sort (according to a specific algorithm)
|
||||
// storage nodes from the container by its identifier using network map
|
||||
// of particular epoch.
|
||||
BuildPlacement(epoch uint64, cnr cid.ID) ([]netmap.Nodes, error)
|
||||
BuildPlacement(epoch uint64, cnr cid.ID) ([][]netmap.NodeInfo, error)
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ func (s *Server) NetmapSnapshot(ctx context.Context, req *control.NetmapSnapshot
|
|||
|
||||
nm := new(control.Netmap)
|
||||
nm.SetEpoch(epoch)
|
||||
nm.SetNodes(nodesFromAPI(apiNetMap.Nodes))
|
||||
nm.SetNodes(nodesFromAPI(apiNetMap.Nodes()))
|
||||
|
||||
// create and fill response
|
||||
resp := new(control.NetmapSnapshotResponse)
|
||||
|
@ -47,20 +47,28 @@ func (s *Server) NetmapSnapshot(ctx context.Context, req *control.NetmapSnapshot
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
func nodesFromAPI(apiNodes netmapAPI.Nodes) []*control.NodeInfo {
|
||||
func nodesFromAPI(apiNodes []netmapAPI.NodeInfo) []*control.NodeInfo {
|
||||
nodes := make([]*control.NodeInfo, 0, len(apiNodes))
|
||||
|
||||
for _, apiNode := range apiNodes {
|
||||
for i := range apiNodes {
|
||||
node := new(control.NodeInfo)
|
||||
node.SetPublicKey(apiNode.PublicKey())
|
||||
node.SetPublicKey(apiNodes[i].PublicKey())
|
||||
|
||||
addrs := make([]string, 0, apiNode.NumberOfAddresses())
|
||||
netmapAPI.IterateAllAddresses(apiNode.NodeInfo, func(s string) {
|
||||
addrs := make([]string, 0, apiNodes[i].NumberOfNetworkEndpoints())
|
||||
netmapAPI.IterateNetworkEndpoints(apiNodes[i], func(s string) {
|
||||
addrs = append(addrs, s)
|
||||
})
|
||||
node.SetAddresses(addrs)
|
||||
node.SetAttributes(attributesFromAPI(apiNode.Attributes()))
|
||||
node.SetState(stateFromAPI(apiNode.State()))
|
||||
node.SetAttributes(attributesFromAPI(apiNodes[i]))
|
||||
|
||||
switch {
|
||||
default:
|
||||
node.SetState(control.NetmapStatus_STATUS_UNDEFINED)
|
||||
case apiNodes[i].IsOnline():
|
||||
node.SetState(control.NetmapStatus_ONLINE)
|
||||
case apiNodes[i].IsOffline():
|
||||
node.SetState(control.NetmapStatus_OFFLINE)
|
||||
}
|
||||
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
|
@ -68,36 +76,16 @@ func nodesFromAPI(apiNodes netmapAPI.Nodes) []*control.NodeInfo {
|
|||
return nodes
|
||||
}
|
||||
|
||||
func stateFromAPI(s netmapAPI.NodeState) control.NetmapStatus {
|
||||
switch s {
|
||||
default:
|
||||
return control.NetmapStatus_STATUS_UNDEFINED
|
||||
case netmapAPI.NodeStateOffline:
|
||||
return control.NetmapStatus_OFFLINE
|
||||
case netmapAPI.NodeStateOnline:
|
||||
return control.NetmapStatus_ONLINE
|
||||
}
|
||||
}
|
||||
func attributesFromAPI(apiNode netmapAPI.NodeInfo) []*control.NodeInfo_Attribute {
|
||||
attrs := make([]*control.NodeInfo_Attribute, 0, apiNode.NumberOfAttributes())
|
||||
|
||||
func attributesFromAPI(apiAttrs []netmapAPI.NodeAttribute) []*control.NodeInfo_Attribute {
|
||||
attrs := make([]*control.NodeInfo_Attribute, 0, len(apiAttrs))
|
||||
|
||||
for _, apiAttr := range apiAttrs {
|
||||
apiNode.IterateAttributes(func(key, value string) {
|
||||
a := new(control.NodeInfo_Attribute)
|
||||
a.SetKey(apiAttr.Key())
|
||||
a.SetValue(apiAttr.Value())
|
||||
|
||||
apiParents := apiAttr.ParentKeys()
|
||||
parents := make([]string, 0, len(apiParents))
|
||||
|
||||
for i := range apiParents {
|
||||
parents = append(parents, apiParents[i])
|
||||
}
|
||||
|
||||
a.SetParents(parents)
|
||||
a.SetKey(key)
|
||||
a.SetValue(value)
|
||||
|
||||
attrs = append(attrs, a)
|
||||
}
|
||||
})
|
||||
|
||||
return attrs
|
||||
}
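Note: node attributes follow the same flattening — NodeAttribute values give way to plain key/value setters and an iterator on NodeInfo. Sketch, assuming the SDK netmap import; the attribute key/value are illustrative:

var ni netmap.NodeInfo
ni.SetAttribute("Capacity", "100")

attrs := make(map[string]string, ni.NumberOfAttributes())
ni.IterateAttributes(func(key, value string) {
	attrs[key] = value
})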
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"github.com/nspcc-dev/neofs-api-go/v2/netmap"
|
||||
"github.com/nspcc-dev/neofs-api-go/v2/refs"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/core/version"
|
||||
netmapSDK "github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
versionsdk "github.com/nspcc-dev/neofs-sdk-go/version"
|
||||
)
|
||||
|
||||
|
@ -32,7 +33,7 @@ type NetworkInfo interface {
|
|||
// Must return recent network information in NeoFS API v2 NetworkInfo structure.
|
||||
//
|
||||
// If protocol version is <=2.9, MillisecondsPerBlock and network config should be unset.
|
||||
Dump(*refs.Version) (*netmap.NetworkInfo, error)
|
||||
Dump(versionsdk.Version) (*netmapSDK.NetworkInfo, error)
|
||||
}
|
||||
|
||||
func NewExecutionService(s NodeState, v versionsdk.Version, netInfo NetworkInfo) Server {
|
||||
|
@ -93,13 +94,24 @@ func (s *executorSvc) LocalNodeInfo(
|
|||
func (s *executorSvc) NetworkInfo(
|
||||
_ context.Context,
|
||||
req *netmap.NetworkInfoRequest) (*netmap.NetworkInfoResponse, error) {
|
||||
ni, err := s.netInfo.Dump(req.GetMetaHeader().GetVersion())
|
||||
verV2 := req.GetMetaHeader().GetVersion()
|
||||
if verV2 == nil {
|
||||
return nil, errors.New("missing protocol version in meta header")
|
||||
}
|
||||
|
||||
var ver versionsdk.Version
|
||||
ver.ReadFromV2(*verV2)
|
||||
|
||||
ni, err := s.netInfo.Dump(ver)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var niV2 netmap.NetworkInfo
|
||||
ni.WriteToV2(&niV2)
|
||||
|
||||
body := new(netmap.NetworkInfoResponseBody)
|
||||
body.SetNetworkInfo(ni)
|
||||
body.SetNetworkInfo(&niV2)
|
||||
|
||||
resp := new(netmap.NetworkInfoResponse)
|
||||
resp.SetBody(body)
|
||||
|
|
|
@ -132,20 +132,26 @@ func (c senderClassifier) isContainerKey(
|
|||
}
|
||||
|
||||
func lookupKeyInContainer(
|
||||
nm *netmap.Netmap,
|
||||
nm *netmap.NetMap,
|
||||
owner, idCnr []byte,
|
||||
cnr *container.Container) (bool, error) {
|
||||
cnrNodes, err := nm.GetContainerNodes(cnr.PlacementPolicy(), idCnr)
|
||||
policy := cnr.PlacementPolicy()
|
||||
if policy == nil {
|
||||
return false, errors.New("missing placement policy in container")
|
||||
}
|
||||
|
||||
cnrVectors, err := nm.ContainerNodes(*policy, idCnr)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
flatCnrNodes := cnrNodes.Flatten() // we need single array to iterate on
|
||||
for i := range flatCnrNodes {
|
||||
if bytes.Equal(flatCnrNodes[i].PublicKey(), owner) {
|
||||
for i := range cnrVectors {
|
||||
for j := range cnrVectors[i] {
|
||||
if bytes.Equal(cnrVectors[i][j].PublicKey(), owner) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-node/pkg/core/client"
|
||||
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/core/object"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/network"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object/util"
|
||||
|
@ -18,6 +19,7 @@ import (
|
|||
"github.com/nspcc-dev/neofs-sdk-go/container"
|
||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
netmaptest "github.com/nspcc-dev/neofs-sdk-go/netmap/test"
|
||||
objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
|
||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||
oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test"
|
||||
|
@ -38,7 +40,7 @@ type testTraverserGenerator struct {
|
|||
}
|
||||
|
||||
type testPlacementBuilder struct {
|
||||
vectors map[string][]netmap.Nodes
|
||||
vectors map[string][][]netmap.NodeInfo
|
||||
}
|
||||
|
||||
type testClientCache struct {
|
||||
|
@ -81,7 +83,7 @@ func (g *testTraverserGenerator) GenerateTraverser(cnr cid.ID, obj *oid.ID, e ui
|
|||
return placement.NewTraverser(opts...)
|
||||
}
|
||||
|
||||
func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ *netmap.PlacementPolicy) ([]netmap.Nodes, error) {
|
||||
func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
|
||||
var addr oid.Address
|
||||
addr.SetContainer(cnr)
|
||||
|
||||
|
@ -392,8 +394,8 @@ func TestGetLocalOnly(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func testNodeMatrix(t testing.TB, dim []int) ([]netmap.Nodes, [][]string) {
|
||||
mNodes := make([]netmap.Nodes, len(dim))
|
||||
func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
|
||||
mNodes := make([][]netmap.NodeInfo, len(dim))
|
||||
mAddr := make([][]string, len(dim))
|
||||
|
||||
for i := range dim {
|
||||
|
@ -406,20 +408,20 @@ func testNodeMatrix(t testing.TB, dim []int) ([]netmap.Nodes, [][]string) {
|
|||
strconv.Itoa(60000+j),
|
||||
)
|
||||
|
||||
ni := netmap.NewNodeInfo()
|
||||
ni.SetAddresses(a)
|
||||
var ni netmap.NodeInfo
|
||||
ni.SetNetworkEndpoints(a)
|
||||
|
||||
var na network.AddressGroup
|
||||
|
||||
err := na.FromIterator(ni)
|
||||
err := na.FromIterator(netmapcore.Node(ni))
|
||||
require.NoError(t, err)
|
||||
|
||||
as[j] = network.StringifyGroup(na)
|
||||
|
||||
ns[j] = *ni
|
||||
ns[j] = ni
|
||||
}
|
||||
|
||||
mNodes[i] = netmap.NodesFromInfo(ns)
|
||||
mNodes[i] = ns
|
||||
mAddr[i] = as
|
||||
}
|
||||
|
||||
|
@ -464,7 +466,8 @@ func generateChain(ln int, cnr cid.ID) ([]*objectSDK.Object, []oid.ID, []byte) {
|
|||
func TestGetRemoteSmall(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
cnr := container.New(container.WithPolicy(new(netmap.PlacementPolicy)))
|
||||
pp := netmaptest.PlacementPolicy()
|
||||
cnr := container.New(container.WithPolicy(&pp))
|
||||
idCnr := container.CalculateID(cnr)
|
||||
|
||||
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
|
||||
|
@ -527,7 +530,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
ns, as := testNodeMatrix(t, []int{2})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
},
|
||||
}
|
||||
|
@ -590,7 +593,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
ns, as := testNodeMatrix(t, []int{2})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
},
|
||||
}
|
||||
|
@ -634,7 +637,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
ns, as := testNodeMatrix(t, []int{2})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
},
|
||||
}
|
||||
|
@ -708,7 +711,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
c2.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
splitAddr.EncodeToString(): ns,
|
||||
},
|
||||
|
@ -781,7 +784,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
c2.addResult(child2Addr, nil, apistatus.ObjectNotFound{})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
linkAddr.EncodeToString(): ns,
|
||||
child1Addr.EncodeToString(): ns,
|
||||
|
@ -858,7 +861,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
c2.addResult(child2Addr, children[1], nil)
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
linkAddr.EncodeToString(): ns,
|
||||
child1Addr.EncodeToString(): ns,
|
||||
|
@ -924,7 +927,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
c2.addResult(splitAddr, nil, apistatus.ObjectNotFound{})
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
splitAddr.EncodeToString(): ns,
|
||||
},
|
||||
|
@ -988,7 +991,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
c2.addResult(rightAddr, rightObj, nil)
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
rightAddr.EncodeToString(): ns,
|
||||
preRightAddr.EncodeToString(): ns,
|
||||
|
@ -1058,7 +1061,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
}
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{},
|
||||
vectors: map[string][][]netmap.NodeInfo{},
|
||||
}
|
||||
|
||||
builder.vectors[addr.EncodeToString()] = ns
|
||||
|
@ -1116,7 +1119,8 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
func TestGetFromPastEpoch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
cnr := container.New(container.WithPolicy(new(netmap.PlacementPolicy)))
|
||||
pp := netmaptest.PlacementPolicy()
|
||||
cnr := container.New(container.WithPolicy(&pp))
|
||||
idCnr := container.CalculateID(cnr)
|
||||
|
||||
addr := oidtest.Address()
|
||||
|
@ -1153,12 +1157,12 @@ func TestGetFromPastEpoch(t *testing.T) {
|
|||
c: cnr,
|
||||
b: map[uint64]placement.Builder{
|
||||
curEpoch: &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns[:1],
|
||||
},
|
||||
},
|
||||
curEpoch - 1: &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns[1:],
|
||||
},
|
||||
},
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client"
|
||||
netmapCore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
|
||||
internalclient "github.com/nspcc-dev/neofs-node/pkg/services/object/internal/client"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object/util"
|
||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||
|
@ -29,7 +30,7 @@ type RemoteHeader struct {
|
|||
type RemoteHeadPrm struct {
|
||||
commonHeadPrm *Prm
|
||||
|
||||
node *netmap.NodeInfo
|
||||
node netmap.NodeInfo
|
||||
}
|
||||
|
||||
const remoteOpTTL = 1
|
||||
|
@ -45,7 +46,7 @@ func NewRemoteHeader(keyStorage *util.KeyStorage, cache ClientConstructor) *Remo
|
|||
}
|
||||
|
||||
// WithNodeInfo sets information about the remote node.
|
||||
func (p *RemoteHeadPrm) WithNodeInfo(v *netmap.NodeInfo) *RemoteHeadPrm {
|
||||
func (p *RemoteHeadPrm) WithNodeInfo(v netmap.NodeInfo) *RemoteHeadPrm {
|
||||
if p != nil {
|
||||
p.node = v
|
||||
}
|
||||
|
@ -71,7 +72,7 @@ func (h *RemoteHeader) Head(ctx context.Context, prm *RemoteHeadPrm) (*object.Ob
|
|||
|
||||
var info clientcore.NodeInfo
|
||||
|
||||
err = clientcore.NodeInfoFromRawNetmapElement(&info, prm.node)
|
||||
err = clientcore.NodeInfoFromRawNetmapElement(&info, netmapCore.Node(prm.node))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse client node info: %w", err)
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client"
|
||||
netmapCore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
|
||||
internalclient "github.com/nspcc-dev/neofs-node/pkg/services/object/internal/client"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object/util"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer"
|
||||
|
@ -36,7 +37,7 @@ type RemoteSender struct {
|
|||
|
||||
// RemotePutPrm groups remote put operation parameters.
|
||||
type RemotePutPrm struct {
|
||||
node *netmap.NodeInfo
|
||||
node netmap.NodeInfo
|
||||
|
||||
obj *object.Object
|
||||
}
|
||||
|
@ -95,7 +96,7 @@ func NewRemoteSender(keyStorage *util.KeyStorage, cons ClientConstructor) *Remot
|
|||
}
|
||||
|
||||
// WithNodeInfo sets information about the remote node.
|
||||
func (p *RemotePutPrm) WithNodeInfo(v *netmap.NodeInfo) *RemotePutPrm {
|
||||
func (p *RemotePutPrm) WithNodeInfo(v netmap.NodeInfo) *RemotePutPrm {
|
||||
if p != nil {
|
||||
p.node = v
|
||||
}
|
||||
|
@ -120,7 +121,7 @@ func (s *RemoteSender) PutObject(ctx context.Context, p *RemotePutPrm) error {
|
|||
clientConstructor: s.clientConstructor,
|
||||
}
|
||||
|
||||
err := clientcore.NodeInfoFromRawNetmapElement(&t.nodeInfo, p.node)
|
||||
err := clientcore.NodeInfoFromRawNetmapElement(&t.nodeInfo, netmapCore.Node(p.node))
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse client node info: %w", err)
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"testing"
|
||||
|
||||
clientcore "github.com/nspcc-dev/neofs-node/pkg/core/client"
|
||||
netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/network"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object/util"
|
||||
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
|
||||
|
@ -37,7 +38,7 @@ type testTraverserGenerator struct {
|
|||
}
|
||||
|
||||
type testPlacementBuilder struct {
|
||||
vectors map[string][]netmap.Nodes
|
||||
vectors map[string][][]netmap.NodeInfo
|
||||
}
|
||||
|
||||
type testClientCache struct {
|
||||
|
@ -73,7 +74,7 @@ func (g *testTraverserGenerator) generateTraverser(_ cid.ID, epoch uint64) (*pla
|
|||
)
|
||||
}
|
||||
|
||||
func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ *netmap.PlacementPolicy) ([]netmap.Nodes, error) {
|
||||
func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
|
||||
var addr oid.Address
|
||||
addr.SetContainer(cnr)
|
||||
|
||||
|
@ -86,7 +87,7 @@ func (p *testPlacementBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, _ *netmap
|
|||
return nil, errors.New("vectors for address not found")
|
||||
}
|
||||
|
||||
res := make([]netmap.Nodes, len(vs))
|
||||
res := make([][]netmap.NodeInfo, len(vs))
|
||||
copy(res, vs)
|
||||
|
||||
return res, nil
|
||||
|
@ -193,8 +194,8 @@ func TestGetLocalOnly(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func testNodeMatrix(t testing.TB, dim []int) ([]netmap.Nodes, [][]string) {
|
||||
mNodes := make([]netmap.Nodes, len(dim))
|
||||
func testNodeMatrix(t testing.TB, dim []int) ([][]netmap.NodeInfo, [][]string) {
|
||||
mNodes := make([][]netmap.NodeInfo, len(dim))
|
||||
mAddr := make([][]string, len(dim))
|
||||
|
||||
for i := range dim {
|
||||
|
@ -207,20 +208,20 @@ func testNodeMatrix(t testing.TB, dim []int) ([]netmap.Nodes, [][]string) {
|
|||
strconv.Itoa(60000+j),
|
||||
)
|
||||
|
||||
ni := netmap.NewNodeInfo()
|
||||
ni.SetAddresses(a)
|
||||
var ni netmap.NodeInfo
|
||||
ni.SetNetworkEndpoints(a)
|
||||
|
||||
var na network.AddressGroup
|
||||
|
||||
err := na.FromIterator(ni)
|
||||
err := na.FromIterator(netmapcore.Node(ni))
|
||||
require.NoError(t, err)
|
||||
|
||||
as[j] = network.StringifyGroup(na)
|
||||
|
||||
ns[j] = *ni
|
||||
ns[j] = ni
|
||||
}
|
||||
|
||||
mNodes[i] = netmap.NodesFromInfo(ns)
|
||||
mNodes[i] = ns
|
||||
mAddr[i] = as
|
||||
}
|
||||
|
||||
|
@ -232,15 +233,15 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
|
||||
placementDim := []int{2}
|
||||
|
||||
rs := make([]netmap.Replica, len(placementDim))
|
||||
rs := make([]netmap.ReplicaDescriptor, len(placementDim))
|
||||
for i := range placementDim {
|
||||
rs[i].SetCount(uint32(placementDim[i]))
|
||||
rs[i].SetNumberOfObjects(uint32(placementDim[i]))
|
||||
}
|
||||
|
||||
pp := netmap.NewPlacementPolicy()
|
||||
pp.SetReplicas(rs...)
|
||||
var pp netmap.PlacementPolicy
|
||||
pp.AddReplicas(rs...)
|
||||
|
||||
cnr := container.New(container.WithPolicy(pp))
|
||||
cnr := container.New(container.WithPolicy(&pp))
|
||||
id := container.CalculateID(cnr)
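Note: placement policies are likewise built from a zero value now — ReplicaDescriptor replaces Replica, SetNumberOfObjects replaces SetCount, and AddReplicas attaches the descriptors. A short sketch with an illustrative replica count:

var rd netmap.ReplicaDescriptor
rd.SetNumberOfObjects(3)

var pp netmap.PlacementPolicy
pp.AddReplicas(rd)

cnr := container.New(container.WithPolicy(&pp))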
|
||||
|
||||
newSvc := func(b *testPlacementBuilder, c *testClientCache) *Service {
|
||||
|
@ -278,7 +279,7 @@ func TestGetRemoteSmall(t *testing.T) {
|
|||
ns, as := testNodeMatrix(t, placementDim)
|
||||
|
||||
builder := &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns,
|
||||
},
|
||||
}
|
||||
|
@ -317,16 +318,16 @@ func TestGetFromPastEpoch(t *testing.T) {
|
|||
|
||||
placementDim := []int{2, 2}
|
||||
|
||||
rs := make([]netmap.Replica, len(placementDim))
|
||||
rs := make([]netmap.ReplicaDescriptor, len(placementDim))
|
||||
|
||||
for i := range placementDim {
|
||||
rs[i].SetCount(uint32(placementDim[i]))
|
||||
rs[i].SetNumberOfObjects(uint32(placementDim[i]))
|
||||
}
|
||||
|
||||
pp := netmap.NewPlacementPolicy()
|
||||
pp.SetReplicas(rs...)
|
||||
var pp netmap.PlacementPolicy
|
||||
pp.AddReplicas(rs...)
|
||||
|
||||
cnr := container.New(container.WithPolicy(pp))
|
||||
cnr := container.New(container.WithPolicy(&pp))
|
||||
idCnr := container.CalculateID(cnr)
|
||||
|
||||
var addr oid.Address
|
||||
|
@ -360,12 +361,12 @@ func TestGetFromPastEpoch(t *testing.T) {
|
|||
c: cnr,
|
||||
b: map[uint64]placement.Builder{
|
||||
curEpoch: &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns[:1],
|
||||
},
|
||||
},
|
||||
curEpoch - 1: &testPlacementBuilder{
|
||||
vectors: map[string][]netmap.Nodes{
|
||||
vectors: map[string][][]netmap.NodeInfo{
|
||||
addr.EncodeToString(): ns[1:],
|
||||
},
|
||||
},
|
||||
|
|
|
@ -43,7 +43,7 @@ func NewLocalPlacement(b placement.Builder, s netmap.AnnouncedKeys) placement.Bu
}
}

func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy *netmapSDK.PlacementPolicy) ([]netmapSDK.Nodes, error) {
func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)

@ -53,13 +53,13 @@ func (p *localPlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy *netmapS
for j := range vs[i] {
var addr network.AddressGroup

err := addr.FromIterator(vs[i][j])
err := addr.FromIterator(network.NodeEndpointsIterator(vs[i][j]))
if err != nil {
continue
}

if p.netmapKeys.IsLocalKey(vs[i][j].PublicKey()) {
return []netmapSDK.Nodes{{vs[i][j]}}, nil
return [][]netmapSDK.NodeInfo{{vs[i][j]}}, nil
}
}
}

@ -76,7 +76,7 @@ func NewRemotePlacementBuilder(b placement.Builder, s netmap.AnnouncedKeys) plac
}
}

func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy *netmapSDK.PlacementPolicy) ([]netmapSDK.Nodes, error) {
func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
vs, err := p.builder.BuildPlacement(cnr, obj, policy)
if err != nil {
return nil, fmt.Errorf("(%T) could not build object placement: %w", p, err)

@ -86,7 +86,7 @@ func (p *remotePlacement) BuildPlacement(cnr cid.ID, obj *oid.ID, policy *netmap
for j := 0; j < len(vs[i]); j++ {
var addr network.AddressGroup

err := addr.FromIterator(vs[i][j])
err := addr.FromIterator(network.NodeEndpointsIterator(vs[i][j]))
if err != nil {
continue
}

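In both placement builders above, a netmap.NodeInfo value no longer feeds network.AddressGroup.FromIterator directly; it is first wrapped in network.NodeEndpointsIterator. A small sketch of that conversion, assuming only the packages imported elsewhere in this diff (the helper name nodeAddressGroup is hypothetical):

    package example

    import (
        "github.com/nspcc-dev/neofs-node/pkg/network"
        netmapSDK "github.com/nspcc-dev/neofs-sdk-go/netmap"
    )

    // nodeAddressGroup resolves the endpoints announced by a node into an
    // AddressGroup, mirroring the wrapping used in the hunks above.
    func nodeAddressGroup(n netmapSDK.NodeInfo) (network.AddressGroup, error) {
        var addr network.AddressGroup
        err := addr.FromIterator(network.NodeEndpointsIterator(n))
        return addr, err
    }
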
@ -16,7 +16,7 @@ type netMapBuilder struct {
nmSrc netmap.Source
// mtx protects lastNm and containerCache fields.
mtx sync.Mutex
lastNm *netmapSDK.Netmap
lastNm *netmapSDK.NetMap
// containerCache caches container nodes by ID. It is used to skip `GetContainerNodes` invocation if
// neither netmap nor container has changed.
containerCache simplelru.LRUCache

@ -25,13 +25,13 @@ type netMapBuilder struct {
type netMapSrc struct {
netmap.Source

nm *netmapSDK.Netmap
nm *netmapSDK.NetMap
}

// defaultContainerCacheSize is the default size for the container cache.
const defaultContainerCacheSize = 10

func NewNetworkMapBuilder(nm *netmapSDK.Netmap) Builder {
func NewNetworkMapBuilder(nm *netmapSDK.NetMap) Builder {
cache, _ := simplelru.NewLRU(defaultContainerCacheSize, nil) // no error
return &netMapBuilder{
nmSrc: &netMapSrc{nm: nm},

@ -47,11 +47,11 @@ func NewNetworkMapSourceBuilder(nmSrc netmap.Source) Builder {
}
}

func (s *netMapSrc) GetNetMap(diff uint64) (*netmapSDK.Netmap, error) {
func (s *netMapSrc) GetNetMap(diff uint64) (*netmapSDK.NetMap, error) {
return s.nm, nil
}

func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p *netmapSDK.PlacementPolicy) ([]netmapSDK.Nodes, error) {
func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p netmapSDK.PlacementPolicy) ([][]netmapSDK.NodeInfo, error) {
nm, err := netmap.GetLatestNetworkMap(b.nmSrc)
if err != nil {
return nil, fmt.Errorf("could not get network map: %w", err)

@ -65,7 +65,7 @@ func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p *netmapSDK.Pla
raw, ok := b.containerCache.Get(string(binCnr))
b.mtx.Unlock()
if ok {
cn := raw.(netmapSDK.ContainerNodes)
cn := raw.([][]netmapSDK.NodeInfo)
return BuildObjectPlacement(nm, cn, obj)
}
} else {

@ -73,7 +73,7 @@ func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p *netmapSDK.Pla
b.mtx.Unlock()
}

cn, err := nm.GetContainerNodes(p, binCnr)
cn, err := nm.ContainerNodes(p, binCnr)
if err != nil {
return nil, fmt.Errorf("could not get container nodes: %w", err)
}

@ -85,15 +85,15 @@ func (b *netMapBuilder) BuildPlacement(cnr cid.ID, obj *oid.ID, p *netmapSDK.Pla
return BuildObjectPlacement(nm, cn, obj)
}

func BuildObjectPlacement(nm *netmapSDK.Netmap, cnrNodes netmapSDK.ContainerNodes, id *oid.ID) ([]netmapSDK.Nodes, error) {
func BuildObjectPlacement(nm *netmapSDK.NetMap, cnrNodes [][]netmapSDK.NodeInfo, id *oid.ID) ([][]netmapSDK.NodeInfo, error) {
if id == nil {
return cnrNodes.Replicas(), nil
return cnrNodes, nil
}

binObj := make([]byte, sha256.Size)
id.Encode(binObj)

on, err := nm.GetPlacementVectors(cnrNodes, binObj)
on, err := nm.PlacementVectors(cnrNodes, binObj)
if err != nil {
return nil, fmt.Errorf("could not get placement vectors for object: %w", err)
}

@ -102,8 +102,15 @@ func BuildObjectPlacement(nm *netmapSDK.Netmap, cnrNodes netmapSDK.ContainerNode
}

// FlattenNodes appends each row to the flat list.
func FlattenNodes(ns []netmapSDK.Nodes) netmapSDK.Nodes {
result := make(netmapSDK.Nodes, 0, len(ns))
func FlattenNodes(ns [][]netmapSDK.NodeInfo) []netmapSDK.NodeInfo {
var sz int

for i := range ns {
sz += len(ns[i])
}

result := make([]netmapSDK.NodeInfo, 0, sz)

for i := range ns {
result = append(result, ns[i]...)
}

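The builder changes above rename the NetMap lookups: GetContainerNodes becomes ContainerNodes and GetPlacementVectors becomes PlacementVectors, and the dedicated ContainerNodes type gives way to plain [][]netmap.NodeInfo. A condensed sketch of the two-step lookup, assuming the argument and return types shown in the hunks (the function placementFor is illustrative, not part of the commit):

    package example

    import (
        "fmt"

        netmapSDK "github.com/nspcc-dev/neofs-sdk-go/netmap"
    )

    // placementFor mirrors BuildObjectPlacement above: select container nodes
    // first, then derive per-object placement vectors from them.
    func placementFor(nm *netmapSDK.NetMap, p netmapSDK.PlacementPolicy, binCnr, binObj []byte) ([][]netmapSDK.NodeInfo, error) {
        cn, err := nm.ContainerNodes(p, binCnr)
        if err != nil {
            return nil, fmt.Errorf("could not get container nodes: %w", err)
        }

        on, err := nm.PlacementVectors(cn, binObj)
        if err != nil {
            return nil, fmt.Errorf("could not get placement vectors for object: %w", err)
        }

        return on, nil
    }
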
@ -20,7 +20,7 @@ type Builder interface {
//
// Must return all container nodes if object identifier
// is nil.
BuildPlacement(cid.ID, *oid.ID, *netmap.PlacementPolicy) ([]netmap.Nodes, error)
BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error)
}

// Option represents placement traverser option.

@ -31,7 +31,7 @@ type Option func(*cfg)
type Traverser struct {
mtx *sync.RWMutex

vectors []netmap.Nodes
vectors [][]netmap.NodeInfo

rem []int
}

@ -78,7 +78,7 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
return nil, fmt.Errorf("%s: %w", invalidOptsMsg, errNilPolicy)
}

ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, cfg.policy)
ns, err := cfg.builder.BuildPlacement(cfg.cnr, cfg.obj, *cfg.policy)
if err != nil {
return nil, fmt.Errorf("could not build placement: %w", err)
}

@ -88,12 +88,12 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
ns = flatNodes(ns)
rem = []int{int(*cfg.flatSuccess)}
} else {
rs := cfg.policy.Replicas()
rem = make([]int, 0, len(rs))
replNum := cfg.policy.NumberOfReplicas()
rem = make([]int, 0, replNum)

for i := range rs {
for i := 0; i < replNum; i++ {
if cfg.trackCopies {
rem = append(rem, int(rs[i].Count()))
rem = append(rem, int(cfg.policy.ReplicaNumberByIndex(i)))
} else {
rem = append(rem, -1)
}

@ -107,18 +107,18 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
}, nil
}

func flatNodes(ns []netmap.Nodes) []netmap.Nodes {
func flatNodes(ns [][]netmap.NodeInfo) [][]netmap.NodeInfo {
sz := 0
for i := range ns {
sz += len(ns[i])
}

flat := make(netmap.Nodes, 0, sz)
flat := make([]netmap.NodeInfo, 0, sz)
for i := range ns {
flat = append(flat, ns[i]...)
}

return []netmap.Nodes{flat}
return [][]netmap.NodeInfo{flat}
}

// Node is a descriptor of storage node with information required for intra-container communication.

@ -161,7 +161,7 @@ func (t *Traverser) Next() []Node {
nodes := make([]Node, count)

for i := 0; i < count; i++ {
err := nodes[i].addresses.FromIterator(t.vectors[0][i])
err := nodes[i].addresses.FromIterator(network.NodeEndpointsIterator(t.vectors[0][i]))
if err != nil {
return nil
}

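NewTraverser above switches from iterating policy.Replicas() to indexed access: NumberOfReplicas reports how many replica descriptors the policy holds, and ReplicaNumberByIndex returns the object count of each one. A sketch of that access pattern in isolation, assuming the accessors exactly as used in the hunk (the helper name replicaCounts is hypothetical):

    package example

    import "github.com/nspcc-dev/neofs-sdk-go/netmap"

    // replicaCounts collects per-descriptor object counts using the indexed
    // accessors introduced by the SDK upgrade.
    func replicaCounts(p netmap.PlacementPolicy) []int {
        n := p.NumberOfReplicas()
        rem := make([]int, 0, n)

        for i := 0; i < n; i++ {
            rem = append(rem, int(p.ReplicaNumberByIndex(i)))
        }

        return rem
    }
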
@ -4,6 +4,7 @@ import (
"strconv"
"testing"

netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap"
"github.com/nspcc-dev/neofs-node/pkg/network"
"github.com/nspcc-dev/neofs-sdk-go/container"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"

@ -13,24 +14,24 @@ import (
)

type testBuilder struct {
vectors []netmap.Nodes
vectors [][]netmap.NodeInfo
}

func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, *netmap.PlacementPolicy) ([]netmap.Nodes, error) {
func (b testBuilder) BuildPlacement(cid.ID, *oid.ID, netmap.PlacementPolicy) ([][]netmap.NodeInfo, error) {
return b.vectors, nil
}

func testNode(v uint32) (n netmap.NodeInfo) {
n.SetAddresses("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))
n.SetNetworkEndpoints("/ip4/0.0.0.0/tcp/" + strconv.Itoa(int(v)))

return n
}

func copyVectors(v []netmap.Nodes) []netmap.Nodes {
vc := make([]netmap.Nodes, 0, len(v))
func copyVectors(v [][]netmap.NodeInfo) [][]netmap.NodeInfo {
vc := make([][]netmap.NodeInfo, 0, len(v))

for i := range v {
ns := make(netmap.Nodes, len(v[i]))
ns := make([]netmap.NodeInfo, len(v[i]))
copy(ns, v[i])

vc = append(vc, ns)

@ -39,9 +40,9 @@ func copyVectors(v []netmap.Nodes) []netmap.Nodes {
return vc
}

func testPlacement(t *testing.T, ss, rs []int) ([]netmap.Nodes, *container.Container) {
nodes := make([]netmap.Nodes, 0, len(rs))
replicas := make([]netmap.Replica, 0, len(rs))
func testPlacement(t *testing.T, ss, rs []int) ([][]netmap.NodeInfo, *container.Container) {
nodes := make([][]netmap.NodeInfo, 0, len(rs))
replicas := make([]netmap.ReplicaDescriptor, 0, len(rs))
num := uint32(0)

for i := range ss {

@ -52,24 +53,24 @@ func testPlacement(t *testing.T, ss, rs []int) ([]netmap.Nodes, *container.Conta
num++
}

nodes = append(nodes, netmap.NodesFromInfo(ns))
nodes = append(nodes, ns)

var s netmap.Replica
s.SetCount(uint32(rs[i]))
var rd netmap.ReplicaDescriptor
rd.SetNumberOfObjects(uint32(rs[i]))

replicas = append(replicas, s)
replicas = append(replicas, rd)
}

policy := new(netmap.PlacementPolicy)
policy.SetReplicas(replicas...)
policy.AddReplicas(replicas...)

return nodes, container.New(container.WithPolicy(policy))
}

func assertSameAddress(t *testing.T, ni *netmap.NodeInfo, addr network.AddressGroup) {
func assertSameAddress(t *testing.T, ni netmap.NodeInfo, addr network.AddressGroup) {
var netAddr network.AddressGroup

err := netAddr.FromIterator(ni)
err := netAddr.FromIterator(netmapcore.Node(ni))
require.NoError(t, err)
require.True(t, netAddr.Intersects(addr))
}

@ -96,7 +97,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Len(t, addrs, len(nodes[i]))

for j, n := range nodes[i] {
assertSameAddress(t, n.NodeInfo, addrs[j].Addresses())
assertSameAddress(t, n, addrs[j].Addresses())
}
}

@ -127,7 +128,7 @@ func TestTraverserObjectScenarios(t *testing.T) {

var n network.AddressGroup

err = n.FromIterator(nodes[1][0])
err = n.FromIterator(netmapcore.Node(nodes[1][0]))
require.NoError(t, err)

require.Equal(t, []Node{{addresses: n}}, tr.Next())

@ -153,7 +154,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
require.Len(t, addrs, replicas[curVector])

for j := range addrs {
assertSameAddress(t, nodes[curVector][i+j].NodeInfo, addrs[j].Addresses())
assertSameAddress(t, nodes[curVector][i+j], addrs[j].Addresses())
}
}

@ -185,7 +186,7 @@ func TestTraverserObjectScenarios(t *testing.T) {
tr, err := NewTraverser(
ForContainer(cnr),
UseBuilder(&testBuilder{
vectors: []netmap.Nodes{{nodes[1][1]}}, // single node (local)
vectors: [][]netmap.NodeInfo{{nodes[1][1]}}, // single node (local)
}),
SuccessAfter(1),
)

@ -45,9 +45,17 @@ func (p *Policer) processObject(ctx context.Context, addr oid.Address) {
}

policy := cnr.PlacementPolicy()
if policy == nil {
p.log.Error("missing placement policy in container",
zap.Stringer("cid", idCnr),
)

return
}

obj := addr.Object()

nn, err := p.placementBuilder.BuildPlacement(idCnr, &obj, policy)
nn, err := p.placementBuilder.BuildPlacement(idCnr, &obj, *policy)
if err != nil {
p.log.Error("could not build placement vector for object",
zap.String("error", err.Error()),

@ -56,7 +64,6 @@ func (p *Policer) processObject(ctx context.Context, addr oid.Address) {
return
}

replicas := policy.Replicas()
c := &processPlacementContext{
Context: ctx,
}

@ -76,7 +83,7 @@ func (p *Policer) processObject(ctx context.Context, addr oid.Address) {
default:
}

p.processNodes(c, addr, nn[i], replicas[i].Count(), checkedNodes)
p.processNodes(c, addr, nn[i], policy.ReplicaNumberByIndex(i), checkedNodes)
}

if !c.needLocalCopy {

@ -95,7 +102,7 @@ type processPlacementContext struct {
}

func (p *Policer) processNodes(ctx *processPlacementContext, addr oid.Address,
nodes netmap.Nodes, shortage uint32, checkedNodes nodeCache) {
nodes []netmap.NodeInfo, shortage uint32, checkedNodes nodeCache) {
prm := new(headsvc.RemoteHeadPrm).WithObjectAddress(addr)

for i := 0; shortage > 0 && i < len(nodes); i++ {

@ -110,7 +117,8 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addr oid.Address,

shortage--
} else {
if hasReplica, checked := checkedNodes[nodes[i].ID]; checked {
nodeID := nodes[i].Hash()
if hasReplica, checked := checkedNodes[nodeID]; checked {
if hasReplica {
// node already contains replica, no need to replicate
nodes = append(nodes[:i], nodes[i+1:]...)

@ -123,7 +131,7 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addr oid.Address,

callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)

_, err := p.remoteHeader.Head(callCtx, prm.WithNodeInfo(nodes[i].NodeInfo))
_, err := p.remoteHeader.Head(callCtx, prm.WithNodeInfo(nodes[i]))

cancel()

@ -133,7 +141,7 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addr oid.Address,
}

if client.IsErrObjectNotFound(err) {
checkedNodes[nodes[i].ID] = false
checkedNodes[nodeID] = false
continue
}

@ -144,7 +152,7 @@ func (p *Policer) processNodes(ctx *processPlacementContext, addr oid.Address,
)
} else {
shortage--
checkedNodes[nodes[i].ID] = true
checkedNodes[nodeID] = true
}
}

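Both the policer above and the replicator below stop using the removed nodes[i].ID field and key their bookkeeping on NodeInfo.Hash() instead. A tiny sketch of that pattern, assuming Hash() returns a uint64 as is conventional for HRW-sortable node descriptors; the map type is illustrative only, the real nodeCache type is defined elsewhere in the repository:

    package example

    import netmapSDK "github.com/nspcc-dev/neofs-sdk-go/netmap"

    // markChecked records a replica-check result for a node, keyed by its hash
    // (assumed uint64) rather than the removed ID field.
    func markChecked(cache map[uint64]bool, n netmapSDK.NodeInfo, hasReplica bool) {
        cache[n.Hash()] = hasReplica
    }
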
@ -52,7 +52,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task *Task, res TaskResult)

callCtx, cancel := context.WithTimeout(ctx, p.putTimeout)

err = p.remoteSender.PutObject(callCtx, prm.WithNodeInfo(task.nodes[i].NodeInfo))
err = p.remoteSender.PutObject(callCtx, prm.WithNodeInfo(task.nodes[i]))

cancel()

@ -65,7 +65,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task *Task, res TaskResult)

task.quantity--

res.SubmitSuccessfulReplication(task.nodes[i].ID)
res.SubmitSuccessfulReplication(task.nodes[i].Hash())
}
}
}

@ -11,7 +11,7 @@ type Task struct {

addr oid.Address

nodes netmap.Nodes
nodes []netmap.NodeInfo
}

// WithCopiesNumber sets number of copies to replicate.

@ -33,7 +33,7 @@ func (t *Task) WithObjectAddress(v oid.Address) *Task {
}

// WithNodes sets a list of potential object holders.
func (t *Task) WithNodes(v netmap.Nodes) *Task {
func (t *Task) WithNodes(v []netmap.NodeInfo) *Task {
if t != nil {
t.nodes = v
}

@ -56,6 +56,21 @@ func NewManagerBuilder(prm ManagersPrm, opts ...MngOption) ManagerBuilder {
}
}

// implements Server on apiNetmap.NodeInfo
type nodeServer apiNetmap.NodeInfo

func (x nodeServer) PublicKey() []byte {
return (apiNetmap.NodeInfo)(x).PublicKey()
}

func (x nodeServer) IterateAddresses(f func(string) bool) {
(apiNetmap.NodeInfo)(x).IterateNetworkEndpoints(f)
}

func (x nodeServer) NumberOfAddresses() int {
return (apiNetmap.NodeInfo)(x).NumberOfNetworkEndpoints()
}

// BuildManagers sorts nodes in NetMap with HRW algorithms and
// takes the next node after the current one as the only manager.
func (mb *managerBuilder) BuildManagers(epoch uint64, p reputation.PeerID) ([]ServerInfo, error) {

@ -69,10 +84,12 @@ func (mb *managerBuilder) BuildManagers(epoch uint64, p reputation.PeerID) ([]Se
return nil, err
}

// make a copy to keep order consistency of the origin netmap after sorting
nodes := make([]apiNetmap.Node, len(nm.Nodes))
nmNodes := nm.Nodes()

copy(nodes, nm.Nodes)
// make a copy to keep order consistency of the origin netmap after sorting
nodes := make([]apiNetmap.NodeInfo, len(nmNodes))

copy(nodes, nmNodes)

hrw.SortSliceByValue(nodes, epoch)

@ -84,7 +101,7 @@ func (mb *managerBuilder) BuildManagers(epoch uint64, p reputation.PeerID) ([]Se
managerIndex = 0
}

return []ServerInfo{nodes[managerIndex]}, nil
return []ServerInfo{nodeServer(nodes[managerIndex])}, nil
}
}

@ -8,104 +8,63 @@ import (
"github.com/nspcc-dev/neofs-sdk-go/netmap"
)

const (
pairSeparator = "/"
keyValueSeparator = ":"
)
const keyValueSeparator = ":"

var (
errEmptyChain = errors.New("empty attribute chain")
errNonUniqueBucket = errors.New("attributes must contain unique keys")
errUnexpectedKey = errors.New("attributes contain unexpected key")
)

// ParseV2Attributes parses strings like "K1:V1/K2:V2/K3:V3" into netmap
// attributes. Supports escaped symbols "\:", "\/" and "\\".
func ParseV2Attributes(attrs []string, excl []string) ([]netmap.NodeAttribute, error) {
restricted := make(map[string]struct{}, len(excl))
for i := range excl {
restricted[excl[i]] = struct{}{}
}

cache := make(map[string]*netmap.NodeAttribute, len(attrs))
result := make([]netmap.NodeAttribute, 0, len(attrs))
// ReadNodeAttributes parses node attributes from list of string in "Key:Value" format
// and writes them into netmap.NodeInfo instance. Supports escaped symbols
// "\:", "\/" and "\\".
func ReadNodeAttributes(dst *netmap.NodeInfo, attrs []string) error {
cache := make(map[string]struct{}, len(attrs))

for i := range attrs {
line := strings.Trim(attrs[i], pairSeparator)
line = replaceEscaping(line, false) // replaced escaped symbols with non-printable symbols
chain := strings.Split(line, pairSeparator)
if len(chain) == 0 {
return nil, errEmptyChain
line := replaceEscaping(attrs[i], false) // replaced escaped symbols with non-printable symbols

words := strings.Split(line, keyValueSeparator)
if len(words) != 2 {
return errors.New("missing attribute key and/or value")
}

var parentKey string // backtrack parents in next pairs

for j := range chain {
pair := strings.Split(chain[j], keyValueSeparator)
if len(pair) != 2 {
return nil, fmt.Errorf("incorrect attribute pair %s", chain[j])
_, ok := cache[words[0]]
if ok {
return fmt.Errorf("duplicated keys %s", words[0])
}

key := pair[0]
value := pair[1]

attribute, present := cache[key]
if present && attribute.Value() != value {
return nil, errNonUniqueBucket
}

if _, ok := restricted[key]; ok {
return nil, errUnexpectedKey
}

if !present {
result = append(result, netmap.NodeAttribute{})
attribute = &result[len(result)-1]
cache[key] = attribute
cache[words[0]] = struct{}{}

// replace non-printable symbols with escaped symbols without escape character
key = replaceEscaping(key, true)
value = replaceEscaping(value, true)
words[0] = replaceEscaping(words[0], true)
words[1] = replaceEscaping(words[1], true)
fmt.Println(words[0], words[1])

attribute.SetKey(key)
attribute.SetValue(value)
if words[0] == "" {
return errors.New("empty key")
} else if words[1] == "" {
return errors.New("empty value")
}

if parentKey != "" {
parentKeys := attribute.ParentKeys()
if !hasString(parentKeys, parentKey) {
attribute.SetParentKeys(append(parentKeys, parentKey)...)
}
dst.SetAttribute(words[0], words[1])
}

parentKey = key
}
}

return result, nil
return nil
}

func replaceEscaping(target string, rollback bool) (s string) {
const escChar = `\`

var (
oldPairSep = escChar + pairSeparator
oldKVSep = escChar + keyValueSeparator
oldEsc = escChar + escChar
newPairSep = string(uint8(1))
newKVSep = string(uint8(2))
newEsc = string(uint8(3))
)

if rollback {
oldPairSep, oldKVSep, oldEsc = newPairSep, newKVSep, newEsc
newPairSep = pairSeparator
oldKVSep, oldEsc = newKVSep, newEsc
newKVSep = keyValueSeparator
newEsc = escChar
}

s = strings.ReplaceAll(target, oldEsc, newEsc)
s = strings.ReplaceAll(s, oldPairSep, newPairSep)
s = strings.ReplaceAll(s, oldKVSep, newKVSep)

return

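The rewritten helper above no longer returns a slice of NodeAttribute values; it parses plain "Key:Value" pairs and writes them straight into a netmap.NodeInfo. A short usage sketch based only on the functions visible in this diff; the wrapper function, package name and sample attribute strings are illustrative:

    package example

    import (
        "github.com/nspcc-dev/neofs-node/pkg/util/attributes"
        "github.com/nspcc-dev/neofs-sdk-go/netmap"
    )

    // applyAttributes parses CLI-style "Key:Value" strings (escapes such as "\:"
    // are supported) and stores them on the node descriptor.
    func applyAttributes(node *netmap.NodeInfo, raw []string) error {
        // e.g. raw = []string{"StorageType:HDD", "Price:100"}
        return attributes.ReadNodeAttributes(node, raw)
    }
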
@ -4,117 +4,96 @@ import (
"testing"

"github.com/nspcc-dev/neofs-node/pkg/util/attributes"
"github.com/nspcc-dev/neofs-sdk-go/netmap"
"github.com/stretchr/testify/require"
)

func TestParseV2Attributes(t *testing.T) {
t.Run("empty", func(t *testing.T) {
attrs, err := attributes.ParseV2Attributes(nil, nil)
func testAttributeMap(t *testing.T, mSrc, mExp map[string]string) {
var node netmap.NodeInfo

s := make([]string, 0, len(mSrc))
for k, v := range mSrc {
s = append(s, k+":"+v)
}

err := attributes.ReadNodeAttributes(&node, s)
require.NoError(t, err)
require.Len(t, attrs, 0)

if mExp == nil {
mExp = mSrc
}

node.IterateAttributes(func(key, value string) {
v, ok := mExp[key]
require.True(t, ok)
require.Equal(t, value, v)
delete(mExp, key)
})

t.Run("non unique bucket keys", func(t *testing.T) {
good := []string{
"StorageType:HDD/RPM:7200",
"StorageType:HDD/SMR:True",
}
_, err := attributes.ParseV2Attributes(good, nil)
require.NoError(t, err)
require.Empty(t, mExp)
}

bad := append(good, "StorageType:SSD/Cell:QLC")
_, err = attributes.ParseV2Attributes(bad, nil)
func TestParseV2Attributes(t *testing.T) {
t.Run("empty", func(t *testing.T) {
var node netmap.NodeInfo
err := attributes.ReadNodeAttributes(&node, nil)
require.NoError(t, err)
require.Zero(t, node.NumberOfAttributes())
})

t.Run("empty key and/or value", func(t *testing.T) {
var node netmap.NodeInfo
err := attributes.ReadNodeAttributes(&node, []string{
":HDD",
})
require.Error(t, err)

err = attributes.ReadNodeAttributes(&node, []string{
"StorageType:",
})
require.Error(t, err)

err = attributes.ReadNodeAttributes(&node, []string{
":",
})
require.Error(t, err)
})

t.Run("non-unique keys", func(t *testing.T) {
var node netmap.NodeInfo
err := attributes.ReadNodeAttributes(&node, []string{
"StorageType:HDD",
"StorageType:HDD",
})
require.Error(t, err)
})

t.Run("malformed", func(t *testing.T) {
_, err := attributes.ParseV2Attributes([]string{"..."}, nil)
var node netmap.NodeInfo
err := attributes.ReadNodeAttributes(&node, []string{"..."})
require.Error(t, err)

_, err = attributes.ParseV2Attributes([]string{"a:b", ""}, nil)
err = attributes.ReadNodeAttributes(&node, []string{"a:b", ""})
require.Error(t, err)

_, err = attributes.ParseV2Attributes([]string{"//"}, nil)
require.Error(t, err)
})

t.Run("unexpected", func(t *testing.T) {
unexpectedBucket := []string{
"Location:Europe/City:Moscow",
"Price:100",
}
_, err := attributes.ParseV2Attributes(unexpectedBucket, []string{"Price"})
err = attributes.ReadNodeAttributes(&node, []string{"//"})
require.Error(t, err)
})

t.Run("correct", func(t *testing.T) {
from := []string{
"/Location:Europe/Country:Sweden/City:Stockholm",
"/StorageType:HDD/RPM:7200",
}
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)
require.Len(t, attrs, 5)
testAttributeMap(t, map[string]string{
"Location": "Europe",
"StorageType": "HDD",
}, nil)
})

t.Run("escape characters", func(t *testing.T) {
from := []string{
`/K\:ey1:V\/alue\\/Ke\/y2:Va\:lue`,
}
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)
require.Equal(t, `K:ey1`, attrs[0].Key())
require.Equal(t, `V/alue\`, attrs[0].Value())
require.Equal(t, `Ke/y2`, attrs[1].Key())
require.Equal(t, `Va:lue`, attrs[1].Value())
testAttributeMap(t, map[string]string{
`K\:ey1`: `V\/alue`,
`Ke\/y2`: `Va\:lue`,
}, map[string]string{
`K:ey1`: `V\/alue`,
`Ke\/y2`: `Va:lue`,
})

t.Run("same attributes", func(t *testing.T) {
from := []string{"/a:b", "/a:b"}
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)
require.Len(t, attrs, 1)

t.Run("with escape characters", func(t *testing.T) {
from = []string{`/a\::b\/`, `/a\::b\/`}
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)
require.Len(t, attrs, 1)
})
})

t.Run("multiple parents", func(t *testing.T) {
from := []string{
"/parent1:x/child:x",
"/parent2:x/child:x",
"/parent2:x/child:x/subchild:x",
}
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)

var flag bool
for _, attr := range attrs {
if attr.Key() == "child" {
flag = true
require.Equal(t, []string{"parent1", "parent2"}, attr.ParentKeys())
}
}
require.True(t, flag)
})

t.Run("consistent order in chain", func(t *testing.T) {
from := []string{"/a:1/b:2/c:3"}

for i := 0; i < 10000; i++ {
attrs, err := attributes.ParseV2Attributes(from, nil)
require.NoError(t, err)

require.Equal(t, "a", attrs[0].Key())
require.Equal(t, "1", attrs[0].Value())
require.Equal(t, "b", attrs[1].Key())
require.Equal(t, "2", attrs[1].Value())
require.Equal(t, "c", attrs[2].Key())
require.Equal(t, "3", attrs[2].Value())
}
})
}