diff --git a/cmd/neofs-node/defaults.go b/cmd/neofs-node/defaults.go
deleted file mode 100644
index dfe26eb52..000000000
--- a/cmd/neofs-node/defaults.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package main
-
-import (
-	"time"
-
-	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
-	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/morph"
-	"github.com/spf13/viper"
-)
-
-func setDefaults(v *viper.Viper) {
-	// Logger section
-	{
-		v.SetDefault("logger.level", "debug")
-		v.SetDefault("logger.format", "console")
-		v.SetDefault("logger.trace_level", "fatal")
-		v.SetDefault("logger.no_disclaimer", false) // to disable app_name and app_version
-
-		v.SetDefault("logger.sampling.initial", 1000)    // todo: add description
-		v.SetDefault("logger.sampling.thereafter", 1000) // todo: add description
-	}
-
-	// Transport section
-	{
-		v.SetDefault("transport.attempts_count", 5)
-		v.SetDefault("transport.attempts_ttl", "30s")
-	}
-
-	// Peers section
-	{
-		v.SetDefault("peers.metrics_timeout", "5s")
-		v.SetDefault("peers.connections_ttl", "30s")
-		v.SetDefault("peers.connections_idle", "30s")
-		v.SetDefault("peers.keep_alive.ttl", "30s")
-		v.SetDefault("peers.keep_alive.ping", "100ms")
-	}
-
-	// Muxer session
-	{
-		v.SetDefault("muxer.http.read_buffer_size", 0)
-		v.SetDefault("muxer.http.write_buffer_size", 0)
-		v.SetDefault("muxer.http.read_timeout", 0)
-		v.SetDefault("muxer.http.write_timeout", 0)
-	}
-
-	// Node section
-	{
-		v.SetDefault("node.proto", "tcp") // tcp or udp
-		v.SetDefault("node.address", ":8080")
-		v.SetDefault("node.shutdown_ttl", "30s")
-		v.SetDefault("node.private_key", "keys/node_00.key")
-
-		v.SetDefault("node.grpc.logging", true)
-		v.SetDefault("node.grpc.metrics", true)
-		v.SetDefault("node.grpc.billing", true)
-
-		// Contains public keys, which can send requests to state.DumpConfig
-		// for now, in the future, should be replaced with ACL or something else.
- v.SetDefault("node.rpc.owners", []string{ - // By default we add user.key - // TODO should be removed before public release: - // or add into default Dockerfile `NEOFS_NODE_RPC_OWNERS_0=` - "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a", - }) - } - - // Object section - { - v.SetDefault("object.max_processing_size", 100) // size in MB, use 0 to remove restriction - v.SetDefault("object.workers_count", 5) - v.SetDefault("object.assembly", true) - v.SetDefault("object.window_size", 3) - - v.SetDefault("object.transformers.payload_limiter.max_payload_size", 5000) // size in KB - - // algorithm used for salt applying in range hash, for now only xor is available - v.SetDefault("object.salitor", "xor") - - // set true to check container ACL rules - v.SetDefault("object.check_acl", true) - - v.SetDefault("object.dial_timeout", "500ms") - rpcs := []string{"put", "get", "delete", "head", "search", "range", "range_hash"} - for i := range rpcs { - v.SetDefault("object."+rpcs[i]+".timeout", "5s") - v.SetDefault("object."+rpcs[i]+".log_errs", false) - } - } - - // Replication section - { - v.SetDefault("replication.manager.pool_size", 100) - v.SetDefault("replication.manager.pool_expansion_rate", 0.1) - v.SetDefault("replication.manager.read_pool_interval", "500ms") - v.SetDefault("replication.manager.push_task_timeout", "1s") - v.SetDefault("replication.manager.placement_honorer_enabled", true) - v.SetDefault("replication.manager.capacities.replicate", 1) - v.SetDefault("replication.manager.capacities.restore", 1) - v.SetDefault("replication.manager.capacities.garbage", 1) - - v.SetDefault("replication.placement_honorer.chan_capacity", 1) - v.SetDefault("replication.placement_honorer.result_timeout", "1s") - v.SetDefault("replication.placement_honorer.timeouts.put", "5s") - v.SetDefault("replication.placement_honorer.timeouts.get", "5s") - - v.SetDefault("replication.location_detector.chan_capacity", 1) - v.SetDefault("replication.location_detector.result_timeout", "1s") - v.SetDefault("replication.location_detector.timeouts.search", "5s") - - v.SetDefault("replication.storage_validator.chan_capacity", 1) - v.SetDefault("replication.storage_validator.result_timeout", "1s") - v.SetDefault("replication.storage_validator.salt_size", 64) // size in bytes - v.SetDefault("replication.storage_validator.max_payload_range_size", 64) // size in bytes - v.SetDefault("replication.storage_validator.payload_range_count", 3) - v.SetDefault("replication.storage_validator.salitor", "xor") - v.SetDefault("replication.storage_validator.timeouts.get", "5s") - v.SetDefault("replication.storage_validator.timeouts.head", "5s") - v.SetDefault("replication.storage_validator.timeouts.range_hash", "5s") - - v.SetDefault("replication.replicator.chan_capacity", 1) - v.SetDefault("replication.replicator.result_timeout", "1s") - v.SetDefault("replication.replicator.timeouts.put", "5s") - - v.SetDefault("replication.restorer.chan_capacity", 1) - v.SetDefault("replication.restorer.result_timeout", "1s") - v.SetDefault("replication.restorer.timeouts.get", "5s") - v.SetDefault("replication.restorer.timeouts.head", "5s") - } - - // PPROF section - { - v.SetDefault("pprof.enabled", true) - v.SetDefault("pprof.address", ":6060") - v.SetDefault("pprof.shutdown_ttl", "10s") - // v.SetDefault("pprof.read_timeout", "10s") - // v.SetDefault("pprof.read_header_timeout", "10s") - // v.SetDefault("pprof.write_timeout", "10s") - // v.SetDefault("pprof.idle_timeout", "10s") - // v.SetDefault("pprof.max_header_bytes", 1024) - 
} - - // Metrics section - { - v.SetDefault("metrics.enabled", true) - v.SetDefault("metrics.address", ":8090") - v.SetDefault("metrics.shutdown_ttl", "10s") - // v.SetDefault("metrics.read_header_timeout", "10s") - // v.SetDefault("metrics.write_timeout", "10s") - // v.SetDefault("metrics.idle_timeout", "10s") - // v.SetDefault("metrics.max_header_bytes", 1024) - } - - // Workers section - { - workers := []string{ - "peers", - "boot", - "replicator", - "metrics", - "event_listener", - } - - for i := range workers { - v.SetDefault("workers."+workers[i]+".immediately", true) - v.SetDefault("workers."+workers[i]+".disabled", false) - // v.SetDefault("workers."+workers[i]+".timer", "5s") // run worker every 5sec and reset timer after job - // v.SetDefault("workers."+workers[i]+".ticker", "5s") // run worker every 5sec - } - } - - // Morph section - { - - // Endpoint - v.SetDefault( - morph.EndpointOptPath(), - "http://morph_chain.localtest.nspcc.ru:30333", - ) - - // Dial timeout - v.SetDefault( - morph.DialTimeoutOptPath(), - 5*time.Second, - ) - - v.SetDefault( - morph.MagicNumberOptPath(), - uint32(netmode.PrivNet), - ) - - { // Event listener - // Endpoint - v.SetDefault( - morph.ListenerEndpointOptPath(), - "ws://morph_chain.localtest.nspcc.ru:30333/ws", - ) - - // Dial timeout - v.SetDefault( - morph.ListenerDialTimeoutOptPath(), - 5*time.Second, - ) - } - - { // Common parameters - for _, name := range morph.ContractNames { - // Script hash - v.SetDefault( - morph.ScriptHashOptPath(name), - "c77ecae9773ad0c619ad59f7f2dd6f585ddc2e70", // LE - ) - - // Invocation fee - v.SetDefault( - morph.InvocationFeeOptPath(name), - 0, - ) - } - } - - { // Container - // Set EACL method name - v.SetDefault( - morph.ContainerContractSetEACLOptPath(), - "SetEACL", - ) - - // Get EACL method name - v.SetDefault( - morph.ContainerContractEACLOptPath(), - "EACL", - ) - - // Put method name - v.SetDefault( - morph.ContainerContractPutOptPath(), - "Put", - ) - - // Get method name - v.SetDefault( - morph.ContainerContractGetOptPath(), - "Get", - ) - - // Delete method name - v.SetDefault( - morph.ContainerContractDelOptPath(), - "Delete", - ) - - // List method name - v.SetDefault( - morph.ContainerContractListOptPath(), - "List", - ) - } - - { // Netmap - // AddPeer method name - v.SetDefault( - morph.NetmapContractAddPeerOptPath(), - "AddPeer", - ) - - // New epoch method name - v.SetDefault( - morph.NetmapContractNewEpochOptPath(), - "NewEpoch", - ) - - // Netmap method name - v.SetDefault( - morph.NetmapContractNetmapOptPath(), - "Netmap", - ) - - // Update state method name - v.SetDefault( - morph.NetmapContractUpdateStateOptPath(), - "UpdateState", - ) - - // IR list method name - v.SetDefault( - morph.NetmapContractIRListOptPath(), - "InnerRingList", - ) - - // New epoch event type - v.SetDefault( - morph.ContractEventOptPath( - morph.NetmapContractName, - morph.NewEpochEventType, - ), - "NewEpoch", - ) - } - - { // Balance - // balanceOf method name - v.SetDefault( - morph.BalanceContractBalanceOfOptPath(), - "balanceOf", - ) - - // decimals method name - v.SetDefault( - morph.BalanceContractDecimalsOfOptPath(), - "decimals", - ) - } - } -} diff --git a/cmd/neofs-node/main.go b/cmd/neofs-node/main.go index 4e00169ad..ea939f56b 100644 --- a/cmd/neofs-node/main.go +++ b/cmd/neofs-node/main.go @@ -1,146 +1,11 @@ package main import ( - "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "flag" - "os" - "time" - - "github.com/nspcc-dev/neofs-api-go/service" - state2 
"github.com/nspcc-dev/neofs-api-go/state" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/config" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/worker" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/node" - "github.com/nspcc-dev/neofs-node/misc" - "github.com/nspcc-dev/neofs-node/pkg/network/muxer" - statesrv "github.com/nspcc-dev/neofs-node/pkg/network/transport/state/grpc" - "github.com/nspcc-dev/neofs-node/pkg/util/profiler" - "github.com/pkg/errors" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" - "google.golang.org/grpc" + "github.com/nspcc-dev/neofs-node/pkg/util/grace" ) -type params struct { - dig.In - - Debug profiler.Profiler `optional:"true"` - Metric profiler.Metrics `optional:"true"` - Worker worker.Workers `optional:"true"` - Muxer muxer.Mux - Logger *zap.Logger -} - -var ( - healthCheck bool - configFile string -) - -func runner(ctx context.Context, p params) error { - // create combined service, that would start/stop all - svc := fix.NewServices(p.Debug, p.Metric, p.Muxer, p.Worker) - - p.Logger.Info("start services") - svc.Start(ctx) - - <-ctx.Done() - - p.Logger.Info("stop services") - svc.Stop() - - return nil -} - -func check(err error) { - if err != nil { - panic(err) - } -} - -// FIXME: this is a copypaste from node settings constructor -func keyFromCfg(v *viper.Viper) (*ecdsa.PrivateKey, error) { - switch key := v.GetString("node.private_key"); key { - case "": - return nil, errors.New("`node.private_key` could not be empty") - case "generated": - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - default: - return crypto.LoadPrivateKey(key) - } -} - -func runHealthCheck() { - if !healthCheck { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - cfg, err := config.NewConfig(config.Params{ - File: configFile, - Prefix: misc.Prefix, - Name: misc.NodeName, - Version: misc.Version, - - AppDefaults: setDefaults, - }) - check(err) - - addr := cfg.GetString("node.address") - - key, err := keyFromCfg(cfg) - if err != nil { - check(err) - } - - con, err := grpc.DialContext(ctx, addr, - // TODO: we must provide grpc.WithInsecure() or set credentials - grpc.WithInsecure()) - check(err) - - req := new(statesrv.HealthRequest) - req.SetTTL(service.NonForwardingTTL) - if err := service.SignRequestData(key, req); err != nil { - check(err) - } - - res, err := state2.NewStatusClient(con). 
-		HealthCheck(ctx, req)
-	check(errors.Wrapf(err, "address: %q", addr))
-
-	var exitCode int
-
-	if !res.Healthy {
-		exitCode = 2
-	}
-	_, _ = os.Stdout.Write([]byte(res.Status + "\n"))
-	os.Exit(exitCode)
-}
-
 func main() {
-	flag.BoolVar(&healthCheck, "health", healthCheck, "run health-check")
+	ctx := grace.NewGracefulContext(nil)
-
-	// todo: if configFile is empty, we can check './config.yml' manually
-	flag.StringVar(&configFile, "config", configFile, "use config.yml file")
-	flag.Parse()
-
-	runHealthCheck()
-
-	fix.New(&fix.Settings{
-		File:    configFile,
-		Name:    misc.NodeName,
-		Prefix:  misc.Prefix,
-		Runner:  runner,
-		Build:   misc.Build,
-		Version: misc.Version,
-
-		AppDefaults: setDefaults,
-	}, node.Module).RunAndCatch()
+	<-ctx.Done()
 }
diff --git a/cmd/neofs-node/modules/bootstrap/healthy.go b/cmd/neofs-node/modules/bootstrap/healthy.go
deleted file mode 100644
index 116b424e7..000000000
--- a/cmd/neofs-node/modules/bootstrap/healthy.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package bootstrap
-
-import (
-	"crypto/ecdsa"
-	"errors"
-	"sync"
-
-	contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper"
-	state "github.com/nspcc-dev/neofs-node/pkg/network/transport/state/grpc"
-	"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-)
-
-type (
-	healthyParams struct {
-		dig.In
-
-		Logger *zap.Logger
-		Viper *viper.Viper
-		Place placement.Component
-		Checkers []state.HealthChecker `group:"healthy"`
-
-		// for ChangeState
-		PrivateKey *ecdsa.PrivateKey
-
-		Client *contract.Wrapper
-	}
-
-	healthyResult struct {
-		dig.Out
-
-		HealthyClient HealthyClient
-
-		StateService state.Service
-	}
-
-	// HealthyClient is an interface of healthiness checking tool.
-	HealthyClient interface {
-		Healthy() error
-	}
-
-	healthyClient struct {
-		*sync.RWMutex
-		healthy func() error
-	}
-)
-
-var errUnhealthy = errors.New("unhealthy")
-
-func (h *healthyClient) setHandler(handler func() error) {
-	if handler == nil {
-		return
-	}
-
-	h.Lock()
-	h.healthy = handler
-	h.Unlock()
-}
-
-func (h *healthyClient) Healthy() error {
-	if h.healthy == nil {
-		return errUnhealthy
-	}
-
-	return h.healthy()
-}
-
-func newHealthy(p healthyParams) (res healthyResult, err error) {
-	sp := state.Params{
-		Stater: p.Place,
-		Logger: p.Logger,
-		Viper: p.Viper,
-		Checkers: p.Checkers,
-		PrivateKey: p.PrivateKey,
-		Client: p.Client,
-	}
-
-	if res.StateService, err = state.New(sp); err != nil {
-		return
-	}
-
-	healthyClient := &healthyClient{
-		RWMutex: new(sync.RWMutex),
-	}
-
-	healthyClient.setHandler(res.StateService.Healthy)
-
-	res.HealthyClient = healthyClient
-
-	return
-}
diff --git a/cmd/neofs-node/modules/bootstrap/module.go b/cmd/neofs-node/modules/bootstrap/module.go
deleted file mode 100644
index e66552545..000000000
--- a/cmd/neofs-node/modules/bootstrap/module.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package bootstrap
-
-import "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module"
-
-// Module is a module of bootstrap component.
-var Module = module.Module{
-	{Constructor: newHealthy},
-}
diff --git a/cmd/neofs-node/modules/fix/catch.go b/cmd/neofs-node/modules/fix/catch.go
deleted file mode 100644
index c0bb5a653..000000000
--- a/cmd/neofs-node/modules/fix/catch.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package fix
-
-import (
-	"fmt"
-	"reflect"
-
-	"go.uber.org/zap"
-)
-
-func (a *app) Catch(err error) {
-	if err == nil {
-		return
-	}
-
-	if a.log == nil {
-		panic(err)
-	}
-
-	a.log.Fatal("Can't run app",
-		zap.Error(err))
-}
-
-// CatchTrace catch errors for debugging
-// use that function just for debug your application.
-func (a *app) CatchTrace(err error) {
-	if err == nil {
-		return
-	}
-
-	// digging into the root of the problem
-	for {
-		var (
-			ok bool
-			v = reflect.ValueOf(err)
-			fn reflect.Value
-		)
-
-		if v.Type().Kind() != reflect.Struct {
-			break
-		}
-
-		if !v.FieldByName("Reason").IsValid() {
-			break
-		}
-
-		if v.FieldByName("Func").IsValid() {
-			fn = v.FieldByName("Func")
-		}
-
-		fmt.Printf("Place: %#v\nReason: %s\n\n", fn, err)
-
-		if err, ok = v.FieldByName("Reason").Interface().(error); !ok {
-			err = v.Interface().(error)
-			break
-		}
-	}
-
-	panic(err)
-}
diff --git a/cmd/neofs-node/modules/fix/config/config.go b/cmd/neofs-node/modules/fix/config/config.go
deleted file mode 100644
index fa9e860c4..000000000
--- a/cmd/neofs-node/modules/fix/config/config.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package config
-
-import (
-	"strings"
-
-	"github.com/spf13/viper"
-)
-
-// Params groups the parameters of configuration.
-type Params struct {
-	File string
-	Type string
-	Prefix string
-	Name string
-	Version string
-
-	AppDefaults func(v *viper.Viper)
-}
-
-// NewConfig is a configuration tool's constructor.
-func NewConfig(p Params) (v *viper.Viper, err error) {
-	v = viper.New()
-	v.SetEnvPrefix(p.Prefix)
-	v.AutomaticEnv()
-	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
-
-	v.SetDefault("app.name", p.Name)
-	v.SetDefault("app.version", p.Version)
-
-	if p.AppDefaults != nil {
-		p.AppDefaults(v)
-	}
-
-	if p.fromFile() {
-		v.SetConfigFile(p.File)
-		v.SetConfigType(p.safeType())
-
-		err = v.ReadInConfig()
-	}
-
-	return v, err
-}
-
-func (p Params) fromFile() bool {
-	return p.File != ""
-}
-
-func (p Params) safeType() string {
-	if p.Type == "" {
-		p.Type = "yaml"
-	}
-	return strings.ToLower(p.Type)
-}
diff --git a/cmd/neofs-node/modules/fix/fix.go b/cmd/neofs-node/modules/fix/fix.go
deleted file mode 100644
index d3990a267..000000000
--- a/cmd/neofs-node/modules/fix/fix.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package fix
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/config"
-	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module"
-	"github.com/nspcc-dev/neofs-node/misc"
-	"github.com/nspcc-dev/neofs-node/pkg/util/grace"
-	"github.com/nspcc-dev/neofs-node/pkg/util/logger"
-	"github.com/pkg/errors"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-)
-
-type (
-	// App is an interface of executable application.
-	App interface {
-		Run() error
-		RunAndCatch()
-	}
-
-	app struct {
-		err error
-		log *zap.Logger
-		di *dig.Container
-		runner interface{}
-	}
-
-	// Settings groups the application parameters.
-	Settings struct {
-		File string
-		Type string
-		Name string
-		Prefix string
-		Build string
-		Version string
-		Runner interface{}
-
-		AppDefaults func(v *viper.Viper)
-	}
-)
-
-func (a *app) RunAndCatch() {
-	err := a.Run()
-
-	if errors.Is(err, context.Canceled) {
-		return
-	}
-
-	if ok, _ := strconv.ParseBool(misc.Debug); ok {
-		a.CatchTrace(err)
-	}
-
-	a.Catch(err)
-}
-
-func (a *app) Run() error {
-	if a.err != nil {
-		return a.err
-	}
-
-	// setup app logger:
-	if err := a.di.Invoke(func(l *zap.Logger) {
-		a.log = l
-	}); err != nil {
-		return err
-	}
-
-	return a.di.Invoke(a.runner)
-}
-
-// New is an application constructor.
-func New(s *Settings, mod module.Module) App {
-	var (
-		a app
-		err error
-	)
-
-	a.di = dig.New(dig.DeferAcyclicVerification())
-	a.runner = s.Runner
-
-	if s.Prefix == "" {
-		s.Prefix = s.Name
-	}
-
-	mod = mod.Append(
-		module.Module{
-			{Constructor: logger.NewLogger},
-			{Constructor: grace.NewGracefulContext},
-			{Constructor: func() (*viper.Viper, error) {
-				return config.NewConfig(config.Params{
-					File: s.File,
-					Type: s.Type,
-					Prefix: strings.ToUpper(s.Prefix),
-					Name: s.Name,
-					Version: fmt.Sprintf("%s(%s)", s.Version, s.Build),
-
-					AppDefaults: s.AppDefaults,
-				})
-			}},
-		})
-
-	if err = module.Provide(a.di, mod); err != nil {
-		a.err = err
-	}
-
-	return &a
-}
diff --git a/cmd/neofs-node/modules/fix/module/module.go b/cmd/neofs-node/modules/fix/module/module.go
deleted file mode 100644
index 9e33f48e4..000000000
--- a/cmd/neofs-node/modules/fix/module/module.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package module
-
-import (
-	"go.uber.org/dig"
-)
-
-type (
-	// Module type
-	Module []*Provider
-
-	// Provider struct
-	Provider struct {
-		Constructor interface{}
-		Options []dig.ProvideOption
-	}
-)
-
-// Append module to target module and return new module
-func (m Module) Append(mods ...Module) Module {
-	var result = m
-	for _, mod := range mods {
-		result = append(result, mod...)
-	}
-	return result
-}
-
-// Provide set providers functions to DI container
-func Provide(dic *dig.Container, providers Module) error {
-	for _, p := range providers {
-		if err := dic.Provide(p.Constructor, p.Options...); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/cmd/neofs-node/modules/fix/services.go b/cmd/neofs-node/modules/fix/services.go
deleted file mode 100644
index 59a1a169e..000000000
--- a/cmd/neofs-node/modules/fix/services.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package fix
-
-import (
-	"context"
-)
-
-type (
-	// Service interface
-	Service interface {
-		Start(context.Context)
-		Stop()
-	}
-
-	combiner []Service
-)
-
-var _ Service = (combiner)(nil)
-
-// NewServices creates single runner.
-func NewServices(items ...Service) Service {
-	var svc = make(combiner, 0, len(items))
-
-	for _, item := range items {
-		if item == nil {
-			continue
-		}
-
-		svc = append(svc, item)
-	}
-
-	return svc
-}
-
-// Start all services.
-func (c combiner) Start(ctx context.Context) {
-	for _, svc := range c {
-		svc.Start(ctx)
-	}
-}
-
-// Stop all services.
-func (c combiner) Stop() {
-	for _, svc := range c {
-		svc.Stop()
-	}
-}
diff --git a/cmd/neofs-node/modules/fix/worker/worker.go b/cmd/neofs-node/modules/fix/worker/worker.go
deleted file mode 100644
index c6cbd13b4..000000000
--- a/cmd/neofs-node/modules/fix/worker/worker.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package worker
-
-import (
-	"context"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-type (
-	// Workers is an interface of worker tool.
-	Workers interface {
-		Start(context.Context)
-		Stop()
-
-		Add(Job Handler)
-	}
-
-	workers struct {
-		cancel context.CancelFunc
-		started *int32
-		wg *sync.WaitGroup
-		jobs []Handler
-	}
-
-	// Handler is a worker's handling function.
-	Handler func(ctx context.Context)
-
-	// Jobs is a map of worker names to handlers.
-	Jobs map[string]Handler
-
-	// Job groups the parameters of worker's job.
-	Job struct {
-		Disabled bool
-		Immediately bool
-		Timer time.Duration
-		Ticker time.Duration
-		Handler Handler
-	}
-)
-
-// New is a constructor of workers.
-func New() Workers {
-	return &workers{
-		started: new(int32),
-		wg: new(sync.WaitGroup),
-	}
-}
-
-func (w *workers) Add(job Handler) {
-	w.jobs = append(w.jobs, job)
-}
-
-func (w *workers) Stop() {
-	if !atomic.CompareAndSwapInt32(w.started, 1, 0) {
-		// already stopped
-		return
-	}
-
-	w.cancel()
-	w.wg.Wait()
-}
-
-func (w *workers) Start(ctx context.Context) {
-	if !atomic.CompareAndSwapInt32(w.started, 0, 1) {
-		// already started
-		return
-	}
-
-	ctx, w.cancel = context.WithCancel(ctx)
-	for _, job := range w.jobs {
-		w.wg.Add(1)
-
-		go func(handler Handler) {
-			defer w.wg.Done()
-			handler(ctx)
-		}(job)
-	}
-}
diff --git a/cmd/neofs-node/modules/grpc/billing.go b/cmd/neofs-node/modules/grpc/billing.go
deleted file mode 100644
index d8500c265..000000000
--- a/cmd/neofs-node/modules/grpc/billing.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package grpc
-
-import (
-	"context"
-
-	"github.com/gogo/protobuf/proto"
-	"github.com/nspcc-dev/neofs-api-go/refs"
-	"github.com/prometheus/client_golang/prometheus"
-	"google.golang.org/grpc"
-)
-
-type (
-	billingStream struct {
-		grpc.ServerStream
-		*grpc.StreamServerInfo
-
-		input int
-		output int
-		cid string
-	}
-
-	cider interface {
-		CID() refs.CID
-	}
-)
-
-const (
-	typeInput = "input"
-	typeOutput = "output"
-
-	labelType = "type"
-	labelMethod = "method"
-	labelContainer = "container"
-)
-
-var (
-	serviceBillingBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "neofs",
-		Name: "billing_bytes",
-		Help: "Count of bytes received / sent for method and container",
-	}, []string{labelType, labelMethod, labelContainer})
-
-	serviceBillingCalls = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "neofs",
-		Name: "billing_calls",
-		Help: "Count of calls for api methods",
-	}, []string{labelMethod, labelContainer})
-)
-
-func init() {
-	// Register billing metrics
-	prometheus.MustRegister(serviceBillingBytes)
-	prometheus.MustRegister(serviceBillingCalls)
-}
-
-func getProtoSize(val interface{}) int {
-	if msg, ok := val.(proto.Message); ok && msg != nil {
-		return proto.Size(msg)
-	}
-
-	return 0
-}
-
-func getProtoContainer(val interface{}) string {
-	if t, ok := val.(cider); ok && t != nil {
-		return t.CID().String()
-	}
-
-	return ""
-}
-
-func (b *billingStream) RecvMsg(msg interface{}) error {
-	err := b.ServerStream.RecvMsg(msg)
-	b.input += getProtoSize(msg)
-
-	if cid := getProtoContainer(msg); cid != "" {
-		b.cid = cid
-	}
-
-	return err
-}
-
-func (b *billingStream) SendMsg(msg interface{}) error {
-	b.output += getProtoSize(msg)
-
-	return b.ServerStream.SendMsg(msg)
-}
-
-func (b *billingStream) report() {
-	labels := prometheus.Labels{
-		labelMethod: b.FullMethod,
-		labelContainer: b.cid,
-	}
-
-	serviceBillingCalls.With(labels).Inc()
-
-	labels[labelType] = typeInput
-	serviceBillingBytes.With(labels).Add(float64(b.input))
-
-	labels[labelType] = typeOutput
-	serviceBillingBytes.With(labels).Add(float64(b.output))
-}
-
-func streamBilling(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-	stream := &billingStream{
-		ServerStream: ss,
-		StreamServerInfo: info,
-	}
-
-	err := handler(srv, stream)
-
-	stream.report()
-
-	return err
-}
-
-func unaryBilling(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (res interface{}, err error) {
-	input := getProtoSize(req)
-	cid := getProtoContainer(req)
-
-	labels := prometheus.Labels{
-		labelMethod: info.FullMethod,
-		labelContainer: cid,
-	}
-
-	serviceBillingCalls.With(labels).Inc()
-
-	if res, err = handler(ctx, req); err != nil {
-		return
-	}
-
-	output := getProtoSize(res)
-
-	labels[labelType] = typeInput
-	serviceBillingBytes.With(labels).Add(float64(input))
-
-	labels[labelType] = typeOutput
-	serviceBillingBytes.With(labels).Add(float64(output))
-
-	return
-}
diff --git a/cmd/neofs-node/modules/grpc/module.go b/cmd/neofs-node/modules/grpc/module.go
deleted file mode 100644
index 861084ad4..000000000
--- a/cmd/neofs-node/modules/grpc/module.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package grpc
-
-import "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module"
-
-// Module is a gRPC layer module.
-var Module = module.Module{
-	{Constructor: routing},
-}
diff --git a/cmd/neofs-node/modules/grpc/routing.go b/cmd/neofs-node/modules/grpc/routing.go
deleted file mode 100644
index bd97e42c5..000000000
--- a/cmd/neofs-node/modules/grpc/routing.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// About "github.com/nspcc-dev/neofs-node/lib/grpc"
-// there's just alias for "google.golang.org/grpc"
-// with Service-interface
-
-package grpc
-
-import (
-	middleware "github.com/grpc-ecosystem/go-grpc-middleware"
-	gZap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
-	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
-	libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-	"google.golang.org/grpc"
-)
-
-type (
-	Service = libgrpc.Service
-
-	// ServerParams to create gRPC-server
-	// and provide service-handlers
-	ServerParams struct {
-		dig.In
-
-		Services []Service
-		Logger *zap.Logger
-		Viper *viper.Viper
-	}
-
-	// ServicesResult ...
-	ServicesResult struct {
-		dig.Out
-
-		Services []Service
-	}
-
-	// Server type-alias
-	Server = grpc.Server
-
-	// CallOption type-alias
-	CallOption = grpc.CallOption
-
-	// ClientConn type-alias
-	ClientConn = grpc.ClientConn
-
-	// ServerOption type-alias
-	ServerOption = grpc.ServerOption
-)
-
-var (
-	// DialContext func-alias
-	DialContext = grpc.DialContext
-
-	// WithBlock func-alias
-	WithBlock = grpc.WithBlock
-
-	// WithInsecure func-alias
-	WithInsecure = grpc.WithInsecure
-)
-
-// NewServer creates a gRPC server which has no service registered and has not
-// started to accept requests yet.
-func NewServer(opts ...ServerOption) *Server {
-	return grpc.NewServer(opts...)
-}
-
-// creates new gRPC server and attach handlers.
-func routing(p ServerParams) *grpc.Server {
-	var (
-		options []ServerOption
-		stream []grpc.StreamServerInterceptor
-		unary []grpc.UnaryServerInterceptor
-	)
-
-	if p.Viper.GetBool("node.grpc.billing") {
-		unary = append(unary, unaryBilling)
-		stream = append(stream, streamBilling)
-	}
-
-	if p.Viper.GetBool("node.grpc.logging") {
-		stream = append(stream, gZap.StreamServerInterceptor(p.Logger))
-		unary = append(unary, gZap.UnaryServerInterceptor(p.Logger))
-	}
-
-	if p.Viper.GetBool("node.grpc.metrics") {
-		stream = append(stream, prometheus.StreamServerInterceptor)
-		unary = append(unary, prometheus.UnaryServerInterceptor)
-	}
-
-	// Add stream options:
-	if len(stream) > 0 {
-		options = append(options,
-			grpc.StreamInterceptor(middleware.ChainStreamServer(stream...)),
-		)
-	}
-
-	// Add unary options:
-	if len(unary) > 0 {
-		options = append(options,
-			grpc.UnaryInterceptor(middleware.ChainUnaryServer(unary...)),
-		)
-	}
-
-	g := grpc.NewServer(options...)
-
-	// Service services here:
-	for _, service := range p.Services {
-		p.Logger.Info("register gRPC service",
-			zap.String("service", service.Name()))
-		service.Register(g)
-	}
-
-	return g
-}
diff --git a/cmd/neofs-node/modules/morph/balance.go b/cmd/neofs-node/modules/morph/balance.go
deleted file mode 100644
index e22200021..000000000
--- a/cmd/neofs-node/modules/morph/balance.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package morph
-
-import (
-	contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/balance"
-	clientWrapper "github.com/nspcc-dev/neofs-node/pkg/morph/client/balance/wrapper"
-	accounting "github.com/nspcc-dev/neofs-node/pkg/network/transport/accounting/grpc"
-	"github.com/pkg/errors"
-	"go.uber.org/dig"
-)
-
-type balanceContractResult struct {
-	dig.Out
-
-	Client *clientWrapper.Wrapper
-
-	AccountingService accounting.Service
-}
-
-// BalanceContractName is a name of Balance contract config sub-section.
-const BalanceContractName = "balance"
-
-const (
-	balanceContractBalanceOfOpt = "balance_of_method"
-
-	balanceContractDecimalsOfOpt = "decimals_method"
-)
-
-// BalanceContractBalanceOfOptPath is a path to balanceOf method name option.
-func BalanceContractBalanceOfOptPath() string {
-	return optPath(prefix, BalanceContractName, balanceContractBalanceOfOpt)
-}
-
-// BalanceContractDecimalsOfOptPath is a path to decimals method name option.
-func BalanceContractDecimalsOfOptPath() string {
-	return optPath(prefix, BalanceContractName, balanceContractDecimalsOfOpt)
-}
-
-func newBalanceContract(p contractParams) (res balanceContractResult, err error) {
-	client, ok := p.MorphContracts[BalanceContractName]
-	if !ok {
-		err = errors.Errorf("missing %s contract client", BalanceContractName)
-		return
-	}
-
-	var (
-		balanceOfMethod = p.Viper.GetString(BalanceContractBalanceOfOptPath())
-		decimalsMethod = p.Viper.GetString(BalanceContractDecimalsOfOptPath())
-	)
-
-	var c *contract.Client
-	if c, err = contract.New(client,
-		contract.WithBalanceOfMethod(balanceOfMethod),
-		contract.WithDecimalsMethod(decimalsMethod),
-	); err != nil {
-		return
-	}
-
-	if res.Client, err = clientWrapper.New(c); err != nil {
-		return
-	}
-
-	if res.AccountingService, err = accounting.New(accounting.Params{
-		ContractClient: res.Client,
-	}); err != nil {
-		return
-	}
-
-	return
-}
diff --git a/cmd/neofs-node/modules/morph/common.go b/cmd/neofs-node/modules/morph/common.go
deleted file mode 100644
index 55fa64c44..000000000
--- a/cmd/neofs-node/modules/morph/common.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package morph
-
-import (
-	"github.com/nspcc-dev/neo-go/pkg/util"
-	"github.com/nspcc-dev/neofs-node/pkg/core/netmap"
-	"github.com/nspcc-dev/neofs-node/pkg/morph/client"
-	"github.com/nspcc-dev/neofs-node/pkg/morph/event"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-)
-
-// SmartContracts maps smart contract name to contract client.
-type SmartContracts map[string]*client.StaticClient
-
-// EventHandlers maps notification event name to handler information.
-type EventHandlers map[string]event.HandlerInfo
-
-type morphContractsParams struct {
-	dig.In
-
-	Viper *viper.Viper
-
-	Client *client.Client
-
-	Listener event.Listener
-}
-
-type contractParams struct {
-	dig.In
-
-	Viper *viper.Viper
-
-	Logger *zap.Logger
-
-	MorphContracts SmartContracts
-
-	NodeInfo netmap.Info
-}
-
-func newMorphContracts(p morphContractsParams) (SmartContracts, EventHandlers, error) {
-	mContracts := make(map[string]*client.StaticClient, len(ContractNames))
-	mHandlers := make(map[string]event.HandlerInfo)
-
-	for _, contractName := range ContractNames {
-		scHash, err := util.Uint160DecodeStringLE(
-			p.Viper.GetString(
-				ScriptHashOptPath(contractName),
-			),
-		)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		fee := util.Fixed8FromInt64(
-			p.Viper.GetInt64(
-				InvocationFeeOptPath(contractName),
-			),
-		)
-
-		mContracts[contractName], err = client.NewStatic(p.Client, scHash, fee)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		// set event parsers
-		parserInfo := event.ParserInfo{}
-		parserInfo.SetScriptHash(scHash)
-
-		handlerInfo := event.HandlerInfo{}
-		handlerInfo.SetScriptHash(scHash)
-
-		for _, item := range mParsers[contractName] {
-			parserInfo.SetParser(item.parser)
-
-			optPath := ContractEventOptPath(contractName, item.typ)
-
-			typEvent := event.TypeFromString(
-				p.Viper.GetString(optPath),
-			)
-
-			parserInfo.SetType(typEvent)
-			handlerInfo.SetType(typEvent)
-
-			p.Listener.SetParser(parserInfo)
-
-			mHandlers[optPath] = handlerInfo
-		}
-	}
-
-	return mContracts, mHandlers, nil
-}
-
-const prefix = "morph"
-
-const (
-	endpointOpt = "endpoint"
-
-	dialTimeoutOpt = "dial_timeout"
-
-	magicNumberOpt = "magic_number"
-
-	scriptHashOpt = "script_hash"
-
-	invocationFeeOpt = "invocation_fee"
-)
-
-// ContractNames is a list of smart contract names.
-var ContractNames = []string{
-	containerContractName,
-	NetmapContractName,
-	BalanceContractName,
-}
-
-// EndpointOptPath returns the config path to goclient endpoint.
-func EndpointOptPath() string {
-	return optPath(prefix, endpointOpt)
-}
-
-// MagicNumberOptPath returns the config path to goclient magic number.
-func MagicNumberOptPath() string {
-	return optPath(prefix, magicNumberOpt)
-}
-
-// DialTimeoutOptPath returns the config path to goclient dial timeout.
-func DialTimeoutOptPath() string {
-	return optPath(prefix, dialTimeoutOpt)
-}
-
-// ScriptHashOptPath calculates the config path to script hash config of particular contract.
-func ScriptHashOptPath(name string) string {
-	return optPath(prefix, name, scriptHashOpt)
-}
-
-// InvocationFeeOptPath calculates the config path to invocation fee config of particular contract.
-func InvocationFeeOptPath(name string) string {
-	return optPath(prefix, name, invocationFeeOpt)
-}
diff --git a/cmd/neofs-node/modules/morph/container.go b/cmd/neofs-node/modules/morph/container.go
deleted file mode 100644
index 550d35c84..000000000
--- a/cmd/neofs-node/modules/morph/container.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package morph
-
-import (
-	eacl "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage"
-	"github.com/nspcc-dev/neofs-node/pkg/core/container/storage"
-	contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/container"
-	clientWrapper "github.com/nspcc-dev/neofs-node/pkg/morph/client/container/wrapper"
-	"github.com/pkg/errors"
-	"go.uber.org/dig"
-)
-
-type containerContractResult struct {
-	dig.Out
-
-	ExtendedACLStore eacl.Storage
-
-	ContainerStorage storage.Storage
-}
-
-const (
-	containerContractName = "container"
-
-	containerContractSetEACLOpt = "set_eacl_method"
-
-	containerContractEACLOpt = "get_eacl_method"
-
-	containerContractPutOpt = "put_method"
-
-	containerContractGetOpt = "get_method"
-
-	containerContractDelOpt = "delete_method"
-
-	containerContractListOpt = "list_method"
-)
-
-// ContainerContractSetEACLOptPath returns the config path to set eACL method name of Container contract.
-func ContainerContractSetEACLOptPath() string {
-	return optPath(prefix, containerContractName, containerContractSetEACLOpt)
-}
-
-// ContainerContractEACLOptPath returns the config path to get eACL method name of Container contract.
-func ContainerContractEACLOptPath() string {
-	return optPath(prefix, containerContractName, containerContractEACLOpt)
-}
-
-// ContainerContractPutOptPath returns the config path to put container method name of Container contract.
-func ContainerContractPutOptPath() string {
-	return optPath(prefix, containerContractName, containerContractPutOpt)
-}
-
-// ContainerContractGetOptPath returns the config path to get container method name of Container contract.
-func ContainerContractGetOptPath() string {
-	return optPath(prefix, containerContractName, containerContractGetOpt)
-}
-
-// ContainerContractDelOptPath returns the config path to delete container method name of Container contract.
-func ContainerContractDelOptPath() string {
-	return optPath(prefix, containerContractName, containerContractDelOpt)
-}
-
-// ContainerContractListOptPath returns the config path to list containers method name of Container contract.
-func ContainerContractListOptPath() string {
-	return optPath(prefix, containerContractName, containerContractListOpt)
-}
-
-func newContainerContract(p contractParams) (res containerContractResult, err error) {
-	client, ok := p.MorphContracts[containerContractName]
-	if !ok {
-		err = errors.Errorf("missing %s contract client", containerContractName)
-		return
-	}
-
-	var (
-		setEACLMethod = p.Viper.GetString(ContainerContractSetEACLOptPath())
-		eaclMethod = p.Viper.GetString(ContainerContractEACLOptPath())
-		getMethod = p.Viper.GetString(ContainerContractGetOptPath())
-		putMethod = p.Viper.GetString(ContainerContractPutOptPath())
-		deleteMethod = p.Viper.GetString(ContainerContractDelOptPath())
-		listMethod = p.Viper.GetString(ContainerContractListOptPath())
-	)
-
-	var containerClient *contract.Client
-	if containerClient, err = contract.New(client,
-		contract.WithSetEACLMethod(setEACLMethod),
-		contract.WithEACLMethod(eaclMethod),
-		contract.WithGetMethod(getMethod),
-		contract.WithPutMethod(putMethod),
-		contract.WithDeleteMethod(deleteMethod),
-		contract.WithListMethod(listMethod),
-	); err != nil {
-		return
-	}
-
-	var wrapClient *clientWrapper.Wrapper
-	if wrapClient, err = clientWrapper.New(containerClient); err != nil {
-		return
-	}
-
-	res.ContainerStorage = wrapClient
-	res.ExtendedACLStore = wrapClient
-
-	return res, nil
-}
diff --git a/cmd/neofs-node/modules/morph/event.go b/cmd/neofs-node/modules/morph/event.go
deleted file mode 100644
index b6d6a631d..000000000
--- a/cmd/neofs-node/modules/morph/event.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package morph
-
-import (
-	"github.com/nspcc-dev/neofs-node/pkg/morph/event"
-	"github.com/nspcc-dev/neofs-node/pkg/morph/event/netmap"
-)
-
-const eventOpt = "event"
-
-// NewEpochEventType is a config section of new epoch notification event.
-const NewEpochEventType = "new_epoch"
-
-// ContractEventOptPath returns the config path to notification event name of particular contract.
-func ContractEventOptPath(contract, event string) string {
-	return optPath(prefix, contract, eventOpt, event)
-}
-
-var mParsers = map[string][]struct {
-	typ string
-	parser event.Parser
-}{
-	NetmapContractName: {
-		{
-			typ: NewEpochEventType,
-			parser: netmap.ParseNewEpoch,
-		},
-	},
-}
diff --git a/cmd/neofs-node/modules/morph/goclient.go b/cmd/neofs-node/modules/morph/goclient.go
deleted file mode 100644
index 77b521496..000000000
--- a/cmd/neofs-node/modules/morph/goclient.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package morph
-
-import (
-	"crypto/ecdsa"
-
-	"github.com/nspcc-dev/neo-go/pkg/config/netmode"
-	"github.com/nspcc-dev/neofs-node/pkg/morph/client"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-)
-
-type morphClientParams struct {
-	dig.In
-
-	Viper *viper.Viper
-
-	Logger *zap.Logger
-
-	Key *ecdsa.PrivateKey
-}
-
-func newClient(p morphClientParams) (*client.Client, error) {
-	return client.New(
-		p.Key,
-		p.Viper.GetString(optPath(prefix, endpointOpt)),
-		client.WithLogger(p.Logger),
-		client.WithDialTimeout(p.Viper.GetDuration(optPath(prefix, dialTimeoutOpt))),
-		client.WithMagic(netmode.Magic(p.Viper.GetUint32(optPath(prefix, magicNumberOpt)))),
-	)
-}
diff --git a/cmd/neofs-node/modules/morph/listener.go b/cmd/neofs-node/modules/morph/listener.go
deleted file mode 100644
index d70154432..000000000
--- a/cmd/neofs-node/modules/morph/listener.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package morph
-
-import (
-	"context"
-
-	"github.com/nspcc-dev/neofs-node/pkg/morph/event"
-	"github.com/nspcc-dev/neofs-node/pkg/morph/subscriber"
-	"github.com/spf13/viper"
-	"go.uber.org/dig"
-	"go.uber.org/zap"
-)
-
-type eventListenerParams struct {
-	dig.In
-
-	Viper *viper.Viper
-
-	Logger *zap.Logger
-}
-
-var listenerPrefix = optPath(prefix, "listener")
-
-const (
-	listenerEndpointOpt = "endpoint"
-
-	listenerDialTimeoutOpt = "dial_timeout"
-)
-
-// ListenerEndpointOptPath returns the config path to event listener's endpoint.
-func ListenerEndpointOptPath() string {
-	return optPath(listenerPrefix, listenerEndpointOpt)
-}
-
-// ListenerDialTimeoutOptPath returns the config path to event listener's dial timeout.
-func ListenerDialTimeoutOptPath() string {
-	return optPath(listenerPrefix, listenerDialTimeoutOpt)
-}
-
-func newEventListener(p eventListenerParams) (event.Listener, error) {
-	sub, err := subscriber.New(context.Background(), &subscriber.Params{
-		Log: p.Logger,
-		Endpoint: p.Viper.GetString(ListenerEndpointOptPath()),
-		DialTimeout: p.Viper.GetDuration(ListenerDialTimeoutOptPath()),
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return event.NewListener(event.ListenerParams{
-		Logger: p.Logger,
-		Subscriber: sub,
-	})
-}
diff --git a/cmd/neofs-node/modules/morph/module.go b/cmd/neofs-node/modules/morph/module.go
deleted file mode 100644
index b61ce2c73..000000000
--- a/cmd/neofs-node/modules/morph/module.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package morph
-
-import (
-	"strings"
-
-	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module"
-)
-
-// Module is a Neo:Morph module.
-var Module = module.Module{
-	{Constructor: newClient},
-	{Constructor: newMorphContracts},
-	{Constructor: newContainerContract},
-	{Constructor: newNetmapContract},
-	{Constructor: newEventListener},
-	{Constructor: newBalanceContract},
-}
-
-func optPath(sections ...string) string {
-	return strings.Join(sections, ".")
-}
diff --git a/cmd/neofs-node/modules/morph/netmap.go b/cmd/neofs-node/modules/morph/netmap.go
deleted file mode 100644
index 1fb8452f5..000000000
--- a/cmd/neofs-node/modules/morph/netmap.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package morph
-
-import (
-	contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap"
-	clientWrapper "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper"
-	"github.com/nspcc-dev/neofs-node/pkg/network/bootstrap"
-	"github.com/pkg/errors"
-	"go.uber.org/dig"
-)
-
-type netmapContractResult struct {
-	dig.Out
-
-	Client *clientWrapper.Wrapper
-
-	NodeRegisterer *bootstrap.Registerer
-}
-
-const (
-	// NetmapContractName is a Netmap contract's config section name.
-	NetmapContractName = "netmap"
-
-	netmapContractAddPeerOpt = "add_peer_method"
-
-	netmapContractNewEpochOpt = "new_epoch_method"
-
-	netmapContractNetmapOpt = "netmap_method"
-
-	netmapContractUpdStateOpt = "update_state_method"
-
-	netmapContractIRListOpt = "ir_list_method"
-)
-
-// NetmapContractAddPeerOptPath returns the config path to add peer method of Netmap contract.
-func NetmapContractAddPeerOptPath() string {
-	return optPath(prefix, NetmapContractName, netmapContractAddPeerOpt)
-}
-
-// NetmapContractNewEpochOptPath returns the config path to new epoch method of Netmap contract.
-func NetmapContractNewEpochOptPath() string {
-	return optPath(prefix, NetmapContractName, netmapContractNewEpochOpt)
-}
-
-// NetmapContractNetmapOptPath returns the config path to get netmap method of Netmap contract.
-func NetmapContractNetmapOptPath() string {
-	return optPath(prefix, NetmapContractName, netmapContractNetmapOpt)
-}
-
-// NetmapContractUpdateStateOptPath returns the config path to update state method of Netmap contract.
-func NetmapContractUpdateStateOptPath() string {
-	return optPath(prefix, NetmapContractName, netmapContractUpdStateOpt)
-}
-
-// NetmapContractIRListOptPath returns the config path to inner ring list method of Netmap contract.
-func NetmapContractIRListOptPath() string {
-	return optPath(prefix, NetmapContractName, netmapContractIRListOpt)
-}
-
-func newNetmapContract(p contractParams) (res netmapContractResult, err error) {
-	client, ok := p.MorphContracts[NetmapContractName]
-	if !ok {
-		err = errors.Errorf("missing %s contract client", NetmapContractName)
-		return
-	}
-
-	var (
-		addPeerMethod = p.Viper.GetString(NetmapContractAddPeerOptPath())
-		newEpochMethod = p.Viper.GetString(NetmapContractNewEpochOptPath())
-		netmapMethod = p.Viper.GetString(NetmapContractNetmapOptPath())
-		updStateMethod = p.Viper.GetString(NetmapContractUpdateStateOptPath())
-		irListMethod = p.Viper.GetString(NetmapContractIRListOptPath())
-	)
-
-	var c *contract.Client
-	if c, err = contract.New(client,
-		contract.WithAddPeerMethod(addPeerMethod),
-		contract.WithNewEpochMethod(newEpochMethod),
-		contract.WithNetMapMethod(netmapMethod),
-		contract.WithUpdateStateMethod(updStateMethod),
-		contract.WithInnerRingListMethod(irListMethod),
-	); err != nil {
-		return
-	}
-
-	if res.Client, err = clientWrapper.New(c); err != nil {
-		return
-	}
-
-	if res.NodeRegisterer, err = bootstrap.New(res.Client, p.NodeInfo); err != nil {
-		return
-	}
-
-	return res, nil
-}
diff --git a/cmd/neofs-node/modules/network/http.go b/cmd/neofs-node/modules/network/http.go
deleted file mode 100644
index 001411c47..000000000
--- a/cmd/neofs-node/modules/network/http.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package network
-
-import (
-	"github.com/fasthttp/router"
-	svc "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/bootstrap"
-	"github.com/valyala/fasthttp"
-	"go.uber.org/dig"
-)
-
-type (
-	handlerParams struct {
-		dig.In
-
-		Healthy svc.HealthyClient
-	}
-)
-
-const (
-	healthyState = "NeoFS node is "
-	defaultContentType = "text/plain; charset=utf-8"
-)
-
-func newHTTPHandler(p handlerParams) (fasthttp.RequestHandler, error) {
-	r := router.New()
-	r.RedirectTrailingSlash = true
-
-	r.GET("/-/ready/", func(c *fasthttp.RequestCtx) {
-		c.SetStatusCode(fasthttp.StatusOK)
-		c.SetBodyString(healthyState + "ready")
-	})
-
-	r.GET("/-/healthy/", func(c *fasthttp.RequestCtx) {
-		code := fasthttp.StatusOK
-		msg := "healthy"
-
-		err := p.Healthy.Healthy()
-		if err != nil {
-			code = fasthttp.StatusBadRequest
-			msg = "unhealthy: " + err.Error()
-		}
-
-		c.Response.Reset()
-		c.SetStatusCode(code)
-		c.SetContentType(defaultContentType)
-		c.SetBodyString(healthyState + msg)
-	})
-
-	return r.Handler, nil
-}
diff --git a/cmd/neofs-node/modules/network/module.go b/cmd/neofs-node/modules/network/module.go
deleted file mode 100644
index 8e0e1f253..000000000
--- a/cmd/neofs-node/modules/network/module.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package network
-
-import (
-	"github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module"
-	"github.com/nspcc-dev/neofs-node/pkg/util/profiler"
-)
-
-// Module is a network layer module.
-var Module = module.Module{ - {Constructor: newMuxer}, - {Constructor: newPeers}, - {Constructor: newPlacement}, - - // Metrics is prometheus handler - {Constructor: profiler.NewMetrics}, - // Profiler is pprof handler - {Constructor: profiler.NewProfiler}, - {Constructor: newHTTPHandler}, -} diff --git a/cmd/neofs-node/modules/network/muxer.go b/cmd/neofs-node/modules/network/muxer.go deleted file mode 100644 index 2de81f25a..000000000 --- a/cmd/neofs-node/modules/network/muxer.go +++ /dev/null @@ -1,53 +0,0 @@ -package network - -import ( - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/network/muxer" - "github.com/spf13/viper" - "github.com/valyala/fasthttp" - "go.uber.org/dig" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type muxerParams struct { - dig.In - - Logger *zap.Logger - P2P *grpc.Server - - Address multiaddr.Multiaddr - ShutdownTTL time.Duration `name:"shutdown_ttl"` - API fasthttp.RequestHandler - Viper *viper.Viper -} - -const appName = "neofs-node" - -func newFastHTTPServer(p muxerParams) *fasthttp.Server { - srv := new(fasthttp.Server) - srv.Name = appName - srv.ReadBufferSize = p.Viper.GetInt("muxer.http.read_buffer_size") - srv.WriteBufferSize = p.Viper.GetInt("muxer.http.write_buffer_size") - srv.ReadTimeout = p.Viper.GetDuration("muxer.http.read_timeout") - srv.WriteTimeout = p.Viper.GetDuration("muxer.http.write_timeout") - srv.GetOnly = true - srv.DisableHeaderNamesNormalizing = true - srv.NoDefaultServerHeader = true - srv.NoDefaultContentType = true - srv.Handler = p.API - - return srv -} - -func newMuxer(p muxerParams) muxer.Mux { - return muxer.New(muxer.Params{ - P2P: p.P2P, - Logger: p.Logger, - Address: p.Address, - ShutdownTTL: p.ShutdownTTL, - API: newFastHTTPServer(p), - }) -} diff --git a/cmd/neofs-node/modules/network/peers.go b/cmd/neofs-node/modules/network/peers.go deleted file mode 100644 index 3fe60b77b..000000000 --- a/cmd/neofs-node/modules/network/peers.go +++ /dev/null @@ -1,28 +0,0 @@ -package network - -import ( - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type peersParams struct { - dig.In - - Viper *viper.Viper - Logger *zap.Logger - Address multiaddr.Multiaddr -} - -func newPeers(p peersParams) (peers.Interface, error) { - return peers.New(peers.Params{ - Logger: p.Logger, - ConnectionTTL: p.Viper.GetDuration("peers.connections_ttl"), - ConnectionIDLE: p.Viper.GetDuration("peers.connections_idle"), - MetricsTimeout: p.Viper.GetDuration("peers.metrics_timeout"), - KeepAliveTTL: p.Viper.GetDuration("peers.keep_alive.ttl"), - KeepAlivePingTTL: p.Viper.GetDuration("peers.keep_alive.ping"), - }) -} diff --git a/cmd/neofs-node/modules/network/placement.go b/cmd/neofs-node/modules/network/placement.go deleted file mode 100644 index 4b1d8bd80..000000000 --- a/cmd/neofs-node/modules/network/placement.go +++ /dev/null @@ -1,79 +0,0 @@ -package network - -import ( - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/morph" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" - "github.com/nspcc-dev/neofs-node/pkg/morph/event" - netmapevent "github.com/nspcc-dev/neofs-node/pkg/morph/event/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - state "github.com/nspcc-dev/neofs-node/pkg/network/transport/state/grpc" - 
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - placementParams struct { - dig.In - - Log *zap.Logger - Peers peers.Store - Fetcher storage.Storage - - MorphEventListener event.Listener - - NetMapClient *contract.Wrapper - - MorphEventHandlers morph.EventHandlers - } - - placementOutput struct { - dig.Out - - Placement placement.Component - Healthy state.HealthChecker `group:"healthy"` - } -) - -const defaultChronologyDuraion = 2 - -func newPlacement(p placementParams) placementOutput { - place := placement.New(placement.Params{ - Log: p.Log, - Peerstore: p.Peers, - Fetcher: p.Fetcher, - ChronologyDuration: defaultChronologyDuraion, - }) - - if handlerInfo, ok := p.MorphEventHandlers[morph.ContractEventOptPath( - morph.NetmapContractName, - morph.NewEpochEventType, - )]; ok { - handlerInfo.SetHandler(func(ev event.Event) { - nm, err := p.NetMapClient.GetNetMap() - if err != nil { - p.Log.Error("could not get network map", - zap.String("error", err.Error()), - ) - return - } - - if err := place.Update( - ev.(netmapevent.NewEpoch).EpochNumber(), - nm, - ); err != nil { - p.Log.Error("could not update network map in placement component", - zap.String("error", err.Error()), - ) - } - }) - - p.MorphEventListener.RegisterHandler(handlerInfo) - } - - return placementOutput{ - Placement: place, - Healthy: place.(state.HealthChecker), - } -} diff --git a/cmd/neofs-node/modules/node/audit.go b/cmd/neofs-node/modules/node/audit.go deleted file mode 100644 index 77f22420c..000000000 --- a/cmd/neofs-node/modules/node/audit.go +++ /dev/null @@ -1,65 +0,0 @@ -package node - -import ( - "crypto/ecdsa" - - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - object "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/spf13/viper" - "go.uber.org/zap" -) - -type ( - cnrHandlerParams struct { - *viper.Viper - *zap.Logger - Placer *placement.PlacementWrapper - PeerStore peers.Store - Peers peers.Interface - TimeoutsPrefix string - Key *ecdsa.PrivateKey - - TokenStore session.PrivateTokenStore - } -) - -func newObjectsContainerHandler(p cnrHandlerParams) (transport.SelectiveContainerExecutor, error) { - as, err := storage.NewAddressStore(p.PeerStore, p.Logger) - if err != nil { - return nil, err - } - - multiTransport, err := object.NewMultiTransport(object.MultiTransportParams{ - AddressStore: as, - EpochReceiver: p.Placer, - RemoteService: object.NewRemoteService(p.Peers), - Logger: p.Logger, - Key: p.Key, - PutTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.put"), - GetTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.get"), - HeadTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.head"), - SearchTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.search"), - RangeHashTimeout: p.Viper.GetDuration(p.TimeoutsPrefix + ".timeouts.range_hash"), - DialTimeout: p.Viper.GetDuration("object.dial_timeout"), - - PrivateTokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - exec, err := transport.NewContainerTraverseExecutor(multiTransport) - if err != nil { - return nil, err - } - - return transport.NewObjectContainerHandler(transport.ObjectContainerHandlerParams{ - NodeLister: 
p.Placer, - Executor: exec, - Logger: p.Logger, - }) -} diff --git a/cmd/neofs-node/modules/node/container.go b/cmd/neofs-node/modules/node/container.go deleted file mode 100644 index a0cac8ab7..000000000 --- a/cmd/neofs-node/modules/node/container.go +++ /dev/null @@ -1,31 +0,0 @@ -package node - -import ( - svc "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/bootstrap" - eacl "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - container "github.com/nspcc-dev/neofs-node/pkg/network/transport/container/grpc" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type cnrParams struct { - dig.In - - Logger *zap.Logger - - Healthy svc.HealthyClient - - ExtendedACLStore eacl.Storage - - ContainerStorage storage.Storage -} - -func newContainerService(p cnrParams) (container.Service, error) { - return container.New(container.Params{ - Logger: p.Logger, - Healthy: p.Healthy, - Store: p.ContainerStorage, - ExtendedACLStore: p.ExtendedACLStore, - }) -} diff --git a/cmd/neofs-node/modules/node/core.go b/cmd/neofs-node/modules/node/core.go deleted file mode 100644 index cde0172ba..000000000 --- a/cmd/neofs-node/modules/node/core.go +++ /dev/null @@ -1,35 +0,0 @@ -package node - -import ( - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket/boltdb" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket/fsbucket" - "github.com/spf13/viper" -) - -type Buckets map[string]bucket.Bucket - -const ( - fsBucket = "fsbucket" - boltBucket = "bolt" -) - -func newBuckets(v *viper.Viper) (Buckets, error) { - var ( - err error - mBuckets = make(Buckets) - ) - - if mBuckets[fsBucket], err = fsbucket.NewBucket(v); err != nil { - return nil, err - } - - boltOpts, err := boltdb.NewOptions(v) - if err != nil { - return nil, err - } else if mBuckets[boltBucket], err = boltdb.NewBucket(&boltOpts); err != nil { - return nil, err - } - - return mBuckets, nil -} diff --git a/cmd/neofs-node/modules/node/localstore.go b/cmd/neofs-node/modules/node/localstore.go deleted file mode 100644 index c96eb1f48..000000000 --- a/cmd/neofs-node/modules/node/localstore.go +++ /dev/null @@ -1,53 +0,0 @@ -package node - -import ( - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - meta2 "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/meta" - metrics2 "github.com/nspcc-dev/neofs-node/pkg/services/metrics" - "go.uber.org/atomic" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - localstoreParams struct { - dig.In - - Logger *zap.Logger - Buckets Buckets - Counter *atomic.Float64 - Collector metrics2.Collector - } - - metaIterator struct { - iter localstore.Iterator - } -) - -func newMetaIterator(iter localstore.Iterator) meta2.Iterator { - return &metaIterator{iter: iter} -} - -func (m *metaIterator) Iterate(handler meta2.IterateFunc) error { - return m.iter.Iterate(nil, func(objMeta *localstore.ObjectMeta) bool { - return handler == nil || handler(objMeta.Object) != nil - }) -} - -func newLocalstore(p localstoreParams) (localstore.Localstore, error) { - local, err := localstore.New(localstore.Params{ - BlobBucket: p.Buckets[fsBucket], - MetaBucket: p.Buckets[boltBucket], - Logger: p.Logger, - Collector: p.Collector, - }) - if err != nil { - return nil, err - } - - iter := newMetaIterator(local) - p.Collector.SetCounter(local) - p.Collector.SetIterator(iter) - - return local, nil -} diff --git 
a/cmd/neofs-node/modules/node/metrics.go b/cmd/neofs-node/modules/node/metrics.go deleted file mode 100644 index 1eba1d69d..000000000 --- a/cmd/neofs-node/modules/node/metrics.go +++ /dev/null @@ -1,46 +0,0 @@ -package node - -import ( - metrics "github.com/nspcc-dev/neofs-node/pkg/network/transport/metrics/grpc" - metrics2 "github.com/nspcc-dev/neofs-node/pkg/services/metrics" - "github.com/spf13/viper" - "go.uber.org/atomic" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - metricsParams struct { - dig.In - - Logger *zap.Logger - Options []string `name:"node_options"` - Viper *viper.Viper - Buckets Buckets - } - - metricsServiceParams struct { - dig.In - - Logger *zap.Logger - Collector metrics2.Collector - } -) - -func newObjectCounter() *atomic.Float64 { return atomic.NewFloat64(0) } - -func newMetricsService(p metricsServiceParams) (metrics.Service, error) { - return metrics.New(metrics.Params{ - Logger: p.Logger, - Collector: p.Collector, - }) -} - -func newMetricsCollector(p metricsParams) (metrics2.Collector, error) { - return metrics2.New(metrics2.Params{ - Options: p.Options, - Logger: p.Logger, - Interval: p.Viper.GetDuration("metrics_collector.interval"), - MetricsStore: p.Buckets[fsBucket], - }) -} diff --git a/cmd/neofs-node/modules/node/module.go b/cmd/neofs-node/modules/node/module.go deleted file mode 100644 index 6edc42d7e..000000000 --- a/cmd/neofs-node/modules/node/module.go +++ /dev/null @@ -1,89 +0,0 @@ -package node - -import ( - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/bootstrap" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/worker" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/grpc" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/morph" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/network" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/settings" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/workers" - "github.com/nspcc-dev/neofs-node/pkg/morph/event" - libboot "github.com/nspcc-dev/neofs-node/pkg/network/bootstrap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - metrics2 "github.com/nspcc-dev/neofs-node/pkg/services/metrics" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type jobParams struct { - dig.In - - Logger *zap.Logger - Viper *viper.Viper - Peers peers.Store - - Replicator replication.Manager - PeersInterface peers.Interface - Metrics metrics2.Collector - - MorphEventListener event.Listener - - NodeRegisterer *libboot.Registerer -} - -// Module is a NeoFS node module. 
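The constructor lists gathered into Module below are handed to the dig-based dependency injection helper under cmd/neofs-node/modules/fix (not part of this hunk). A minimal, hedged sketch of that underlying pattern, using go.uber.org/dig directly — the greeting type and newGreeting constructor are invented purely for illustration:

package main

import (
	"fmt"

	"go.uber.org/dig"
	"go.uber.org/zap"
)

// newLogger and newGreeting stand in for the node's constructors:
// each returns a value that later constructors may depend on.
func newLogger() (*zap.Logger, error) { return zap.NewDevelopment() }

type greeting string

func newGreeting(l *zap.Logger) greeting {
	l.Debug("constructing greeting")
	return "hello"
}

func main() {
	c := dig.New()

	// Register constructors, mirroring the {Constructor: ...} entries below.
	for _, ctor := range []interface{}{newLogger, newGreeting} {
		if err := c.Provide(ctor); err != nil {
			panic(err)
		}
	}

	// Invoke resolves the dependency graph and calls the function.
	if err := c.Invoke(func(g greeting) { fmt.Println(g) }); err != nil {
		panic(err)
	}
}

Registration order does not matter: dig only resolves a constructor when Invoke (or another constructor) asks for its output.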
-var Module = module.Module{ - {Constructor: attachJobs}, - {Constructor: newPeerstore}, - {Constructor: attachServices}, - {Constructor: newBuckets}, - {Constructor: newMetricsCollector}, - {Constructor: newObjectCounter}, - - // -- Container gRPC handlers -- // - {Constructor: newContainerService}, - - // -- gRPC Services -- // - - // -- Local store -- // - {Constructor: newLocalstore}, - - // -- Object manager -- // - {Constructor: newObjectManager}, - - // -- Replication manager -- // - {Constructor: newReplicationManager}, - - // -- Session service -- // - {Constructor: session.NewMapTokenStore}, - {Constructor: newSessionService}, - - // -- Placement tool -- // - {Constructor: newPlacementTool}, - - // metrics service -- // - {Constructor: newMetricsService}, -}.Append( - // app specific modules: - grpc.Module, - network.Module, - workers.Module, - settings.Module, - bootstrap.Module, - morph.Module, -) - -func attachJobs(p jobParams) worker.Jobs { - return worker.Jobs{ - "peers": p.PeersInterface.Job, - "metrics": p.Metrics.Start, - "event_listener": p.MorphEventListener.Listen, - "replicator": p.Replicator.Process, - "boot": p.NodeRegisterer.Bootstrap, - } -} diff --git a/cmd/neofs-node/modules/node/objectmanager.go b/cmd/neofs-node/modules/node/objectmanager.go deleted file mode 100644 index f54dd4fe6..000000000 --- a/cmd/neofs-node/modules/node/objectmanager.go +++ /dev/null @@ -1,201 +0,0 @@ -package node - -import ( - "crypto/ecdsa" - - "github.com/nspcc-dev/neofs-api-go/bootstrap" - "github.com/nspcc-dev/neofs-api-go/hash" - apiobj "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/session" - eacl "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - object "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - storage2 "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - objectManagerParams struct { - dig.In - - Logger *zap.Logger - Viper *viper.Viper - LocalStore localstore.Localstore - - PeersInterface peers.Interface - - Peers peers.Store - Placement placement.Component - TokenStore session.PrivateTokenStore - Options []string `name:"node_options"` - Key *ecdsa.PrivateKey - - NetMapClient *contract.Wrapper - - Placer *placement.PlacementWrapper - - ExtendedACLStore eacl.Storage - - ContainerStorage storage.Storage - } -) - -const ( - transformersSectionPath = "object.transformers." 
-) - -const xorSalitor = "xor" - -func newObjectManager(p objectManagerParams) (object.Service, error) { - var sltr object.Salitor - - if p.Viper.GetString("object.salitor") == xorSalitor { - sltr = hash.SaltXOR - } - - as, err := storage2.NewAddressStore(p.Peers, p.Logger) - if err != nil { - return nil, err - } - - rs := object.NewRemoteService(p.PeersInterface) - - pto := p.Viper.GetDuration("object.put.timeout") - gto := p.Viper.GetDuration("object.get.timeout") - hto := p.Viper.GetDuration("object.head.timeout") - sto := p.Viper.GetDuration("object.search.timeout") - rhto := p.Viper.GetDuration("object.range_hash.timeout") - dto := p.Viper.GetDuration("object.dial_timeout") - - tr, err := object.NewMultiTransport(object.MultiTransportParams{ - AddressStore: as, - EpochReceiver: p.Placer, - RemoteService: rs, - Logger: p.Logger, - Key: p.Key, - PutTimeout: pto, - GetTimeout: gto, - HeadTimeout: hto, - SearchTimeout: sto, - RangeHashTimeout: rhto, - DialTimeout: dto, - - PrivateTokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - exec, err := transport.NewContainerTraverseExecutor(tr) - if err != nil { - return nil, err - } - - selectiveExec, err := transport.NewObjectContainerHandler(transport.ObjectContainerHandlerParams{ - NodeLister: p.Placer, - Executor: exec, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - sgInfoRecv, err := storagegroup.NewStorageGroupInfoReceiver(storagegroup.StorageGroupInfoReceiverParams{ - SelectiveContainerExecutor: selectiveExec, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - verifier, err := storage2.NewLocalIntegrityVerifier() - if err != nil { - return nil, err - } - - trans, err := transformer.NewTransformer(transformer.Params{ - SGInfoReceiver: sgInfoRecv, - EpochReceiver: p.Placer, - SizeLimit: uint64(p.Viper.GetInt64(transformersSectionPath+"payload_limiter.max_payload_size") * apiobj.UnitsKB), - Verifier: verifier, - }) - if err != nil { - return nil, err - } - - verifier, err = storage2.NewLocalHeadIntegrityVerifier() - if err != nil { - return nil, err - } - - return object.New(&object.Params{ - Verifier: verifier, - Salitor: sltr, - LocalStore: p.LocalStore, - MaxProcessingSize: p.Viper.GetUint64("object.max_processing_size") * uint64(apiobj.UnitsMB), - StorageCapacity: bootstrap.NodeInfo{Options: p.Options}.Capacity() * uint64(apiobj.UnitsGB), - PoolSize: p.Viper.GetInt("object.workers_count"), - Placer: p.Placer, - Transformer: trans, - ObjectRestorer: transformer.NewRestorePipeline( - transformer.SplitRestorer(), - ), - RemoteService: rs, - AddressStore: as, - Logger: p.Logger, - TokenStore: p.TokenStore, - EpochReceiver: p.Placer, - PlacementWrapper: p.Placer, - Key: p.Key, - CheckACL: p.Viper.GetBool("object.check_acl"), - DialTimeout: p.Viper.GetDuration("object.dial_timeout"), - MaxPayloadSize: p.Viper.GetUint64("object.transformers.payload_limiter.max_payload_size") * uint64(apiobj.UnitsKB), - PutParams: object.OperationParams{ - Timeout: pto, - LogErrors: p.Viper.GetBool("object.put.log_errs"), - }, - GetParams: object.OperationParams{ - Timeout: gto, - LogErrors: p.Viper.GetBool("object.get.log_errs"), - }, - HeadParams: object.OperationParams{ - Timeout: hto, - LogErrors: p.Viper.GetBool("object.head.log_errs"), - }, - DeleteParams: object.OperationParams{ - Timeout: p.Viper.GetDuration("object.delete.timeout"), - LogErrors: p.Viper.GetBool("object.get.log_errs"), - }, - SearchParams: object.OperationParams{ - Timeout: sto, - LogErrors: 
p.Viper.GetBool("object.search.log_errs"), - }, - RangeParams: object.OperationParams{ - Timeout: p.Viper.GetDuration("object.range.timeout"), - LogErrors: p.Viper.GetBool("object.range.log_errs"), - }, - RangeHashParams: object.OperationParams{ - Timeout: rhto, - LogErrors: p.Viper.GetBool("object.range_hash.log_errs"), - }, - Assembly: p.Viper.GetBool("object.assembly"), - - WindowSize: p.Viper.GetInt("object.window_size"), - - ContainerStorage: p.ContainerStorage, - NetmapClient: p.NetMapClient, - - SGInfoReceiver: sgInfoRecv, - - ExtendedACLSource: p.ExtendedACLStore, - }) -} diff --git a/cmd/neofs-node/modules/node/peerstore.go b/cmd/neofs-node/modules/node/peerstore.go deleted file mode 100644 index 05e68fc90..000000000 --- a/cmd/neofs-node/modules/node/peerstore.go +++ /dev/null @@ -1,28 +0,0 @@ -package node - -import ( - "crypto/ecdsa" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type peerstoreParams struct { - dig.In - - Logger *zap.Logger - PrivateKey *ecdsa.PrivateKey - Address multiaddr.Multiaddr - Store peers.Storage `optional:"true"` -} - -func newPeerstore(p peerstoreParams) (peers.Store, error) { - return peers.NewStore(peers.StoreParams{ - Storage: p.Store, - Logger: p.Logger, - Addr: p.Address, - Key: p.PrivateKey, - }) -} diff --git a/cmd/neofs-node/modules/node/placement.go b/cmd/neofs-node/modules/node/placement.go deleted file mode 100644 index 015b6402f..000000000 --- a/cmd/neofs-node/modules/node/placement.go +++ /dev/null @@ -1,28 +0,0 @@ -package node - -import ( - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "go.uber.org/dig" -) - -type ( - placementToolParams struct { - dig.In - - Placement placement.Component - } - - placementToolResult struct { - dig.Out - - Placer *placement.PlacementWrapper - } -) - -func newPlacementTool(p placementToolParams) (res placementToolResult, err error) { - if res.Placer, err = placement.NewObjectPlacer(p.Placement); err != nil { - return - } - - return -} diff --git a/cmd/neofs-node/modules/node/replication.go b/cmd/neofs-node/modules/node/replication.go deleted file mode 100644 index 0c0538124..000000000 --- a/cmd/neofs-node/modules/node/replication.go +++ /dev/null @@ -1,385 +0,0 @@ -package node - -import ( - "context" - "crypto/ecdsa" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/morph" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/morph/event" - "github.com/nspcc-dev/neofs-node/pkg/morph/event/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/pkg/errors" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - replicationManagerParams struct { - dig.In - - Viper *viper.Viper - - PeersInterface peers.Interface - - LocalStore localstore.Localstore - Peers peers.Store - Placement placement.Component - Logger *zap.Logger - Key *ecdsa.PrivateKey - - Placer *placement.PlacementWrapper - - TokenStore session.PrivateTokenStore - - MorphEventListener event.Listener - MorphEventHandlers morph.EventHandlers - } -) - -const ( - mainReplicationPrefix = "replication" - 
managerPrefix = "manager" - placementHonorerPrefix = "placement_honorer" - locationDetectorPrefix = "location_detector" - storageValidatorPrefix = "storage_validator" - replicatorPrefix = "replicator" - restorerPrefix = "restorer" -) - -func newReplicationManager(p replicationManagerParams) (replication.Manager, error) { - as, err := storage.NewAddressStore(p.Peers, p.Logger) - if err != nil { - return nil, err - } - - ms, err := replication.NewMultiSolver(replication.MultiSolverParams{ - AddressStore: as, - Placement: p.Placement, - }) - if err != nil { - return nil, err - } - - op := replication.NewObjectPool() - - schd, err := replication.NewReplicationScheduler(replication.SchedulerParams{ - ContainerActualityChecker: ms, - Iterator: p.LocalStore, - }) - if err != nil { - return nil, err - } - - integrityVerifier, err := storage.NewLocalIntegrityVerifier() - if err != nil { - return nil, err - } - - verifier, err := storage.NewObjectValidator(&storage.ObjectValidatorParams{ - AddressStore: ms, - Localstore: p.LocalStore, - Logger: p.Logger, - Verifier: integrityVerifier, - }) - if err != nil { - return nil, err - } - - placementHonorer, err := newPlacementHonorer(p, ms) - if err != nil { - return nil, err - } - - locationDetector, err := newLocationDetector(p, ms) - if err != nil { - return nil, err - } - - storageValidator, err := newStorageValidator(p, ms) - if err != nil { - return nil, err - } - - replicator, err := newObjectReplicator(p, ms) - if err != nil { - return nil, err - } - - restorer, err := newRestorer(p, ms) - if err != nil { - return nil, err - } - - prefix := mainReplicationPrefix + "." + managerPrefix + "." - capPrefix := prefix + "capacities." - - mngr, err := replication.NewManager(replication.ManagerParams{ - Interval: p.Viper.GetDuration(prefix + "read_pool_interval"), - PushTaskTimeout: p.Viper.GetDuration(prefix + "push_task_timeout"), - InitPoolSize: p.Viper.GetInt(prefix + "pool_size"), - ExpansionRate: p.Viper.GetFloat64(prefix + "pool_expansion_rate"), - PlacementHonorerEnabled: p.Viper.GetBool(prefix + "placement_honorer_enabled"), - ReplicateTaskChanCap: p.Viper.GetInt(capPrefix + "replicate"), - RestoreTaskChanCap: p.Viper.GetInt(capPrefix + "restore"), - GarbageChanCap: p.Viper.GetInt(capPrefix + "garbage"), - ObjectPool: op, - ObjectVerifier: verifier, - PlacementHonorer: placementHonorer, - ObjectLocationDetector: locationDetector, - StorageValidator: storageValidator, - ObjectReplicator: replicator, - ObjectRestorer: restorer, - Scheduler: schd, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - if handlerInfo, ok := p.MorphEventHandlers[morph.ContractEventOptPath( - morph.NetmapContractName, - morph.NewEpochEventType, - )]; ok { - handlerInfo.SetHandler(func(ev event.Event) { - mngr.HandleEpoch( - context.Background(), - ev.(netmap.NewEpoch).EpochNumber(), - ) - }) - - p.MorphEventListener.RegisterHandler(handlerInfo) - } - - return mngr, nil -} - -func newPlacementHonorer(p replicationManagerParams, rss replication.RemoteStorageSelector) (replication.PlacementHonorer, error) { - prefix := mainReplicationPrefix + "." 
+ placementHonorerPrefix - - och, err := newObjectsContainerHandler(cnrHandlerParams{ - Viper: p.Viper, - Logger: p.Logger, - Placer: p.Placer, - PeerStore: p.Peers, - Peers: p.PeersInterface, - TimeoutsPrefix: prefix, - Key: p.Key, - - TokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - storage, err := storage.NewObjectStorage(storage.ObjectStorageParams{ - Localstore: p.LocalStore, - SelectiveContainerExecutor: och, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - return replication.NewPlacementHonorer(replication.PlacementHonorerParams{ - ObjectSource: storage, - ObjectReceptacle: storage, - RemoteStorageSelector: rss, - PresenceChecker: p.LocalStore, - Logger: p.Logger, - TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), - ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), - }) -} - -func newLocationDetector(p replicationManagerParams, ms replication.MultiSolver) (replication.ObjectLocationDetector, error) { - prefix := mainReplicationPrefix + "." + locationDetectorPrefix - - och, err := newObjectsContainerHandler(cnrHandlerParams{ - Viper: p.Viper, - Logger: p.Logger, - Placer: p.Placer, - PeerStore: p.Peers, - Peers: p.PeersInterface, - TimeoutsPrefix: prefix, - Key: p.Key, - - TokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - locator, err := storage.NewObjectLocator(storage.LocatorParams{ - SelectiveContainerExecutor: och, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - return replication.NewLocationDetector(&replication.LocationDetectorParams{ - WeightComparator: ms, - ObjectLocator: locator, - ReservationRatioReceiver: ms, - PresenceChecker: p.LocalStore, - Logger: p.Logger, - TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), - ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), - }) -} - -func newStorageValidator(p replicationManagerParams, as replication.AddressStore) (replication.StorageValidator, error) { - prefix := mainReplicationPrefix + "." 
+ storageValidatorPrefix - - var sltr storage.Salitor - - switch v := p.Viper.GetString(prefix + ".salitor"); v { - case xorSalitor: - sltr = hash.SaltXOR - default: - return nil, errors.Errorf("unsupported salitor: %s", v) - } - - och, err := newObjectsContainerHandler(cnrHandlerParams{ - Viper: p.Viper, - Logger: p.Logger, - Placer: p.Placer, - PeerStore: p.Peers, - Peers: p.PeersInterface, - TimeoutsPrefix: prefix, - Key: p.Key, - - TokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - headVerifier, err := storage.NewLocalHeadIntegrityVerifier() - if err != nil { - return nil, err - } - - verifier, err := storage.NewObjectValidator(&storage.ObjectValidatorParams{ - AddressStore: as, - Localstore: p.LocalStore, - SelectiveContainerExecutor: och, - Logger: p.Logger, - Salitor: sltr, - SaltSize: p.Viper.GetInt(prefix + ".salt_size"), - MaxPayloadRangeSize: p.Viper.GetUint64(prefix + ".max_payload_range_size"), - PayloadRangeCount: p.Viper.GetInt(prefix + ".payload_range_count"), - Verifier: headVerifier, - }) - if err != nil { - return nil, err - } - - return replication.NewStorageValidator(replication.StorageValidatorParams{ - ObjectVerifier: verifier, - PresenceChecker: p.LocalStore, - Logger: p.Logger, - TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), - ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), - AddrStore: as, - }) -} - -func newObjectReplicator(p replicationManagerParams, rss replication.RemoteStorageSelector) (replication.ObjectReplicator, error) { - prefix := mainReplicationPrefix + "." + replicatorPrefix - - och, err := newObjectsContainerHandler(cnrHandlerParams{ - Viper: p.Viper, - Logger: p.Logger, - Placer: p.Placer, - PeerStore: p.Peers, - Peers: p.PeersInterface, - TimeoutsPrefix: prefix, - Key: p.Key, - - TokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - storage, err := storage.NewObjectStorage(storage.ObjectStorageParams{ - Localstore: p.LocalStore, - SelectiveContainerExecutor: och, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - return replication.NewReplicator(replication.ObjectReplicatorParams{ - RemoteStorageSelector: rss, - ObjectSource: storage, - ObjectReceptacle: storage, - PresenceChecker: p.LocalStore, - Logger: p.Logger, - TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), - ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), - }) -} - -func newRestorer(p replicationManagerParams, ms replication.MultiSolver) (replication.ObjectRestorer, error) { - prefix := mainReplicationPrefix + "." 
+ restorerPrefix - - och, err := newObjectsContainerHandler(cnrHandlerParams{ - Viper: p.Viper, - Logger: p.Logger, - Placer: p.Placer, - PeerStore: p.Peers, - Peers: p.PeersInterface, - TimeoutsPrefix: prefix, - Key: p.Key, - - TokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - integrityVerifier, err := storage.NewLocalIntegrityVerifier() - if err != nil { - return nil, err - } - - verifier, err := storage.NewObjectValidator(&storage.ObjectValidatorParams{ - AddressStore: ms, - Localstore: p.LocalStore, - SelectiveContainerExecutor: och, - Logger: p.Logger, - Verifier: integrityVerifier, - }) - if err != nil { - return nil, err - } - - storage, err := storage.NewObjectStorage(storage.ObjectStorageParams{ - Localstore: p.LocalStore, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - return replication.NewObjectRestorer(&replication.ObjectRestorerParams{ - ObjectVerifier: verifier, - ObjectReceptacle: storage, - EpochReceiver: ms, - RemoteStorageSelector: ms, - PresenceChecker: p.LocalStore, - Logger: p.Logger, - TaskChanCap: p.Viper.GetInt(prefix + ".chan_capacity"), - ResultTimeout: p.Viper.GetDuration(prefix + ".result_timeout"), - }) -} diff --git a/cmd/neofs-node/modules/node/services.go b/cmd/neofs-node/modules/node/services.go deleted file mode 100644 index 3aec901ed..000000000 --- a/cmd/neofs-node/modules/node/services.go +++ /dev/null @@ -1,36 +0,0 @@ -package node - -import ( - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/grpc" - accounting "github.com/nspcc-dev/neofs-node/pkg/network/transport/accounting/grpc" - container "github.com/nspcc-dev/neofs-node/pkg/network/transport/container/grpc" - metrics "github.com/nspcc-dev/neofs-node/pkg/network/transport/metrics/grpc" - object "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc" - session "github.com/nspcc-dev/neofs-node/pkg/network/transport/session/grpc" - state "github.com/nspcc-dev/neofs-node/pkg/network/transport/state/grpc" - "go.uber.org/dig" -) - -type servicesParams struct { - dig.In - - Status state.Service - Container container.Service - Object object.Service - Session session.Service - Accounting accounting.Service - Metrics metrics.Service -} - -func attachServices(p servicesParams) grpc.ServicesResult { - return grpc.ServicesResult{ - Services: []grpc.Service{ - p.Status, - p.Container, - p.Accounting, - p.Metrics, - p.Session, - p.Object, - }, - } -} diff --git a/cmd/neofs-node/modules/node/session.go b/cmd/neofs-node/modules/node/session.go deleted file mode 100644 index 46aaed8b7..000000000 --- a/cmd/neofs-node/modules/node/session.go +++ /dev/null @@ -1,26 +0,0 @@ -package node - -import ( - session "github.com/nspcc-dev/neofs-node/pkg/network/transport/session/grpc" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type sessionParams struct { - dig.In - - Logger *zap.Logger - - TokenStore session.TokenStore - - EpochReceiver *placement.PlacementWrapper -} - -func newSessionService(p sessionParams) (session.Service, error) { - return session.New(session.Params{ - TokenStore: p.TokenStore, - Logger: p.Logger, - EpochReceiver: p.EpochReceiver, - }), nil -} diff --git a/cmd/neofs-node/modules/settings/address.go b/cmd/neofs-node/modules/settings/address.go deleted file mode 100644 index 53cc80349..000000000 --- a/cmd/neofs-node/modules/settings/address.go +++ /dev/null @@ -1,108 +0,0 @@ -package settings - -import ( - "net" - "strconv" - "strings" - - 
"github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" -) - -const ( - protoTCP = "tcp" - protoUDP = "udp" - protoQUIC = "quic" -) - -const emptyAddr = "0.0.0.0" - -const ip4ColonCount = 1 - -var ( - errEmptyAddress = errors.New("`node.address` could not be empty") - errEmptyProtocol = errors.New("`node.protocol` could not be empty") - errUnknownProtocol = errors.New("`node.protocol` unknown protocol") - errEmptyShutdownTTL = errors.New("`node.shutdown_ttl` could not be empty") -) - -func ipVersion(address string) string { - if strings.Count(address, ":") > ip4ColonCount { - return "ip6" - } - - return "ip4" -} - -func prepareAddress(address string) (string, error) { - host, port, err := net.SplitHostPort(address) - if err != nil { - return "", errors.Wrapf(err, "could not fetch host/port: %s", address) - } else if host == "" { - host = emptyAddr - } - - addr, err := net.ResolveIPAddr("ip", host) - if err != nil { - return "", errors.Wrapf(err, "could not resolve address: %s:%s", host, port) - } - - return net.JoinHostPort(addr.IP.String(), port), nil -} - -func resolveAddress(proto, address string) (string, string, error) { - var ( - ip net.IP - host, port string - ) - - switch proto { - case protoTCP: - addr, err := net.ResolveTCPAddr(protoTCP, address) - if err != nil { - return "", "", errors.Wrapf(err, "could not parse address: '%s'", address) - } - - ip = addr.IP - port = strconv.Itoa(addr.Port) - case protoUDP, protoQUIC: - addr, err := net.ResolveUDPAddr(protoUDP, address) - if err != nil { - return "", "", errors.Wrapf(err, "could not parse address: '%s'", address) - } - - ip = addr.IP - port = strconv.Itoa(addr.Port) - default: - return "", "", errors.Wrapf(errUnknownProtocol, "unknown protocol: '%s'", proto) - } - - if host = ip.String(); ip == nil { - host = emptyAddr - } - - return host, port, nil -} - -func multiAddressFromProtoAddress(proto, addr string) (multiaddr.Multiaddr, error) { - var ( - err error - host, port string - ipVer = ipVersion(addr) - ) - - if host, port, err = resolveAddress(proto, addr); err != nil { - return nil, errors.Wrapf(err, "could not resolve address: (%s) '%s'", proto, addr) - } - - items := []string{ - ipVer, - host, - proto, - port, - } - - addr = "/" + strings.Join(items, "/") - - return multiaddr.NewMultiaddr(addr) -} diff --git a/cmd/neofs-node/modules/settings/module.go b/cmd/neofs-node/modules/settings/module.go deleted file mode 100644 index 6980967e3..000000000 --- a/cmd/neofs-node/modules/settings/module.go +++ /dev/null @@ -1,8 +0,0 @@ -package settings - -import "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module" - -// Module is a node settings module. 
-var Module = module.Module{ - {Constructor: newNodeSettings}, -} diff --git a/cmd/neofs-node/modules/settings/node.go b/cmd/neofs-node/modules/settings/node.go deleted file mode 100644 index 1cdf2a593..000000000 --- a/cmd/neofs-node/modules/settings/node.go +++ /dev/null @@ -1,150 +0,0 @@ -package settings - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "fmt" - "strconv" - "strings" - "time" - - "github.com/multiformats/go-multiaddr" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/pkg/errors" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - nodeSettings struct { - dig.Out - - Address multiaddr.Multiaddr - PrivateKey *ecdsa.PrivateKey - NodeOpts []string `name:"node_options"` - ShutdownTTL time.Duration `name:"shutdown_ttl"` - - NodeInfo netmap.Info - } -) - -const generateKey = "generated" - -var errEmptyNodeSettings = errors.New("node settings could not be empty") - -func newNodeSettings(v *viper.Viper, l *zap.Logger) (cfg nodeSettings, err error) { - // check, that we have node settings in provided config - if !v.IsSet("node") { - err = errEmptyNodeSettings - return - } - - // try to load and setup ecdsa.PrivateKey - key := v.GetString("node.private_key") - switch key { - case "": - err = crypto.ErrEmptyPrivateKey - return cfg, err - case generateKey: - if cfg.PrivateKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { - return cfg, err - } - default: - if cfg.PrivateKey, err = crypto.LoadPrivateKey(key); err != nil { - return cfg, errors.Wrap(err, "cannot unmarshal private key") - } - } - - id := peers.IDFromPublicKey(&cfg.PrivateKey.PublicKey) - pub := crypto.MarshalPublicKey(&cfg.PrivateKey.PublicKey) - l.Debug("private key loaded successful", - zap.String("file", v.GetString("node.private_key")), - zap.Binary("public", pub), - zap.Stringer("node-id", id)) - - var ( - addr string - proto string - ) - - // fetch shutdown timeout from settings - if cfg.ShutdownTTL = v.GetDuration("node.shutdown_ttl"); cfg.ShutdownTTL == 0 { - return cfg, errEmptyShutdownTTL - } - - // fetch address and protocol from settings - if addr = v.GetString("node.address"); addr == "" { - return cfg, errors.Wrapf(errEmptyAddress, "given '%s'", addr) - } else if addr, err := prepareAddress(addr); err != nil { - return cfg, err - } else if proto = v.GetString("node.proto"); proto == "" { - return cfg, errors.Wrapf(errEmptyProtocol, "given '%s'", proto) - } else if cfg.Address, err = multiAddressFromProtoAddress(proto, addr); err != nil { - return cfg, errors.Wrapf(err, "given '%s' '%s'", proto, addr) - } - - // add well-known options - items := map[string]string{ - "Capacity": "capacity", - "Price": "price", - "Location": "location", - "Country": "country", - "City": "city", - } - - // TODO: use const namings - prefix := "node." - - for opt, path := range items { - val := v.GetString(prefix + path) - if len(val) == 0 { - err = errors.Errorf("node option %s must be set explicitly", opt) - return - } - - cfg.NodeOpts = append(cfg.NodeOpts, - fmt.Sprintf("/%s:%s", - opt, - val, - ), - ) - } - - // add other options - - var ( - i int - val string - ) -loop: - for ; ; i++ { - val = v.GetString("node.options." 
+ strconv.Itoa(i)) - if val == "" { - break - } - - for opt := range items { - if strings.Contains(val, "/"+opt) { - continue loop - } - } - - cfg.NodeOpts = append(cfg.NodeOpts, val) - } - - nodeInfo := netmap.Info{} - nodeInfo.SetAddress(cfg.Address.String()) - nodeInfo.SetPublicKey(crypto.MarshalPublicKey(&cfg.PrivateKey.PublicKey)) - nodeInfo.SetOptions(cfg.NodeOpts) - - cfg.NodeInfo = nodeInfo - - l.Debug("loaded node options", - zap.Strings("options", cfg.NodeOpts)) - - return cfg, err -} diff --git a/cmd/neofs-node/modules/workers/module.go b/cmd/neofs-node/modules/workers/module.go deleted file mode 100644 index a95da32b3..000000000 --- a/cmd/neofs-node/modules/workers/module.go +++ /dev/null @@ -1,8 +0,0 @@ -package workers - -import "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/module" - -// Module is a workers module. -var Module = module.Module{ - {Constructor: prepare}, -} diff --git a/cmd/neofs-node/modules/workers/prepare.go b/cmd/neofs-node/modules/workers/prepare.go deleted file mode 100644 index 45930b10c..000000000 --- a/cmd/neofs-node/modules/workers/prepare.go +++ /dev/null @@ -1,132 +0,0 @@ -package workers - -import ( - "context" - "time" - - "github.com/nspcc-dev/neofs-node/cmd/neofs-node/modules/fix/worker" - "github.com/spf13/viper" - "go.uber.org/dig" - "go.uber.org/zap" -) - -type ( - // Result returns wrapped workers group for DI. - Result struct { - dig.Out - - Workers []*worker.Job - } - - // Params is dependencies for create workers slice. - Params struct { - dig.In - - Jobs worker.Jobs - Viper *viper.Viper - Logger *zap.Logger - } -) - -func prepare(p Params) worker.Workers { - w := worker.New() - - for name, handler := range p.Jobs { - if job := byConfig(name, handler, p.Logger, p.Viper); job != nil { - p.Logger.Debug("worker: add new job", - zap.String("name", name)) - - w.Add(job) - } - } - - return w -} - -func byTicker(d time.Duration, h worker.Handler) worker.Handler { - return func(ctx context.Context) { - ticker := time.NewTicker(d) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - default: - select { - case <-ctx.Done(): - return - case <-ticker.C: - h(ctx) - } - } - } - } -} - -func byTimer(d time.Duration, h worker.Handler) worker.Handler { - return func(ctx context.Context) { - timer := time.NewTimer(d) - defer timer.Stop() - - for { - select { - case <-ctx.Done(): - return - default: - select { - case <-ctx.Done(): - return - case <-timer.C: - h(ctx) - timer.Reset(d) - } - } - } - } -} - -func byConfig(name string, h worker.Handler, l *zap.Logger, v *viper.Viper) worker.Handler { - var job worker.Handler - - if !v.IsSet("workers." + name) { - l.Info("worker: has no configuration", - zap.String("worker", name)) - return nil - } - - if v.GetBool("workers." + name + ".disabled") { - l.Info("worker: disabled", - zap.String("worker", name)) - return nil - } - - if ticker := v.GetDuration("workers." + name + ".ticker"); ticker > 0 { - job = byTicker(ticker, h) - } - - if timer := v.GetDuration("workers." + name + ".timer"); timer > 0 { - job = byTimer(timer, h) - } - - if v.GetBool("workers." 
+ name + ".immediately") { - return func(ctx context.Context) { - h(ctx) - - if job == nil { - return - } - - // check context before run immediately job again - select { - case <-ctx.Done(): - return - default: - } - - job(ctx) - } - } - - return job -} diff --git a/pkg/core/container/.gitkeep b/pkg/core/container/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/core/container/acl/basic/enum.go b/pkg/core/container/acl/basic/enum.go deleted file mode 100644 index e4de0cddd..000000000 --- a/pkg/core/container/acl/basic/enum.go +++ /dev/null @@ -1,24 +0,0 @@ -package basic - -const ( - // OpGetRangeHash is an index of GetRangeHash operation in basic ACL bitmask order. - OpGetRangeHash uint8 = iota - - // OpGetRange is an index of GetRange operation in basic ACL bitmask order. - OpGetRange - - // OpSearch is an index of Search operation in basic ACL bitmask order. - OpSearch - - // OpDelete is an index of Delete operation in basic ACL bitmask order. - OpDelete - - // OpPut is an index of Put operation in basic ACL bitmask order. - OpPut - - // OpHead is an index of Head operation in basic ACL bitmask order. - OpHead - - // OpGet is an index of Get operation in basic ACL bitmask order. - OpGet -) diff --git a/pkg/core/container/acl/basic/types.go b/pkg/core/container/acl/basic/types.go deleted file mode 100644 index 682fff36a..000000000 --- a/pkg/core/container/acl/basic/types.go +++ /dev/null @@ -1,159 +0,0 @@ -package basic - -// ACL represents a container's -// basic permission bits. -type ACL uint32 - -const ( - reservedBitNumber = 2 // first left bits are reserved - - stickyBitPos = reservedBitNumber // X-bit after reserved bits - - finalBitPos = stickyBitPos + 1 // F-bit after X-bit -) - -const ( - opOffset = finalBitPos + 1 // offset of operation bits - - bitsPerOp = 4 // number of bits per operation - - opNumber = 7 // number of operation bit sections -) - -const ( - bitUser uint8 = iota - bitSystem - bitOthers - bitBearer -) - -const leftACLBitPos = opOffset + bitsPerOp*opNumber - 1 - -// returns true if n-th left bit is set (starting at 0). -func isLeftBitSet(value ACL, n uint8) bool { - bitMask := ACL(1 << (leftACLBitPos - n)) - return bitMask != 0 && value&bitMask == bitMask -} - -// sets n-th left bit (starting at 0). -func setLeftBit(value *ACL, n uint8) { - *value |= ACL(1 << (leftACLBitPos - n)) -} - -// resets n-th left bit (starting at 0). -func resetLeftBit(value *ACL, n uint8) { - *value &= ^ACL(1 << (leftACLBitPos - n)) -} - -// Reserved returns true if n-th reserved option is enabled in basic ACL. -func (a ACL) Reserved(n uint8) bool { - return n < reservedBitNumber && isLeftBitSet(a, n) -} - -// SetReserved enables the n-th reserved option in basic ACL. -func (a *ACL) SetReserved(bit uint8) { - if bit < reservedBitNumber { - setLeftBit(a, bit) - } -} - -// ResetReserved disables the n-th reserved option in basic ACL. -func (a *ACL) ResetReserved(bit uint8) { - if bit < reservedBitNumber { - resetLeftBit(a, bit) - } -} - -// Final returns true if final option is enabled in basic ACL. -func (a ACL) Final() bool { - return isLeftBitSet(a, finalBitPos) -} - -// SetFinal enables final option in basic ACL. -func (a *ACL) SetFinal() { - setLeftBit(a, finalBitPos) -} - -// ResetFinal disables final option in basic ACL. -func (a *ACL) ResetFinal() { - resetLeftBit(a, finalBitPos) -} - -// Sticky returns true if sticky option is enabled in basic ACL. 
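The constants above fix a left-aligned bit layout for basic.ACL: two reserved bits, the sticky (X) bit, the final (F) bit, then seven operations of four group bits each (user, system, others, bearer). A standalone, hedged sketch that decodes 0x1C8C8CCC — the "private" value exercised by the tests further down — with the same arithmetic; note the real SystemAllowed additionally reports every operation except Delete and GetRange as allowed for the System group regardless of the stored bit:

package main

import "fmt"

// The same left-aligned layout as the deleted basic.ACL type:
// bits 0-1 reserved, bit 2 sticky (X), bit 3 final (F), then seven
// operations of four group bits each (user, system, others, bearer).
const (
	opOffset      = 4
	bitsPerOp     = 4
	leftACLBitPos = 31
)

// Operation order mirrors the enum above: GetRangeHash ... Get.
var ops = []string{"rangehash", "range", "search", "delete", "put", "head", "get"}
var groups = []string{"user", "system", "others", "bearer"}

// isLeftBitSet reports whether the n-th bit from the left is set.
func isLeftBitSet(v uint32, n uint8) bool {
	return v&(1<<(leftACLBitPos-n)) != 0
}

func main() {
	// The "private" value from the deleted table tests.
	const acl uint32 = 0x1C8C8CCC

	fmt.Println("sticky:", isLeftBitSet(acl, 2), "final:", isLeftBitSet(acl, 3))

	for i, op := range ops {
		for j, g := range groups {
			bit := uint8(opOffset + i*bitsPerOp + j)
			fmt.Printf("%s/%s=%t ", op, g, isLeftBitSet(acl, bit))
		}
		fmt.Println()
	}
}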
-func (a ACL) Sticky() bool { - return isLeftBitSet(a, stickyBitPos) -} - -// SetSticky enables the sticky option in basic ACL. -func (a *ACL) SetSticky() { - setLeftBit(a, stickyBitPos) -} - -// ResetSticky disables the sticky option in basic ACL. -func (a *ACL) ResetSticky() { - resetLeftBit(a, stickyBitPos) -} - -// UserAllowed returns true if user allowed the n-th operation in basic ACL. -func (a ACL) UserAllowed(n uint8) bool { - return isLeftBitSet(a, opOffset+n*bitsPerOp+bitUser) -} - -// AllowUser allows user the n-th operation in basic ACL. -func (a *ACL) AllowUser(n uint8) { - setLeftBit(a, opOffset+n*bitsPerOp+bitUser) -} - -// ForbidUser forbids user the n-th operation in basic ACL. -func (a *ACL) ForbidUser(n uint8) { - resetLeftBit(a, opOffset+n*bitsPerOp+bitUser) -} - -// SystemAllowed returns true if System group allowed the n-th operation is set in basic ACL. -func (a ACL) SystemAllowed(n uint8) bool { - if n != OpDelete && n != OpGetRange { - return true - } - - return isLeftBitSet(a, opOffset+n*bitsPerOp+bitSystem) -} - -// AllowSystem allows System group the n-th operation in basic ACL. -func (a *ACL) AllowSystem(op uint8) { - setLeftBit(a, opOffset+op*bitsPerOp+bitSystem) -} - -// ForbidSystem forbids System group the n-th operation in basic ACL. -func (a *ACL) ForbidSystem(op uint8) { - resetLeftBit(a, opOffset+op*bitsPerOp+bitSystem) -} - -// OthersAllowed returns true if Others group allowed the n-th operation is set in basic ACL. -func (a ACL) OthersAllowed(op uint8) bool { - return isLeftBitSet(a, opOffset+op*bitsPerOp+bitOthers) -} - -// AllowOthers allows Others group the n-th operation in basic ACL. -func (a *ACL) AllowOthers(op uint8) { - setLeftBit(a, opOffset+op*bitsPerOp+bitOthers) -} - -// ForbidOthers forbids Others group the n-th operation in basic ACL. -func (a *ACL) ForbidOthers(op uint8) { - resetLeftBit(a, opOffset+op*bitsPerOp+bitOthers) -} - -// BearerAllowed returns true if Bearer token usage is allowed for n-th operation in basic ACL. -func (a ACL) BearerAllowed(op uint8) bool { - return isLeftBitSet(a, opOffset+op*bitsPerOp+bitBearer) -} - -// AllowBearer allows Bearer token usage for n-th operation in basic ACL. -func (a *ACL) AllowBearer(op uint8) { - setLeftBit(a, opOffset+op*bitsPerOp+bitBearer) -} - -// ForbidBearer forbids Bearer token usage for n-th operation in basic ACL. 
-func (a *ACL) ForbidBearer(op uint8) { - resetLeftBit(a, opOffset+op*bitsPerOp+bitBearer) -} diff --git a/pkg/core/container/acl/basic/types_test.go b/pkg/core/container/acl/basic/types_test.go deleted file mode 100644 index 7628aec13..000000000 --- a/pkg/core/container/acl/basic/types_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package basic - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestACLValues(t *testing.T) { - t.Run("private", func(t *testing.T) { - acl := FromUint32(0x1C8C8CCC) - - require.False(t, acl.Reserved(0)) - require.False(t, acl.Reserved(1)) - require.False(t, acl.Sticky()) - require.True(t, acl.Final()) - - require.True(t, acl.UserAllowed(OpGetRangeHash)) - require.True(t, acl.SystemAllowed(OpGetRangeHash)) - require.False(t, acl.OthersAllowed(OpGetRangeHash)) - require.False(t, acl.BearerAllowed(OpGetRangeHash)) - - require.True(t, acl.UserAllowed(OpGetRange)) - require.False(t, acl.SystemAllowed(OpGetRange)) - require.False(t, acl.OthersAllowed(OpGetRange)) - require.False(t, acl.BearerAllowed(OpGetRange)) - - require.True(t, acl.UserAllowed(OpSearch)) - require.True(t, acl.SystemAllowed(OpSearch)) - require.False(t, acl.OthersAllowed(OpSearch)) - require.False(t, acl.BearerAllowed(OpSearch)) - - require.True(t, acl.UserAllowed(OpDelete)) - require.False(t, acl.SystemAllowed(OpDelete)) - require.False(t, acl.OthersAllowed(OpDelete)) - require.False(t, acl.BearerAllowed(OpDelete)) - - require.True(t, acl.UserAllowed(OpPut)) - require.True(t, acl.SystemAllowed(OpPut)) - require.False(t, acl.OthersAllowed(OpPut)) - require.False(t, acl.BearerAllowed(OpPut)) - - require.True(t, acl.UserAllowed(OpHead)) - require.True(t, acl.SystemAllowed(OpHead)) - require.False(t, acl.OthersAllowed(OpHead)) - require.False(t, acl.BearerAllowed(OpHead)) - - require.True(t, acl.UserAllowed(OpGet)) - require.True(t, acl.SystemAllowed(OpGet)) - require.False(t, acl.OthersAllowed(OpGet)) - require.False(t, acl.BearerAllowed(OpGet)) - }) - - t.Run("public with X-bit", func(t *testing.T) { - acl := FromUint32(0x3FFFFFFF) - - require.False(t, acl.Reserved(0)) - require.False(t, acl.Reserved(1)) - require.True(t, acl.Sticky()) - require.True(t, acl.Final()) - - require.True(t, acl.UserAllowed(OpGetRangeHash)) - require.True(t, acl.SystemAllowed(OpGetRangeHash)) - require.True(t, acl.OthersAllowed(OpGetRangeHash)) - require.True(t, acl.BearerAllowed(OpGetRangeHash)) - - require.True(t, acl.UserAllowed(OpGetRange)) - require.True(t, acl.SystemAllowed(OpGetRange)) - require.True(t, acl.OthersAllowed(OpGetRange)) - require.True(t, acl.BearerAllowed(OpGetRange)) - - require.True(t, acl.UserAllowed(OpSearch)) - require.True(t, acl.SystemAllowed(OpSearch)) - require.True(t, acl.OthersAllowed(OpSearch)) - require.True(t, acl.BearerAllowed(OpSearch)) - - require.True(t, acl.UserAllowed(OpDelete)) - require.True(t, acl.SystemAllowed(OpDelete)) - require.True(t, acl.OthersAllowed(OpDelete)) - require.True(t, acl.BearerAllowed(OpDelete)) - - require.True(t, acl.UserAllowed(OpPut)) - require.True(t, acl.SystemAllowed(OpPut)) - require.True(t, acl.OthersAllowed(OpPut)) - require.True(t, acl.BearerAllowed(OpPut)) - - require.True(t, acl.UserAllowed(OpHead)) - require.True(t, acl.SystemAllowed(OpHead)) - require.True(t, acl.OthersAllowed(OpHead)) - require.True(t, acl.BearerAllowed(OpHead)) - - require.True(t, acl.UserAllowed(OpGet)) - require.True(t, acl.SystemAllowed(OpGet)) - require.True(t, acl.OthersAllowed(OpGet)) - require.True(t, acl.BearerAllowed(OpGet)) - }) - - t.Run("read 
only", func(t *testing.T) { - acl := FromUint32(0x1FFFCCFF) - - require.False(t, acl.Reserved(0)) - require.False(t, acl.Reserved(1)) - require.False(t, acl.Sticky()) - require.True(t, acl.Final()) - - require.True(t, acl.UserAllowed(OpGetRangeHash)) - require.True(t, acl.SystemAllowed(OpGetRangeHash)) - require.True(t, acl.OthersAllowed(OpGetRangeHash)) - require.True(t, acl.BearerAllowed(OpGetRangeHash)) - - require.True(t, acl.UserAllowed(OpGetRange)) - require.True(t, acl.SystemAllowed(OpGetRange)) - require.True(t, acl.OthersAllowed(OpGetRange)) - require.True(t, acl.BearerAllowed(OpGetRange)) - - require.True(t, acl.UserAllowed(OpSearch)) - require.True(t, acl.SystemAllowed(OpSearch)) - require.True(t, acl.OthersAllowed(OpSearch)) - require.True(t, acl.BearerAllowed(OpSearch)) - - require.True(t, acl.UserAllowed(OpDelete)) - require.True(t, acl.SystemAllowed(OpDelete)) - require.False(t, acl.OthersAllowed(OpDelete)) - require.False(t, acl.BearerAllowed(OpDelete)) - - require.True(t, acl.UserAllowed(OpPut)) - require.True(t, acl.SystemAllowed(OpPut)) - require.False(t, acl.OthersAllowed(OpPut)) - require.False(t, acl.BearerAllowed(OpPut)) - - require.True(t, acl.UserAllowed(OpHead)) - require.True(t, acl.SystemAllowed(OpHead)) - require.True(t, acl.OthersAllowed(OpHead)) - require.True(t, acl.BearerAllowed(OpHead)) - - require.True(t, acl.UserAllowed(OpGet)) - require.True(t, acl.SystemAllowed(OpGet)) - require.True(t, acl.OthersAllowed(OpGet)) - require.True(t, acl.BearerAllowed(OpGet)) - }) -} - -func TestACLMethods(t *testing.T) { - acl := new(ACL) - - for i := uint8(0); i < reservedBitNumber; i++ { - acl.SetReserved(i) - require.True(t, acl.Reserved(i)) - acl.ResetReserved(i) - require.False(t, acl.Reserved(i)) - } - - acl.SetSticky() - require.True(t, acl.Sticky()) - acl.ResetSticky() - require.False(t, acl.Sticky()) - - acl.SetFinal() - require.True(t, acl.Final()) - acl.ResetFinal() - require.False(t, acl.Final()) - - for i := OpGetRangeHash; i <= OpGet; i++ { - acl.AllowUser(i) - require.True(t, acl.UserAllowed(i)) - acl.ForbidUser(i) - require.False(t, acl.UserAllowed(i)) - - acl.AllowOthers(i) - require.True(t, acl.OthersAllowed(i)) - acl.ForbidOthers(i) - require.False(t, acl.OthersAllowed(i)) - - acl.AllowBearer(i) - require.True(t, acl.BearerAllowed(i)) - acl.ForbidBearer(i) - require.False(t, acl.BearerAllowed(i)) - - acl.AllowSystem(i) - require.True(t, acl.SystemAllowed(i)) - acl.ForbidSystem(i) - - if i == OpDelete || i == OpGetRange { - require.False(t, acl.SystemAllowed(i)) - } else { - require.True(t, acl.SystemAllowed(i)) - } - } -} diff --git a/pkg/core/container/acl/basic/util.go b/pkg/core/container/acl/basic/util.go deleted file mode 100644 index 77ab5829f..000000000 --- a/pkg/core/container/acl/basic/util.go +++ /dev/null @@ -1,64 +0,0 @@ -package basic - -import ( - "encoding/binary" - "io" -) - -// Size is a size of ACL -// in a binary form. -const Size = 4 - -// FromUint32 converts builtin -// uint32 value to ACL. -// -// Try to avoid direct cast for -// better portability. -func FromUint32(v uint32) ACL { - return ACL(v) -} - -// ToUint32 converts ACL value -// to builtin uint32. -// -// Try to avoid direct cast for -// better portability. -func ToUint32(v ACL) uint32 { - return uint32(v) -} - -// Equal reports whether e and e2 are the same ACL. -// -// Function defines the relation of equality -// between two ACL. Try to avoid comparison through -// "==" operator for better portability. 
-func Equal(a, b ACL) bool { - return ToUint32(a) == ToUint32(b) -} - -// Marshal encodes ACL into a -// binary form and returns the result. -// -// Result slice has Size length. -func Marshal(a ACL) []byte { - d := make([]byte, Size) - - binary.BigEndian.PutUint32(d, ToUint32(a)) - - return d -} - -// UnmarshalBinary unmarshals ACL from -// a binary representation. -// -// If buffer size is insufficient, -// io.ErrUnexpectedEOF is returned. -func (a *ACL) UnmarshalBinary(data []byte) error { - if len(data) < Size { - return io.ErrUnexpectedEOF - } - - *a = FromUint32(binary.BigEndian.Uint32(data)) - - return nil -} diff --git a/pkg/core/container/acl/basic/util_test.go b/pkg/core/container/acl/basic/util_test.go deleted file mode 100644 index f48eecf22..000000000 --- a/pkg/core/container/acl/basic/util_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package basic - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestEqual(t *testing.T) { - require.True(t, - Equal( - FromUint32(1), - FromUint32(1), - ), - ) - - require.False(t, - Equal( - FromUint32(1), - FromUint32(2), - ), - ) -} - -func TestMarshal(t *testing.T) { - a := FromUint32(1) - a2 := new(ACL) - - require.NoError(t, - a2.UnmarshalBinary( - Marshal(a), - ), - ) - - require.True(t, Equal(a, *a2)) -} diff --git a/pkg/core/container/acl/extended/storage/storage.go b/pkg/core/container/acl/extended/storage/storage.go deleted file mode 100644 index bdd0559b9..000000000 --- a/pkg/core/container/acl/extended/storage/storage.go +++ /dev/null @@ -1,53 +0,0 @@ -package storage - -import ( - "errors" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container" -) - -// CID represents the container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.ID. -type CID = container.ID - -// Table represents extended ACL rule table. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/eacl/extended.Table. -type Table = eacl.Table - -// Storage is the interface that wraps -// basic methods of extended ACL table storage. -type Storage interface { - // GetEACL reads the table from the storage by identifier. - // It returns any error encountered. - // - // GetEACL must return exactly one non-nil value. - // GetEACL must return ErrNotFound if the table is not in storage. - // - // Implementations must not retain or modify the table - // (even temporarily). - GetEACL(CID) (Table, error) - - // PutEACL saves the table to the underlying storage. - // It returns any error encountered that caused the saving to interrupt. - // - // PutEACL must return extended.ErrNilTable on nil table. - // - // Implementations must not retain or modify the table (even temporarily). - // - // Table rewriting behavior is dictated by implementation. - PutEACL(CID, Table, []byte) error -} - -// ErrNotFound is the error returned when eACL table -// was not found in storage. -var ErrNotFound = errors.New("container not found") - -// ErrNilStorage is the error returned by functions that -// expect a non-nil eACL table storage implementation, -// but received nil. 
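The basic ACL Marshal/UnmarshalBinary pair above is simply a fixed 4-byte big-endian encoding with an io.ErrUnexpectedEOF guard for short buffers. A trivial, hedged round-trip sketch of that wire form (the sample value is arbitrary):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const src uint32 = 0x1C8C8CCC

	// Marshal: exactly 4 bytes, big-endian, as basic.Marshal does.
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, src)

	// Unmarshal: reject short buffers, then read the value back.
	if len(buf) < 4 {
		panic("unexpected EOF")
	}
	dst := binary.BigEndian.Uint32(buf)

	fmt.Printf("% x -> %#x (equal: %t)\n", buf, dst, dst == src)
}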
-var ErrNilStorage = errors.New("eACL storage is nil") diff --git a/pkg/core/container/acl/extended/storage/test/storage.go b/pkg/core/container/acl/extended/storage/test/storage.go deleted file mode 100644 index 7a5242e50..000000000 --- a/pkg/core/container/acl/extended/storage/test/storage.go +++ /dev/null @@ -1,48 +0,0 @@ -package test - -import ( - "sync" - - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" -) - -type testStorage struct { - *sync.RWMutex - - items map[container.ID]storage.Table -} - -func (s *testStorage) GetEACL(cid storage.CID) (storage.Table, error) { - s.RLock() - table, ok := s.items[cid] - s.RUnlock() - - if !ok { - return nil, storage.ErrNotFound - } - - return table, nil -} - -func (s *testStorage) PutEACL(cid storage.CID, table storage.Table, _ []byte) error { - if table == nil { - return extended.ErrNilTable - } - - s.Lock() - s.items[cid] = table - s.Unlock() - - return nil -} - -// New creates new eACL table storage -// that stores table in go-builtin map. -func New() storage.Storage { - return &testStorage{ - RWMutex: new(sync.RWMutex), - items: make(map[container.ID]storage.Table), - } -} diff --git a/pkg/core/container/acl/extended/types.go b/pkg/core/container/acl/extended/types.go deleted file mode 100644 index a1442fcf3..000000000 --- a/pkg/core/container/acl/extended/types.go +++ /dev/null @@ -1,102 +0,0 @@ -package extended - -import ( - "errors" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container" -) - -// FIXME: do not duplicate constants - -// OperationType represents the enumeration -// of different destination types of request. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/eacl.OperationType. -// FIXME: operation type should be defined in core lib. -type OperationType = eacl.OperationType - -// Group represents the enumeration -// of different authorization groups. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.Group. -// FIXME: target should be defined in core lib. -type Group = eacl.Group - -// HeaderType represents the enumeration -// of different types of header. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/eacl.HeaderType. -// FIXME: header type enum should be defined in core lib. -type HeaderType = eacl.HeaderType - -// CID represents the container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.ID. -type CID = container.ID - -// Header is an interface that wraps -// methods of string key-value header. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/eacl.Header. -// FIXME: header should be defined in core lib. -type Header = eacl.Header - -// Table represents extended ACL rule table. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/eacl.ExtendedACLTable. -// FIXME: eacl table should be defined in core package. -// type Table = eacl.ExtendedACLTable - -// TypedHeaderSource is the interface that wraps -// method for selecting typed headers by type. -type TypedHeaderSource interface { - // HeadersOfType returns the list of key-value headers - // of particular type. - // - // It returns any problem encountered through the boolean - // false value. - HeadersOfType(HeaderType) ([]Header, bool) -} - -// RequestInfo is an interface that wraps -// request with authority methods. 
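The storage contract above promises that GetEACL returns exactly one non-nil value and signals a missing table with ErrNotFound, which callers are expected to distinguish from other failures. A hedged, self-contained sketch of that calling convention — the lookup helper, the map store and the string "table" are invented stand-ins for the real Storage, CID and Table types:

package main

import (
	"errors"
	"fmt"
)

// errNotFound mirrors storage.ErrNotFound from the deleted package.
var errNotFound = errors.New("container not found")

// lookup stands in for Storage.GetEACL: it returns either a table
// or exactly one non-nil error, never both zero values.
func lookup(id string, items map[string]string) (string, error) {
	t, ok := items[id]
	if !ok {
		return "", errNotFound
	}
	return t, nil
}

func main() {
	items := map[string]string{"cid-1": "allow user get"}

	for _, id := range []string{"cid-1", "cid-2"} {
		table, err := lookup(id, items)
		switch {
		case errors.Is(err, errNotFound):
			fmt.Println(id, "has no eACL table")
		case err != nil:
			fmt.Println(id, "lookup failed:", err)
		default:
			fmt.Println(id, "->", table)
		}
	}
}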
-type RequestInfo interface { - TypedHeaderSource - - // CID returns container identifier from request context. - CID() CID - - // Key returns the binary representation of - // author's public key. - // - // Any problem encountered can be reflected - // through an empty slice. - // - // Binary key format is dictated by implementation. - Key() []byte - - // OperationType returns the type of request destination. - // - // Any problem encountered can be reflected - // through OpTypeUnknown value. Caller should handle - // OpTypeUnknown value according to its internal logic. - OperationType() OperationType - - // Group returns the authority group type. - // - // Any problem encountered can be reflected - // through GroupUnknown value. Caller should handle - // TargetUnknown value according to its internal logic. - Group() Group -} - -// ErrNilTable is the error returned by functions that -// expect a non-nil eACL table, but received nil. -var ErrNilTable = errors.New("eACL table is nil") diff --git a/pkg/core/container/container.go b/pkg/core/container/container.go deleted file mode 100644 index 8f1282fff..000000000 --- a/pkg/core/container/container.go +++ /dev/null @@ -1,82 +0,0 @@ -package container - -import ( - "errors" - - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/nspcc-dev/netmap" -) - -// BasicACL represents the basic -// ACL rules. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/basic.ACL. -type BasicACL = basic.ACL - -// PlacementRule represents placement -// rules of the container. -// -// It is a type alias of -// github.com/nspcc-dev/netmap.PlacementRule. -// FIXME: container placement rules should be defined in core lib. -type PlacementRule = netmap.PlacementRule - -// Container represents NeoFS container. -type Container struct { - basicACL BasicACL // basic ACL mask - - ownerID OwnerID // the identifier of container's owner - - salt []byte // unique container bytes - - placementRule PlacementRule // placement rules -} - -// ErrNilContainer is the error returned by functions that -// expect a non-nil container pointer, but received nil. -var ErrNilContainer = errors.New("container is nil") - -// OwnerID returns an ID of the container's owner. -func (c *Container) OwnerID() OwnerID { - return c.ownerID -} - -// SetOwnerID sets the ID of the container's owner. -func (c *Container) SetOwnerID(v OwnerID) { - c.ownerID = v -} - -// Salt returns the container salt. -// -// Slice is returned by reference without copying. -func (c *Container) Salt() []byte { - return c.salt -} - -// SetSalt sets the container salt. -// -// Slice is assigned by reference without copying. -func (c *Container) SetSalt(v []byte) { - c.salt = v -} - -// BasicACL returns the mask of basic container permissions. -func (c *Container) BasicACL() BasicACL { - return c.basicACL -} - -// SetBasicACL sets the mask of basic container permissions. -func (c *Container) SetBasicACL(v BasicACL) { - c.basicACL = v -} - -// PlacementRule returns placement rule of the container. -func (c *Container) PlacementRule() PlacementRule { - return c.placementRule -} - -// SetPlacementRule sets placement rule of the container. 
-func (c *Container) SetPlacementRule(v PlacementRule) { - c.placementRule = v -} diff --git a/pkg/core/container/container_test.go b/pkg/core/container/container_test.go deleted file mode 100644 index 21fc1e152..000000000 --- a/pkg/core/container/container_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package container - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/stretchr/testify/require" -) - -func TestContainerMethods(t *testing.T) { - c := new(Container) - - acl := basic.FromUint32(1) - c.SetBasicACL(acl) - require.True(t, basic.Equal(acl, c.BasicACL())) - - ownerID := OwnerID{1, 2, 3} - c.SetOwnerID(ownerID) - require.Equal(t, ownerID, c.OwnerID()) - - salt := []byte{4, 5, 6} - c.SetSalt(salt) - require.Equal(t, salt, c.Salt()) - - rule := PlacementRule{ - ReplFactor: 1, - } - c.SetPlacementRule(rule) - require.Equal(t, rule, c.PlacementRule()) -} diff --git a/pkg/core/container/id.go b/pkg/core/container/id.go deleted file mode 100644 index 251053c2c..000000000 --- a/pkg/core/container/id.go +++ /dev/null @@ -1,49 +0,0 @@ -package container - -import ( - "crypto/sha256" - - "github.com/nspcc-dev/neofs-api-go/refs" -) - -// ID represents the -// container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/refs.CID. -// FIXME: container id should be defined in core package. -type ID = refs.CID - -// OwnerID represents the -// container owner identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/refs.OwnerID. -// FIXME: owner ID should be defined in core lib. -type OwnerID = refs.OwnerID - -// OwnerIDSize is a size of OwnerID -// in a binary form. -const OwnerIDSize = refs.OwnerIDSize - -// CalculateID calculates container identifier -// as SHA256 checksum of the binary form. -// -// If container is nil, ErrNilContainer is returned. 
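CalculateID below derives the identifier as the SHA-256 digest of the container's binary form, so any change to the encoded fields (for example a different salt) produces a different ID. A minimal, hedged sketch of that derivation, with an invented byte slice standing in for MarshalBinary output:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// Stand-in for Container.MarshalBinary output; any change to the
	// encoded container yields a different identifier.
	encoded := []byte{0x00, 0x00, 0x00, 0x01, 1, 2, 3}

	id := sha256.Sum256(encoded)
	fmt.Println("container ID:", hex.EncodeToString(id[:]))
}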
-func CalculateID(cnr *Container) (*ID, error) { - if cnr == nil { - return nil, ErrNilContainer - } - - data, err := cnr.MarshalBinary() - if err != nil { - return nil, err - } - - res := new(ID) - sh := sha256.Sum256(data) - - copy(res[:], sh[:]) - - return res, nil -} diff --git a/pkg/core/container/id_test.go b/pkg/core/container/id_test.go deleted file mode 100644 index 142375ca8..000000000 --- a/pkg/core/container/id_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package container - -import ( - "crypto/sha256" - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -func TestCalculateID(t *testing.T) { - _, err := CalculateID(nil) - require.True(t, errors.Is(err, ErrNilContainer)) - - cnr := new(Container) - cnr.SetBasicACL(basic.FromUint32(1)) - cnr.SetOwnerID(OwnerID{1, 2, 3}) - cnr.SetSalt([]byte{4, 5, 6}) - - id1, err := CalculateID(cnr) - require.NoError(t, err) - - data, err := cnr.MarshalBinary() - require.NoError(t, err) - - sh := sha256.Sum256(data) - - require.Equal(t, id1.Bytes(), sh[:]) - - // change the container - cnr.SetSalt(append(cnr.Salt(), 1)) - - id2, err := CalculateID(cnr) - require.NoError(t, err) - - require.NotEqual(t, id1, id2) -} diff --git a/pkg/core/container/marshal.go b/pkg/core/container/marshal.go deleted file mode 100644 index 1cb938ec2..000000000 --- a/pkg/core/container/marshal.go +++ /dev/null @@ -1,75 +0,0 @@ -package container - -import ( - "encoding/binary" - "io" - - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" -) - -const ( - saltLenSize = 2 - - fixedSize = 0 + - basic.Size + - OwnerIDSize + - saltLenSize -) - -// MarshalBinary encodes the container into a binary form -// and returns the result. -func (c *Container) MarshalBinary() ([]byte, error) { - data := make([]byte, binaryContainerSize(c)) - - off := copy(data, basic.Marshal(c.basicACL)) - - off += copy(data[off:], c.ownerID.Bytes()) - - binary.BigEndian.PutUint16(data[off:], uint16(len(c.salt))) - off += saltLenSize - - off += copy(data[off:], c.salt) - - if _, err := c.placementRule.MarshalTo(data[off:]); err != nil { - return nil, err - } - - return data, nil -} - -// UnmarshalBinary unmarshals container from a binary -// representation. -// -// If buffer size is insufficient, io.ErrUnexpectedEOF is returned. -func (c *Container) UnmarshalBinary(data []byte) error { - if len(data) < binaryContainerSize(c) { - return io.ErrUnexpectedEOF - } - - if err := c.basicACL.UnmarshalBinary(data); err != nil { - return err - } - - off := basic.Size - - off += copy(c.ownerID[:], data[off:]) - - saltLen := binary.BigEndian.Uint16(data[off:]) - off += saltLenSize - - c.salt = make([]byte, saltLen) - off += copy(c.salt, data[off:]) - - if err := c.placementRule.Unmarshal(data[off:]); err != nil { - return err - } - - return nil -} - -// returns the length of the container in binary form. 
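// Informal layout note with a hypothetical helper (derived from MarshalBinary
// above): the binary form of a container is, in order,
//
//   basic ACL (basic.Size) | owner ID (OwnerIDSize) |
//   salt length (2 bytes, big-endian) | salt | placement rule
//
// so a successful encoding always has length fixedSize + len(salt) + placement
// rule size.
func exampleEncodedSize(cnr *Container) (int, error) {
	data, err := cnr.MarshalBinary()
	if err != nil {
		return 0, err
	}

	return len(data), nil // equals binaryContainerSize(cnr)
}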
-func binaryContainerSize(cnr *Container) int { - return fixedSize + - len(cnr.salt) + - cnr.placementRule.Size() -} diff --git a/pkg/core/container/marshal_test.go b/pkg/core/container/marshal_test.go deleted file mode 100644 index 20579b8eb..000000000 --- a/pkg/core/container/marshal_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package container - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/stretchr/testify/require" -) - -func TestContainerMarshal(t *testing.T) { - srcCnr := new(Container) - srcCnr.SetBasicACL(basic.FromUint32(1)) - srcCnr.SetOwnerID(OwnerID{1, 2, 3}) - srcCnr.SetSalt([]byte{4, 5, 6}) - srcCnr.SetPlacementRule(PlacementRule{ - ReplFactor: 3, - }) - - data, err := srcCnr.MarshalBinary() - require.NoError(t, err) - - dstCnr := new(Container) - require.NoError(t, dstCnr.UnmarshalBinary(data)) - - require.Equal(t, srcCnr, dstCnr) -} diff --git a/pkg/core/container/storage/storage.go b/pkg/core/container/storage/storage.go deleted file mode 100644 index 9a5faadcb..000000000 --- a/pkg/core/container/storage/storage.go +++ /dev/null @@ -1,79 +0,0 @@ -package storage - -import ( - "errors" - - "github.com/nspcc-dev/neofs-node/pkg/core/container" -) - -// Container represents the NeoFS container. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.Container. -type Container = container.Container - -// OwnerID represents the container -// owner identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.OwnerID. -type OwnerID = container.OwnerID - -// CID represents the container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.ID. -type CID = container.ID - -// Storage is an interface that wraps -// basic container storage methods. -type Storage interface { - // Put saves pointed container to the underlying storage. - // It returns calculated container identifier and any error - // encountered that caused the saving to interrupt. - // - // Put must return container.ErrNilContainer on nil-pointer. - // - // Implementations must not modify the container through the pointer (even temporarily). - // Implementations must not retain the container pointer. - // - // Container rewriting behavior is dictated by implementation. - Put(*Container) (*CID, error) - - // Get reads the container from the storage by identifier. - // It returns the pointer to requested container and any error encountered. - // - // Get must return exactly one non-nil value. - // Get must return ErrNotFound if the container is not in storage. - // - // Implementations must not retain the container pointer and modify - // the container through it. - Get(CID) (*Container, error) - - // Delete removes the container from the storage. - // It returns any error encountered that caused the deletion to interrupt. - // - // Delete must return nil if container was successfully deleted. - // - // Behavior when deleting a nonexistent container is dictated by implementation. - Delete(CID) error - - // List returns a list of container identifiers belonging to the specified owner. - // It returns any error encountered that caused the listing to interrupt. - // - // List must return the identifiers of all stored containers if owner pointer is nil. - // List must return the empty list and no error in the absence of containers in storage. 
- // - // Result slice can be either empty slice or nil, so empty list should be checked - // by comparing with zero length (not nil). - // - // Callers should carefully handle the incomplete list in case of interrupt error. - List(*OwnerID) ([]CID, error) -} - -// ErrNotFound is the error returned when container was not found in storage. -var ErrNotFound = errors.New("container not found") - -// ErrNilStorage is the error returned by functions that -// expect a non-nil container storage implementation, but received nil. -var ErrNilStorage = errors.New("container storage is nil") diff --git a/pkg/core/container/storage/test/storage.go b/pkg/core/container/storage/test/storage.go deleted file mode 100644 index b2fe18ab3..000000000 --- a/pkg/core/container/storage/test/storage.go +++ /dev/null @@ -1,127 +0,0 @@ -package test - -import ( - "sync" - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/stretchr/testify/require" -) - -type testStorage struct { - *sync.RWMutex - - items map[container.ID]*container.Container -} - -func (s *testStorage) Put(cnr *storage.Container) (*storage.CID, error) { - if cnr == nil { - return nil, container.ErrNilContainer - } - - cid, err := container.CalculateID(cnr) - if err != nil { - return nil, err - } - - s.Lock() - s.items[*cid] = cnr - s.Unlock() - - return cid, nil -} - -func (s *testStorage) Get(cid storage.CID) (*storage.Container, error) { - s.RLock() - cnr, ok := s.items[cid] - s.RUnlock() - - if !ok { - return nil, storage.ErrNotFound - } - - return cnr, nil -} - -func (s *testStorage) Delete(cid storage.CID) error { - s.Lock() - delete(s.items, cid) - s.Unlock() - - return nil -} - -func (s *testStorage) List(ownerID *storage.OwnerID) ([]storage.CID, error) { - s.RLock() - defer s.RUnlock() - - res := make([]storage.CID, 0) - - for cid, cnr := range s.items { - if ownerID == nil || ownerID.Equal(cnr.OwnerID()) { - res = append(res, cid) - } - } - - return res, nil -} - -// New creates new container storage -// that stores containers in go-builtin map. -func New() storage.Storage { - return &testStorage{ - RWMutex: new(sync.RWMutex), - items: make(map[container.ID]*container.Container), - } -} - -// Storage conducts testing of container -// storage for interface specification. -// -// Storage must be empty. 
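// Caller-side sketch (hypothetical helper, illustrative only): per the List
// contract above, an empty result may come back as either nil or an empty
// slice, so emptiness is detected by length rather than by comparing with nil.
func exampleListByOwner(s storage.Storage, owner *storage.OwnerID) ([]storage.CID, error) {
	cids, err := s.List(owner) // nil owner lists all stored containers
	if err != nil {
		return nil, err
	}

	if len(cids) == 0 {
		return nil, nil // nothing stored for this owner
	}

	return cids, nil
}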
-func Storage(t *testing.T, s storage.Storage) { - list, err := s.List(nil) - require.NoError(t, err) - require.Empty(t, list) - - cnr1 := new(container.Container) - cnr1.SetOwnerID(container.OwnerID{1, 2, 3}) - - id1, err := s.Put(cnr1) - require.NoError(t, err) - - res, err := s.Get(*id1) - require.NoError(t, err) - require.Equal(t, cnr1, res) - - cnr2 := new(container.Container) - owner1 := cnr1.OwnerID() - owner1[0]++ - cnr2.SetOwnerID(owner1) - - id2, err := s.Put(cnr2) - require.NoError(t, err) - - res, err = s.Get(*id2) - require.NoError(t, err) - require.Equal(t, cnr2, res) - - list, err = s.List(nil) - require.NoError(t, err) - require.Len(t, list, 2) - require.Contains(t, list, *id1) - require.Contains(t, list, *id2) - - owner1 = cnr1.OwnerID() - list, err = s.List(&owner1) - require.NoError(t, err) - require.Len(t, list, 1) - require.Equal(t, *id1, list[0]) - - owner2 := cnr2.OwnerID() - list, err = s.List(&owner2) - require.NoError(t, err) - require.Len(t, list, 1) - require.Equal(t, *id2, list[0]) -} diff --git a/pkg/core/container/storage/test/storage_test.go b/pkg/core/container/storage/test/storage_test.go deleted file mode 100644 index 7614f467f..000000000 --- a/pkg/core/container/storage/test/storage_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package test - -import ( - "testing" -) - -func TestNewStorage(t *testing.T) { - s := New() - - Storage(t, s) -} diff --git a/pkg/core/netmap/.gitkeep b/pkg/core/netmap/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/core/netmap/epoch/marshal.go b/pkg/core/netmap/epoch/marshal.go deleted file mode 100644 index a15fc9ca5..000000000 --- a/pkg/core/netmap/epoch/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -package epoch - -import ( - "encoding/binary" - "io" -) - -// Size is a size of Epoch -// in a binary form. -const Size = 8 - -// Marshal encodes Epoch into a -// binary form and returns the result. -// -// Result slice has Size length. -func Marshal(e Epoch) []byte { - d := make([]byte, Size) - - binary.BigEndian.PutUint64(d, ToUint64(e)) - - return d -} - -// UnmarshalBinary unmarshals Epoch from -// a binary representation. -// -// If buffer size is insufficient, -// io.ErrUnexpectedEOF is returned. -func (e *Epoch) UnmarshalBinary(data []byte) error { - if len(data) < Size { - return io.ErrUnexpectedEOF - } - - *e = FromUint64(binary.BigEndian.Uint64(data)) - - return nil -} diff --git a/pkg/core/netmap/epoch/marshal_test.go b/pkg/core/netmap/epoch/marshal_test.go deleted file mode 100644 index 7d20ae43e..000000000 --- a/pkg/core/netmap/epoch/marshal_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package epoch - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestEpochMarshal(t *testing.T) { - e := FromUint64(1) - e2 := new(Epoch) - - require.NoError(t, - e2.UnmarshalBinary( - Marshal(e), - ), - ) - - require.True(t, EQ(e, *e2)) -} diff --git a/pkg/core/netmap/epoch/math.go b/pkg/core/netmap/epoch/math.go deleted file mode 100644 index 7f68d3c74..000000000 --- a/pkg/core/netmap/epoch/math.go +++ /dev/null @@ -1,12 +0,0 @@ -package epoch - -// Sum returns the result of -// summing up two Epoch. -// -// Function defines a binary -// operation of summing two Epoch. -// Try to avoid using operator -// "+" for better portability. 
-func Sum(a, b Epoch) Epoch { - return FromUint64(ToUint64(a) + ToUint64(b)) -} diff --git a/pkg/core/netmap/epoch/math_test.go b/pkg/core/netmap/epoch/math_test.go deleted file mode 100644 index a1ae88261..000000000 --- a/pkg/core/netmap/epoch/math_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package epoch - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestEpochMath(t *testing.T) { - items := []struct { - mathFn func(Epoch, Epoch) Epoch - - a, b, c uint64 - }{ - { - mathFn: Sum, a: 1, b: 2, c: 3}, - } - - for _, item := range items { - require.Equal(t, - item.mathFn( - FromUint64(item.a), - FromUint64(item.b), - ), - FromUint64(item.c), - ) - } -} diff --git a/pkg/core/netmap/epoch/relation.go b/pkg/core/netmap/epoch/relation.go deleted file mode 100644 index 56ae0a1d0..000000000 --- a/pkg/core/netmap/epoch/relation.go +++ /dev/null @@ -1,55 +0,0 @@ -package epoch - -// EQ reports whether e and e2 are the same Epoch. -// -// Function defines the relation of equality -// between two Epoch. Try to avoid comparison through -// "==" operator for better portability. -func EQ(e1, e2 Epoch) bool { - return ToUint64(e1) == ToUint64(e2) -} - -// NE reports whether e1 and e2 are the different Epoch. -// -// Method defines the relation of inequality -// between two Epoch. Try to avoid comparison through -// "!=" operator for better portability. -func NE(e1, e2 Epoch) bool { - return ToUint64(e1) != ToUint64(e2) -} - -// LT reports whether e1 is less Epoch than e2. -// -// Method defines the "less than" relation -// between two Epoch. Try to avoid comparison through -// "<" operator for better portability. -func LT(e1, e2 Epoch) bool { - return ToUint64(e1) < ToUint64(e2) -} - -// GT reports whether e1 is greater Epoch than e2. -// -// Method defines the "greater than" relation -// between two Epoch. Try to avoid comparison through -// ">" operator for better portability. -func GT(e1, e2 Epoch) bool { - return ToUint64(e1) > ToUint64(e2) -} - -// LE reports whether e1 is less or equal Epoch than e2. -// -// Method defines the "less or equal" relation -// between two Epoch. Try to avoid comparison through -// "<=" operator for better portability. -func LE(e1, e2 Epoch) bool { - return ToUint64(e1) <= ToUint64(e2) -} - -// GE reports whether e1 is greater or equal Epoch than e2. -// -// Method defines the "greater or equal" relation -// between two Epoch. Try to avoid comparison through -// ">=" operator for better portability. 
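// Usage sketch (hypothetical helper, illustrative only): callers go through
// the epoch helpers instead of raw integer operators, e.g. to advance an epoch
// and to pick the later of two values.
func exampleNextEpoch(current, candidate Epoch) Epoch {
	next := Sum(current, FromUint64(1)) // current + 1 without "+"

	if GT(candidate, next) { // candidate > next without ">"
		return candidate
	}

	return next
}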
-func GE(e1, e2 Epoch) bool { - return ToUint64(e1) >= ToUint64(e2) -} diff --git a/pkg/core/netmap/epoch/relation_test.go b/pkg/core/netmap/epoch/relation_test.go deleted file mode 100644 index 003cfc1d0..000000000 --- a/pkg/core/netmap/epoch/relation_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package epoch - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestEpochRelations(t *testing.T) { - items := []struct { - relFn func(Epoch, Epoch) bool - - base, ok, fail uint64 - }{ - {relFn: EQ, base: 1, ok: 1, fail: 2}, - {relFn: NE, base: 1, ok: 2, fail: 1}, - {relFn: LT, base: 1, ok: 2, fail: 0}, - {relFn: GT, base: 1, ok: 0, fail: 2}, - {relFn: LE, base: 1, ok: 1, fail: 0}, - {relFn: LE, base: 1, ok: 2, fail: 0}, - {relFn: GE, base: 1, ok: 0, fail: 2}, - {relFn: GE, base: 1, ok: 1, fail: 2}, - } - - for _, item := range items { - require.True(t, - item.relFn( - FromUint64(item.base), - FromUint64(item.ok), - ), - ) - - require.False(t, - item.relFn( - FromUint64(item.base), - FromUint64(item.fail), - ), - ) - } -} diff --git a/pkg/core/netmap/epoch/type.go b/pkg/core/netmap/epoch/type.go deleted file mode 100644 index 79b3da550..000000000 --- a/pkg/core/netmap/epoch/type.go +++ /dev/null @@ -1,23 +0,0 @@ -package epoch - -// Epoch represents the -// number of NeoFS epoch. -type Epoch uint64 - -// FromUint64 converts builtin -// uint64 value to Epoch. -// -// Try to avoid direct cast for -// better portability. -func FromUint64(e uint64) Epoch { - return Epoch(e) -} - -// ToUint64 converts Epoch value -// to builtin uint64. -// -// Try to avoid direct cast for -// better portability. -func ToUint64(e Epoch) uint64 { - return uint64(e) -} diff --git a/pkg/core/netmap/netmap.go b/pkg/core/netmap/netmap.go deleted file mode 100644 index 17c60834d..000000000 --- a/pkg/core/netmap/netmap.go +++ /dev/null @@ -1,119 +0,0 @@ -package netmap - -import ( - "bytes" - "sync" - - "github.com/nspcc-dev/neofs-node/pkg/core/netmap/node" - "github.com/nspcc-dev/netmap" -) - -// Info represent node information. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/netmap/node.Info. -type Info = node.Info - -// Bucket represents NeoFS network map as a graph. -// -// If is a type alias of -// github.com/nspcc-dev/netmap.Bucket. -type Bucket = netmap.Bucket - -// NetMap represents NeoFS network map -// with concurrent access support. -type NetMap struct { - mtx *sync.RWMutex - - root *Bucket - - items []Info -} - -// New creates and initializes a new NetMap. -// -// Using the NetMap that has been created with new(NetMap) -// expression (or just declaring a NetMap variable) is unsafe -// and can lead to panic. -func New() *NetMap { - return &NetMap{ - mtx: new(sync.RWMutex), - root: new(Bucket), - } -} - -// Root returns the root bucket of the network map. -// -// Changing the result is unsafe and -// affects the network map. -func (n NetMap) Root() *Bucket { - n.mtx.RLock() - defer n.mtx.RUnlock() - - return n.root -} - -// SetRoot sets the root bucket of the network map. -// -// Subsequent changing the source bucket -// is unsafe and affects the network map. -func (n *NetMap) SetRoot(v *Bucket) { - n.mtx.Lock() - n.root = v - n.mtx.Unlock() -} - -// Nodes returns node list of the network map. -// -// Changing the result is unsafe and -// affects the network map. -func (n NetMap) Nodes() []Info { - n.mtx.RLock() - defer n.mtx.RUnlock() - - return n.items -} - -// SetNodes sets node list of the network map. 
-// -// Subsequent changing the source slice -// is unsafe and affects the network map. -func (n *NetMap) SetNodes(v []Info) { - n.mtx.Lock() - n.items = v - n.mtx.Unlock() -} - -// AddNode adds node information to the network map -// -// If node with provided information is already presented -// in network map, nothing happens, -func (n *NetMap) AddNode(nodeInfo Info) error { - n.mtx.Lock() - defer n.mtx.Unlock() - - num := -1 - - // looking for existed node info item - for i := range n.items { - if bytes.Equal( - n.items[i].PublicKey(), - nodeInfo.PublicKey(), - ) { - num = i - break - } - } - - // add node if it does not exist - if num < 0 { - n.items = append(n.items, nodeInfo) - num = len(n.items) - 1 - } - - return n.root.AddStrawNode(netmap.Node{ - N: uint32(num), - C: n.items[num].Capacity(), - P: n.items[num].Price(), - }, nodeInfo.Options()...) -} diff --git a/pkg/core/netmap/netmap_test.go b/pkg/core/netmap/netmap_test.go deleted file mode 100644 index e3bf1385c..000000000 --- a/pkg/core/netmap/netmap_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package netmap - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNetMap_Nodes(t *testing.T) { - nm := New() - - info1 := Info{} - info1.SetPublicKey([]byte{1, 2, 3}) - - info2 := Info{} - info2.SetPublicKey([]byte{4, 5, 6}) - - nodes := []Info{ - info1, - info2, - } - - nm.SetNodes(nodes) - - require.Equal(t, nodes, nm.Nodes()) -} - -func TestNetMap_Root(t *testing.T) { - nm := New() - - bucket := &Bucket{ - Key: "key", - Value: "value", - } - - nm.SetRoot(bucket) - - require.Equal(t, bucket, nm.Root()) -} diff --git a/pkg/core/netmap/node/info.go b/pkg/core/netmap/node/info.go deleted file mode 100644 index e8ca3defc..000000000 --- a/pkg/core/netmap/node/info.go +++ /dev/null @@ -1,154 +0,0 @@ -package node - -import ( - "errors" -) - -// Info represents the information -// about NeoFS storage node. -type Info struct { - address string // net address - - key []byte // public key - - opts []string // options - - status Status // status bits -} - -// ErrNilInfo is returned by functions that expect -// a non-nil Info pointer, but received nil. -var ErrNilInfo = errors.New("node info is nil") - -// Address returns node network address. -// -// Address format is dictated by -// application architecture. -func (i Info) Address() string { - return i.address -} - -// SetAddress sets node network address. -func (i *Info) SetAddress(v string) { - i.address = v -} - -// Status returns the node status. -func (i Info) Status() Status { - return i.status -} - -// SetStatus sets the node status. -func (i *Info) SetStatus(v Status) { - i.status = v -} - -// PublicKey returns node public key in -// a binary format. -// -// Changing the result is unsafe and -// affects the node info. In order to -// prevent state mutations, use -// CopyPublicKey. -// -// Key format is dictated by -// application architecture. -func (i Info) PublicKey() []byte { - return i.key -} - -// CopyPublicKey returns the copy of -// node public key. -// -// Changing the result is safe and -// does not affect the node info. -func CopyPublicKey(i Info) []byte { - res := make([]byte, len(i.key)) - - copy(res, i.key) - - return res -} - -// SetPublicKey sets node public key -// in a binary format. -// -// Subsequent changing the source slice -// is unsafe and affects node info. -// In order to prevent state mutations, -// use SetPublicKeyCopy. 
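// Copy-semantics sketch (hypothetical helper, illustrative only):
// SetPublicKey/PublicKey share the slice with the caller, while
// SetPublicKeyCopy/CopyPublicKey keep the node info isolated from later
// mutations of the source or of the returned slice.
func exampleKeyCopy(src []byte) ([]byte, error) {
	i := new(Info)

	if err := SetPublicKeyCopy(i, src); err != nil {
		return nil, err // only ErrNilInfo is possible here, for a nil Info
	}

	if len(src) > 0 {
		src[0]++ // does not affect the key stored in i
	}

	return CopyPublicKey(*i), nil // safe for the caller to modify
}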
-func (i *Info) SetPublicKey(v []byte) { - i.key = v -} - -// SetPublicKeyCopy copies public key and -// sets the copy as node public key. -// -// Subsequent changing the source slice -// is safe and does not affect node info. -// -// Returns ErrNilInfo on nil node info. -func SetPublicKeyCopy(i *Info, key []byte) error { - if i == nil { - return ErrNilInfo - } - - i.key = make([]byte, len(key)) - - copy(i.key, key) - - return nil -} - -// Options returns node option list. -// -// Changing the result is unsafe and -// affects the node info. In order to -// prevent state mutations, use -// CopyOptions. -// -// Option format is dictated by -// application architecture. -func (i Info) Options() []string { - return i.opts -} - -// CopyOptions returns the copy of -// node options list. -// -// Changing the result is safe and -// does not affect the node info. -func CopyOptions(i Info) []string { - res := make([]string, len(i.opts)) - - copy(res, i.opts) - - return res -} - -// SetOptions sets node option list. -// -// Subsequent changing the source slice -// is unsafe and affects node info. -// In order to prevent state mutations, -// use SetOptionsCopy. -func (i *Info) SetOptions(v []string) { - i.opts = v -} - -// SetOptionsCopy copies option list and sets -// the copy as node options list. -// -// Subsequent changing the source slice -// is safe and does not affect node info. -// -// SetOptionsCopy does nothing if Info is nil. -func SetOptionsCopy(i *Info, opts []string) { - if i == nil { - return - } - - i.opts = make([]string, len(opts)) - - copy(i.opts, opts) -} diff --git a/pkg/core/netmap/node/info_test.go b/pkg/core/netmap/node/info_test.go deleted file mode 100644 index d3a183b37..000000000 --- a/pkg/core/netmap/node/info_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package node - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestInfo_Address(t *testing.T) { - i := new(Info) - - addr := "address" - i.SetAddress(addr) - - require.Equal(t, addr, i.Address()) -} - -func TestInfo_Status(t *testing.T) { - i := new(Info) - - st := StatusFromUint64(1) - i.SetStatus(st) - - require.Equal(t, st, i.Status()) -} - -func TestInfo_PublicKey(t *testing.T) { - i := new(Info) - - key := []byte{1, 2, 3} - i.SetPublicKey(key) - - require.Equal(t, key, i.PublicKey()) -} - -func TestCopyPublicKey(t *testing.T) { - i := Info{} - - // set initial node key - initKey := []byte{1, 2, 3} - i.SetPublicKey(initKey) - - // get node key copy - keyCopy := CopyPublicKey(i) - - // change the copy - keyCopy[0]++ - - // check that node key has not changed - require.Equal(t, initKey, i.PublicKey()) -} - -func TestSetPublicKeyCopy(t *testing.T) { - require.EqualError(t, - SetPublicKeyCopy(nil, nil), - ErrNilInfo.Error(), - ) - - i := new(Info) - - // create source key - srcKey := []byte{1, 2, 3} - - // copy and set node key - require.NoError(t, SetPublicKeyCopy(i, srcKey)) - - // get node key - nodeKey := i.PublicKey() - - // change the source key - srcKey[0]++ - - // check that node key has not changed - require.Equal(t, nodeKey, i.PublicKey()) -} - -func TestInfo_Options(t *testing.T) { - i := new(Info) - - opts := []string{ - "opt1", - "opt2", - } - i.SetOptions(opts) - - require.Equal(t, opts, i.Options()) -} - -func TestCopyOptions(t *testing.T) { - i := Info{} - - // set initial node options - initOpts := []string{ - "opt1", - "opt2", - } - i.SetOptions(initOpts) - - // get node options copy - optsCopy := CopyOptions(i) - - // change the copy - optsCopy[0] = "some other opt" - - // check 
that node options have not changed - require.Equal(t, initOpts, i.Options()) -} - -func TestSetOptionsCopy(t *testing.T) { - require.NotPanics(t, func() { - SetOptionsCopy(nil, nil) - }) - - i := new(Info) - - // create source options - srcOpts := []string{ - "opt1", - "opt2", - } - - // copy and set node options - SetOptionsCopy(i, srcOpts) - - // get node options - nodeOpts := i.Options() - - // change the source options - srcOpts[0] = "some other opt" - - // check that node options have not changed - require.Equal(t, nodeOpts, i.Options()) -} diff --git a/pkg/core/netmap/node/options.go b/pkg/core/netmap/node/options.go deleted file mode 100644 index 692c7a7ef..000000000 --- a/pkg/core/netmap/node/options.go +++ /dev/null @@ -1,46 +0,0 @@ -package node - -import ( - "strconv" - "strings" - - "github.com/nspcc-dev/neofs-api-go/object" -) - -const optionPrice = "/Price:" - -const optionCapacity = "/Capacity:" - -// Price parses node options and returns the price in 1e-8*GAS/Megabyte per month. -// -// User sets the price in GAS/Terabyte per month. -func (i Info) Price() uint64 { - for j := range i.opts { - if strings.HasPrefix(i.opts[j], optionPrice) { - n, err := strconv.ParseFloat(i.opts[j][len(optionPrice):], 64) - if err != nil { - return 0 - } - - return uint64(n*1e8) / uint64(object.UnitsMB) // UnitsMB == megabytes in 1 terabyte - } - } - - return 0 -} - -// Capacity parses node options and returns the capacity . -func (i Info) Capacity() uint64 { - for j := range i.opts { - if strings.HasPrefix(i.opts[j], optionCapacity) { - n, err := strconv.ParseUint(i.opts[j][len(optionCapacity):], 10, 64) - if err != nil { - return 0 - } - - return n - } - } - - return 0 -} diff --git a/pkg/core/netmap/node/options_test.go b/pkg/core/netmap/node/options_test.go deleted file mode 100644 index 3965b70e8..000000000 --- a/pkg/core/netmap/node/options_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package node - -import ( - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/stretchr/testify/require" -) - -func TestInfo_Price(t *testing.T) { - var info Info - - // too small value - info.opts = []string{"/Price:0.01048575"} - require.Equal(t, uint64(0), info.Price()) - - // min value - info.opts = []string{"/Price:0.01048576"} - require.Equal(t, uint64(1), info.Price()) - - // big value - info.opts = []string{"/Price:1000000000.666"} - require.Equal(t, uint64(1000000000.666*1e8/object.UnitsMB), info.Price()) -} diff --git a/pkg/core/netmap/node/status.go b/pkg/core/netmap/node/status.go deleted file mode 100644 index 1f080642d..000000000 --- a/pkg/core/netmap/node/status.go +++ /dev/null @@ -1,63 +0,0 @@ -package node - -// Status represents a node -// status bits. -type Status uint64 - -const leftBitPos = 64 - -const ( - bitFullStorage = 1 -) - -// returns true if n-th left bit is set (starting at 0). -func isLeftBitSet(value Status, n uint8) bool { - bitMask := Status(1 << (leftBitPos - n)) - return bitMask != 0 && value&bitMask == bitMask -} - -// sets n-th left bit (starting at 0). -func setLeftBit(value *Status, n uint8) { - *value |= Status(1 << (leftBitPos - n)) -} - -// resets n-th left bit (starting at 0). -func resetLeftBit(value *Status, n uint8) { - *value &= ^Status(1 << (leftBitPos - n)) -} - -// Full returns true if node is in Full status. -// -// Full status marks node has enough space -// for storing users objects. -func (n Status) Full() bool { - return isLeftBitSet(n, bitFullStorage) -} - -// SetFull sets Full status of node. 
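// Usage sketch (hypothetical helper, illustrative only): the Full bit marks a
// node that still has enough space for user objects; the conversion helpers
// keep the uint64 cast in one place.
func exampleMarkFull(raw uint64) uint64 {
	st := StatusFromUint64(raw)

	if !st.Full() {
		st.SetFull()
	}

	return StatusToUint64(st)
}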
-func (n *Status) SetFull() { - setLeftBit(n, bitFullStorage) -} - -// ResetFull resets Full status of node. -func (n *Status) ResetFull() { - resetLeftBit(n, bitFullStorage) -} - -// StatusFromUint64 converts builtin -// uint64 value to Status. -// -// Try to avoid direct cast for -// better portability. -func StatusFromUint64(v uint64) Status { - return Status(v) -} - -// StatusToUint64 converts Status value -// to builtin uint64. -// -// Try to avoid direct cast for -// better portability. -func StatusToUint64(s Status) uint64 { - return uint64(s) -} diff --git a/pkg/core/netmap/node/status_test.go b/pkg/core/netmap/node/status_test.go deleted file mode 100644 index 5d540f9e3..000000000 --- a/pkg/core/netmap/node/status_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package node - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestStatus_Full(t *testing.T) { - st := new(Status) - - st.SetFull() - require.True(t, st.Full()) - - st.ResetFull() - require.False(t, st.Full()) -} diff --git a/pkg/core/object/.gitkeep b/pkg/core/object/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/core/object/extended.go b/pkg/core/object/extended.go deleted file mode 100644 index 330169a1b..000000000 --- a/pkg/core/object/extended.go +++ /dev/null @@ -1,94 +0,0 @@ -package object - -// ExtendedHeaderType represents the enumeration -// of extended header types of the NeoFS object. -type ExtendedHeaderType uint32 - -// ExtendedHeader represents the extended -// header of NeoFS object. -type ExtendedHeader struct { - typ ExtendedHeaderType - - val interface{} -} - -// Type returns the extended header type. -func (h ExtendedHeader) Type() ExtendedHeaderType { - return h.typ -} - -// SetType sets the extended header type. -func (h *ExtendedHeader) SetType(v ExtendedHeaderType) { - h.typ = v -} - -// Value returns the extended header value. -// -// In the case of a reference type, the value is -// returned by reference, so value mutations affect -// header state. Therefore, callers must first copy -// the value before changing manipulations. -func (h ExtendedHeader) Value() interface{} { - return h.val -} - -// SetValue sets the extended header value. -// -// Caller must take into account that each type of -// header usually has a limited set of expected -// value types. -// -// In the case of a reference type, the value is set -// by reference, so source value mutations affect -// header state. Therefore, callers must first copy -// the source value before changing manipulations. -func (h *ExtendedHeader) SetValue(v interface{}) { - h.val = v -} - -// TypeFromUint32 converts builtin -// uint32 value to Epoch. -// -// Try to avoid direct cast for -// better portability. -func TypeFromUint32(v uint32) ExtendedHeaderType { - return ExtendedHeaderType(v) -} - -// TypeToUint32 converts Epoch value -// to builtin uint32. -// -// Try to avoid direct cast for -// better portability. -func TypeToUint32(v ExtendedHeaderType) uint32 { - return uint32(v) -} - -// TypesEQ reports whether t1 and t2 are the same ExtendedHeaderType. -// -// Function defines the relation of equality -// between two ExtendedHeaderType. Try to avoid comparison through -// "==" operator for better portability. -func TypesEQ(t1, t2 ExtendedHeaderType) bool { - return TypeToUint32(t1) == TypeToUint32(t2) -} - -// TypesLT reports whether t1 ExtendedHeaderType -// is less than t2. -// -// Function defines the "less than" relation -// between two ExtendedHeaderType. 
Try to avoid -// comparison through "<" operator for better portability. -func TypesLT(t1, t2 ExtendedHeaderType) bool { - return TypeToUint32(t1) < TypeToUint32(t2) -} - -// TypesGT reports whether t1 ExtendedHeaderType -// is greater than t2. -// -// Function defines the "greater than" relation -// between two ExtendedHeaderType. Try to avoid -// comparison through ">" operator for better portability. -func TypesGT(t1, t2 ExtendedHeaderType) bool { - return TypeToUint32(t1) > TypeToUint32(t2) -} diff --git a/pkg/core/object/extended_test.go b/pkg/core/object/extended_test.go deleted file mode 100644 index a8bd709ca..000000000 --- a/pkg/core/object/extended_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package object - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestExtendedHeader_Type(t *testing.T) { - h := new(ExtendedHeader) - - ht := TypeFromUint32(3) - h.SetType(ht) - - require.True(t, TypesEQ(ht, h.Type())) -} - -func TestExtendedHeader_Value(t *testing.T) { - h := new(ExtendedHeader) - - val := 100 - h.SetValue(val) - - require.Equal(t, val, h.Value()) -} diff --git a/pkg/core/object/header.go b/pkg/core/object/header.go deleted file mode 100644 index a55e7683f..000000000 --- a/pkg/core/object/header.go +++ /dev/null @@ -1,73 +0,0 @@ -package object - -import ( - "errors" -) - -// Header represents NeoFS object header. -type Header struct { - // SystemHeader is an obligatory part of any object header. - // It is used to set the identity and basic parameters of - // the object. - // - // Header must inherit all the methods of SystemHeader, - // so the SystemHeader is embedded in Header. - SystemHeader - - extendedHeaders []ExtendedHeader // extended headers -} - -// ErrNilHeader is returned by functions that expect -// a non-nil Header pointer, but received nil. -var ErrNilHeader = errors.New("object header is nil") - -// ExtendedHeaders returns the extended headers of header. -// -// Changing the result is unsafe and affects the header. -// In order to prevent state mutations, use CopyExtendedHeaders. -func (h *Header) ExtendedHeaders() []ExtendedHeader { - return h.extendedHeaders -} - -// CopyExtendedHeaders returns the copy of extended headers. -// -// Changing the result is safe and does not affect the header. -// -// Returns nil if header is nil. -func CopyExtendedHeaders(h *Header) []ExtendedHeader { - if h == nil { - return nil - } - - res := make([]ExtendedHeader, len(h.extendedHeaders)) - - copy(res, h.extendedHeaders) - - return res -} - -// SetExtendedHeaders sets the extended headers of the header. -// -// Subsequent changing the source slice is unsafe and affects -// the header. In order to prevent state mutations, use -// SetExtendedHeadersCopy. -func (h *Header) SetExtendedHeaders(v []ExtendedHeader) { - h.extendedHeaders = v -} - -// SetExtendedHeadersCopy copies extended headers and sets the copy -// as the object extended headers. -// -// Subsequent changing the source slice is safe and does not affect -// the header. -// -// SetExtendedHeadersCopy does nothing if Header is nil. 
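// Copy-semantics sketch (hypothetical helper, illustrative only): extended
// headers follow the same copy discipline as the other reference-typed fields
// in this package.
func exampleExtendedHeaders(src []ExtendedHeader) []ExtendedHeader {
	h := new(Header)

	SetExtendedHeadersCopy(h, src) // h keeps its own copy of src

	if len(src) > 0 {
		src[0].SetType(TypeFromUint32(42)) // does not affect h
	}

	return CopyExtendedHeaders(h) // safe for the caller to modify
}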
-func SetExtendedHeadersCopy(h *Header, hs []ExtendedHeader) { - if h == nil { - return - } - - h.extendedHeaders = make([]ExtendedHeader, len(hs)) - - copy(h.extendedHeaders, hs) -} diff --git a/pkg/core/object/header_test.go b/pkg/core/object/header_test.go deleted file mode 100644 index 8164bf10b..000000000 --- a/pkg/core/object/header_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package object - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func testHeaders(num uint32) []ExtendedHeader { - res := make([]ExtendedHeader, num) - - for i := uint32(0); i < num; i++ { - res[i].SetType(TypeFromUint32(i)) - res[i].SetValue(i) - } - - return res -} - -func TestObject_ExtendedHeaders(t *testing.T) { - h := new(Header) - - hs := testHeaders(2) - - h.SetExtendedHeaders(hs) - - require.Equal(t, hs, h.ExtendedHeaders()) -} - -func TestCopyExtendedHeaders(t *testing.T) { - require.Nil(t, CopyExtendedHeaders(nil)) - - h := new(Header) - - // set initial headers - initHs := testHeaders(2) - h.SetExtendedHeaders(initHs) - - // get extended headers copy - hsCopy := CopyExtendedHeaders(h) - - // change the copy - hsCopy[0] = hsCopy[1] - - // check that extended headers have not changed - require.Equal(t, initHs, h.ExtendedHeaders()) -} - -func TestSetExtendedHeadersCopy(t *testing.T) { - require.NotPanics(t, func() { - SetExtendedHeadersCopy(nil, nil) - }) - - h := new(Header) - - // create source headers - srcHs := testHeaders(2) - - // copy and set headers - SetExtendedHeadersCopy(h, srcHs) - - // get extended headers - objHs := h.ExtendedHeaders() - - // change the source headers - srcHs[0] = srcHs[1] - - // check that headeres have not changed - require.Equal(t, objHs, h.ExtendedHeaders()) -} - -func TestHeaderRelations(t *testing.T) { - items := []struct { - relFn func(ExtendedHeaderType, ExtendedHeaderType) bool - - base, ok, fail uint32 - }{ - {relFn: TypesEQ, base: 1, ok: 1, fail: 2}, - {relFn: TypesLT, base: 1, ok: 2, fail: 0}, - {relFn: TypesGT, base: 1, ok: 0, fail: 2}, - } - - for _, item := range items { - require.True(t, - item.relFn( - TypeFromUint32(item.base), - TypeFromUint32(item.ok), - ), - ) - - require.False(t, - item.relFn( - TypeFromUint32(item.base), - TypeFromUint32(item.fail), - ), - ) - } -} diff --git a/pkg/core/object/headers/enum.go b/pkg/core/object/headers/enum.go deleted file mode 100644 index 730d35597..000000000 --- a/pkg/core/object/headers/enum.go +++ /dev/null @@ -1,63 +0,0 @@ -package headers - -import ( - "github.com/nspcc-dev/neofs-node/pkg/core/object" -) - -// Header represents object extended header. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/object.ExtendedHeader. -type Header = object.ExtendedHeader - -// Type represents extended header type. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/object.ExtendedHeaderType. -type Type = object.ExtendedHeaderType - -const ( - // this is the only place where this cast is appropriate, - // use object.TypeFromUint32 instead. - lowerUndefined = Type(iota) // lower unsupported Type value - - // TypeLink is the type of object reference header. - TypeLink - - // TypeUser is the of user key-value string header. - TypeUser - - // TypeTransform is the type of transformation mark header. - TypeTransform - - // TypeTombstone is the type of tombstone mark header. - TypeTombstone - - // TypeSessionToken is the type of session token header. - TypeSessionToken - - // TypeHomomorphicHash is the type of homomorphic hash header. 
- TypeHomomorphicHash - - // TypePayloadChecksum is the type of payload checksum header. - TypePayloadChecksum - - // TypeIntegrity is the type of integrity header. - TypeIntegrity - - // TypeStorageGroup is the type of storage group header. - TypeStorageGroup - - // TypePublicKey is the type of public key header. - TypePublicKey - - upperUndefined // upper unsupported Type value -) - -// SupportedType returns true if Type is -// the known type of extended header. Each -// supported type has named constant. -func SupportedType(t Type) bool { - return object.TypesGT(t, lowerUndefined) && - object.TypesLT(t, upperUndefined) -} diff --git a/pkg/core/object/headers/enum_test.go b/pkg/core/object/headers/enum_test.go deleted file mode 100644 index 346948ab2..000000000 --- a/pkg/core/object/headers/enum_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package headers - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/stretchr/testify/require" -) - -func TestSupportedType(t *testing.T) { - for _, typ := range []Type{ - TypeLink, - TypeUser, - TypeTransform, - TypeTombstone, - TypeSessionToken, - TypeHomomorphicHash, - TypePayloadChecksum, - TypeIntegrity, - TypeStorageGroup, - TypePublicKey, - } { - require.True(t, SupportedType(typ)) - } - - for _, typ := range []Type{ - lowerUndefined, - upperUndefined, - object.TypeFromUint32(object.TypeToUint32(lowerUndefined) - 1), - object.TypeFromUint32(object.TypeToUint32(upperUndefined) + 1), - } { - require.False(t, SupportedType(typ)) - } -} diff --git a/pkg/core/object/headers/user.go b/pkg/core/object/headers/user.go deleted file mode 100644 index 9ef738d82..000000000 --- a/pkg/core/object/headers/user.go +++ /dev/null @@ -1,45 +0,0 @@ -package headers - -// UserHeader is a value of object extended header -// that carries user string key-value pairs. -// -// All user headers must be type of TypeUser. -// All user header must have UserHeader pointer value. -type UserHeader struct { - key, val string -} - -// NewUserHeader creates, initialized and returns -// the user extended header. -func NewUserHeader(key, val string) *Header { - res := new(Header) - - res.SetType(TypeUser) - - res.SetValue(&UserHeader{ - key: key, - val: val, - }) - - return res -} - -// Key returns the user header key. -func (u UserHeader) Key() string { - return u.key -} - -// SetKey sets the user header key. -func (u *UserHeader) SetKey(key string) { - u.key = key -} - -// Value returns the user header value. -func (u UserHeader) Value() string { - return u.val -} - -// SetValue sets the user header value. 
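// Usage sketch (hypothetical helper, illustrative only): a user header is a
// TypeUser header whose value is a *UserHeader, so reading it back requires a
// type assertion; SupportedType guards against unknown header types.
func exampleReadUserHeader(h *Header) (string, string, bool) {
	if h == nil || !SupportedType(h.Type()) {
		return "", "", false
	}

	u, ok := h.Value().(*UserHeader)
	if !ok {
		return "", "", false
	}

	return u.Key(), u.Value(), true // e.g. for NewUserHeader("color", "blue")
}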
-func (u *UserHeader) SetValue(val string) { - u.val = val -} diff --git a/pkg/core/object/headers/user_test.go b/pkg/core/object/headers/user_test.go deleted file mode 100644 index 91903ed00..000000000 --- a/pkg/core/object/headers/user_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package headers - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/stretchr/testify/require" -) - -func TestUserHeader_Key(t *testing.T) { - h := new(UserHeader) - - key := "random key" - h.SetKey(key) - - require.Equal(t, key, h.Key()) -} - -func TestUserHeader_Value(t *testing.T) { - h := new(UserHeader) - - val := "random value" - h.SetValue(val) - - require.Equal(t, val, h.Value()) -} - -func TestNewUserHeader(t *testing.T) { - key := "user key" - val := "user val" - - h := NewUserHeader(key, val) - - require.True(t, - object.TypesEQ( - TypeUser, - h.Type(), - ), - ) - - uh := h.Value().(*UserHeader) - - require.Equal(t, key, uh.Key()) - require.Equal(t, val, uh.Value()) -} diff --git a/pkg/core/object/id.go b/pkg/core/object/id.go deleted file mode 100644 index c6a5fa582..000000000 --- a/pkg/core/object/id.go +++ /dev/null @@ -1,61 +0,0 @@ -package object - -import ( - "github.com/nspcc-dev/neofs-api-go/refs" -) - -// ID represents the object identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/refs.ObjectID. -// FIXME: object ID should be defined in core package. -type ID = refs.ObjectID - -// Address represents NeoFS Object address. -// Acts as a reference to the object. -type Address struct { - cid CID - - id ID -} - -// CID return the identifier of the container -// that the object belongs to. -func (a Address) CID() CID { - return a.cid -} - -// SetCID sets the identifier of the container -// that the object belongs to. -func (a *Address) SetCID(v CID) { - a.cid = v -} - -// ID returns the unique identifier of the -// object in container. -func (a Address) ID() ID { - return a.id -} - -// SetID sets the unique identifier of the -// object in container. -func (a *Address) SetID(v ID) { - a.id = v -} - -// AddressFromObject returns an address based -// on the object's header. -// -// Returns nil on nil object. -func AddressFromObject(o *Object) *Address { - if o == nil { - return nil - } - - a := new(Address) - - a.SetCID(o.CID()) - a.SetID(o.ID()) - - return a -} diff --git a/pkg/core/object/id_test.go b/pkg/core/object/id_test.go deleted file mode 100644 index 853c2817e..000000000 --- a/pkg/core/object/id_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package object - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAddress_CID(t *testing.T) { - a := new(Address) - - cid := CID{1, 2, 3} - a.SetCID(cid) - - require.Equal(t, cid, a.CID()) -} - -func TestAddress_ID(t *testing.T) { - a := new(Address) - - id := ID{1, 2, 3} - a.SetID(id) - - require.Equal(t, id, a.ID()) -} - -func TestAddressFromObject(t *testing.T) { - require.Nil(t, AddressFromObject(nil)) - - o := new(Object) - - cid := CID{4, 5, 6} - o.SetCID(cid) - - id := ID{1, 2, 3} - o.SetID(id) - - a := AddressFromObject(o) - - require.Equal(t, cid, a.CID()) - require.Equal(t, id, a.ID()) -} diff --git a/pkg/core/object/object.go b/pkg/core/object/object.go deleted file mode 100644 index 57e874467..000000000 --- a/pkg/core/object/object.go +++ /dev/null @@ -1,76 +0,0 @@ -package object - -import ( - "errors" -) - -// Object represents NeoFS Object. -type Object struct { - // Header is an obligatory part of any object. 
- // It is used to carry any additional information - // besides payload. - // - // Object must inherit all the methods of Header, - // so the Header is embedded in Object. - Header - - payload []byte // payload bytes -} - -// ErrNilObject is returned by functions that expect -// a non-nil Object pointer, but received nil. -var ErrNilObject = errors.New("object is nil") - -// Payload returns payload bytes of the object. -// -// Changing the result is unsafe and affects -// the object. In order to prevent state -// mutations, use CopyPayload. -func (o *Object) Payload() []byte { - return o.payload -} - -// CopyPayload returns the copy of -// object payload. -// -// Changing the result is safe and -// does not affect the object. -// -// CopyPayload returns nil if object is nil. -func CopyPayload(o *Object) []byte { - if o == nil { - return nil - } - - res := make([]byte, len(o.payload)) - copy(res, o.payload) - - return res -} - -// SetPayload sets objecyt payload bytes. -// -// Subsequent changing the source slice -// is unsafe and affects the object. -// In order to prevent state mutations, -// use SetPayloadCopy. -func (o *Object) SetPayload(v []byte) { - o.payload = v -} - -// SetPayloadCopy copies slice bytes and sets -// the copy as object payload. -// -// Subsequent changing the source slice -// is safe and does not affect the object. -// -// SetPayloadCopy does nothing if object is nil. -func SetPayloadCopy(o *Object, payload []byte) { - if o == nil { - return - } - - o.payload = make([]byte, len(payload)) - - copy(o.payload, payload) -} diff --git a/pkg/core/object/object_test.go b/pkg/core/object/object_test.go deleted file mode 100644 index 9b1bba54d..000000000 --- a/pkg/core/object/object_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package object - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestObject_Payload(t *testing.T) { - o := new(Object) - - payload := []byte{1, 2, 3} - o.SetPayload(payload) - - require.Equal(t, payload, o.Payload()) -} - -func TestCopyPayload(t *testing.T) { - require.Nil(t, CopyPayload(nil)) - - o := new(Object) - - // set initial node key - initPayload := []byte{1, 2, 3} - o.SetPayload(initPayload) - - // get payload copy - pCopy := CopyPayload(o) - - // change the copy - pCopy[0]++ - - // check that payload has not changed - require.Equal(t, initPayload, o.Payload()) -} - -func TestSetPayloadCopy(t *testing.T) { - require.NotPanics(t, func() { - SetExtendedHeadersCopy(nil, nil) - }) - - o := new(Object) - - // create source payload - srcPayload := []byte{1, 2, 3} - - // copy and set payload - SetPayloadCopy(o, srcPayload) - - // get payload - objPayload := o.Payload() - - // change the source payload - srcPayload[0]++ - - // check that payload has not changed - require.Equal(t, objPayload, o.Payload()) -} diff --git a/pkg/core/object/storage/storage.go b/pkg/core/object/storage/storage.go deleted file mode 100644 index 169059260..000000000 --- a/pkg/core/object/storage/storage.go +++ /dev/null @@ -1,61 +0,0 @@ -package storage - -import ( - "errors" - - "github.com/nspcc-dev/neofs-node/pkg/core/object" -) - -// Object represents the NeoFS Object. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/object.Object. -type Object = object.Object - -// Address represents the address of -// NeoFS Object. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/object.Address. -type Address = object.Address - -// Storage is an interface that wraps -// basic object storage methods. 
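// Copy-semantics sketch (hypothetical helper, illustrative only):
// Payload/SetPayload share the slice with the caller, while
// CopyPayload/SetPayloadCopy keep the object isolated from later mutations.
func examplePayloadCopy(src []byte) []byte {
	o := new(object.Object)

	object.SetPayloadCopy(o, src) // o stores its own copy of src

	if len(src) > 0 {
		src[0]++ // does not affect o's payload
	}

	return object.CopyPayload(o) // safe for the caller to modify
}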
-type Storage interface { - // Put saves pointed object to the underlying storage. - // It returns object address for reference and any error - // encountered that caused the saving to interrupt. - // - // Put must return object.ErrNilObject on nil-pointer. - // - // Implementations must not modify the object through the pointer (even temporarily). - // Implementations must not retain the object pointer. - // - // Object rewriting behavior is dictated by implementation. - Put(*Object) (*Address, error) - - // Get reads the object from the storage by address. - // It returns the pointer to requested object and any error encountered. - // - // Get must return exactly one non-nil value. - // Get must return ErrNotFound if the object is not in storage. - // - // Implementations must not retain the object pointer and modify - // the object through it. - Get(Address) (*Object, error) - - // Delete removes the object from the storage. - // It returns any error encountered that caused the deletion to interrupt. - // - // Delete must return nil if object was successfully deleted. - // - // Behavior when deleting a nonexistent object is dictated by implementation. - Delete(Address) error -} - -// ErrNotFound is the error returned when object was not found in storage. -var ErrNotFound = errors.New("object not found") - -// ErrNilStorage is the error returned by functions that -// expect a non-nil object storage implementation, but received nil. -var ErrNilStorage = errors.New("object storage is nil") diff --git a/pkg/core/object/storage/test/storage.go b/pkg/core/object/storage/test/storage.go deleted file mode 100644 index fbea9e777..000000000 --- a/pkg/core/object/storage/test/storage.go +++ /dev/null @@ -1,88 +0,0 @@ -package test - -import ( - "sync" - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/core/object/storage" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type testStorage struct { - *sync.RWMutex - - items map[storage.Address]*storage.Object -} - -func (s *testStorage) Put(o *storage.Object) (*storage.Address, error) { - if o == nil { - return nil, object.ErrNilObject - } - - a := object.AddressFromObject(o) - - s.Lock() - s.items[*a] = o - s.Unlock() - - return a, nil -} - -func (s *testStorage) Get(a storage.Address) (*storage.Object, error) { - s.RLock() - o, ok := s.items[a] - s.RUnlock() - - if !ok { - return nil, storage.ErrNotFound - } - - return o, nil -} - -func (s *testStorage) Delete(a storage.Address) error { - s.Lock() - delete(s.items, a) - s.Unlock() - - return nil -} - -// New creates new container storage -// that stores containers in go-builtin map. -func New() storage.Storage { - return &testStorage{ - RWMutex: new(sync.RWMutex), - items: make(map[storage.Address]*storage.Object), - } -} - -// Storage conducts testing of object -// storage for interface specification. -// -// Storage must be empty. 
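// Caller-side sketch (hypothetical helper, illustrative only): per the Get
// contract above, a missing object is reported via storage.ErrNotFound, which
// callers distinguish from other failures.
func exampleGetObject(s storage.Storage, a storage.Address) (*storage.Object, bool, error) {
	o, err := s.Get(a)

	switch {
	case err == nil:
		return o, true, nil
	case errors.Is(err, storage.ErrNotFound):
		return nil, false, nil // absent, but not an error for the caller
	default:
		return nil, false, err
	}
}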
-func Storage(t *testing.T, s storage.Storage) { - _, err := s.Put(nil) - require.True(t, errors.Is(err, object.ErrNilObject)) - - a := new(object.Address) - _, err = s.Get(*a) - require.True(t, errors.Is(err, storage.ErrNotFound)) - - o := new(object.Object) - o.SetID(object.ID{1, 2, 3}) - - a, err = s.Put(o) - require.NoError(t, err) - - o2, err := s.Get(*a) - require.NoError(t, err) - - require.Equal(t, o, o2) - - require.NoError(t, s.Delete(*a)) - _, err = s.Get(*a) - require.True(t, errors.Is(err, storage.ErrNotFound)) -} diff --git a/pkg/core/object/storage/test/storage_test.go b/pkg/core/object/storage/test/storage_test.go deleted file mode 100644 index 7614f467f..000000000 --- a/pkg/core/object/storage/test/storage_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package test - -import ( - "testing" -) - -func TestNewStorage(t *testing.T) { - s := New() - - Storage(t, s) -} diff --git a/pkg/core/object/sys.go b/pkg/core/object/sys.go deleted file mode 100644 index a73a9ba16..000000000 --- a/pkg/core/object/sys.go +++ /dev/null @@ -1,107 +0,0 @@ -package object - -import ( - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap/epoch" -) - -// CID represents the container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.ID. -type CID = container.ID - -// OwnerID represents the container -// owner identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.OwnerID. -type OwnerID = container.OwnerID - -// Epoch represents the NeoFS epoch number. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/netmap/epoch.Epoch. -type Epoch = epoch.Epoch - -// SystemHeader represents the -// system header of NeoFS Object. -type SystemHeader struct { - version uint64 // object version - - payloadLen uint64 // length of the payload bytes - - id ID // object ID - - cid CID // container ID - - ownerID OwnerID // object owner ID - - creatEpoch Epoch // creation epoch number -} - -// Version returns the object version number. -func (s *SystemHeader) Version() uint64 { - return s.version -} - -// SetVersion sets the object version number. -func (s *SystemHeader) SetVersion(v uint64) { - s.version = v -} - -// PayloadLength returns the length of the -// object payload bytes. -func (s *SystemHeader) PayloadLength() uint64 { - return s.payloadLen -} - -// SetPayloadLength sets the length of the object -// payload bytes. -func (s *SystemHeader) SetPayloadLength(v uint64) { - s.payloadLen = v -} - -// ID returns the object identifier. -func (s *SystemHeader) ID() ID { - return s.id -} - -// SetID sets the object identifier. -func (s *SystemHeader) SetID(v ID) { - s.id = v -} - -// CID returns the container identifier -// to which the object belongs. -func (s *SystemHeader) CID() CID { - return s.cid -} - -// SetCID sets the container identifier -// to which the object belongs. -func (s *SystemHeader) SetCID(v CID) { - s.cid = v -} - -// OwnerID returns the object owner identifier. -func (s *SystemHeader) OwnerID() OwnerID { - return s.ownerID -} - -// SetOwnerID sets the object owner identifier. -func (s *SystemHeader) SetOwnerID(v OwnerID) { - s.ownerID = v -} - -// CreationEpoch returns the epoch number -// in which the object was created. -func (s *SystemHeader) CreationEpoch() Epoch { - return s.creatEpoch -} - -// SetCreationEpoch sets the epoch number -// in which the object was created. 
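// Usage sketch (hypothetical helper, illustrative only): SystemHeader is
// embedded in Header, and Header in Object, so the obligatory identity fields
// are set directly on the object.
func exampleSystemHeader(cid CID, id ID, owner OwnerID, created Epoch) *Object {
	o := new(Object)

	o.SetVersion(1)
	o.SetID(id)
	o.SetCID(cid)
	o.SetOwnerID(owner)
	o.SetCreationEpoch(created)
	o.SetPayloadLength(0)

	return o
}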
-func (s *SystemHeader) SetCreationEpoch(v Epoch) { - s.creatEpoch = v -} diff --git a/pkg/core/object/sys_test.go b/pkg/core/object/sys_test.go deleted file mode 100644 index f14ee82a3..000000000 --- a/pkg/core/object/sys_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package object - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/core/netmap/epoch" - "github.com/stretchr/testify/require" -) - -func TestSystemHeader_Version(t *testing.T) { - h := new(SystemHeader) - - v := uint64(7) - h.SetVersion(v) - - require.Equal(t, v, h.Version()) -} - -func TestSystemHeader_PayloadLength(t *testing.T) { - h := new(SystemHeader) - - ln := uint64(3) - h.SetPayloadLength(ln) - - require.Equal(t, ln, h.PayloadLength()) -} - -func TestSystemHeader_ID(t *testing.T) { - h := new(SystemHeader) - - id := ID{1, 2, 3} - h.SetID(id) - - require.Equal(t, id, h.ID()) -} - -func TestSystemHeader_CID(t *testing.T) { - h := new(SystemHeader) - - cid := CID{1, 2, 3} - h.SetCID(cid) - - require.Equal(t, cid, h.CID()) -} - -func TestSystemHeader_OwnerID(t *testing.T) { - h := new(SystemHeader) - - ownerID := OwnerID{1, 2, 3} - h.SetOwnerID(ownerID) - - require.Equal(t, ownerID, h.OwnerID()) -} - -func TestSystemHeader_CreationEpoch(t *testing.T) { - h := new(SystemHeader) - - ep := epoch.FromUint64(1) - h.SetCreationEpoch(ep) - - require.True(t, epoch.EQ(ep, h.CreationEpoch())) -} diff --git a/pkg/morph/client/balance/wrapper/balanceOf.go b/pkg/morph/client/balance/wrapper/balanceOf.go index bb1d04910..add725d32 100644 --- a/pkg/morph/client/balance/wrapper/balanceOf.go +++ b/pkg/morph/client/balance/wrapper/balanceOf.go @@ -1,35 +1,11 @@ package wrapper -import ( - "github.com/nspcc-dev/neo-go/pkg/encoding/address" - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/morph/client/balance" - "github.com/pkg/errors" -) - // OwnerID represents the container owner identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.OwnerID. -type OwnerID = container.OwnerID +// FIXME: correct the definition. +type OwnerID struct{} // BalanceOf receives the amount of funds in the client's account // through the Balance contract call, and returns it. func (w *Wrapper) BalanceOf(ownerID OwnerID) (int64, error) { - // convert Neo wallet address to Uint160 - u160, err := address.StringToUint160(ownerID.String()) - if err != nil { - return 0, errors.Wrap(err, "could not convert wallet address to Uint160") - } - - // prepare invocation arguments - args := balance.GetBalanceOfArgs{} - args.SetWallet(u160.BytesBE()) - - values, err := w.client.BalanceOf(args) - if err != nil { - return 0, errors.Wrap(err, "could not invoke smart contract") - } - - return values.Amount(), nil + panic("implement me") } diff --git a/pkg/morph/client/container/wrapper/container.go b/pkg/morph/client/container/wrapper/container.go index 5a390b401..c049c8aa5 100644 --- a/pkg/morph/client/container/wrapper/container.go +++ b/pkg/morph/client/container/wrapper/container.go @@ -1,24 +1,12 @@ package wrapper -import ( - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/container" - "github.com/pkg/errors" -) - // OwnerID represents the container owner identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/storage.OwnerID. 
-type OwnerID = storage.OwnerID +// FIXME: correct the definition. +type OwnerID struct{} // Container represents the NeoFS Container structure. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/storage.Container. -type Container = storage.Container +// FIXME: correct the definition. +type Container struct{} // Put saves passed container structure in NeoFS system // through Container contract call. @@ -26,33 +14,7 @@ type Container = storage.Container // Returns calculated container identifier and any error // encountered that caused the saving to interrupt. func (w *Wrapper) Put(cnr *Container) (*CID, error) { - // calculate container identifier - // - // Note: cid is used as return value only, but the calculation is performed - // primarily in order to catch potential error before contract client call. - cid, err := container.CalculateID(cnr) - if err != nil { - return nil, errors.Wrap(err, "could not calculate container identifier") - } - - // marshal the container - cnrBytes, err := cnr.MarshalBinary() - if err != nil { - return nil, errors.Wrap(err, "could not marshal the container") - } - - // prepare invocation arguments - args := contract.PutArgs{} - args.SetOwnerID(cnr.OwnerID().Bytes()) - args.SetContainer(cnrBytes) - args.SetSignature(nil) // TODO: set signature from request when will appear. - - // invoke smart contract call - if err := w.client.Put(args); err != nil { - return nil, errors.Wrap(err, "could not invoke smart contract") - } - - return cid, nil + panic("implement me") } // Get reads the container from NeoFS system by identifier @@ -61,29 +23,7 @@ func (w *Wrapper) Put(cnr *Container) (*CID, error) { // If an empty slice is returned for the requested identifier, // storage.ErrNotFound error is returned. func (w *Wrapper) Get(cid CID) (*Container, error) { - // prepare invocation arguments - args := contract.GetArgs{} - args.SetCID(cid.Bytes()) - - // invoke smart contract call - values, err := w.client.Get(args) - if err != nil { - return nil, errors.Wrap(err, "could not invoke smart contract") - } - - cnrBytes := values.Container() - if len(cnrBytes) == 0 { - return nil, storage.ErrNotFound - } - - cnr := new(Container) - - // unmarshal the container - if err := cnr.UnmarshalBinary(cnrBytes); err != nil { - return nil, errors.Wrap(err, "could not unmarshal container") - } - - return cnr, nil + panic("implement me") } // Delete removes the container from NeoFS system @@ -92,19 +32,7 @@ func (w *Wrapper) Get(cid CID) (*Container, error) { // Returns any error encountered that caused // the removal to interrupt. func (w *Wrapper) Delete(cid CID) error { - // prepare invocation arguments - args := contract.DeleteArgs{} - args.SetCID(cid.Bytes()) - args.SetOwnerID(nil) // TODO: add owner ID when will appear. - args.SetSignature(nil) // TODO: add CID signature when will appear. - - // invoke smart contract call - // - // Note: errors.Wrap return nil on nil error arg. - return errors.Wrap( - w.client.Delete(args), - "could not invoke smart contract", - ) + panic("implement me") } // List returns a list of container identifiers belonging @@ -114,35 +42,5 @@ func (w *Wrapper) Delete(cid CID) error { // Returns the identifiers of all NeoFS containers if pointer // to owner identifier is nil. func (w *Wrapper) List(ownerID *OwnerID) ([]CID, error) { - // prepare invocation arguments - args := contract.ListArgs{} - - // Note: by default owner identifier slice is nil, - // so client won't attach invocation arguments. 
- // This behavior matches the nil argument of current method. - // If argument is not nil, we must specify owner identifier. - if ownerID != nil { - args.SetOwnerID(ownerID.Bytes()) - } - - // invoke smart contract call - values, err := w.client.List(args) - if err != nil { - return nil, errors.Wrap(err, "could not invoke smart contract") - } - - binCIDList := values.CIDList() - cidList := make([]CID, 0, len(binCIDList)) - - // unmarshal all container identifiers - for i := range binCIDList { - cid, err := refs.CIDFromBytes(binCIDList[i]) - if err != nil { - return nil, errors.Wrapf(err, "could not decode container ID #%d", i) - } - - cidList = append(cidList, cid) - } - - return cidList, nil + panic("implement me") } diff --git a/pkg/morph/client/container/wrapper/eacl.go b/pkg/morph/client/container/wrapper/eacl.go index 4300186aa..e0dbb8cc4 100644 --- a/pkg/morph/client/container/wrapper/eacl.go +++ b/pkg/morph/client/container/wrapper/eacl.go @@ -1,33 +1,13 @@ package wrapper -import ( - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/container" - "github.com/pkg/errors" -) - // Table represents extended ACL rule table. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage.Table. -type Table = storage.Table +// FIXME: correct the definition. +type Table struct{} // GetEACL reads the extended ACL table from NeoFS system // through Container contract call. func (w *Wrapper) GetEACL(cid CID) (Table, error) { - // prepare invocation arguments - args := contract.EACLArgs{} - args.SetCID(cid.Bytes()) - - // invoke smart contract call - values, err := w.client.EACL(args) - if err != nil { - return nil, errors.Wrap(err, "could not invoke smart contract") - } - - // unmarshal and return eACL table - return eacl.UnmarshalTable(values.EACL()) + panic("implement me") } // PutEACL saves the extended ACL table in NeoFS system @@ -35,17 +15,5 @@ func (w *Wrapper) GetEACL(cid CID) (Table, error) { // // Returns any error encountered that caused the saving to interrupt. func (w *Wrapper) PutEACL(cid CID, table Table, sig []byte) error { - // prepare invocation arguments - args := contract.SetEACLArgs{} - args.SetEACL(eacl.MarshalTable(table)) - args.SetCID(cid.Bytes()) - args.SetSignature(sig) - - // invoke smart contract call - // - // Note: errors.Wrap return nil on nil error arg. - return errors.Wrap( - w.client.SetEACL(args), - "could not invoke smart contract", - ) + panic("implement me") } diff --git a/pkg/morph/client/container/wrapper/wrapper.go b/pkg/morph/client/container/wrapper/wrapper.go index 9d44c41ed..18b0d1f10 100644 --- a/pkg/morph/client/container/wrapper/wrapper.go +++ b/pkg/morph/client/container/wrapper/wrapper.go @@ -1,7 +1,6 @@ package wrapper import ( - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" "github.com/nspcc-dev/neofs-node/pkg/morph/client/container" ) @@ -12,10 +11,8 @@ import ( type Client = container.Client // CID represents the container identifier. -// -// CID is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/storage.CID. -type CID = storage.CID +// FIXME: correct the definition. 
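Illustrative sketch, not part of the patch itself: a caller-side use of the GetEACL/PutEACL wrapper methods above. The signatures are the ones kept by this change, but the new stub bodies panic until the FIXME definitions are filled in; copyEACL and its parameters are hypothetical names introduced only for illustration.

package main

import (
	wrapcnr "github.com/nspcc-dev/neofs-node/pkg/morph/client/container/wrapper"
)

// copyEACL reads the eACL table of one container and stores it for another
// through the Container contract wrapper.
func copyEACL(w *wrapcnr.Wrapper, from, to wrapcnr.CID, sig []byte) error {
	table, err := w.GetEACL(from)
	if err != nil {
		return err
	}

	return w.PutEACL(to, table, sig)
}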
+type CID struct{} // Wrapper is a wrapper over container contract // client which implements container storage and diff --git a/pkg/morph/client/netmap/wrapper/add_peer.go b/pkg/morph/client/netmap/wrapper/add_peer.go index 4afcee702..4644c9aa6 100644 --- a/pkg/morph/client/netmap/wrapper/add_peer.go +++ b/pkg/morph/client/netmap/wrapper/add_peer.go @@ -1,37 +1,10 @@ package wrapper -import ( - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap" - "github.com/pkg/errors" -) +// NodeInfo groups information about NeoFS storage node. +type NodeInfo struct{} // AddPeer registers peer in NeoFS network through // Netmap contract call. -func (w *Wrapper) AddPeer(nodeInfo netmap.Info) error { - // prepare invocation arguments - args := contract.AddPeerArgs{} - - info := contract.PeerInfo{} - info.SetPublicKey(nodeInfo.PublicKey()) - info.SetAddress([]byte(nodeInfo.Address())) - - opts := nodeInfo.Options() - binOpts := make([][]byte, 0, len(opts)) - - for i := range opts { - binOpts = append(binOpts, []byte(opts[i])) - } - - info.SetOptions(binOpts) - - args.SetInfo(info) - - // invoke smart contract call - // - // Note: errors.Wrap returns nil on nil error arg. - return errors.Wrap( - w.client.AddPeer(args), - "could not invoke smart contract", - ) +func (w *Wrapper) AddPeer(nodeInfo NodeInfo) error { + panic("implement me") } diff --git a/pkg/morph/client/netmap/wrapper/netmap.go b/pkg/morph/client/netmap/wrapper/netmap.go index 250ac145a..42b1f6acd 100644 --- a/pkg/morph/client/netmap/wrapper/netmap.go +++ b/pkg/morph/client/netmap/wrapper/netmap.go @@ -1,60 +1,12 @@ package wrapper -import ( - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap" - "github.com/pkg/errors" -) - // NetMap represents the NeoFS network map. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/NetMap. -type NetMap = netmap.NetMap - -// Info represents node information. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/netmap.Info. -type Info = netmap.Info +// FIXME: correct the definition. +type NetMap struct{} // GetNetMap receives information list about storage nodes // through the Netmap contract call, composes network map // from them and returns it. 
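Illustrative sketch, not part of the patch itself: a caller-side use of GetNetMap as documented above. It assumes the pre-change NetMap alias to pkg/core/netmap.NetMap with its Nodes() accessor; the stub introduced by this change panics until the FIXME is resolved, and printNodes is a hypothetical helper name.

package main

import (
	"fmt"

	wrapnm "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper"
)

// printNodes dumps the current network map received from the Netmap contract.
func printNodes(w *wrapnm.Wrapper) error {
	nm, err := w.GetNetMap()
	if err != nil {
		return err
	}

	for _, node := range nm.Nodes() {
		fmt.Println(node.Address(), node.Options())
	}

	return nil
}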
func (w *Wrapper) GetNetMap() (*NetMap, error) { - // prepare invocation arguments - args := contract.GetNetMapArgs{} - - // invoke smart contract call - values, err := w.client.NetMap(args) - if err != nil { - return nil, errors.Wrap(err, "could not invoke smart contract") - } - - // parse response and fill the network map - nm := netmap.New() - - peerList := values.Peers() - - for i := range peerList { - info := Info{} - - info.SetPublicKey(peerList[i].PublicKey()) - info.SetAddress(string(peerList[i].Address())) - - binOpts := peerList[i].Options() - opts := make([]string, 0, len(binOpts)) - - for j := range binOpts { - opts = append(opts, string(binOpts[j])) - } - - info.SetOptions(opts) - - if err := nm.AddNode(info); err != nil { - return nil, errors.Wrapf(err, "could not add node #%d to network map", i) - } - } - - return nm, nil + panic("implement me") } diff --git a/pkg/morph/client/netmap/wrapper/new_epoch.go b/pkg/morph/client/netmap/wrapper/new_epoch.go index d4eae6530..db32f7481 100644 --- a/pkg/morph/client/netmap/wrapper/new_epoch.go +++ b/pkg/morph/client/netmap/wrapper/new_epoch.go @@ -1,23 +1,11 @@ package wrapper -import ( - "github.com/nspcc-dev/neofs-node/pkg/core/netmap/epoch" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap" - "github.com/pkg/errors" -) +// Epoch represents the NeoFS epoch. +// FIXME: correct the definition. +type Epoch struct{} // NewEpoch updates NeoFS epoch number through // Netmap contract call. -func (w *Wrapper) NewEpoch(e epoch.Epoch) error { - // prepare invocation arguments - args := contract.NewEpochArgs{} - args.SetEpochNumber(int64(epoch.ToUint64(e))) - - // invoke smart contract call - // - // Note: errors.Wrap returns nil on nil error arg. - return errors.Wrap( - w.client.NewEpoch(args), - "could not invoke smart contract", - ) +func (w *Wrapper) NewEpoch(e Epoch) error { + panic("implement me") } diff --git a/pkg/network/bootstrap/.gitkeep b/pkg/network/bootstrap/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/bootstrap/bootstrap.go b/pkg/network/bootstrap/bootstrap.go deleted file mode 100644 index 041041dfa..000000000 --- a/pkg/network/bootstrap/bootstrap.go +++ /dev/null @@ -1,60 +0,0 @@ -package bootstrap - -import ( - "context" - - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" -) - -// ContractClient represents the Netmap -// contract client. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper.Wrapper. -type ContractClient = *wrapper.Wrapper - -// NodeInfo represents the -// information about storage node. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/netmap.Info. -type NodeInfo = netmap.Info - -// Registerer represents the tool that -// registers storage node in NeoFS system. -// -// Working Registerer must be created via constructor New. -// Using the Registerer that has been created with new(Registerer) -// expression (or just declaring a Registerer variable) is unsafe -// and can lead to panic. -type Registerer struct { - client ContractClient - - info NodeInfo -} - -// New creates, initializes and returns the Registerer instance. -// -// If passed contract client is nil, wrapper.ErrNilWrapper is returned. 
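Illustrative sketch, not part of the patch itself: how the Registerer documented above was wired into node start-up in the package removed here. The register helper name is introduced only for illustration.

package main

import (
	"context"
	"log"

	"github.com/nspcc-dev/neofs-node/pkg/network/bootstrap"
)

// register constructs the Registerer and performs bootstrap: construction
// fails with wrapper.ErrNilWrapper on a nil contract client, and Bootstrap
// panics if the AddPeer contract call returns an error.
func register(ctx context.Context, client bootstrap.ContractClient, info bootstrap.NodeInfo) {
	reg, err := bootstrap.New(client, info)
	if err != nil {
		log.Fatal(err)
	}

	reg.Bootstrap(ctx)
}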
-func New(client ContractClient, info NodeInfo) (*Registerer, error) { - if client == nil { - return nil, wrapper.ErrNilWrapper - } - - return &Registerer{ - client: client, - info: info, - }, nil -} - -// Bootstrap registers storage node in NeoFS system -// through Netmap contract client. -// -// If contract client returns error, panic arises without retry. -func (r *Registerer) Bootstrap(context.Context) { - // register peer in NeoFS network - if err := r.client.AddPeer(r.info); err != nil { - panic(err) - } -} diff --git a/pkg/network/peers/.gitkeep b/pkg/network/peers/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/peers/metrics.go b/pkg/network/peers/metrics.go deleted file mode 100644 index 9391f7f18..000000000 --- a/pkg/network/peers/metrics.go +++ /dev/null @@ -1,45 +0,0 @@ -package peers - -import ( - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc/connectivity" -) - -const stateLabel = "state" - -var grpcConnections = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Help: "gRPC connections", - Name: "grpc_connections", - Namespace: "neofs", - }, - []string{stateLabel}, -) - -var conStates = []connectivity.State{ - connectivity.Idle, - connectivity.Connecting, - connectivity.Ready, - connectivity.TransientFailure, - connectivity.Shutdown, -} - -func updateMetrics(items map[connectivity.State]float64) { - for _, state := range conStates { - grpcConnections.With(prometheus.Labels{ - stateLabel: state.String(), - }).Set(items[state]) - } -} - -func init() { - prometheus.MustRegister( - grpcConnections, - ) - - for _, state := range conStates { - grpcConnections.With(prometheus.Labels{ - stateLabel: state.String(), - }).Set(0) - } -} diff --git a/pkg/network/peers/peers.go b/pkg/network/peers/peers.go deleted file mode 100644 index 0536bd222..000000000 --- a/pkg/network/peers/peers.go +++ /dev/null @@ -1,234 +0,0 @@ -package peers - -import ( - "context" - "net" - "sync" - "time" - - "github.com/multiformats/go-multiaddr" - manet "github.com/multiformats/go-multiaddr-net" - "github.com/nspcc-dev/neofs-node/pkg/network" - "github.com/pkg/errors" - "go.uber.org/zap" - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/status" -) - -type ( - // Interface is an interface of network connections controller. - Interface interface { - Job(context.Context) - GRPCConnector - } - - // GRPCConnector is an interface of gRPC virtual connector. - GRPCConnector interface { - GRPCConnection(ctx context.Context, maddr multiaddr.Multiaddr) (*grpc.ClientConn, error) - } - - // Params groups the parameters of Interface. 
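Illustrative sketch, not part of the patch itself: typical use of the connection controller Interface defined above, relying on the Params fields and the constructor New shown further in this file (zero-valued durations are replaced by defaults there). The dialPeer helper name is introduced only for illustration.

package main

import (
	"context"
	"log"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-node/pkg/network/peers"
	"go.uber.org/zap"
)

// dialPeer opens (or reuses) a cached gRPC connection to the given address.
func dialPeer(maddr multiaddr.Multiaddr) {
	iface, err := peers.New(peers.Params{Logger: zap.L()})
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Background job closes idle connections and reports metrics.
	go iface.Job(ctx)

	conn, err := iface.GRPCConnection(ctx, maddr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}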
- Params struct { - Logger *zap.Logger - ConnectionTTL time.Duration - ConnectionIDLE time.Duration - MetricsTimeout time.Duration - KeepAliveTTL time.Duration - KeepAlivePingTTL time.Duration - } - - connItem struct { - sync.RWMutex - conn *grpc.ClientConn - used time.Time - } - - iface struct { - log *zap.Logger - tick time.Duration - idle time.Duration - - keepAlive time.Duration - pingTTL time.Duration - - metricsTimeout time.Duration - - grpc struct { - // globalMutex used by garbage collector and other high - globalMutex *sync.RWMutex - // bookMutex resolves concurrent access to the new connection - bookMutex *sync.RWMutex - // connBook contains connection info - // it's mutex resolves concurrent access to existed connection - connBook map[string]*connItem - } - } -) - -const ( - defaultCloseTimer = 30 * time.Second - defaultConIdleTTL = 30 * time.Second - defaultKeepAliveTTL = 5 * time.Second - defaultMetricsTimeout = 5 * time.Second - defaultKeepAlivePingTTL = 50 * time.Millisecond -) - -var errNilMultiaddr = errors.New("empty multi-address") - -func (s *iface) removeGRPCConnection(addr string) error { - if gCon, ok := s.grpc.connBook[addr]; ok && gCon.conn != nil { - if err := gCon.conn.Close(); err != nil { - state, ok := status.FromError(err) - if !ok { - return err - } - - s.log.Debug("error state", - zap.String("address", addr), - zap.Any("code", state.Code()), - zap.String("state", state.Message()), - zap.Any("details", state.Details())) - } - } - - delete(s.grpc.connBook, addr) - - return nil -} - -func isGRPCClosed(con *grpc.ClientConn) bool { - switch con.GetState() { - case connectivity.Idle, connectivity.Connecting, connectivity.Ready: - return false - default: - // connectivity.TransientFailure, connectivity.Shutdown - return true - } -} - -func convertAddress(maddr multiaddr.Multiaddr) (string, error) { - if maddr == nil { - return "", errNilMultiaddr - } - - addr, err := manet.ToNetAddr(maddr) - if err != nil { - return "", errors.Wrapf(err, "could not convert address `%s`", maddr) - } - - return addr.String(), nil -} - -// GRPCConnection creates gRPC connection over peers connection. -func (s *iface) GRPCConnection(ctx context.Context, maddr multiaddr.Multiaddr) (*grpc.ClientConn, error) { - addr, err := convertAddress(maddr) - if err != nil { - return nil, errors.Wrapf(err, "could not convert `%v`", maddr) - } - - // Get global mutex on read. - // All high level function e.g. peers garbage collector - // or shutdown must use globalMutex.Lock instead - s.grpc.globalMutex.RLock() - - // Get connection item from connection book or create a new one. - // Concurrent map access resolved by bookMutex. - s.grpc.bookMutex.Lock() - - item, ok := s.grpc.connBook[addr] - if !ok { - item = new(connItem) - s.grpc.connBook[addr] = item - } - - s.grpc.bookMutex.Unlock() - - // Now lock connection item. - // This denies concurrent access to the same address, - // but allows concurrent access to a different addresses. - item.Lock() - - if item.conn != nil && !isGRPCClosed(item.conn) { - item.used = time.Now() - - item.Unlock() - s.grpc.globalMutex.RUnlock() - - return item.conn, nil - } - - // Если вышеописанные строки переместить внутрь WithDialer, - // мы получим сломанный коннекшн, но ошибка не будет возвращена, - // поэтому мы сначала проверяем коннекшн и лишь потом возвращаем - // *gRPC.ClientConn - // - // Это будет работать с `grpc.WithBlock()`, см. 
ниже - conn, err := grpc.DialContext(ctx, maddr.String(), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: s.pingTTL, - Timeout: s.keepAlive, - PermitWithoutStream: true, - }), - // TODO: we must provide grpc.WithInsecure() or set credentials - grpc.WithInsecure(), - grpc.WithBlock(), - grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { - return network.Dial(ctx, maddr) - }), - ) - if err == nil { - item.conn = conn - item.used = time.Now() - } - - item.Unlock() - s.grpc.globalMutex.RUnlock() - - return conn, err -} - -// New create iface instance and check arguments. -func New(p Params) (Interface, error) { - if p.ConnectionTTL <= 0 { - p.ConnectionTTL = defaultCloseTimer - } - - if p.ConnectionIDLE <= 0 { - p.ConnectionIDLE = defaultConIdleTTL - } - - if p.KeepAliveTTL <= 0 { - p.KeepAliveTTL = defaultKeepAliveTTL - } - - if p.KeepAlivePingTTL <= 0 { - p.KeepAlivePingTTL = defaultKeepAlivePingTTL - } - - if p.MetricsTimeout <= 0 { - p.MetricsTimeout = defaultMetricsTimeout - } - - return &iface{ - tick: p.ConnectionTTL, - idle: p.ConnectionIDLE, - - keepAlive: p.KeepAliveTTL, - pingTTL: p.KeepAlivePingTTL, - - metricsTimeout: p.MetricsTimeout, - - log: p.Logger, - grpc: struct { - globalMutex *sync.RWMutex - bookMutex *sync.RWMutex - connBook map[string]*connItem - }{ - globalMutex: new(sync.RWMutex), - bookMutex: new(sync.RWMutex), - connBook: make(map[string]*connItem), - }, - }, nil -} diff --git a/pkg/network/peers/peers_test.go b/pkg/network/peers/peers_test.go deleted file mode 100644 index fc29228c2..000000000 --- a/pkg/network/peers/peers_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package peers - -import ( - "context" - "encoding" - "encoding/json" - "net" - "strings" - "sync" - "testing" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/network" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -type ( - fakeAddress struct { - json.Marshaler - json.Unmarshaler - encoding.TextMarshaler - encoding.TextUnmarshaler - encoding.BinaryMarshaler - encoding.BinaryUnmarshaler - } - - // service is used to implement GreaterServer. 
- service struct{} -) - -// Hello is simple handler -func (*service) Hello(ctx context.Context, req *HelloRequest) (*HelloResponse, error) { - return &HelloResponse{ - Message: "Hello " + req.Name, - }, nil -} - -var _ multiaddr.Multiaddr = (*fakeAddress)(nil) - -func (fakeAddress) Equal(multiaddr.Multiaddr) bool { - return false -} - -func (fakeAddress) Bytes() []byte { - return nil -} - -func (fakeAddress) String() string { - return "fake" -} - -func (fakeAddress) Protocols() []multiaddr.Protocol { - return []multiaddr.Protocol{{Name: "fake"}} -} - -func (fakeAddress) Encapsulate(multiaddr.Multiaddr) multiaddr.Multiaddr { - panic("implement me") -} - -func (fakeAddress) Decapsulate(multiaddr.Multiaddr) multiaddr.Multiaddr { - panic("implement me") -} - -func (fakeAddress) ValueForProtocol(code int) (string, error) { - return "", nil -} - -const testCount = 10 - -func newTestAddress(t *testing.T) multiaddr.Multiaddr { - lis, err := net.Listen("tcp", "0.0.0.0:0") // nolint:gosec - require.NoError(t, err) - require.NoError(t, lis.Close()) - - l, ok := lis.(*net.TCPListener) - require.True(t, ok) - - _, port, err := net.SplitHostPort(l.Addr().String()) - require.NoError(t, err) - - items := []string{ - "ip4", - "127.0.0.1", - "tcp", - port, - } - - maddr, err := multiaddr.NewMultiaddr("/" + strings.Join(items, "/")) - require.NoError(t, err) - - return maddr -} - -func TestInterface(t *testing.T) { - t.Run("gRPC connection test", func(t *testing.T) { - var ( - err error - s Interface - h = &service{} - g = grpc.NewServer() - a1 = newTestAddress(t) - _ = h - done = make(chan struct{}) - ) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - s, err = New(Params{}) - require.NoError(t, err) - - RegisterGreeterServer(g, h) // register service - - l, err := network.Listen(a1) - require.NoError(t, err) - - defer l.Close() // nolint:golint - - wg := new(sync.WaitGroup) - wg.Add(1) - - go func() { - close(done) - - _ = g.Serve(l) - - wg.Done() - }() - - <-done // wait for server is start listening connections: - - // Fail connection - con, err := s.GRPCConnection(ctx, &fakeAddress{}) - require.Nil(t, con) - require.Error(t, err) - - con, err = s.GRPCConnection(ctx, a1) - require.NoError(t, err) - - cli := NewGreeterClient(con) - resp, err := cli.Hello(ctx, &HelloRequest{ - Name: "Interface test", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, "Hello Interface test", resp.Message) - - g.GracefulStop() - - wg.Wait() - }) - - t.Run("test grpc connections", func(t *testing.T) { - var ( - ifaces = make([]Interface, 0, testCount) - addresses = make([]multiaddr.Multiaddr, 0, testCount) - ) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for i := 0; i < testCount; i++ { - addresses = append(addresses, newTestAddress(t)) - - s, err := New(Params{}) - require.NoError(t, err) - - lis, err := network.Listen(addresses[i]) - require.NoError(t, err) - - svc := &service{} - srv := grpc.NewServer() - - RegisterGreeterServer(srv, svc) - - ifaces = append(ifaces, s) - - go func() { - require.NoError(t, srv.Serve(lis)) - }() - } - - const reqName = "test" - wg := new(sync.WaitGroup) - - for i := 0; i < testCount; i++ { - for j := 0; j < testCount; j++ { - wg.Add(1) - go func(i, j int) { - defer wg.Done() - - con, err := ifaces[i].GRPCConnection(ctx, addresses[j]) - require.NoError(t, err) - - cli := NewGreeterClient(con) - - resp, err := cli.Hello(ctx, &HelloRequest{Name: reqName}) - require.NoError(t, err) - - require.Equal(t, 
"Hello "+reqName, resp.Message) - - require.NoError(t, con.Close()) - }(i, j) - - } - } - - wg.Wait() - }) -} diff --git a/pkg/network/peers/peers_test.pb.go b/pkg/network/peers/peers_test.pb.go deleted file mode 100644 index 0a26c5a04..000000000 Binary files a/pkg/network/peers/peers_test.pb.go and /dev/null differ diff --git a/pkg/network/peers/peers_test.proto b/pkg/network/peers/peers_test.proto deleted file mode 100644 index d38ad7654..000000000 --- a/pkg/network/peers/peers_test.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; -option go_package = "github.com/nspcc-dev/neofs-node/pkg/network/peers"; - -package peers; - -// The Greater service definition. -service Greeter { - rpc Hello(HelloRequest) returns (HelloResponse); -} - -// Request message example -message HelloRequest { - string name = 1; -} - -message HelloResponse { - string message = 1; -} diff --git a/pkg/network/peers/peerstore.go b/pkg/network/peers/peerstore.go deleted file mode 100644 index 09b358fa0..000000000 --- a/pkg/network/peers/peerstore.go +++ /dev/null @@ -1,238 +0,0 @@ -package peers - -import ( - "crypto/ecdsa" - "crypto/elliptic" - - "github.com/multiformats/go-multiaddr" - crypto "github.com/nspcc-dev/neofs-crypto" - netmap "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // Store is an interface to storage of all p2p connections - Store interface { - SelfIDReceiver - PublicKeyStore - AddressIDReceiver - AddPeer(multiaddr.Multiaddr, *ecdsa.PublicKey, *ecdsa.PrivateKey) (ID, error) - DeletePeer(ID) - PeerNetAddressStore - GetPrivateKey(ID) (*ecdsa.PrivateKey, error) - Update(*netmap.NetMap) error - Sign([]byte) ([]byte, error) - Verify(id ID, data, sign []byte) error - Check(min int) error - } - - // PublicKeyStore is an interface of the storage of peer's public keys. - PublicKeyStore interface { - GetPublicKey(ID) (*ecdsa.PublicKey, error) - } - - // SelfIDReceiver is an interface of local peer ID value with read access. - SelfIDReceiver interface { - SelfID() ID - } - - // AddressIDReceiver is an interface of Multiaddr to ID converter. - AddressIDReceiver interface { - AddressID(multiaddr.Multiaddr) (ID, error) - } - - // PeerNetAddressStore is an interface of ID to Multiaddr converter. - PeerNetAddressStore interface { - GetAddr(ID) (multiaddr.Multiaddr, error) - } - - // StoreParams for creating new Store. - StoreParams struct { - Addr multiaddr.Multiaddr - Key *ecdsa.PrivateKey - Storage Storage - StoreCap int - Logger *zap.Logger - } - - store struct { - self ID - addr multiaddr.Multiaddr - storage Storage - log *zap.Logger - key *ecdsa.PrivateKey - } -) - -const defaultMinimalSignaturesCount = 3 - -var errPeerNotFound = errors.New("peer not found") - -func (p *store) AddressID(addr multiaddr.Multiaddr) (ID, error) { - if p.addr.Equal(addr) { - return p.self, nil - } - - res := p.storage.Filter(maddrFilter(addr)) - if len(res) == 0 { - return "", errPeerNotFound - } - - return res[0], nil -} - -func maddrFilter(addr multiaddr.Multiaddr) PeerFilter { - return func(p Peer) bool { return addr.Equal(p.Address()) } -} - -// SelfID return ID of current Node. -func (p *store) SelfID() ID { - return p.self -} - -// AddPeer to store.. -// Try to get PeerID from PublicKey, or return error -// Store Address and PublicKey for that PeerID. 
-func (p *store) AddPeer(addr multiaddr.Multiaddr, pub *ecdsa.PublicKey, key *ecdsa.PrivateKey) (ID, error) { - item := NewPeer(addr, pub, key) - if err := p.storage.Set(item.ID(), item); err != nil { - return "", err - } - - return item.ID(), nil -} - -// DeletePeer from store. -func (p *store) DeletePeer(id ID) { - if err := p.storage.Rem(id); err != nil { - p.log.Error("could not delete peer", - zap.Stringer("id", id), - zap.Error(err)) - } -} - -// Update update Store by new network map. -func (p *store) Update(nm *netmap.NetMap) error { - if err := p.storage.Update(nm); err != nil { - return err - } - - // we must provide our PrivateKey, after updating - if peer, err := p.storage.Get(p.self); err != nil { - peer = NewPeer(p.addr, &p.key.PublicKey, p.key) - return p.storage.Set(p.self, peer) - } else if err := peer.SetPrivateKey(p.key); err != nil { - return errors.Wrapf(err, "could not update private key (%s)", p.self.String()) - } else if err := p.storage.Set(p.self, peer); err != nil { - return errors.Wrapf(err, "could not save peer(%s)", p.self.String()) - } - - return nil -} - -// GetAddr by PeerID. -func (p *store) GetAddr(id ID) (multiaddr.Multiaddr, error) { - n, err := p.storage.Get(id) - if err != nil { - return nil, err - } - - return n.Address(), nil -} - -// GetPublicKey by PeerID. -func (p *store) GetPublicKey(id ID) (*ecdsa.PublicKey, error) { - n, err := p.storage.Get(id) - if err != nil { - return nil, err - } - - return n.PublicKey(), nil -} - -// GetPrivateKey by PeerID. -func (p *store) GetPrivateKey(id ID) (*ecdsa.PrivateKey, error) { - n, err := p.storage.Get(id) - if err != nil { - return nil, err - } - - return n.PrivateKey() -} - -// Sign signs a data using the private key. If the data is longer than -// the bit-length of the private key's curve order, the hash will be -// truncated to that length. It returns the signature as slice bytes. -// The security of the private key depends on the entropy of rand. -func (p *store) Sign(data []byte) ([]byte, error) { - return crypto.Sign(p.key, data) -} - -// Verify verifies the signature in r, s of hash using the public key, pub. Its -// return value records whether the signature is valid. -// If store doesn't contains public key for ID, -// returns error about that -// TODO we must provide same method, but for IR list, to check, -// that we have valid signatures of needed IR members -func (p *store) Verify(id ID, data, sign []byte) error { - if pub, err := p.GetPublicKey(id); err != nil { - return errors.Wrap(err, "could not get PublicKey") - } else if err := crypto.Verify(pub, data, sign); err != nil { - return errors.Wrapf(err, "could not verify signature: sign(`%x`) & data(`%x`)", sign, data) - } - - return nil -} - -// Neighbours peers that which are distributed by hrw(id). -func (p *store) Neighbours(seed int64, count int) ([]ID, error) { - return p.storage.List(p.self, seed, count) -} - -// Check validate signatures count -// TODO replace with settings or something else. -// We can fetch min-count from settings, or -// use another method for validate this.. -func (p *store) Check(min int) error { - if min <= defaultMinimalSignaturesCount { - return errors.Errorf("invalid count of valid signatures: minimum %d, actual %d", - defaultMinimalSignaturesCount, - min, - ) - } - - return nil -} - -// NewStore creates new store by params. 
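Illustrative sketch, not part of the patch itself: the sign/verify round trip provided by the Store methods above, using the NewStore constructor defined just below and the test key helper used by the unit tests in this package. The signAndVerify helper name is introduced only for illustration.

package main

import (
	"log"

	"github.com/multiformats/go-multiaddr"
	"github.com/nspcc-dev/neofs-node/pkg/network/peers"
	"github.com/nspcc-dev/neofs-node/pkg/util/test"
	"go.uber.org/zap"
)

// signAndVerify signs data with the local key and checks the signature
// against the public key stored for the local peer ID.
func signAndVerify() {
	addr, _ := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") // error ignored in this sketch

	ps, err := peers.NewStore(peers.StoreParams{
		Key:    test.DecodeKey(0),
		Addr:   addr,
		Logger: zap.L(),
	})
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("payload to be signed")

	sig, err := ps.Sign(data)
	if err != nil {
		log.Fatal(err)
	}

	if err := ps.Verify(ps.SelfID(), data, sig); err != nil {
		log.Fatal(err)
	}
}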
-func NewStore(p StoreParams) (Store, error) { - var storage Storage - - if p.Key == nil || p.Key.Curve != elliptic.P256() { - return nil, crypto.ErrEmptyPrivateKey - } - - if p.Addr == nil { - return nil, errNilMultiaddr - } - - if storage = p.Storage; storage == nil { - storage = NewSimpleStorage(p.StoreCap, p.Logger) - } - - id := IDFromPublicKey(&p.Key.PublicKey) - peer := NewPeer(p.Addr, &p.Key.PublicKey, p.Key) - - if err := storage.Set(id, peer); err != nil { - return nil, err - } - - return &store{ - self: id, - storage: storage, - key: p.Key, - addr: p.Addr, - log: p.Logger, - }, nil -} diff --git a/pkg/network/peers/peerstore_test.go b/pkg/network/peers/peerstore_test.go deleted file mode 100644 index ab0e5ab08..000000000 --- a/pkg/network/peers/peerstore_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package peers - -import ( - "strconv" - "testing" - - "github.com/multiformats/go-multiaddr" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - loggertest "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type testSign struct { - ID ID - Sign []byte -} - -const debug = false - -func createNetworkMap(t *testing.T) *netmap.NetMap { - var ( - Region = []string{"America", "Europe", "Asia"} - Country = map[string][]string{ - "America": {"USA", "Canada", "Brazil"}, - "Europe": {"France", "Germany", "Sweden"}, - "Asia": {"Russia", "China", "Korea", "Japan"}, - } - City = map[string][]string{ - "USA": {"Washington", "New-York", "Seattle", "Chicago", "Detroit"}, - "Canada": {"Toronto", "Ottawa", "Quebec", "Winnipeg"}, - "Brazil": {"Rio-de-Janeiro", "San-Paulo", "Salvador"}, - "France": {"Paris", "Lion", "Nice", "Marseille"}, - "Germany": {"Berlin", "Munich", "Dortmund", "Hamburg", "Cologne"}, - "Sweden": {"Stockholm", "Malmo", "Uppsala"}, - "Russia": {"Moscow", "Saint-Petersburg", "Ekaterinburg", "Novosibirsk"}, - "China": {"Beijing", "Shanghai", "Shenzhen", "Guangzhou"}, - "Korea": {"Seoul", "Busan"}, - "Japan": {"Tokyo", "Kyoto", "Yokohama", "Osaka"}, - } - nm = netmap.New() - port int64 = 4000 - i = 0 - ) - for _, r := range Region { - for _, co := range Country[r] { - for _, ci := range City[co] { - addr := "/ip4/127.0.0.1/tcp/" + strconv.FormatInt(port, 10) - port++ - option := "/Region:" + r + "/Country:" + co + "/City:" + ci - pk := crypto.MarshalPublicKey(&test.DecodeKey(i).PublicKey) - i++ - - info := netmap.Info{} - info.SetAddress(addr) - info.SetPublicKey(pk) - info.SetOptions([]string{option}) - - require.NoError(t, nm.AddNode(info)) - } - } - } - return nm -} - -func testMultiAddress(t *testing.T) multiaddr.Multiaddr { - addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") - require.NoError(t, err) - return addr -} - -func TestPeerstore(t *testing.T) { - var ( - l = loggertest.NewLogger(false) - key = test.DecodeKey(1) - ) - - t.Run("it should creates new store", func(t *testing.T) { - ps, err := NewStore(StoreParams{ - Key: key, - Logger: l, - Addr: testMultiAddress(t), - }) - require.NoError(t, err) - require.NotNil(t, ps) - - maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4000") - require.NoError(t, err) - - expect := crypto.MarshalPublicKey(&key.PublicKey) - - id, err := ps.AddPeer(maddr, &key.PublicKey, key) - require.NoError(t, err) - - pub, err := ps.GetPublicKey(id) - require.NoError(t, err) - - actual := crypto.MarshalPublicKey(pub) - require.Equal(t, expect, actual) - - addr1, err := 
ps.GetAddr(id) - require.NoError(t, err) - require.True(t, maddr.Equal(addr1)) - - ps.DeletePeer(id) - addr1, err = ps.GetAddr(id) - require.Nil(t, addr1) - require.Error(t, err) - }) - - t.Run("it should creates new store based on netmap", func(t *testing.T) { - var nm = createNetworkMap(t) - - ps, err := NewStore(StoreParams{ - Key: key, - Logger: l, - Addr: testMultiAddress(t), - }) - require.NoError(t, err) - require.NotNil(t, ps) - - err = ps.Update(nm) - require.NoError(t, err) - - expect := nm.Nodes()[0].PublicKey() - - id := IDFromBinary(expect) - - addr, err := ps.GetAddr(id) - require.NoError(t, err) - require.Equal(t, nm.Nodes()[0].Address(), addr.String()) - - pub, err := ps.GetPublicKey(id) - require.NoError(t, err) - - actual := crypto.MarshalPublicKey(pub) - require.Equal(t, expect, actual) - }) - - t.Run("multiple store's", func(t *testing.T) { - var ( - count = 10 - items = make([]Store, 0, count) - - data = []byte("Hello world") - peers = make([]Peer, 0, count) - signs = make([]*testSign, 0, count) - ) - - for i := 0; i < count; i++ { - key := test.DecodeKey(i) - addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") - require.NoError(t, err) - - peers = append(peers, NewLocalPeer(addr, key)) - } - - for i := 0; i < count; i++ { - key, err := peers[i].PrivateKey() - require.NoError(t, err) - - store, err := NewStore(StoreParams{ - Addr: peers[i].Address(), - Key: key, - Logger: zap.L(), - }) - require.NoError(t, err) - - items = append(items, store) - - hash, err := store.Sign(data) - require.NoError(t, err) - - sign := &testSign{ - ID: peers[i].ID(), - Sign: hash, - } - signs = append(signs, sign) - l.Info("add peer", - zap.Stringer("id", peers[i].ID())) - } - - for i := 0; i < count; i++ { - signature, err := items[i].Sign(data) - require.NoError(t, err) - - // check the newly generated signature - err = items[i].Verify(peers[i].ID(), data, signature) - require.NoError(t, err) - - for j := 0; j < count; j++ { - // check previously generated signature - addr, pub := peers[j].Address(), peers[j].PublicKey() - key, err := peers[j].PrivateKey() - require.NoError(t, err) - - _, err = items[i].AddPeer(addr, pub, key) - require.NoError(t, err) - - err = items[i].Verify(signs[j].ID, data, signs[j].Sign) - require.NoError(t, err) - } - } - }) - - t.Run("Get self address", func(t *testing.T) { - addr := testMultiAddress(t) - - ps, err := NewStore(StoreParams{ - Key: key, - Logger: l, - Addr: addr, - }) - require.NoError(t, err) - require.NotNil(t, ps) - - selfAddr, err := ps.GetAddr(ps.SelfID()) - require.NoError(t, err) - require.Equal(t, selfAddr, addr) - }) - - t.Run("Get ID for multi address", func(t *testing.T) { - addr := testMultiAddress(t) - - ps, err := NewStore(StoreParams{ - Key: key, - Logger: l, - Addr: addr, - }) - require.NoError(t, err) - require.NotNil(t, ps) - - maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4000") - require.NoError(t, err) - - id, err := ps.AddPeer(maddr, &key.PublicKey, key) - require.NoError(t, err) - - res, err := ps.AddressID(maddr) - require.NoError(t, err) - require.True(t, id.Equal(res)) - - maddr2, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001") - require.NoError(t, err) - - res, err = ps.AddressID(maddr2) - require.EqualError(t, err, errPeerNotFound.Error()) - }) -} diff --git a/pkg/network/peers/storage.go b/pkg/network/peers/storage.go deleted file mode 100644 index 80afec628..000000000 --- a/pkg/network/peers/storage.go +++ /dev/null @@ -1,297 +0,0 @@ -package peers - -import ( - "crypto/ecdsa" - 
"crypto/elliptic" - "crypto/sha256" - "sync" - - "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multihash" - "github.com/nspcc-dev/hrw" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/pkg/errors" - "github.com/spaolacci/murmur3" - "go.uber.org/zap" -) - -type ( - // Peer is value, that stores in Store storage - Peer interface { - ID() ID - Address() multiaddr.Multiaddr - PublicKey() *ecdsa.PublicKey - PrivateKey() (*ecdsa.PrivateKey, error) - SetPrivateKey(*ecdsa.PrivateKey) error - - // TODO implement marshal/unmarshal binary. - // Not sure that this method need for now, - // that's why let's leave it on future - // encoding.BinaryMarshaler - // encoding.BinaryUnmarshaler - } - - peer struct { - id ID - pub *ecdsa.PublicKey - key *ecdsa.PrivateKey - addr multiaddr.Multiaddr - } - - // ID is a type of peer identification - ID string - - storage struct { - log *zap.Logger - - mu *sync.RWMutex - items map[ID]Peer - } - - // PeerFilter is a Peer filtering function. - PeerFilter func(Peer) bool - - // Storage is storage interface for Store - Storage interface { - Get(ID) (Peer, error) - Set(ID, Peer) error - Has(ID) bool - Rem(ID) error - List(ID, int64, int) ([]ID, error) - Filter(PeerFilter) []ID - Update(*netmap.NetMap) error - } -) - -const defaultStoreCapacity = 100 - -var ( - errUnknownPeer = errors.New("unknown peer") - errBadPublicKey = errors.New("bad public key") -) - -var errNilNetMap = errors.New("netmap is nil") - -// Hash method used in HRW-library. -func (i ID) Hash() uint64 { - return murmur3.Sum64(i.Bytes()) -} - -// NewLocalPeer creates new peer instance. -func NewLocalPeer(addr multiaddr.Multiaddr, key *ecdsa.PrivateKey) Peer { - pub := &key.PublicKey - - return &peer{ - id: IDFromPublicKey(pub), - pub: pub, - key: key, - addr: addr, - } -} - -// NewPeer creates new peer instance. -func NewPeer(addr multiaddr.Multiaddr, pub *ecdsa.PublicKey, key *ecdsa.PrivateKey) Peer { - return &peer{ - id: IDFromPublicKey(pub), - pub: pub, - key: key, - addr: addr, - } -} - -func (p *peer) SetPrivateKey(key *ecdsa.PrivateKey) error { - if key == nil || key.Curve != elliptic.P256() { - return crypto.ErrEmptyPrivateKey - } - - p.key = key - - return nil -} - -// ID of peer. -func (p peer) ID() ID { - return p.id -} - -// Address of peer. -func (p peer) Address() multiaddr.Multiaddr { - return p.addr -} - -// PublicKey returns copy of peer public key. -func (p peer) PublicKey() *ecdsa.PublicKey { - return p.pub -} - -func (p peer) PrivateKey() (*ecdsa.PrivateKey, error) { - if p.key == nil { - return nil, crypto.ErrEmptyPrivateKey - } - - return p.key, nil -} - -// String returns string representation of PeerID. -func (i ID) String() string { - return string(i) -} - -// -- -- // - -// Bytes returns bytes representation of PeerID. -func (i ID) Bytes() []byte { - return []byte(i) -} - -// Equal checks that both id's are identical. -func (i ID) Equal(id ID) bool { - return i == id -} - -// IDFromPublicKey returns peer ID for host with given public key. -func IDFromPublicKey(pk *ecdsa.PublicKey) ID { - if pk == nil { - return "" - } - - return IDFromBinary(crypto.MarshalPublicKey(pk)) -} - -// IDFromBinary returns peer ID for host with given slice of byte. -func IDFromBinary(b []byte) ID { - bytes := sha256.Sum256(b) - hash, _ := multihash.Encode(bytes[:], multihash.IDENTITY) - ident := multihash.Multihash(hash) - - return ID(ident.B58String()) -} - -// NewSimpleStorage is implementation over map. 
-func NewSimpleStorage(capacity int, l *zap.Logger) Storage { - if capacity <= 0 { - capacity = defaultStoreCapacity - } - - return &storage{ - log: l, - mu: new(sync.RWMutex), - items: make(map[ID]Peer, capacity), - } -} - -// List peers that which are distributed by hrw(seed). -func (s *storage) List(id ID, seed int64, count int) ([]ID, error) { - s.mu.RLock() - items := make([]ID, 0, len(s.items)) - - for key := range s.items { - // ignore ourselves - if id.Equal(key) { - continue - } - - items = append(items, key) - } - s.mu.RUnlock() - - // distribute keys by hrw(seed) - hrw.SortSliceByValue(items, - uint64(seed)) - - return items[:count], nil -} - -// Get peer by ID. -func (s *storage) Get(id ID) (Peer, error) { - s.mu.RLock() - p, ok := s.items[id] - s.mu.RUnlock() - - if ok { - return p, nil - } - - return nil, errors.Wrapf(errUnknownPeer, "peer(%s)", id) -} - -// Set peer by id. -func (s *storage) Set(id ID, p Peer) error { - s.mu.Lock() - s.items[id] = p - s.mu.Unlock() - - return nil -} - -// Has checks peer exists by id. -func (s *storage) Has(id ID) bool { - s.mu.RLock() - _, ok := s.items[id] - s.mu.RUnlock() - - return ok -} - -// Rem peer by id. -func (s *storage) Rem(id ID) error { - s.mu.Lock() - delete(s.items, id) - s.mu.Unlock() - - return nil -} - -// Update storage by network map. -func (s *storage) Update(nm *netmap.NetMap) error { - s.mu.Lock() - defer s.mu.Unlock() - - list := nm.Nodes() - if len(list) == 0 { - return errNilNetMap - } - - items := make(map[ID]Peer, len(s.items)) - - for i := range list { - addr, err := multiaddr.NewMultiaddr(list[i].Address()) - if err != nil { - return errors.Wrapf(err, "address=`%s`", list[i].Address()) - } - - pubKey := list[i].PublicKey() - pk := crypto.UnmarshalPublicKey(pubKey) - if pk == nil && pubKey != nil { - return errors.Wrapf(errBadPublicKey, "pubkey=`%x`", pubKey) - } - - id := IDFromPublicKey(pk) - if pv, ok := s.items[id]; ok { - if pv.Address() != nil && pv.Address().Equal(addr) { - items[id] = pv - continue - } - } - - items[id] = NewPeer(addr, pk, nil) - } - - s.items = items - - return nil -} - -func (s *storage) Filter(filter PeerFilter) (res []ID) { - s.mu.RLock() - defer s.mu.RUnlock() - - for id, peer := range s.items { - if filter(peer) { - res = append(res, id) - } - } - - return -} diff --git a/pkg/network/peers/worker.go b/pkg/network/peers/worker.go deleted file mode 100644 index 1fac37f06..000000000 --- a/pkg/network/peers/worker.go +++ /dev/null @@ -1,67 +0,0 @@ -package peers - -import ( - "context" - "time" - - "go.uber.org/zap" - "google.golang.org/grpc/connectivity" -) - -func (s *iface) Job(ctx context.Context) { - var ( - tick = time.NewTimer(s.tick) - metrics = time.NewTimer(s.metricsTimeout) - ) - -loop: - for { - select { - case <-ctx.Done(): - break loop - case <-metrics.C: - var items = make(map[connectivity.State]float64) - s.grpc.globalMutex.Lock() - for _, item := range s.grpc.connBook { - if item.conn != nil { - items[item.conn.GetState()]++ - } - } - s.grpc.globalMutex.Unlock() - - updateMetrics(items) - - metrics.Reset(s.metricsTimeout) - case <-tick.C: - var count int - - s.grpc.globalMutex.Lock() - for addr, item := range s.grpc.connBook { - if item.conn == nil || isGRPCClosed(item.conn) || time.Since(item.used) > s.idle { - if err := s.removeGRPCConnection(addr); err != nil { - s.log.Error("could not close connection", - zap.String("address", addr), - zap.String("target", item.conn.Target()), - zap.Stringer("idle", time.Since(item.used)), - zap.Error(err)) - continue - } - - 
count++ - } else { - s.log.Debug("ignore connection", - zap.String("address", addr), - zap.Stringer("idle", time.Since(item.used))) - } - } - s.grpc.globalMutex.Unlock() - - s.log.Debug("cleanup connections done", - zap.Int("closed", count)) - - tick.Reset(s.tick) - } - } - - tick.Stop() -} diff --git a/pkg/network/transport/accounting/grpc/.gitkeep b/pkg/network/transport/accounting/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/accounting/grpc/service.go b/pkg/network/transport/accounting/grpc/service.go deleted file mode 100644 index 8497273c6..000000000 --- a/pkg/network/transport/accounting/grpc/service.go +++ /dev/null @@ -1,94 +0,0 @@ -package accounting - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/accounting" - "github.com/nspcc-dev/neofs-api-go/decimal" - "github.com/nspcc-dev/neofs-node/pkg/morph/client/balance/wrapper" - "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type ( - // Service is an interface of the server of Accounting service. - Service interface { - grpc.Service - accounting.AccountingServer - } - - // ContractClient represents the client of Balance contract. - // - // It is a type alias of the pointer to - // github.com/nspcc-dev/neofs-node/pkg/morph/client/balance/wrapper.Wrapper. - ContractClient = *wrapper.Wrapper - - // Params groups the parameters of Accounting service server's constructor. - Params struct { - ContractClient ContractClient - } - - accService struct { - contractClient ContractClient - } -) - -var requestVerifyFunc = libgrpc.VerifyRequestWithSignatures - -// New is an Accounting service server's constructor. -// -// If Balance contract client is nil, -// wrapper.ErrNilWrapper is returned. -func New(p Params) (Service, error) { - if p.ContractClient == nil { - return nil, wrapper.ErrNilWrapper - } - - return &accService{ - contractClient: p.ContractClient, - }, nil -} - -func (accService) Name() string { return "AccountingService" } - -func (s accService) Register(g *grpc.Server) { accounting.RegisterAccountingServer(g, s) } - -func (s accService) Balance(ctx context.Context, req *accounting.BalanceRequest) (*accounting.BalanceResponse, error) { - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // get the amount of funds in client's account - fundsAmount, err := s.contractClient.BalanceOf(req.GetOwnerID()) - if err != nil { - return nil, status.Error(codes.Aborted, err.Error()) - } - - // get decimals precision of currency transactions - - // TODO: Reconsider the approach of getting decimals. - // - // Decimals value does not seem to be frequently changing. - // In this case service can work in static decimals mode and - // the value can be received once to facilitate call flow. - // - // In a true dynamic value installation it is advisable to get - // a balance with decimals through a single call. Variations: - // - add decimal value stack parameter of balanceOf method; - // - create a new method entitled smth like balanceWithDecimals. 
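Illustrative sketch, not part of the patch itself: one possible shape of the "static decimals" mode discussed in the comment above, caching the precision instead of querying it per request. It assumes the Decimals() wrapper call used just below; the decimalsOnce type and the uint32 return type are assumptions made only for illustration.

package accounting

import "sync"

// decimalsOnce resolves the currency precision once and reuses it, so that
// Balance does not have to call the contract on every request.
type decimalsOnce struct {
	once  sync.Once
	value uint32
	err   error
}

func (d *decimalsOnce) get(c ContractClient) (uint32, error) {
	d.once.Do(func() {
		d.value, d.err = c.Decimals()
	})

	return d.value, d.err
}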
- decimals, err := s.contractClient.Decimals() - if err != nil { - return nil, status.Error(codes.Aborted, err.Error()) - } - - res := new(accounting.BalanceResponse) - res.Balance = decimal.NewWithPrecision( - fundsAmount, - decimals, - ) - - return res, nil -} diff --git a/pkg/network/transport/accounting/grpc/service_test.go b/pkg/network/transport/accounting/grpc/service_test.go deleted file mode 100644 index d59995eff..000000000 --- a/pkg/network/transport/accounting/grpc/service_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package accounting - -// TODO: write unit tests diff --git a/pkg/network/transport/container/grpc/.gitkeep b/pkg/network/transport/container/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/container/grpc/acl.go b/pkg/network/transport/container/grpc/acl.go deleted file mode 100644 index fe942d963..000000000 --- a/pkg/network/transport/container/grpc/acl.go +++ /dev/null @@ -1,70 +0,0 @@ -package container - -import ( - "context" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s cnrService) SetExtendedACL(ctx context.Context, req *container.SetExtendedACLRequest) (*container.SetExtendedACLResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, status.Error(codes.Unavailable, err.Error()) - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // unmarshal eACL table - table, err := eacl.UnmarshalTable(req.GetEACL()) - if err != nil { - return nil, status.Error( - codes.InvalidArgument, - errors.Wrap(err, "could not decode eACL table").Error(), - ) - } - - // store eACL table - if err := s.aclStore.PutEACL(req.GetID(), table, req.GetSignature()); err != nil { - return nil, status.Error( - codes.Aborted, - errors.Wrap(err, "could not save eACL in storage").Error(), - ) - } - - return new(container.SetExtendedACLResponse), nil -} - -func (s cnrService) GetExtendedACL(ctx context.Context, req *container.GetExtendedACLRequest) (*container.GetExtendedACLResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, status.Error(codes.Unavailable, err.Error()) - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // receive binary eACL - table, err := s.aclStore.GetEACL(req.GetID()) - if err != nil { - return nil, status.Error( - codes.NotFound, - errors.Wrap(err, "could not get eACL from storage").Error(), - ) - } - - // fill the response - res := new(container.GetExtendedACLResponse) - res.SetEACL(eacl.MarshalTable(table)) - res.SetSignature(nil) // TODO: set signature when will appear. 
- - return res, nil -} diff --git a/pkg/network/transport/container/grpc/acl_test.go b/pkg/network/transport/container/grpc/acl_test.go deleted file mode 100644 index 16445b4ba..000000000 --- a/pkg/network/transport/container/grpc/acl_test.go +++ /dev/null @@ -1,214 +0,0 @@ -package container - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/acl" - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testEACLEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error -} - -var requestSignFunc = service.SignRequestData - -func (s *testEACLEntity) GetEACL(cid CID) (Table, error) { - if s.f != nil { - s.f(cid) - } - - if s.err != nil { - return nil, s.err - } - - return s.res.(Table), nil -} - -func (s *testEACLEntity) PutEACL(cid CID, table Table, sig []byte) error { - if s.f != nil { - s.f(cid, table, sig) - } - - return s.err -} - -func TestCnrService_SetExtendedACL(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.SetExtendedACL(ctx, new(container.SetExtendedACLRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.SetExtendedACLRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.SetExtendedACL(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) - - t.Run("EACL storage failure", func(t *testing.T) { - record := new(acl.EACLRecord) - record.SetAction(acl.EACLRecord_Allow) - - table := eacl.WrapTable(nil) - table.SetRecords([]eacl.Record{eacl.WrapRecord(record)}) - - req := new(container.SetExtendedACLRequest) - req.SetID(CID{1, 2, 3}) - req.SetEACL(eacl.MarshalTable(table)) - req.SetSignature([]byte{4, 5, 6}) - - require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) - - s := cnrService{ - healthy: new(testCommonEntity), - aclStore: &testEACLEntity{ - f: func(items ...interface{}) { - require.Equal(t, req.GetID(), items[0]) - require.Equal(t, req.GetSignature(), items[2]) - }, - err: errors.New("storage error"), - }, - } - - _, err := s.SetExtendedACL(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.Aborted, st.Code()) - }) - - t.Run("correct result", func(t *testing.T) { - req := new(container.SetExtendedACLRequest) - - require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) - - s := cnrService{ - healthy: new(testCommonEntity), - aclStore: new(testEACLEntity), - } - - res, err := s.SetExtendedACL(ctx, req) - require.NoError(t, err) - 
require.NotNil(t, res) - }) -} - -func TestCnrService_GetExtendedACL(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.GetExtendedACL(ctx, new(container.GetExtendedACLRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.GetExtendedACLRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.GetExtendedACL(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) - - t.Run("EACL storage failure", func(t *testing.T) { - req := new(container.GetExtendedACLRequest) - req.SetID(CID{1, 2, 3}) - - require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) - - s := cnrService{ - healthy: new(testCommonEntity), - aclStore: &testEACLEntity{ - f: func(items ...interface{}) { - require.Equal(t, req.GetID(), items[0]) - }, - err: errors.New("storage error"), - }, - } - - _, err := s.GetExtendedACL(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.NotFound, st.Code()) - }) - - t.Run("correct result", func(t *testing.T) { - req := new(container.GetExtendedACLRequest) - req.SetID(CID{1, 2, 3}) - - require.NoError(t, requestSignFunc(test.DecodeKey(0), req)) - - table := eacl.WrapTable(nil) - - record := new(acl.EACLRecord) - record.SetAction(acl.EACLRecord_Allow) - - table.SetRecords([]eacl.Record{eacl.WrapRecord(record)}) - - s := cnrService{ - healthy: new(testCommonEntity), - aclStore: &testEACLEntity{ - res: table, - }, - } - - res, err := s.GetExtendedACL(ctx, req) - require.NoError(t, err) - require.Equal(t, eacl.MarshalTable(table), res.GetEACL()) - require.Empty(t, res.GetSignature()) - }) -} diff --git a/pkg/network/transport/container/grpc/alias.go b/pkg/network/transport/container/grpc/alias.go deleted file mode 100644 index 74f9afa33..000000000 --- a/pkg/network/transport/container/grpc/alias.go +++ /dev/null @@ -1,30 +0,0 @@ -package container - -import ( - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container" -) - -// CID represents the container identifier. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.ID. -type CID = container.ID - -// OwnerID represents the container owner identifier.. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.OwnerID. -type OwnerID = container.OwnerID - -// Container represents the NeoFS Container structure. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container.Container. -type Container = container.Container - -// Table represents the eACL table. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.ExtendedACLTable. -type Table = eacl.Table diff --git a/pkg/network/transport/container/grpc/common_test.go b/pkg/network/transport/container/grpc/common_test.go deleted file mode 100644 index 8d218c7b1..000000000 --- a/pkg/network/transport/container/grpc/common_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package container - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. 
Otherwise, casted to needed type res returns w/o error. -type testCommonEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error -} - -func (s testCommonEntity) Healthy() error { - return s.err -} diff --git a/pkg/network/transport/container/grpc/delete.go b/pkg/network/transport/container/grpc/delete.go deleted file mode 100644 index a841242f2..000000000 --- a/pkg/network/transport/container/grpc/delete.go +++ /dev/null @@ -1,32 +0,0 @@ -package container - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s cnrService) Delete(ctx context.Context, req *container.DeleteRequest) (*container.DeleteResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, errors.Wrap(err, "try again later") - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // remove container from storage - if err := s.cnrStore.Delete(req.GetCID()); err != nil { - return nil, status.Error( - codes.Aborted, - errors.Wrap(err, "could not remove container from storage").Error(), - ) - } - - return new(container.DeleteResponse), nil -} diff --git a/pkg/network/transport/container/grpc/delete_test.go b/pkg/network/transport/container/grpc/delete_test.go deleted file mode 100644 index e4dbefc48..000000000 --- a/pkg/network/transport/container/grpc/delete_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testDeleteEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error -} - -func TestCnrService_Delete(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.Delete(ctx, new(container.DeleteRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.DeleteRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.Delete(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) -} diff --git a/pkg/network/transport/container/grpc/get.go b/pkg/network/transport/container/grpc/get.go deleted file mode 100644 index 148018274..000000000 --- a/pkg/network/transport/container/grpc/get.go +++ /dev/null @@ -1,56 +0,0 @@ -package container - -import ( - "context" - - "github.com/google/uuid" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s cnrService) Get(ctx context.Context, req *container.GetRequest) (*container.GetResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, status.Error(codes.Unavailable, err.Error()) - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // get container from storage - cnr, err := s.cnrStore.Get(req.GetCID()) - if err != nil { - return nil, status.Error( - codes.NotFound, - errors.Wrap(err, "could not get container from storage").Error(), - ) - } - - // fill the response - res := new(container.GetResponse) - - // FIXME: salt should be []byte in the message - salt, err := uuid.FromBytes(cnr.Salt()) - if err != nil { - return nil, status.Error( - codes.Aborted, - errors.Wrap(err, "could not decode salt").Error(), - ) - } - - // FIXME: message field should be the same type or []byte. - res.Container = new(container.Container) - res.Container.Salt = refs.UUID(salt) - res.Container = new(container.Container) - res.Container.OwnerID = cnr.OwnerID() - res.Container.Rules = cnr.PlacementRule() - res.Container.BasicACL = basic.ToUint32(cnr.BasicACL()) - - return res, nil -} diff --git a/pkg/network/transport/container/grpc/get_test.go b/pkg/network/transport/container/grpc/get_test.go deleted file mode 100644 index f3f297841..000000000 --- a/pkg/network/transport/container/grpc/get_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testGetEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error -} - -func TestCnrService_Get(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.Get(ctx, new(container.GetRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.GetRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.Get(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) -} diff --git a/pkg/network/transport/container/grpc/list.go b/pkg/network/transport/container/grpc/list.go deleted file mode 100644 index 5ffdcb5f3..000000000 --- a/pkg/network/transport/container/grpc/list.go +++ /dev/null @@ -1,38 +0,0 @@ -package container - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func (s cnrService) List(ctx context.Context, req *container.ListRequest) (*container.ListResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, errors.Wrap(err, "try again later") - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // list container identifiers from storage - ownerID := req.GetOwnerID() - cidList, err := s.cnrStore.List(&ownerID) - if err != nil { - return nil, status.Error( - codes.NotFound, - errors.Wrap(err, "could not list the containers in storage").Error(), - ) - } - - // fill the response - res := new(container.ListResponse) - res.CID = cidList - - return res, nil -} diff --git a/pkg/network/transport/container/grpc/list_test.go b/pkg/network/transport/container/grpc/list_test.go deleted file mode 100644 index c8026f152..000000000 --- a/pkg/network/transport/container/grpc/list_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testListEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error -} - -func TestCnrService_List(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.List(ctx, new(container.ListRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.ListRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.List(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) -} diff --git a/pkg/network/transport/container/grpc/put.go b/pkg/network/transport/container/grpc/put.go deleted file mode 100644 index a0172f8c9..000000000 --- a/pkg/network/transport/container/grpc/put.go +++ /dev/null @@ -1,57 +0,0 @@ -package container - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// TODO verify MessageID. -func (s cnrService) Put(ctx context.Context, req *container.PutRequest) (*container.PutResponse, error) { - // check healthiness - if err := s.healthy.Healthy(); err != nil { - return nil, errors.Wrap(err, "try again later") - } - - // verify request structure - if err := requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // create container structure - // FIXME: message field should be the same type or []byte. - cnr := new(Container) - cnr.SetOwnerID(req.GetOwnerID()) - cnr.SetPlacementRule(req.GetRules()) - cnr.SetBasicACL(basic.FromUint32(req.GetBasicACL())) - - uid, err := refs.NewUUID() - if err != nil { - return nil, status.Error( - codes.Aborted, - errors.Wrap(err, "could not generate the salt").Error(), - ) - } - - cnr.SetSalt(uid.Bytes()) - - // save container in storage - cid, err := s.cnrStore.Put(cnr) - if err != nil { - return nil, status.Error( - codes.Aborted, - errors.Wrap(err, "could not save the container instorage").Error(), - ) - } - - // fill the response - res := new(container.PutResponse) - res.CID = *cid - - return res, nil -} diff --git a/pkg/network/transport/container/grpc/put_test.go b/pkg/network/transport/container/grpc/put_test.go deleted file mode 100644 index 7698bec2d..000000000 --- a/pkg/network/transport/container/grpc/put_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package container - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testPutEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error -} - -func TestCnrService_Put(t *testing.T) { - ctx := context.TODO() - - t.Run("unhealthy", func(t *testing.T) { - s := cnrService{ - healthy: &testCommonEntity{ - err: errors.New("some error"), - }, - } - - _, err := s.Put(ctx, new(container.PutRequest)) - require.Error(t, err) - }) - - t.Run("invalid request structure", func(t *testing.T) { - s := cnrService{ - healthy: new(testCommonEntity), - } - - // create unsigned request - req := new(container.PutRequest) - require.Error(t, requestVerifyFunc(req)) - - _, err := s.Put(ctx, req) - require.Error(t, err) - - st, ok := status.FromError(err) - require.True(t, ok) - require.Equal(t, codes.InvalidArgument, st.Code()) - }) -} diff --git a/pkg/network/transport/container/grpc/service.go b/pkg/network/transport/container/grpc/service.go deleted file mode 100644 index 615706fad..000000000 --- a/pkg/network/transport/container/grpc/service.go +++ /dev/null @@ -1,78 +0,0 @@ -package container - -import ( - "errors" - - "github.com/nspcc-dev/neofs-api-go/container" - eacl "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - "go.uber.org/zap" -) - -type ( - // Service is an interface of the server of Container service. - Service interface { - grpc.Service - container.ServiceServer - } - - // HealthChecker is an interface of node healthiness checking tool. - HealthChecker interface { - Healthy() error - } - - // Params groups the parameters of Container service server's constructor. - Params struct { - Logger *zap.Logger - - Healthy HealthChecker - - Store storage.Storage - - ExtendedACLStore eacl.Storage - } - - cnrService struct { - log *zap.Logger - - healthy HealthChecker - - cnrStore storage.Storage - - aclStore eacl.Storage - } -) - -var ( - errEmptyLogger = errors.New("empty log component") - errEmptyHealthChecker = errors.New("empty healthy component") -) - -var requestVerifyFunc = libgrpc.VerifyRequestWithSignatures - -// New is an Container service server's constructor. 
-func New(p Params) (Service, error) { - switch { - case p.Logger == nil: - return nil, errEmptyLogger - case p.Store == nil: - return nil, storage.ErrNilStorage - case p.Healthy == nil: - return nil, errEmptyHealthChecker - case p.ExtendedACLStore == nil: - return nil, eacl.ErrNilStorage - } - - return &cnrService{ - log: p.Logger, - healthy: p.Healthy, - cnrStore: p.Store, - aclStore: p.ExtendedACLStore, - }, nil -} - -func (cnrService) Name() string { return "ContainerService" } - -func (s cnrService) Register(g *grpc.Server) { container.RegisterServiceServer(g, s) } diff --git a/pkg/network/transport/grpc/.gitkeep b/pkg/network/transport/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/grpc/service.go b/pkg/network/transport/grpc/service.go deleted file mode 100644 index 3c8ed9545..000000000 --- a/pkg/network/transport/grpc/service.go +++ /dev/null @@ -1,13 +0,0 @@ -package grpc - -import ( - "google.golang.org/grpc" -) - -type Server = grpc.Server - -// Service interface -type Service interface { - Name() string - Register(*Server) -} diff --git a/pkg/network/transport/grpc/validate.go b/pkg/network/transport/grpc/validate.go deleted file mode 100644 index 41b8b4d6c..000000000 --- a/pkg/network/transport/grpc/validate.go +++ /dev/null @@ -1,23 +0,0 @@ -package grpc - -import ( - "errors" - - "github.com/nspcc-dev/neofs-api-go/service" -) - -// ErrMissingKeySignPairs is returned by functions that expect -// a non-empty SignKeyPair slice, but received empty. -var ErrMissingKeySignPairs = errors.New("missing key-signature pairs") - -// VerifyRequestWithSignatures checks if request has signatures and all of them are valid. -// -// Returns ErrMissingKeySignPairs if request does not have signatures. -// Otherwise, behaves like service.VerifyRequestData. -func VerifyRequestWithSignatures(req service.RequestVerifyData) error { - if len(req.GetSignKeyPairs()) == 0 { - return ErrMissingKeySignPairs - } - - return service.VerifyRequestData(req) -} diff --git a/pkg/network/transport/metrics/grpc/.gitkeep b/pkg/network/transport/metrics/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/metrics/grpc/service.go b/pkg/network/transport/metrics/grpc/service.go deleted file mode 100644 index 51ba6e78d..000000000 --- a/pkg/network/transport/metrics/grpc/service.go +++ /dev/null @@ -1,60 +0,0 @@ -package metrics - -import ( - "context" - "errors" - - "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - "github.com/nspcc-dev/neofs-node/pkg/services/metrics" - "go.uber.org/zap" -) - -type ( - // Service is an interface of the server of Metrics service. - Service interface { - MetricsServer - grpc.Service - } - - // Params groups the parameters of Metrics service server's constructor. - Params struct { - Logger *zap.Logger - Collector metrics.Collector - } - - serviceMetrics struct { - log *zap.Logger - col metrics.Collector - } -) - -var ( - errEmptyLogger = errors.New("empty logger") - errEmptyCollector = errors.New("empty metrics collector") -) - -// New is a Metrics service server's constructor. 
-func New(p Params) (Service, error) { - switch { - case p.Logger == nil: - return nil, errEmptyLogger - case p.Collector == nil: - return nil, errEmptyCollector - } - - return &serviceMetrics{ - log: p.Logger, - col: p.Collector, - }, nil -} - -func (s *serviceMetrics) ResetSpaceCounter(_ context.Context, _ *ResetSpaceRequest) (*ResetSpaceResponse, error) { - s.col.UpdateSpaceUsage() - return &ResetSpaceResponse{}, nil -} - -func (s *serviceMetrics) Name() string { return "metrics" } - -func (s *serviceMetrics) Register(srv *grpc.Server) { - RegisterMetricsServer(srv, s) -} diff --git a/pkg/network/transport/metrics/grpc/service.pb.go b/pkg/network/transport/metrics/grpc/service.pb.go deleted file mode 100644 index d9af9ea8b..000000000 Binary files a/pkg/network/transport/metrics/grpc/service.pb.go and /dev/null differ diff --git a/pkg/network/transport/metrics/grpc/service.proto b/pkg/network/transport/metrics/grpc/service.proto deleted file mode 100644 index fcec48992..000000000 --- a/pkg/network/transport/metrics/grpc/service.proto +++ /dev/null @@ -1,10 +0,0 @@ -syntax = "proto3"; -package metrics; -option go_package = "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc/metrics"; - -service Metrics { - rpc ResetSpaceCounter(ResetSpaceRequest) returns (ResetSpaceResponse); -} - -message ResetSpaceRequest {} -message ResetSpaceResponse {} \ No newline at end of file diff --git a/pkg/network/transport/object/grpc/.gitkeep b/pkg/network/transport/object/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/object/grpc/acl.go b/pkg/network/transport/object/grpc/acl.go deleted file mode 100644 index f40dd32cb..000000000 --- a/pkg/network/transport/object/grpc/acl.go +++ /dev/null @@ -1,742 +0,0 @@ -package object - -import ( - "bytes" - "context" - "crypto/ecdsa" - "fmt" - "strconv" - - "github.com/multiformats/go-multiaddr" - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended" - eaclstorage "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - eaclcheck "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc/eacl" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // RequestTargeter is an interface of request's ACL group calculator. - RequestTargeter interface { - Target(context.Context, serviceRequest) requestTarget - } - - // aclPreProcessor is an implementation of requestPreProcessor interface. - aclPreProcessor struct { - log *zap.Logger - - aclInfoReceiver aclInfoReceiver - - reqActionCalc requestActionCalculator - - localStore localstore.Localstore - - extACLSource eaclstorage.Storage - - bearerVerifier bearerTokenVerifier - } - - // duplicates NetmapClient method, used for testing. 
- irKeysReceiver interface { - InnerRingKeys() ([][]byte, error) - } - - containerNodesLister interface { - ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) - ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]netmap.Info, error) - } - - targetFinder struct { - log *zap.Logger - - irKeysRecv irKeysReceiver - cnrLister containerNodesLister - cnrStorage storage.Storage - } -) - -type objectHeadersSource interface { - getHeaders() (*Object, bool) -} - -type requestActionCalculator interface { - calculateRequestAction(context.Context, requestActionParams) eacl.Action -} - -type aclInfoReceiver struct { - cnrStorage storage.Storage - - targetFinder RequestTargeter -} - -type aclInfo struct { - rule container.BasicACL - - checkExtended bool - - checkBearer bool - - targetInfo requestTarget -} - -type reqActionCalc struct { - storage eaclstorage.Storage - - log *zap.Logger -} - -type serviceRequestInfo struct { - group eacl.Group - - req serviceRequest - - objHdrSrc objectHeadersSource -} - -type requestObjHdrSrc struct { - req serviceRequest - - ls localstore.Localstore -} - -type eaclFromBearer struct { - eaclstorage.Storage - - bearer service.BearerToken -} - -type requestTarget struct { - group eacl.Group - - ir bool -} - -var _ requestPreProcessor = (*aclPreProcessor)(nil) - -var errMissingSignatures = errors.New("empty signature list") - -func (p *aclPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { - if req == nil { - panic(pmEmptyServiceRequest) - } - - // fetch ACL info - aclInfo, err := p.aclInfoReceiver.getACLInfo(ctx, req) - if err != nil { - p.log.Warn("can't get acl of the container", zap.Stringer("cid", req.CID())) - return errAccessDenied - } - - // check basic ACL permissions - var checkFn func(uint8) bool - - switch aclInfo.targetInfo.group { - case eacl.GroupUser: - checkFn = aclInfo.rule.UserAllowed - case eacl.GroupSystem: - checkFn = aclInfo.rule.SystemAllowed - case eacl.GroupOthers: - checkFn = aclInfo.rule.OthersAllowed - default: - panic(fmt.Sprintf("unknown request group (aclPreProcessor): %d", aclInfo.targetInfo.group)) - } - - if requestType := req.Type(); !checkFn(requestACLSection(requestType)) || - aclInfo.targetInfo.ir && !allowedInnerRingRequest(requestType) { - return errAccessDenied - } - - if aclInfo.targetInfo.group != eacl.GroupSystem && - aclInfo.rule.Sticky() && - !checkObjectRequestOwnerMatch(req) { - return errAccessDenied - } - - if !aclInfo.checkBearer && !aclInfo.checkExtended { - return nil - } - - actionParams := requestActionParams{ - eaclSrc: p.extACLSource, - request: req, - objHdrSrc: &requestObjHdrSrc{ - req: req, - ls: p.localStore, - }, - group: aclInfo.targetInfo.group, - } - - if aclInfo.checkBearer { - bearer := req.GetBearerToken() - - if err := p.bearerVerifier.verifyBearerToken(ctx, req.CID(), bearer); err != nil { - p.log.Warn("bearer token verification failure", - zap.String("error", err.Error()), - ) - - return errAccessDenied - } - - actionParams.eaclSrc = eaclFromBearer{ - bearer: bearer, - } - } - - if p.reqActionCalc.calculateRequestAction(ctx, actionParams) != eacl.ActionAllow { - return errAccessDenied - } - - return nil -} - -func (t *targetFinder) Target(ctx context.Context, req serviceRequest) requestTarget { - res := requestTarget{ - group: eacl.GroupUnknown, - } - - ownerID, ownerKey, err := requestOwner(req) - if err != nil { - t.log.Warn("could not get request owner", - zap.String("error", err.Error()), - ) - - return res - } else if ownerKey == nil { - 
t.log.Warn("signature with nil public key detected") - return res - } - - // if request from container owner then return GroupUser - isOwner, err := isContainerOwner(t.cnrStorage, req.CID(), ownerID) - if err != nil { - t.log.Warn("can't check container owner", zap.String("err", err.Error())) - return res - } else if isOwner { - res.group = eacl.GroupUser - return res - } - - ownerKeyBytes := crypto.MarshalPublicKey(ownerKey) - - // if request from inner ring then return GroupSystem - irKeyList, err := t.irKeysRecv.InnerRingKeys() - if err != nil { - t.log.Warn("could not verify the key belongs to the IR node", zap.String("err", err.Error())) - return res - } - - for i := range irKeyList { - if bytes.Equal(irKeyList[i], ownerKeyBytes) { - res.group = eacl.GroupSystem - res.ir = true - return res - } - } - - // if request from current container node then return GroupSystem - cnr, err := t.cnrLister.ContainerNodesInfo(ctx, req.CID(), 0) - if err != nil { - t.log.Warn("can't get current container list", zap.String("err", err.Error())) - return res - } - - for i := range cnr { - if bytes.Equal(cnr[i].PublicKey(), ownerKeyBytes) { - res.group = eacl.GroupSystem - return res - } - } - - // if request from previous container node then return GroupSystem - cnr, err = t.cnrLister.ContainerNodesInfo(ctx, req.CID(), 1) - if err != nil { - t.log.Warn("can't get previous container list", zap.String("err", err.Error())) - return res - } - - for i := range cnr { - if bytes.Equal(cnr[i].PublicKey(), ownerKeyBytes) { - res.group = eacl.GroupSystem - return res - } - } - - res.group = eacl.GroupOthers - - // if none of the above return GroupOthers - return res -} - -func checkObjectRequestOwnerMatch(req serviceRequest) bool { - rt := req.Type() - - // ignore all request types except Put and Delete - if rt != object.RequestPut && rt != object.RequestDelete { - return true - } - - // get request owner - reqOwner, _, err := requestOwner(req) - if err != nil { - return false - } - - var payloadOwner OwnerID - - // get owner from request payload - if rt == object.RequestPut { - obj := req.(transport.PutInfo).GetHead() - if obj == nil { - return false - } - - payloadOwner = obj.GetSystemHeader().OwnerID - } else { - payloadOwner = req.(*object.DeleteRequest).OwnerID - } - - return reqOwner.Equal(payloadOwner) -} - -// FIXME: this solution only works with healthy key-to-owner conversion. -func requestOwner(req serviceRequest) (OwnerID, *ecdsa.PublicKey, error) { - // if session token exists => return its owner - if token := req.GetSessionToken(); token != nil { - return token.GetOwnerID(), crypto.UnmarshalPublicKey(token.GetOwnerKey()), nil - } - - signKeys := req.GetSignKeyPairs() - if len(signKeys) == 0 { - return OwnerID{}, nil, errMissingSignatures - } - - firstKey := signKeys[0].GetPublicKey() - if firstKey == nil { - return OwnerID{}, nil, crypto.ErrEmptyPublicKey - } - - owner, err := refs.NewOwnerID(firstKey) - - return owner, firstKey, err -} - -// HeadersOfType returns request or object headers. -func (s serviceRequestInfo) HeadersOfType(typ eacl.HeaderType) ([]eacl.Header, bool) { - switch typ { - default: - return nil, true - case eacl.HdrTypeRequest: - return TypedHeaderSourceFromExtendedHeaders(s.req).HeadersOfType(typ) - case eacl.HdrTypeObjSys, eacl.HdrTypeObjUsr: - obj, ok := s.objHdrSrc.getHeaders() - if !ok { - return nil, false - } - - return TypedHeaderSourceFromObject(obj).HeadersOfType(typ) - } -} - -// Key returns a binary representation of sender public key. 
-func (s serviceRequestInfo) Key() []byte { - _, key, err := requestOwner(s.req) - if err != nil { - return nil - } - - return crypto.MarshalPublicKey(key) -} - -// TypeOf returns true of object request type corresponds to passed OperationType. -func (s serviceRequestInfo) OperationType() eacl.OperationType { - switch t := s.req.Type(); t { - case object.RequestGet: - return eacl.OpTypeGet - case object.RequestPut: - return eacl.OpTypePut - case object.RequestHead: - return eacl.OpTypeHead - case object.RequestSearch: - return eacl.OpTypeSearch - case object.RequestDelete: - return eacl.OpTypeDelete - case object.RequestRange: - return eacl.OpTypeRange - case object.RequestRangeHash: - return eacl.OpTypeRangeHash - default: - panic(fmt.Sprintf("unknown request type (serviceRequestInfo): %d", t)) - } -} - -// Group returns the access group of the request. -func (s serviceRequestInfo) Group() eacl.Group { - return s.group -} - -// CID returns the container identifier of request. -func (s serviceRequestInfo) CID() CID { - return s.req.CID() -} - -func (s requestObjHdrSrc) getHeaders() (*Object, bool) { - switch s.req.Type() { - case object.RequestSearch: - // object header filters is not supported in Search request now - return nil, true - case object.RequestPut: - // for Put we get object headers from request - return s.req.(transport.PutInfo).GetHead(), true - default: - tReq := &transportRequest{ - serviceRequest: s.req, - } - - // for other requests we get object headers from local storage - m, err := s.ls.Meta(tReq.GetAddress()) - if err == nil { - return m.GetObject(), true - } - - return nil, false - } -} - -type requestActionParams struct { - eaclSrc eaclstorage.Storage - - request serviceRequest - - objHdrSrc objectHeadersSource - - group eacl.Group -} - -func (s reqActionCalc) calculateRequestAction(ctx context.Context, p requestActionParams) eacl.Action { - // build eACL validator - validator, err := eaclcheck.NewValidator(p.eaclSrc, s.log) - if err != nil { - s.log.Warn("could not build eacl acl validator", - zap.String("error", err.Error()), - ) - - return eacl.ActionUnknown - } - - // create RequestInfo instance - reqInfo := &serviceRequestInfo{ - group: p.group, - req: p.request, - objHdrSrc: p.objHdrSrc, - } - - // calculate ACL action - return validator.CalculateAction(reqInfo) -} - -func (s aclInfoReceiver) getACLInfo(ctx context.Context, req serviceRequest) (*aclInfo, error) { - cnr, err := s.cnrStorage.Get(req.CID()) - if err != nil { - return nil, err - } - - rule := cnr.BasicACL() - - isBearer := rule.BearerAllowed(requestACLSection(req.Type())) - - // fetch group from the request - t := s.targetFinder.Target(ctx, req) - - return &aclInfo{ - rule: rule, - - checkExtended: !rule.Final(), - - targetInfo: t, - - checkBearer: t.group != eacl.GroupSystem && isBearer && req.GetBearerToken() != nil, - }, nil -} - -func (s eaclFromBearer) GetEACL(cid CID) (eaclstorage.Table, error) { - return eacl.UnmarshalTable(s.bearer.GetACLRules()) -} - -// returns true if request of type argument is allowed for IR needs (audit). -func allowedInnerRingRequest(t object.RequestType) (res bool) { - switch t { - case - object.RequestSearch, - object.RequestHead, - object.RequestRangeHash: - res = true - } - - return -} - -// returns the index number of request section bits. 
-func requestACLSection(t object.RequestType) uint8 { - switch t { - case object.RequestRangeHash: - return 0 - case object.RequestRange: - return 1 - case object.RequestSearch: - return 2 - case object.RequestDelete: - return 3 - case object.RequestPut: - return 4 - case object.RequestHead: - return 5 - case object.RequestGet: - return 6 - default: - panic(fmt.Sprintf("unknown request type (requestACLSection): %d", t)) - } -} - -type objectHeaderSource struct { - obj *Object -} - -type typedHeader struct { - n string - v string - t eacl.HeaderType -} - -type extendedHeadersWrapper struct { - hdrSrc service.ExtendedHeadersSource -} - -type typedExtendedHeader struct { - hdr service.ExtendedHeader -} - -func newTypedObjSysHdr(name, value string) eacl.TypedHeader { - return &typedHeader{ - n: name, - v: value, - t: eacl.HdrTypeObjSys, - } -} - -// Name is a name field getter. -func (s typedHeader) Name() string { - return s.n -} - -// Value is a value field getter. -func (s typedHeader) Value() string { - return s.v -} - -// HeaderType is a type field getter. -func (s typedHeader) HeaderType() eacl.HeaderType { - return s.t -} - -// TypedHeaderSourceFromObject wraps passed object and returns TypedHeaderSource interface. -func TypedHeaderSourceFromObject(obj *object.Object) extended.TypedHeaderSource { - return &objectHeaderSource{ - obj: obj, - } -} - -// HeaderOfType gathers object headers of passed type and returns Header list. -// -// If value of some header can not be calculated (e.g. nil eacl header), it does not appear in list. -// -// Always returns true. -func (s objectHeaderSource) HeadersOfType(typ eacl.HeaderType) ([]eacl.Header, bool) { - if s.obj == nil { - return nil, true - } - - var res []eacl.Header - - switch typ { - case eacl.HdrTypeObjUsr: - objHeaders := s.obj.GetHeaders() - - res = make([]eacl.Header, 0, len(objHeaders)) // 7 system header fields - - for i := range objHeaders { - if h := newTypedObjectExtendedHeader(objHeaders[i]); h != nil { - res = append(res, h) - } - } - case eacl.HdrTypeObjSys: - res = make([]eacl.Header, 0, 7) - - sysHdr := s.obj.GetSystemHeader() - - created := sysHdr.GetCreatedAt() - - res = append(res, - // ID - newTypedObjSysHdr( - eacl.HdrObjSysNameID, - sysHdr.ID.String(), - ), - - // CID - newTypedObjSysHdr( - eacl.HdrObjSysNameCID, - sysHdr.CID.String(), - ), - - // OwnerID - newTypedObjSysHdr( - eacl.HdrObjSysNameOwnerID, - sysHdr.OwnerID.String(), - ), - - // Version - newTypedObjSysHdr( - eacl.HdrObjSysNameVersion, - strconv.FormatUint(sysHdr.GetVersion(), 10), - ), - - // PayloadLength - newTypedObjSysHdr( - eacl.HdrObjSysNamePayloadLength, - strconv.FormatUint(sysHdr.GetPayloadLength(), 10), - ), - - // CreatedAt.UnitTime - newTypedObjSysHdr( - eacl.HdrObjSysNameCreatedUnix, - strconv.FormatUint(uint64(created.GetUnixTime()), 10), - ), - - // CreatedAt.Epoch - newTypedObjSysHdr( - eacl.HdrObjSysNameCreatedEpoch, - strconv.FormatUint(created.GetEpoch(), 10), - ), - ) - } - - return res, true -} - -func newTypedObjectExtendedHeader(h object.Header) eacl.TypedHeader { - val := h.GetValue() - if val == nil { - return nil - } - - res := new(typedHeader) - res.t = eacl.HdrTypeObjSys - - switch hdr := val.(type) { - case *object.Header_UserHeader: - if hdr.UserHeader == nil { - return nil - } - - res.t = eacl.HdrTypeObjUsr - res.n = hdr.UserHeader.GetKey() - res.v = hdr.UserHeader.GetValue() - case *object.Header_Link: - if hdr.Link == nil { - return nil - } - - switch hdr.Link.GetType() { - case object.Link_Previous: - res.n = 
eacl.HdrObjSysLinkPrev - case object.Link_Next: - res.n = eacl.HdrObjSysLinkNext - case object.Link_Child: - res.n = eacl.HdrObjSysLinkChild - case object.Link_Parent: - res.n = eacl.HdrObjSysLinkPar - case object.Link_StorageGroup: - res.n = eacl.HdrObjSysLinkSG - default: - return nil - } - - res.v = hdr.Link.ID.String() - default: - return nil - } - - return res -} - -// TypedHeaderSourceFromExtendedHeaders wraps passed ExtendedHeadersSource and returns TypedHeaderSource interface. -func TypedHeaderSourceFromExtendedHeaders(hdrSrc service.ExtendedHeadersSource) extended.TypedHeaderSource { - return &extendedHeadersWrapper{ - hdrSrc: hdrSrc, - } -} - -// Name returns the result of Key method. -func (s typedExtendedHeader) Name() string { - return s.hdr.Key() -} - -// Value returns the result of Value method. -func (s typedExtendedHeader) Value() string { - return s.hdr.Value() -} - -// HeaderType always returns HdrTypeRequest. -func (s typedExtendedHeader) HeaderType() eacl.HeaderType { - return eacl.HdrTypeRequest -} - -// TypedHeaders gathers eacl request headers and returns TypedHeader list. -// -// Nil headers are ignored. -// -// Always returns true. -func (s extendedHeadersWrapper) HeadersOfType(typ eacl.HeaderType) ([]eacl.Header, bool) { - if s.hdrSrc == nil { - return nil, true - } - - var res []eacl.Header - - if typ == eacl.HdrTypeRequest { - hs := s.hdrSrc.ExtendedHeaders() - - res = make([]eacl.Header, 0, len(hs)) - - for i := range hs { - if hs[i] == nil { - continue - } - - res = append(res, &typedExtendedHeader{ - hdr: hs[i], - }) - } - } - - return res, true -} - -func isContainerOwner(storage storage.Storage, cid CID, ownerID OwnerID) (bool, error) { - cnr, err := storage.Get(cid) - if err != nil { - return false, err - } - - return cnr.OwnerID().Equal(ownerID), nil -} diff --git a/pkg/network/transport/object/grpc/acl_test.go b/pkg/network/transport/object/grpc/acl_test.go deleted file mode 100644 index ca4ebe7bd..000000000 --- a/pkg/network/transport/object/grpc/acl_test.go +++ /dev/null @@ -1,575 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - "errors" - "testing" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - libcnr "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/stretchr/testify/require" -) - -type ( - testACLEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - RequestTargeter - eacl.Table - storage.Storage - containerNodesLister - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -type testBasicChecker struct { - actionErr error - action bool - - sticky bool - - extended bool - - bearer bool -} - -func (t *testACLEntity) Get(cid storage.CID) (*storage.Container, error) { - if t.err != nil { - return nil, t.err - } - - return t.res.(*storage.Container), nil -} - -func (t *testACLEntity) calculateRequestAction(context.Context, requestActionParams) eacl.Action { - return t.res.(eacl.Action) -} - -func (t *testACLEntity) GetBasicACL(context.Context, CID) (libcnr.BasicACL, error) { - if t.err != nil { - return 0, t.err - } - - return t.res.(libcnr.BasicACL), nil -} - -func (t *testACLEntity) Target(context.Context, serviceRequest) requestTarget { - return t.res.(requestTarget) -} - -func (t *testACLEntity) CID() CID { return CID{} } - -func (t *testACLEntity) Type() object.RequestType { return t.res.(object.RequestType) } - -func (t *testACLEntity) GetBearerToken() service.BearerToken { return nil } - -func (t *testACLEntity) GetOwner() (*ecdsa.PublicKey, error) { - if t.err != nil { - return nil, t.err - } - - return t.res.(*ecdsa.PublicKey), nil -} - -func (t testACLEntity) InnerRingKeys() ([][]byte, error) { - if t.err != nil { - return nil, t.err - } - - return t.res.([][]byte), nil -} - -func (t *testACLEntity) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]netmap.Info, error) { - if t.err != nil { - return nil, t.err - } - - return t.res.([][]netmap.Info)[prev], nil -} - -func (t testACLEntity) GetSignKeyPairs() []service.SignKeyPair { - if t.res == nil { - return nil - } - return t.res.([]service.SignKeyPair) -} - -func TestPreprocessor(t *testing.T) { - ctx := context.TODO() - - t.Run("empty request", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - _ = new(aclPreProcessor).preProcess(ctx, nil) - }) - }) - - t.Run("everything is okay", func(t *testing.T) { - var rule basic.ACL - rule.SetFinal() - rule.AllowOthers(requestACLSection(object.RequestGet)) - - cnr := new(storage.Container) - cnr.SetBasicACL(rule) - - reqTarget := requestTarget{ - group: eacl.GroupOthers, - } - - preprocessor := aclPreProcessor{ - log: testlogger.NewLogger(false), - aclInfoReceiver: aclInfoReceiver{ - cnrStorage: &testACLEntity{ - res: cnr, - }, - targetFinder: &testACLEntity{res: reqTarget}, - }, - } - require.NoError(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) - - reqTarget.group = eacl.GroupSystem - preprocessor.aclInfoReceiver.targetFinder = &testACLEntity{res: reqTarget} - require.NoError(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) - - reqTarget.group = eacl.GroupUser - preprocessor.aclInfoReceiver.targetFinder = &testACLEntity{res: reqTarget} - require.Error(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) - }) - - t.Run("can't fetch container", func(t *testing.T) { - preprocessor := aclPreProcessor{ - log: testlogger.NewLogger(false), - aclInfoReceiver: aclInfoReceiver{ - cnrStorage: &testACLEntity{err: container.ErrNotFound}, - targetFinder: &testACLEntity{res: requestTarget{ - group: eacl.GroupOthers, - }}, - }, - } - require.Error(t, preprocessor.preProcess(ctx, &testACLEntity{res: object.RequestGet})) - - }) - - t.Run("sticky bit", func(t *testing.T) { - var rule basic.ACL - rule.SetSticky() - rule.SetFinal() - for i := uint8(0); i < 7; i++ { - rule.AllowUser(i) - } - - cnr := new(storage.Container) - cnr.SetBasicACL(rule) - - s := &aclPreProcessor{ - log: testlogger.NewLogger(false), - aclInfoReceiver: aclInfoReceiver{ - 
cnrStorage: &testACLEntity{ - res: cnr, - }, - targetFinder: &testACLEntity{ - res: requestTarget{ - group: eacl.GroupUser, - }, - }, - }, - } - - ownerKey := &test.DecodeKey(0).PublicKey - - ownerID, err := refs.NewOwnerID(ownerKey) - require.NoError(t, err) - - okItems := []func() []serviceRequest{ - // Read requests - func() []serviceRequest { - return []serviceRequest{ - new(object.GetRequest), - new(object.HeadRequest), - new(object.SearchRequest), - new(GetRangeRequest), - new(object.GetRangeHashRequest), - } - }, - // PutRequest / DeleteRequest (w/o token) - func() []serviceRequest { - req := object.MakePutRequestHeader(&Object{ - SystemHeader: SystemHeader{ - OwnerID: ownerID, - }, - }) - req.AddSignKey(nil, ownerKey) - putReq := &putRequest{ - PutRequest: req, - } - - delReq := new(object.DeleteRequest) - delReq.OwnerID = ownerID - delReq.AddSignKey(nil, ownerKey) - - return []serviceRequest{putReq, delReq} - }, - // PutRequest / DeleteRequest (w/ token) - func() []serviceRequest { - token := new(service.Token) - token.SetOwnerID(ownerID) - token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) - - req := object.MakePutRequestHeader(&Object{ - SystemHeader: SystemHeader{ - OwnerID: ownerID, - }, - }) - req.SetToken(token) - putReq := &putRequest{ - PutRequest: req, - } - - delReq := new(object.DeleteRequest) - delReq.OwnerID = ownerID - delReq.SetToken(token) - - return []serviceRequest{putReq, delReq} - }, - } - - failItems := []func() []serviceRequest{ - // PutRequest / DeleteRequest (w/o token and wrong owner) - func() []serviceRequest { - otherOwner := ownerID - otherOwner[0]++ - - req := object.MakePutRequestHeader(&Object{ - SystemHeader: SystemHeader{ - OwnerID: otherOwner, - }, - }) - req.AddSignKey(nil, ownerKey) - putReq := &putRequest{ - PutRequest: req, - } - - delReq := new(object.DeleteRequest) - delReq.OwnerID = otherOwner - delReq.AddSignKey(nil, ownerKey) - - return []serviceRequest{putReq, delReq} - }, - // PutRequest / DeleteRequest (w/ token w/ wrong owner) - func() []serviceRequest { - otherOwner := ownerID - otherOwner[0]++ - - token := new(service.Token) - token.SetOwnerID(ownerID) - token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) - - req := object.MakePutRequestHeader(&Object{ - SystemHeader: SystemHeader{ - OwnerID: otherOwner, - }, - }) - req.SetToken(token) - putReq := &putRequest{ - PutRequest: req, - } - - delReq := new(object.DeleteRequest) - delReq.OwnerID = otherOwner - delReq.SetToken(token) - - return []serviceRequest{putReq, delReq} - }, - } - - for _, ok := range okItems { - for _, req := range ok() { - require.NoError(t, s.preProcess(ctx, req)) - } - } - - for _, fail := range failItems { - for _, req := range fail() { - require.Error(t, s.preProcess(ctx, req)) - } - } - }) - - t.Run("eacl ACL", func(t *testing.T) { - target := requestTarget{ - group: eacl.GroupOthers, - } - - req := &testACLEntity{ - res: object.RequestGet, - } - - actCalc := new(testACLEntity) - - var rule basic.ACL - rule.AllowOthers(requestACLSection(object.RequestGet)) - - cnr := new(storage.Container) - cnr.SetBasicACL(rule) - - s := &aclPreProcessor{ - log: testlogger.NewLogger(false), - aclInfoReceiver: aclInfoReceiver{ - cnrStorage: &testACLEntity{ - res: cnr, - }, - targetFinder: &testACLEntity{ - res: target, - }, - }, - - reqActionCalc: actCalc, - } - - // force to return non-ActionAllow - actCalc.res = eacl.ActionAllow + 1 - require.EqualError(t, s.preProcess(ctx, req), errAccessDenied.Error()) - - // force to return ActionAllow - actCalc.res = eacl.ActionAllow 
- require.NoError(t, s.preProcess(ctx, req)) - }) - - t.Run("inner ring group", func(t *testing.T) { - reqTarget := requestTarget{ - group: eacl.GroupSystem, - ir: true, - } - - cnr := new(storage.Container) - cnr.SetBasicACL(basic.FromUint32(^uint32(0))) - - preprocessor := aclPreProcessor{ - log: testlogger.NewLogger(false), - aclInfoReceiver: aclInfoReceiver{ - cnrStorage: &testACLEntity{res: cnr}, - targetFinder: &testACLEntity{res: reqTarget}, - }, - } - - for _, rt := range []object.RequestType{ - object.RequestSearch, - object.RequestHead, - object.RequestRangeHash, - } { - require.NoError(t, - preprocessor.preProcess(ctx, &testACLEntity{ - res: rt, - }), - ) - } - - for _, rt := range []object.RequestType{ - object.RequestRange, - object.RequestPut, - object.RequestDelete, - object.RequestGet, - } { - require.EqualError(t, - preprocessor.preProcess(ctx, &testACLEntity{ - res: rt, - }), - errAccessDenied.Error(), - ) - } - }) -} - -func TestTargetFinder(t *testing.T) { - ctx := context.TODO() - irKey := test.DecodeKey(2) - containerKey := test.DecodeKey(3) - prevContainerKey := test.DecodeKey(4) - - var infoList1 []netmap.Info - info := netmap.Info{} - info.SetPublicKey(crypto.MarshalPublicKey(&containerKey.PublicKey)) - infoList1 = append(infoList1, info) - - var infoList2 []netmap.Info - info.SetPublicKey(crypto.MarshalPublicKey(&prevContainerKey.PublicKey)) - infoList2 = append(infoList2, info) - - finder := &targetFinder{ - log: testlogger.NewLogger(false), - irKeysRecv: &testACLEntity{ - res: [][]byte{crypto.MarshalPublicKey(&irKey.PublicKey)}, - }, - cnrLister: &testACLEntity{res: [][]netmap.Info{ - infoList1, - infoList2, - }}, - } - - t.Run("trusted node", func(t *testing.T) { - - pk := &test.DecodeKey(0).PublicKey - - ownerKey := &test.DecodeKey(1).PublicKey - owner, err := refs.NewOwnerID(ownerKey) - require.NoError(t, err) - - token := new(service.Token) - token.SetSessionKey(crypto.MarshalPublicKey(pk)) - token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) - token.SetOwnerID(owner) - - req := new(object.SearchRequest) - req.ContainerID = CID{1, 2, 3} - req.SetToken(token) - req.AddSignKey(nil, pk) - - cnr := new(storage.Container) - cnr.SetOwnerID(owner) - - finder.cnrStorage = &testACLEntity{ - res: cnr, - } - - require.Equal(t, - requestTarget{ - group: eacl.GroupUser, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("container owner", func(t *testing.T) { - key := &test.DecodeKey(0).PublicKey - owner, err := refs.NewOwnerID(key) - require.NoError(t, err) - - cnr := new(storage.Container) - cnr.SetOwnerID(owner) - - finder.cnrStorage = &testACLEntity{res: cnr} - - req := new(object.SearchRequest) - req.AddSignKey(nil, key) - - require.Equal(t, - requestTarget{ - group: eacl.GroupUser, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("system owner", func(t *testing.T) { - finder.cnrStorage = &testACLEntity{res: new(storage.Container)} - - req := new(object.SearchRequest) - req.AddSignKey(nil, &irKey.PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupSystem, - ir: true, - }, - finder.Target(ctx, req), - ) - - req = new(object.SearchRequest) - req.AddSignKey(nil, &containerKey.PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupSystem, - }, - finder.Target(ctx, req), - ) - - req = new(object.SearchRequest) - req.AddSignKey(nil, &prevContainerKey.PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupSystem, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("other owner", func(t *testing.T) { - finder.cnrStorage = 
&testACLEntity{res: new(storage.Container)} - - req := new(object.SearchRequest) - req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupOthers, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("can't fetch request owner", func(t *testing.T) { - req := new(object.SearchRequest) - - require.Equal(t, - requestTarget{ - group: eacl.GroupUnknown, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("can't fetch container", func(t *testing.T) { - finder.cnrStorage = &testACLEntity{err: container.ErrNotFound} - - req := new(object.SearchRequest) - req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupUnknown, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("can't fetch ir list", func(t *testing.T) { - finder.cnrStorage = &testACLEntity{res: new(storage.Container)} - finder.irKeysRecv = &testACLEntity{err: errors.New("blockchain is busy")} - - req := new(object.SearchRequest) - req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupUnknown, - }, - finder.Target(ctx, req), - ) - }) - - t.Run("can't fetch container list", func(t *testing.T) { - finder.cnrStorage = &testACLEntity{res: new(storage.Container)} - finder.cnrLister = &testACLEntity{err: container.ErrNotFound} - - req := new(object.SearchRequest) - req.AddSignKey(nil, &test.DecodeKey(0).PublicKey) - require.Equal(t, - requestTarget{ - group: eacl.GroupUnknown, - }, - finder.Target(ctx, req), - ) - }) -} diff --git a/pkg/network/transport/object/grpc/bearer.go b/pkg/network/transport/object/grpc/bearer.go deleted file mode 100644 index 019d3db67..000000000 --- a/pkg/network/transport/object/grpc/bearer.go +++ /dev/null @@ -1,72 +0,0 @@ -package object - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/pkg/errors" -) - -type bearerTokenVerifier interface { - verifyBearerToken(context.Context, CID, service.BearerToken) error -} - -type complexBearerVerifier struct { - items []bearerTokenVerifier -} - -type bearerActualityVerifier struct { - epochRecv EpochReceiver -} - -type bearerOwnershipVerifier struct { - cnrStorage storage.Storage -} - -type bearerSignatureVerifier struct{} - -var errWrongBearerOwner = errors.New("bearer author is not a container owner") - -func (s complexBearerVerifier) verifyBearerToken(ctx context.Context, cid CID, token service.BearerToken) error { - for i := range s.items { - if err := s.items[i].verifyBearerToken(ctx, cid, token); err != nil { - return err - } - } - - return nil -} - -func (s bearerActualityVerifier) verifyBearerToken(_ context.Context, _ CID, token service.BearerToken) error { - local := s.epochRecv.Epoch() - validUntil := token.ExpirationEpoch() - - if local > validUntil { - return errors.Errorf("bearer token is expired (local %d, valid until %d)", - local, - validUntil, - ) - } - - return nil -} - -func (s bearerOwnershipVerifier) verifyBearerToken(ctx context.Context, cid CID, token service.BearerToken) error { - isOwner, err := isContainerOwner(s.cnrStorage, cid, token.GetOwnerID()) - if err != nil { - return err - } else if !isOwner { - return errWrongBearerOwner - } - - return nil -} - -func (s bearerSignatureVerifier) verifyBearerToken(_ context.Context, _ CID, token service.BearerToken) error { - return service.VerifySignatureWithKey( - crypto.UnmarshalPublicKey(token.GetOwnerKey()), - 
service.NewVerifiedBearerToken(token), - ) -} diff --git a/pkg/network/transport/object/grpc/capacity.go b/pkg/network/transport/object/grpc/capacity.go deleted file mode 100644 index d0cc58c82..000000000 --- a/pkg/network/transport/object/grpc/capacity.go +++ /dev/null @@ -1,19 +0,0 @@ -package object - -func (s *objectService) RelativeAvailableCap() float64 { - diff := float64(s.ls.Size()) / float64(s.storageCap) - if 1-diff < 0 { - return 0 - } - - return 1 - diff -} - -func (s *objectService) AbsoluteAvailableCap() uint64 { - localSize := uint64(s.ls.Size()) - if localSize > s.storageCap { - return 0 - } - - return s.storageCap - localSize -} diff --git a/pkg/network/transport/object/grpc/capacity_test.go b/pkg/network/transport/object/grpc/capacity_test.go deleted file mode 100644 index 7054f44c1..000000000 --- a/pkg/network/transport/object/grpc/capacity_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package object - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testCapacityEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - localstore.Localstore - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var _ localstore.Localstore = (*testCapacityEntity)(nil) - -func (s *testCapacityEntity) Size() int64 { return s.res.(int64) } - -func TestObjectService_RelativeAvailableCap(t *testing.T) { - localStoreSize := int64(100) - - t.Run("oversize", func(t *testing.T) { - s := objectService{ - ls: &testCapacityEntity{res: localStoreSize}, - storageCap: uint64(localStoreSize - 1), - } - - require.Zero(t, s.RelativeAvailableCap()) - }) - - t.Run("correct calculation", func(t *testing.T) { - s := objectService{ - ls: &testCapacityEntity{res: localStoreSize}, - storageCap: 13 * uint64(localStoreSize), - } - - require.Equal(t, 1-float64(localStoreSize)/float64(s.storageCap), s.RelativeAvailableCap()) - }) -} - -func TestObjectService_AbsoluteAvailableCap(t *testing.T) { - localStoreSize := int64(100) - - t.Run("free space", func(t *testing.T) { - s := objectService{ - ls: &testCapacityEntity{res: localStoreSize}, - storageCap: uint64(localStoreSize), - } - - require.Zero(t, s.AbsoluteAvailableCap()) - s.storageCap-- - require.Zero(t, s.AbsoluteAvailableCap()) - }) - - t.Run("correct calculation", func(t *testing.T) { - s := objectService{ - ls: &testCapacityEntity{res: localStoreSize}, - storageCap: uint64(localStoreSize) + 12, - } - - require.Equal(t, s.storageCap-uint64(localStoreSize), s.AbsoluteAvailableCap()) - }) -} diff --git a/pkg/network/transport/object/grpc/delete.go b/pkg/network/transport/object/grpc/delete.go deleted file mode 100644 index e333d258c..000000000 --- a/pkg/network/transport/object/grpc/delete.go +++ /dev/null @@ -1,285 +0,0 @@ -package object - -import ( - "context" - "crypto/sha256" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - 
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - objectRemover interface { - delete(context.Context, deleteInfo) error - } - - coreObjRemover struct { - delPrep deletePreparer - straightRem objectRemover - tokenStore session.PrivateTokenStore - - // Set of potential deletePreparer errors that won't be converted into errDeletePrepare - mErr map[error]struct{} - - log *zap.Logger - } - - straightObjRemover struct { - tombCreator tombstoneCreator - objStorer objectStorer - } - - tombstoneCreator interface { - createTombstone(context.Context, deleteInfo) *Object - } - - coreTombCreator struct{} - - deletePreparer interface { - prepare(context.Context, deleteInfo) ([]deleteInfo, error) - } - - coreDelPreparer struct { - timeout time.Duration - childLister objectChildrenLister - } - - deleteInfo interface { - transport.AddressInfo - GetOwnerID() OwnerID - } - - rawDeleteInfo struct { - rawAddrInfo - ownerID OwnerID - } -) - -const emRemovePart = "could not remove object part #%d of #%d" - -var ( - _ tombstoneCreator = (*coreTombCreator)(nil) - _ deleteInfo = (*rawDeleteInfo)(nil) - _ deletePreparer = (*coreDelPreparer)(nil) - _ objectRemover = (*straightObjRemover)(nil) - _ objectRemover = (*coreObjRemover)(nil) - _ deleteInfo = (*transportRequest)(nil) - - checksumOfEmptyPayload = sha256.Sum256([]byte{}) -) - -func (s *objectService) Delete(ctx context.Context, req *object.DeleteRequest) (res *object.DeleteResponse, err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestDelete), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestDelete, - e: err, - }) - }() - - if _, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ - request: req, - executor: s, - }); err != nil { - return - } - - res = makeDeleteResponse() - err = s.respPreparer.prepareResponse(ctx, req, res) - - return -} - -func (s *coreObjRemover) delete(ctx context.Context, dInfo deleteInfo) error { - token := dInfo.GetSessionToken() - if token == nil { - return errNilToken - } - - key := session.PrivateTokenKey{} - key.SetOwnerID(dInfo.GetOwnerID()) - key.SetTokenID(token.GetID()) - - pToken, err := s.tokenStore.Fetch(key) - if err != nil { - return &detailedError{ - error: errTokenRetrieval, - d: privateTokenRecvDetails(token.GetID(), token.GetOwnerID()), - } - } - - deleteList, err := s.delPrep.prepare(ctx, dInfo) - if err != nil { - if _, ok := s.mErr[errors.Cause(err)]; !ok { - s.log.Error("delete info preparation failure", - zap.String("error", err.Error()), - ) - - err = errDeletePrepare - } - - return err - } - - ctx = contextWithValues(ctx, - transformer.PrivateSessionToken, pToken, - transformer.PublicSessionToken, token, - storagegroup.BearerToken, dInfo.GetBearerToken(), - storagegroup.ExtendedHeaders, dInfo.ExtendedHeaders(), - ) - - for i := range deleteList { - if err := s.straightRem.delete(ctx, deleteList[i]); err != nil { - return errors.Wrapf(err, emRemovePart, i+1, len(deleteList)) - } - } - - return nil -} - -func (s *coreDelPreparer) prepare(ctx context.Context, src deleteInfo) ([]deleteInfo, error) { - var ( - ownerID = src.GetOwnerID() - token = src.GetSessionToken() - addr = src.GetAddress() - bearer = src.GetBearerToken() - extHdrs = src.ExtendedHeaders() - ) - - dInfo := 
newRawDeleteInfo() - dInfo.setOwnerID(ownerID) - dInfo.setAddress(addr) - dInfo.setTTL(service.NonForwardingTTL) - dInfo.setSessionToken(token) - dInfo.setBearerToken(bearer) - dInfo.setExtendedHeaders(extHdrs) - dInfo.setTimeout(s.timeout) - - ctx = contextWithValues(ctx, - transformer.PublicSessionToken, src.GetSessionToken(), - storagegroup.BearerToken, bearer, - storagegroup.ExtendedHeaders, extHdrs, - ) - - children := s.childLister.children(ctx, addr) - - res := make([]deleteInfo, 0, len(children)+1) - - res = append(res, dInfo) - - for i := range children { - dInfo = newRawDeleteInfo() - dInfo.setOwnerID(ownerID) - dInfo.setAddress(Address{ - ObjectID: children[i], - CID: addr.CID, - }) - dInfo.setTTL(service.NonForwardingTTL) - dInfo.setSessionToken(token) - dInfo.setBearerToken(bearer) - dInfo.setExtendedHeaders(extHdrs) - dInfo.setTimeout(s.timeout) - - res = append(res, dInfo) - } - - return res, nil -} - -func (s *straightObjRemover) delete(ctx context.Context, dInfo deleteInfo) error { - putInfo := newRawPutInfo() - putInfo.setHead( - s.tombCreator.createTombstone(ctx, dInfo), - ) - putInfo.setSessionToken(dInfo.GetSessionToken()) - putInfo.setBearerToken(dInfo.GetBearerToken()) - putInfo.setExtendedHeaders(dInfo.ExtendedHeaders()) - putInfo.setTTL(dInfo.GetTTL()) - putInfo.setTimeout(dInfo.GetTimeout()) - - _, err := s.objStorer.putObject(ctx, putInfo) - - return err -} - -func (s *coreTombCreator) createTombstone(ctx context.Context, dInfo deleteInfo) *Object { - addr := dInfo.GetAddress() - obj := &Object{ - SystemHeader: SystemHeader{ - ID: addr.ObjectID, - CID: addr.CID, - OwnerID: dInfo.GetOwnerID(), - }, - Headers: []Header{ - { - Value: &object.Header_Tombstone{ - Tombstone: new(object.Tombstone), - }, - }, - { - Value: &object.Header_PayloadChecksum{ - PayloadChecksum: checksumOfEmptyPayload[:], - }, - }, - }, - } - - return obj -} - -func (s *rawDeleteInfo) GetAddress() Address { - return s.addr -} - -func (s *rawDeleteInfo) setAddress(addr Address) { - s.addr = addr -} - -func (s *rawDeleteInfo) GetOwnerID() OwnerID { - return s.ownerID -} - -func (s *rawDeleteInfo) setOwnerID(id OwnerID) { - s.ownerID = id -} - -func (s *rawDeleteInfo) setAddrInfo(v *rawAddrInfo) { - s.rawAddrInfo = *v - s.setType(object.RequestDelete) -} - -func newRawDeleteInfo() *rawDeleteInfo { - res := new(rawDeleteInfo) - - res.setAddrInfo(newRawAddressInfo()) - - return res -} - -func (s *transportRequest) GetToken() *session.Token { - return s.serviceRequest.(*object.DeleteRequest).GetToken() -} -func (s *transportRequest) GetHead() *Object { - return &Object{SystemHeader: SystemHeader{ - ID: s.serviceRequest.(*object.DeleteRequest).Address.ObjectID, - }} -} - -func (s *transportRequest) GetOwnerID() OwnerID { - return s.serviceRequest.(*object.DeleteRequest).OwnerID -} diff --git a/pkg/network/transport/object/grpc/delete_test.go b/pkg/network/transport/object/grpc/delete_test.go deleted file mode 100644 index af4e8cf72..000000000 --- a/pkg/network/transport/object/grpc/delete_test.go +++ /dev/null @@ -1,448 +0,0 @@ -package object - -import ( - "context" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/rand" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - 
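// Note: in the Delete path above, coreDelPreparer.prepare expands a single request into
// a deleteInfo for the parent address plus one per child object, and
// straightObjRemover.delete realizes each removal as a Put whose head is a tombstone
// object: same Address and OwnerID, a Header_Tombstone header, and the SHA-256 checksum
// of an empty payload. A failure on part i is wrapped as
// "could not remove object part #i of #N" (emRemovePart).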
"go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testDeleteEntity struct { - // Set of interfaces which testDeleteEntity must implement, but some methods from those does not call. - session.PrivateTokenStore - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var ( - _ EpochReceiver = (*testDeleteEntity)(nil) - _ objectStorer = (*testDeleteEntity)(nil) - _ tombstoneCreator = (*testDeleteEntity)(nil) - _ objectChildrenLister = (*testDeleteEntity)(nil) - _ objectRemover = (*testDeleteEntity)(nil) - _ requestHandler = (*testDeleteEntity)(nil) - _ deletePreparer = (*testDeleteEntity)(nil) - _ responsePreparer = (*testDeleteEntity)(nil) -) - -func (s *testDeleteEntity) verify(context.Context, *session.Token, *Object) error { - return nil -} - -func (s *testDeleteEntity) Fetch(id session.PrivateTokenKey) (session.PrivateToken, error) { - if s.f != nil { - s.f(id) - } - if s.err != nil { - return nil, s.err - } - return s.res.(session.PrivateToken), nil -} - -func (s *testDeleteEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testDeleteEntity) Epoch() uint64 { return s.res.(uint64) } - -func (s *testDeleteEntity) putObject(_ context.Context, p transport.PutInfo) (*Address, error) { - if s.f != nil { - s.f(p) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Address), nil -} - -func (s *testDeleteEntity) createTombstone(_ context.Context, p deleteInfo) *Object { - if s.f != nil { - s.f(p) - } - return s.res.(*Object) -} - -func (s *testDeleteEntity) children(ctx context.Context, addr Address) []ID { - if s.f != nil { - s.f(addr, ctx) - } - return s.res.([]ID) -} - -func (s *testDeleteEntity) delete(ctx context.Context, p deleteInfo) error { - if s.f != nil { - s.f(p, ctx) - } - return s.err -} - -func (s *testDeleteEntity) prepare(_ context.Context, p deleteInfo) ([]deleteInfo, error) { - if s.f != nil { - s.f(p) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]deleteInfo), nil -} - -func (s *testDeleteEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func Test_objectService_Delete(t *testing.T) { - ctx := context.TODO() - req := &object.DeleteRequest{Address: testObjectAddress(t)} - - t.Run("handler error", func(t *testing.T) { - rhErr := errors.New("test error for request handler") - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, req, p.request) - require.Equal(t, s, p.executor) - }) - }, - err: rhErr, // force requestHandler to return rhErr - } - - res, err := s.Delete(ctx, req) - // ascertain that error returns as expected - require.EqualError(t, err, rhErr.Error()) - require.Nil(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - s := objectService{ - requestHandler: new(testDeleteEntity), - respPreparer: &testDeleteEntity{res: 
new(object.DeleteResponse)}, - - statusCalculator: newStatusCalculator(), - } - - res, err := s.Delete(ctx, req) - require.NoError(t, err) - require.Equal(t, new(object.DeleteResponse), res) - }) -} - -func Test_coreObjRemover_delete(t *testing.T) { - ctx := context.TODO() - pToken, err := session.NewPrivateToken(0) - require.NoError(t, err) - - addr := testObjectAddress(t) - - token := new(service.Token) - token.SetAddress(addr) - - req := newRawDeleteInfo() - req.setAddress(addr) - req.setSessionToken(token) - - t.Run("nil token", func(t *testing.T) { - s := new(coreObjRemover) - - req := newRawDeleteInfo() - require.Nil(t, req.GetSessionToken()) - - require.EqualError(t, s.delete(ctx, req), errNilToken.Error()) - }) - - t.Run("prepare error", func(t *testing.T) { - dpErr := errors.New("test error for delete preparer") - - dp := &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct delete preparer params", func(t *testing.T) { - require.Equal(t, req, items[0]) - }) - }, - err: dpErr, // force deletePreparer to return dpErr - } - - s := &coreObjRemover{ - delPrep: dp, - tokenStore: &testDeleteEntity{res: pToken}, - mErr: map[error]struct{}{ - dpErr: {}, - }, - log: zap.L(), - } - - // ascertain that error returns as expected - require.EqualError(t, s.delete(ctx, req), dpErr.Error()) - - dp.err = errors.New("some other error") - - // ascertain that error returns as expected - require.EqualError(t, s.delete(ctx, req), errDeletePrepare.Error()) - }) - - t.Run("straight remover error", func(t *testing.T) { - dInfo := newRawDeleteInfo() - dInfo.setAddress(addr) - dInfo.setSessionToken(token) - - list := []deleteInfo{ - dInfo, - } - - srErr := errors.New("test error for straight remover") - - s := &coreObjRemover{ - delPrep: &testDeleteEntity{ - res: list, // force deletePreparer to return list - }, - straightRem: &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct straight remover params", func(t *testing.T) { - require.Equal(t, list[0], items[0]) - - ctx := items[1].(context.Context) - - require.Equal(t, - dInfo.GetSessionToken(), - ctx.Value(transformer.PublicSessionToken), - ) - - require.Equal(t, - pToken, - ctx.Value(transformer.PrivateSessionToken), - ) - }) - }, - err: srErr, // force objectRemover to return srErr - }, - tokenStore: &testDeleteEntity{res: pToken}, - } - - // ascertain that error returns as expected - require.EqualError(t, s.delete(ctx, req), errors.Wrapf(srErr, emRemovePart, 1, 1).Error()) - }) - - t.Run("success", func(t *testing.T) { - dInfo := newRawDeleteInfo() - dInfo.setAddress(addr) - dInfo.setSessionToken(token) - - list := []deleteInfo{ - dInfo, - } - - s := &coreObjRemover{ - delPrep: &testDeleteEntity{ - res: list, // force deletePreparer to return list - }, - straightRem: &testDeleteEntity{ - err: nil, // force objectRemover to return empty error - }, - tokenStore: &testDeleteEntity{res: pToken}, - } - - // ascertain that nil error returns - require.NoError(t, s.delete(ctx, req)) - }) -} - -func Test_coreDelPreparer_prepare(t *testing.T) { - var ( - ctx = context.TODO() - ownerID = OwnerID{1, 2, 3} - addr = testObjectAddress(t) - timeout = 5 * time.Second - token = new(service.Token) - childCount = 10 - children = make([]ID, 0, childCount) - ) - - req := newRawDeleteInfo() - req.setAddress(addr) - req.setSessionToken(token) - req.setOwnerID(ownerID) - - token.SetID(session.TokenID{1, 2, 3}) - - for i := 0; i < childCount; i++ { - children = append(children, testObjectAddress(t).ObjectID) - } - - s := &coreDelPreparer{ - 
timeout: timeout, - childLister: &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct children lister params", func(t *testing.T) { - require.Equal(t, addr, items[0]) - require.Equal(t, - token, - items[1].(context.Context).Value(transformer.PublicSessionToken), - ) - }) - }, - res: children, - }, - } - - res, err := s.prepare(ctx, req) - require.NoError(t, err) - - require.Len(t, res, childCount+1) - - for i := range res { - require.Equal(t, timeout, res[i].GetTimeout()) - require.Equal(t, token, res[i].GetSessionToken()) - require.Equal(t, uint32(service.NonForwardingTTL), res[i].GetTTL()) - - a := res[i].GetAddress() - require.Equal(t, addr.CID, a.CID) - if i > 0 { - require.Equal(t, children[i-1], a.ObjectID) - } else { - require.Equal(t, addr.ObjectID, a.ObjectID) - } - } -} - -func Test_straightObjRemover_delete(t *testing.T) { - var ( - ctx = context.TODO() - addr = testObjectAddress(t) - ttl = uint32(10) - timeout = 5 * time.Second - token = new(service.Token) - obj = &Object{SystemHeader: SystemHeader{ID: addr.ObjectID, CID: addr.CID}} - ) - - token.SetID(session.TokenID{1, 2, 3}) - - req := newRawDeleteInfo() - req.setTTL(ttl) - req.setTimeout(timeout) - req.setAddress(testObjectAddress(t)) - req.setSessionToken(token) - - t.Run("correct result", func(t *testing.T) { - osErr := errors.New("test error for object storer") - - s := &straightObjRemover{ - tombCreator: &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct tombstone creator params", func(t *testing.T) { - require.Equal(t, req, items[0]) - }) - }, - res: obj, - }, - objStorer: &testDeleteEntity{ - f: func(items ...interface{}) { - t.Run("correct object storer params", func(t *testing.T) { - p := items[0].(transport.PutInfo) - require.Equal(t, timeout, p.GetTimeout()) - require.Equal(t, ttl, p.GetTTL()) - require.Equal(t, obj, p.GetHead()) - require.Equal(t, token, p.GetSessionToken()) - }) - }, - err: osErr, // force objectStorer to return osErr - }, - } - - // ascertain that error returns as expected - require.EqualError(t, s.delete(ctx, req), osErr.Error()) - }) -} - -func Test_coreTombCreator_createTombstone(t *testing.T) { - var ( - ctx = context.TODO() - addr = testObjectAddress(t) - ownerID = OwnerID{1, 2, 3} - ) - - req := newRawDeleteInfo() - req.setAddress(addr) - req.setOwnerID(ownerID) - - t.Run("correct result", func(t *testing.T) { - s := new(coreTombCreator) - - res := s.createTombstone(ctx, req) - require.Equal(t, addr.CID, res.SystemHeader.CID) - require.Equal(t, addr.ObjectID, res.SystemHeader.ID) - require.Equal(t, ownerID, res.SystemHeader.OwnerID) - - _, tsHdr := res.LastHeader(object.HeaderType(object.TombstoneHdr)) - require.NotNil(t, tsHdr) - require.Equal(t, new(object.Tombstone), tsHdr.Value.(*object.Header_Tombstone).Tombstone) - }) -} - -func Test_deleteInfo(t *testing.T) { - t.Run("address", func(t *testing.T) { - addr := testObjectAddress(t) - - req := newRawDeleteInfo() - req.setAddress(addr) - - require.Equal(t, addr, req.GetAddress()) - }) - - t.Run("owner ID", func(t *testing.T) { - ownerID := OwnerID{} - _, err := rand.Read(ownerID[:]) - require.NoError(t, err) - - req := newRawDeleteInfo() - req.setOwnerID(ownerID) - require.Equal(t, ownerID, req.GetOwnerID()) - - tReq := &transportRequest{serviceRequest: &object.DeleteRequest{OwnerID: ownerID}} - require.Equal(t, ownerID, tReq.GetOwnerID()) - }) - - t.Run("token", func(t *testing.T) { - token := new(session.Token) - _, err := rand.Read(token.ID[:]) - require.NoError(t, err) - - req := 
newRawDeleteInfo() - req.setSessionToken(token) - require.Equal(t, token, req.GetSessionToken()) - - dReq := new(object.DeleteRequest) - dReq.SetToken(token) - tReq := &transportRequest{serviceRequest: dReq} - require.Equal(t, token, tReq.GetSessionToken()) - }) -} diff --git a/pkg/network/transport/object/grpc/eacl/validator.go b/pkg/network/transport/object/grpc/eacl/validator.go deleted file mode 100644 index d7ef4eb8a..000000000 --- a/pkg/network/transport/object/grpc/eacl/validator.go +++ /dev/null @@ -1,244 +0,0 @@ -package eacl - -import ( - "bytes" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "go.uber.org/zap" -) - -// RequestInfo represents the information -// about the request. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended.RequestInfo. -type RequestInfo = extended.RequestInfo - -// Action represents action on the request. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended.Action. -type Action = eacl.Action - -// Storage represents the eACL table storage. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage.Storage. -type Storage = storage.Storage - -// Target represents authorization group. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.Target. -type Target = eacl.Target - -// Table represents extended ACL rule table. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.ExtendedACLTable. -type Table = eacl.Table - -// HeaderFilter represents the header filter. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.HeaderFilter. -type HeaderFilter = eacl.HeaderFilter - -// Header represents the string -// key-value header. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended.Header. -type Header = extended.Header - -// MatchType represents value match type. -// -// It is a type alias of -// github.com/nspcc-dev/neofs-api-go/acl/extended.MatchType. -type MatchType = eacl.MatchType - -// Validator is a tool that calculates -// the action on a request according -// to the extended ACL rule table. -// -// Validator receives eACL table from -// the eACL storage. -type Validator struct { - logger *logger.Logger // logging component - - storage Storage // eACL table storage -} - -// NewValidator creates and initializes a new Validator using arguments. -// -// Returns an error if some of the arguments is nil. -// -// Using the Validator that has been created with new(Validator) -// expression (or just declaring a Validator variable) is unsafe -// and can lead to panic. -func NewValidator(st Storage, lg *logger.Logger) (*Validator, error) { - switch { - case st == nil: - return nil, storage.ErrNilStorage - case lg == nil: - return nil, logger.ErrNilLogger - } - - return &Validator{ - logger: lg, - storage: st, - }, nil -} - -// CalculateAction calculates action on the request according -// to its information. -// -// The action is calculated according to the application of -// eACL table of rules to the request. -// -// If request info argument is nil, eacl.ActionUnknown is -// returned immediately. -// -// If the eACL table is not available at the time of the call, -// eacl.ActionUnknown is returned. 
-// -// If no matching table entry is found, ActionAllow is returned. -func (v *Validator) CalculateAction(info RequestInfo) Action { - if info == nil { - return eacl.ActionUnknown - } - - // get container identifier from request - cid := info.CID() - - // get eACL table by container ID - table, err := v.storage.GetEACL(cid) - if err != nil { - v.logger.Error("could not get eACL table", - zap.Stringer("cid", cid), - zap.String("error", err.Error()), - ) - - return eacl.ActionUnknown - } - - return tableAction(info, table) -} - -// calculates action on the request based on the eACL rules. -func tableAction(info RequestInfo, table Table) Action { - requestOpType := info.OperationType() - - for _, record := range table.Records() { - // check type of operation - if record.OperationType() != requestOpType { - continue - } - - // check target - if !targetMatches(info, record.TargetList()) { - continue - } - - // check headers - switch val := matchFilters(info, record.HeaderFilters()); { - case val < 0: - // headers of some type could not be composed => allow - return eacl.ActionAllow - case val == 0: - return record.Action() - } - } - - return eacl.ActionAllow -} - -// returns: -// - positive value if no matching header is found for at least one filter; -// - zero if at least one suitable header is found for all filters; -// - negative value if the headers of at least one filter cannot be obtained. -func matchFilters(info extended.TypedHeaderSource, filters []HeaderFilter) int { - matched := 0 - - for _, filter := range filters { - // prevent NPE - if filter == nil { - continue - } - - headers, ok := info.HeadersOfType(filter.HeaderType()) - if !ok { - return -1 - } - - // get headers of filtering type - for _, header := range headers { - // prevent NPE - if header == nil { - continue - } - - // check header name - if header.Name() != filter.Name() { - continue - } - - // get match function - matchFn, ok := mMatchFns[filter.MatchType()] - if !ok { - continue - } - - // check match - if !matchFn(header, filter) { - continue - } - - // increment match counter - matched++ - - break - } - } - - return len(filters) - matched -} - -// returns true if one of ExtendedACLTarget has -// suitable target OR suitable public key. -func targetMatches(req RequestInfo, groups []Target) bool { - requestKey := req.Key() - - for _, target := range groups { - recordGroup := target.Group() - - // check public key match - for _, key := range target.KeyList() { - if bytes.Equal(key, requestKey) { - return true - } - } - - // check target group match - if req.Group() == recordGroup { - return true - } - } - - return false -} - -// Maps MatchType to corresponding function. -// 1st argument of function - header value, 2nd - header filter. 
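// Note: only eacl.StringEqual and eacl.StringNotEqual are mapped below; matchFilters
// skips a header whose filter MatchType has no entry in this map, so such a filter can
// never be satisfied, the record yields a positive filter count, and evaluation falls
// through to the next record (and ultimately to ActionAllow if nothing matches).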
-var mMatchFns = map[MatchType]func(Header, Header) bool{ - eacl.StringEqual: func(header Header, filter Header) bool { - return header.Value() == filter.Value() - }, - - eacl.StringNotEqual: func(header Header, filter Header) bool { - return header.Value() != filter.Value() - }, -} diff --git a/pkg/network/transport/object/grpc/execution.go b/pkg/network/transport/object/grpc/execution.go deleted file mode 100644 index 46c5763b7..000000000 --- a/pkg/network/transport/object/grpc/execution.go +++ /dev/null @@ -1,468 +0,0 @@ -package object - -import ( - "bytes" - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - operationExecutor interface { - executeOperation(context.Context, transport.MetaInfo, responseItemHandler) error - } - - coreOperationExecutor struct { - pre executionParamsComputer - fin operationFinalizer - loc operationExecutor - } - - operationFinalizer interface { - completeExecution(context.Context, operationParams) error - } - - computableParams struct { - addr Address - stopCount int - allowPartialResult bool - tryPreviousNetMap bool - selfForward bool - maxRecycleCount int - reqType object.RequestType - } - - responseItemHandler interface { - handleItem(interface{}) - } - - operationParams struct { - computableParams - metaInfo transport.MetaInfo - itemHandler responseItemHandler - } - - coreOperationFinalizer struct { - curPlacementBuilder placementBuilder - prevPlacementBuilder placementBuilder - interceptorPreparer interceptorPreparer - workerPool WorkerPool - traverseExec transport.ContainerTraverseExecutor - resLogger resultLogger - log *zap.Logger - } - - localFullObjectReceiver interface { - getObject(context.Context, Address) (*Object, error) - } - - localHeadReceiver interface { - headObject(context.Context, Address) (*Object, error) - } - - localObjectStorer interface { - putObject(context.Context, *Object) error - } - - localQueryImposer interface { - imposeQuery(context.Context, CID, []byte, int) ([]Address, error) - } - - localRangeReader interface { - getRange(context.Context, Address, Range) ([]byte, error) - } - - localRangeHasher interface { - getHashes(context.Context, Address, []Range, []byte) ([]Hash, error) - } - - localStoreExecutor struct { - salitor Salitor - epochRecv EpochReceiver - localStore localstore.Localstore - } - - localOperationExecutor struct { - objRecv localFullObjectReceiver - headRecv localHeadReceiver - objStore localObjectStorer - queryImp localQueryImposer - rngReader localRangeReader - rngHasher localRangeHasher - } - - coreHandler struct { - traverser containerTraverser - itemHandler responseItemHandler - resLogger resultLogger - reqType object.RequestType - } - - executionParamsComputer interface { - computeParams(*computableParams, transport.MetaInfo) - } - - coreExecParamsComp struct{} - - resultTracker interface { - trackResult(context.Context, resultItems) - } - - interceptorPreparer interface { - 
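// Note: the callback produced here (see coreInterceptorPreparer below) is installed as
// transport.TraverseParams.ExecutionInterceptor. When the visited node equals the local
// address it runs the operation through the local executor, reports the outcome to the
// result handler, and returns !selfForward; for any remote node it returns false, so the
// traverse executor proceeds with the network call.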
prepareInterceptor(interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) - } - - interceptorItems struct { - selfForward bool - handler transport.ResultHandler - metaInfo transport.MetaInfo - itemHandler responseItemHandler - } - - coreInterceptorPreparer struct { - localExec operationExecutor - addressStore storage.AddressStore - } - - resultItems struct { - requestType object.RequestType - node multiaddr.Multiaddr - satisfactory bool - } - - idleResultTracker struct { - } - - resultLogger interface { - logErr(object.RequestType, multiaddr.Multiaddr, error) - } - - coreResultLogger struct { - mLog map[object.RequestType]struct{} - log *zap.Logger - } -) - -const emRangeReadFail = "could not read %d range data" - -var errIncompleteOperation = errors.New("operation is not completed") - -var ( - _ resultTracker = (*idleResultTracker)(nil) - _ executionParamsComputer = (*coreExecParamsComp)(nil) - _ operationFinalizer = (*coreOperationFinalizer)(nil) - _ operationExecutor = (*localOperationExecutor)(nil) - _ operationExecutor = (*coreOperationExecutor)(nil) - _ transport.ResultHandler = (*coreHandler)(nil) - _ localFullObjectReceiver = (*localStoreExecutor)(nil) - _ localHeadReceiver = (*localStoreExecutor)(nil) - _ localObjectStorer = (*localStoreExecutor)(nil) - _ localRangeReader = (*localStoreExecutor)(nil) - _ localRangeHasher = (*localStoreExecutor)(nil) - _ resultLogger = (*coreResultLogger)(nil) -) - -func (s *coreExecParamsComp) computeParams(p *computableParams, req transport.MetaInfo) { - switch p.reqType = req.Type(); p.reqType { - case object.RequestPut: - if req.GetTTL() < service.NonForwardingTTL { - p.stopCount = 1 - } else { - p.stopCount = int(req.(transport.PutInfo).CopiesNumber()) - } - - p.allowPartialResult = false - p.tryPreviousNetMap = false - p.selfForward = false - p.addr = *req.(transport.PutInfo).GetHead().Address() - p.maxRecycleCount = 0 - case object.RequestGet: - p.stopCount = 1 - p.allowPartialResult = false - p.tryPreviousNetMap = true - p.selfForward = false - p.addr = req.(transport.AddressInfo).GetAddress() - p.maxRecycleCount = 0 - case object.RequestHead: - p.stopCount = 1 - p.allowPartialResult = false - p.tryPreviousNetMap = true - p.selfForward = false - p.addr = req.(transport.AddressInfo).GetAddress() - p.maxRecycleCount = 0 - case object.RequestSearch: - p.stopCount = -1 // to traverse all possible nodes in current and prev container - p.allowPartialResult = true - p.tryPreviousNetMap = true - p.selfForward = false - p.addr = Address{CID: req.(transport.SearchInfo).GetCID()} - p.maxRecycleCount = 0 - case object.RequestRange: - p.stopCount = 1 - p.allowPartialResult = false - p.tryPreviousNetMap = false - p.selfForward = false - p.addr = req.(transport.AddressInfo).GetAddress() - p.maxRecycleCount = 0 - case object.RequestRangeHash: - p.stopCount = 1 - p.allowPartialResult = false - p.tryPreviousNetMap = false - p.selfForward = false - p.addr = req.(transport.AddressInfo).GetAddress() - p.maxRecycleCount = 0 - } -} - -func (s idleResultTracker) trackResult(context.Context, resultItems) {} - -func (s *coreOperationExecutor) executeOperation(ctx context.Context, req transport.MetaInfo, h responseItemHandler) error { - // if TTL is zero then execute local operation - if req.GetTTL() < service.NonForwardingTTL { - return s.loc.executeOperation(ctx, req, h) - } - - p := new(computableParams) - s.pre.computeParams(p, req) - - return s.fin.completeExecution(ctx, operationParams{ - computableParams: *p, - metaInfo: req, - itemHandler: 
h, - }) -} - -func (s *coreOperationFinalizer) completeExecution(ctx context.Context, p operationParams) error { - traverser := newContainerTraverser(&traverseParams{ - tryPrevNM: p.tryPreviousNetMap, - addr: p.addr, - curPlacementBuilder: s.curPlacementBuilder, - prevPlacementBuilder: s.prevPlacementBuilder, - maxRecycleCount: p.maxRecycleCount, - stopCount: p.stopCount, - }) - - handler := &coreHandler{ - traverser: traverser, - itemHandler: p.itemHandler, - resLogger: s.resLogger, - reqType: p.reqType, - } - - interceptor, err := s.interceptorPreparer.prepareInterceptor(interceptorItems{ - selfForward: p.selfForward, - handler: handler, - metaInfo: p.metaInfo, - itemHandler: p.itemHandler, - }) - if err != nil { - return err - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - s.traverseExec.Execute(ctx, transport.TraverseParams{ - TransportInfo: p.metaInfo, - Handler: handler, - Traverser: traverser, - WorkerPool: s.workerPool, - ExecutionInterceptor: interceptor, - }) - - switch err := errors.Cause(traverser.Err()); err { - case container.ErrNotFound: - return &detailedError{ - error: errContainerNotFound, - d: containerDetails(p.addr.CID, descContainerNotFound), - } - case placement.ErrEmptyNodes: - if !p.allowPartialResult { - return errIncompleteOperation - } - - return nil - default: - if err != nil { - s.log.Error("traverse failure", - zap.String("error", err.Error()), - ) - - err = errPlacementProblem - } else if !p.allowPartialResult && !traverser.finished() { - err = errIncompleteOperation - } - - return err - } -} - -func (s *coreInterceptorPreparer) prepareInterceptor(p interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) { - selfAddr, err := s.addressStore.SelfAddr() - if err != nil { - return nil, err - } - - return func(ctx context.Context, node multiaddr.Multiaddr) (res bool) { - if node.Equal(selfAddr) { - p.handler.HandleResult(ctx, selfAddr, nil, - s.localExec.executeOperation(ctx, p.metaInfo, p.itemHandler)) - return !p.selfForward - } - - return false - }, nil -} - -func (s *coreHandler) HandleResult(ctx context.Context, n multiaddr.Multiaddr, r interface{}, e error) { - ok := e == nil - - s.traverser.add(n, ok) - - if ok && r != nil { - s.itemHandler.handleItem(r) - } - - s.resLogger.logErr(s.reqType, n, e) -} - -func (s *coreResultLogger) logErr(t object.RequestType, n multiaddr.Multiaddr, e error) { - if e == nil { - return - } else if _, ok := s.mLog[t]; !ok { - return - } - - s.log.Error("object request failure", - zap.Stringer("type", t), - zap.Stringer("node", n), - zap.String("error", e.Error()), - ) -} - -func (s *localOperationExecutor) executeOperation(ctx context.Context, req transport.MetaInfo, h responseItemHandler) error { - switch req.Type() { - case object.RequestPut: - obj := req.(transport.PutInfo).GetHead() - if err := s.objStore.putObject(ctx, obj); err != nil { - return err - } - - h.handleItem(obj.Address()) - case object.RequestGet: - obj, err := s.objRecv.getObject(ctx, req.(transport.AddressInfo).GetAddress()) - if err != nil { - return err - } - - h.handleItem(obj) - case object.RequestHead: - head, err := s.headRecv.headObject(ctx, req.(transport.AddressInfo).GetAddress()) - if err != nil { - return err - } - - h.handleItem(head) - case object.RequestSearch: - r := req.(transport.SearchInfo) - - addrList, err := s.queryImp.imposeQuery(ctx, r.GetCID(), r.GetQuery(), 1) // TODO: add query version to SearchInfo - if err != nil { - return err - } - - h.handleItem(addrList) - case object.RequestRange: - r 
:= req.(transport.RangeInfo) - - rangesData, err := s.rngReader.getRange(ctx, r.GetAddress(), r.GetRange()) - if err != nil { - return err - } - - h.handleItem(bytes.NewReader(rangesData)) - case object.RequestRangeHash: - r := req.(transport.RangeHashInfo) - - rangesHashes, err := s.rngHasher.getHashes(ctx, r.GetAddress(), r.GetRanges(), r.GetSalt()) - if err != nil { - return err - } - - h.handleItem(rangesHashes) - default: - return errors.Errorf(pmWrongRequestType, req) - } - - return nil -} - -func (s *localStoreExecutor) getHashes(ctx context.Context, addr Address, ranges []Range, salt []byte) ([]Hash, error) { - res := make([]Hash, 0, len(ranges)) - - for i := range ranges { - chunk, err := s.localStore.PRead(ctx, addr, ranges[i]) - if err != nil { - return nil, errors.Wrapf(err, emRangeReadFail, i+1) - } - - res = append(res, hash.Sum(s.salitor(chunk, salt))) - } - - return res, nil -} - -func (s *localStoreExecutor) getRange(ctx context.Context, addr Address, r Range) ([]byte, error) { - return s.localStore.PRead(ctx, addr, r) -} - -func (s *localStoreExecutor) putObject(ctx context.Context, obj *Object) error { - ctx = context.WithValue(ctx, localstore.StoreEpochValue, s.epochRecv.Epoch()) - - switch err := s.localStore.Put(ctx, obj); err { - // TODO: add all error cases - case nil: - return nil - default: - return errPutLocal - } -} - -func (s *localStoreExecutor) headObject(_ context.Context, addr Address) (*Object, error) { - m, err := s.localStore.Meta(addr) - if err != nil { - switch errors.Cause(err) { - case bucket.ErrNotFound: - return nil, errIncompleteOperation - default: - return nil, err - } - } - - return m.Object, nil -} - -func (s *localStoreExecutor) getObject(_ context.Context, addr Address) (*Object, error) { - obj, err := s.localStore.Get(addr) - if err != nil { - switch errors.Cause(err) { - case bucket.ErrNotFound: - return nil, errIncompleteOperation - default: - return nil, err - } - } - - return obj, nil -} diff --git a/pkg/network/transport/object/grpc/execution_test.go b/pkg/network/transport/object/grpc/execution_test.go deleted file mode 100644 index 4620c2f23..000000000 --- a/pkg/network/transport/object/grpc/execution_test.go +++ /dev/null @@ -1,1205 +0,0 @@ -package object - -import ( - "context" - "io" - "io/ioutil" - "testing" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testExecutionEntity struct { - // Set of interfaces which testExecutionEntity must implement, but some methods from those does not call. - transport.MetaInfo - localstore.Localstore - containerTraverser - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -func (s *testExecutionEntity) HandleResult(_ context.Context, n multiaddr.Multiaddr, r interface{}, e error) { - if s.f != nil { - s.f(n, r, e) - } -} - -var ( - _ transport.ResultHandler = (*testExecutionEntity)(nil) - _ interceptorPreparer = (*testExecutionEntity)(nil) - _ WorkerPool = (*testExecutionEntity)(nil) - _ operationExecutor = (*testExecutionEntity)(nil) - _ placementBuilder = (*testExecutionEntity)(nil) - _ storage.AddressStore = (*testExecutionEntity)(nil) - _ executionParamsComputer = (*testExecutionEntity)(nil) - _ operationFinalizer = (*testExecutionEntity)(nil) - _ EpochReceiver = (*testExecutionEntity)(nil) - _ localstore.Localstore = (*testExecutionEntity)(nil) - _ containerTraverser = (*testExecutionEntity)(nil) - _ responseItemHandler = (*testExecutionEntity)(nil) - _ resultTracker = (*testExecutionEntity)(nil) - _ localObjectStorer = (*testExecutionEntity)(nil) - _ localFullObjectReceiver = (*testExecutionEntity)(nil) - _ localHeadReceiver = (*testExecutionEntity)(nil) - _ localQueryImposer = (*testExecutionEntity)(nil) - _ localRangeReader = (*testExecutionEntity)(nil) - _ localRangeHasher = (*testExecutionEntity)(nil) -) - -func (s *testExecutionEntity) prepareInterceptor(p interceptorItems) (func(context.Context, multiaddr.Multiaddr) bool, error) { - if s.f != nil { - s.f(p) - } - if s.err != nil { - return nil, s.err - } - return s.res.(func(context.Context, multiaddr.Multiaddr) bool), nil -} - -func (s *testExecutionEntity) Execute(_ context.Context, p transport.TraverseParams) { - if s.f != nil { - s.f(p) - } -} - -func (s *testExecutionEntity) Submit(func()) error { - return s.err -} - -func (s *testExecutionEntity) executeOperation(ctx context.Context, r transport.MetaInfo, h responseItemHandler) error { - if s.f != nil { - s.f(r, h) - } - return s.err -} - -func (s *testExecutionEntity) buildPlacement(_ context.Context, a Address, n ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - if s.f != nil { - s.f(a, n) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]multiaddr.Multiaddr), nil -} - -func (s *testExecutionEntity) getHashes(_ context.Context, a Address, r []Range, sa []byte) ([]Hash, error) { - if s.f != nil { - s.f(a, r, sa) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Hash), nil -} - -func (s *testExecutionEntity) getRange(_ context.Context, addr Address, rngs Range) ([]byte, error) { - if s.f != nil { - s.f(addr, rngs) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]byte), nil -} - -func (s *testExecutionEntity) imposeQuery(_ context.Context, c CID, d []byte, v int) ([]Address, error) { - if s.f != nil { - s.f(c, d, v) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Address), nil -} - -func (s *testExecutionEntity) headObject(_ context.Context, addr Address) (*Object, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Object), nil -} - -func (s *testExecutionEntity) getObject(_ context.Context, addr Address) (*Object, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Object), nil -} - -func (s *testExecutionEntity) putObject(_ context.Context, obj *Object) error { - if s.f != nil { - s.f(obj) - } - return s.err -} - -func (s *testExecutionEntity) trackResult(_ context.Context, p resultItems) { - if s.f != nil { - s.f(p) - } -} - -func (s *testExecutionEntity) handleItem(v interface{}) { - if s.f != nil { - s.f(v) - } -} - -func (s 
*testExecutionEntity) add(n multiaddr.Multiaddr, b bool) { - if s.f != nil { - s.f(n, b) - } -} - -func (s *testExecutionEntity) done(n multiaddr.Multiaddr) bool { - if s.f != nil { - s.f(n) - } - return s.res.(bool) -} - -func (s *testExecutionEntity) close() { - if s.f != nil { - s.f() - } -} - -func (s *testExecutionEntity) PRead(ctx context.Context, addr Address, rng Range) ([]byte, error) { - if s.f != nil { - s.f(addr, rng) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]byte), nil -} - -func (s *testExecutionEntity) Put(ctx context.Context, obj *Object) error { - if s.f != nil { - s.f(ctx, obj) - } - return s.err -} - -func (s *testExecutionEntity) Get(addr Address) (*Object, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Object), nil -} - -func (s *testExecutionEntity) Meta(addr Address) (*Meta, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Meta), nil -} - -func (s *testExecutionEntity) Has(addr Address) (bool, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return false, s.err - } - return s.res.(bool), nil -} - -func (s *testExecutionEntity) Epoch() uint64 { return s.res.(uint64) } - -func (s *testExecutionEntity) completeExecution(_ context.Context, p operationParams) error { - if s.f != nil { - s.f(p) - } - return s.err -} - -func (s *testExecutionEntity) computeParams(p *computableParams, r transport.MetaInfo) { - if s.f != nil { - s.f(p, r) - } -} - -func (s *testExecutionEntity) SelfAddr() (multiaddr.Multiaddr, error) { - if s.err != nil { - return nil, s.err - } - return s.res.(multiaddr.Multiaddr), nil -} - -func (s *testExecutionEntity) Type() object.RequestType { - return s.res.(object.RequestType) -} - -func Test_typeOfRequest(t *testing.T) { - t.Run("correct mapping", func(t *testing.T) { - items := []struct { - exp object.RequestType - v transport.MetaInfo - }{ - {exp: object.RequestSearch, v: &transportRequest{serviceRequest: new(object.SearchRequest)}}, - {exp: object.RequestSearch, v: newRawSearchInfo()}, - {exp: object.RequestPut, v: new(putRequest)}, - {exp: object.RequestPut, v: &transportRequest{serviceRequest: new(object.PutRequest)}}, - {exp: object.RequestGet, v: newRawGetInfo()}, - {exp: object.RequestGet, v: &transportRequest{serviceRequest: new(object.GetRequest)}}, - {exp: object.RequestHead, v: newRawHeadInfo()}, - {exp: object.RequestHead, v: &transportRequest{serviceRequest: new(object.HeadRequest)}}, - {exp: object.RequestRange, v: newRawRangeInfo()}, - {exp: object.RequestRange, v: &transportRequest{serviceRequest: new(GetRangeRequest)}}, - {exp: object.RequestRangeHash, v: newRawRangeHashInfo()}, - {exp: object.RequestRangeHash, v: &transportRequest{serviceRequest: new(object.GetRangeHashRequest)}}, - } - - for i := range items { - require.Equal(t, items[i].exp, items[i].v.Type()) - } - }) -} - -func Test_coreExecParamsComp_computeParams(t *testing.T) { - s := new(coreExecParamsComp) - addr := testObjectAddress(t) - - t.Run("put", func(t *testing.T) { - addr := testObjectAddress(t) - - p := new(computableParams) - r := &putRequest{PutRequest: &object.PutRequest{ - R: &object.PutRequest_Header{ - Header: &object.PutRequest_PutHeader{ - Object: &Object{ - SystemHeader: SystemHeader{ - ID: addr.ObjectID, - CID: addr.CID, - }, - }, - }, - }, - }} - - s.computeParams(p, r) - - t.Run("non-forwarding behavior", func(t *testing.T) { - require.Equal(t, 1, p.stopCount) - }) - - 
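// Note: the first computeParams call ran with the request's zero TTL, which is below
// service.NonForwardingTTL, hence stopCount == 1 asserted above; after raising the TTL
// below, the put branch takes stopCount from the request's CopiesNumber instead (0 for
// this request), which the final CopiesNumber assertion reflects.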
r.SetTTL(service.NonForwardingTTL) - - s.computeParams(p, r) - - require.False(t, p.allowPartialResult) - require.False(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr, p.addr) - require.Equal(t, 0, p.maxRecycleCount) - require.Equal(t, 0, int(r.CopiesNumber())) - }) - - t.Run("get", func(t *testing.T) { - p := new(computableParams) - - r := newRawGetInfo() - r.setAddress(addr) - - s.computeParams(p, r) - - require.Equal(t, 1, p.stopCount) - require.False(t, p.allowPartialResult) - require.True(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr, p.addr) - require.Equal(t, 0, p.maxRecycleCount) - }) - - t.Run("head", func(t *testing.T) { - p := new(computableParams) - r := &transportRequest{serviceRequest: &object.HeadRequest{Address: addr}} - - s.computeParams(p, r) - - require.Equal(t, 1, p.stopCount) - require.False(t, p.allowPartialResult) - require.True(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr, p.addr) - require.Equal(t, 0, p.maxRecycleCount) - }) - - t.Run("search", func(t *testing.T) { - p := new(computableParams) - r := &transportRequest{serviceRequest: &object.SearchRequest{ContainerID: addr.CID}} - - s.computeParams(p, r) - - require.Equal(t, -1, p.stopCount) - require.True(t, p.allowPartialResult) - require.True(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr.CID, p.addr.CID) - require.True(t, p.addr.ObjectID.Empty()) - require.Equal(t, 0, p.maxRecycleCount) - }) - - t.Run("range", func(t *testing.T) { - p := new(computableParams) - - r := newRawRangeInfo() - r.setAddress(addr) - - s.computeParams(p, r) - - require.Equal(t, 1, p.stopCount) - require.False(t, p.allowPartialResult) - require.False(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr, p.addr) - require.Equal(t, 0, p.maxRecycleCount) - }) - - t.Run("range hash", func(t *testing.T) { - p := new(computableParams) - - r := newRawRangeHashInfo() - r.setAddress(addr) - - s.computeParams(p, r) - - require.Equal(t, 1, p.stopCount) - require.False(t, p.allowPartialResult) - require.False(t, p.tryPreviousNetMap) - require.False(t, p.selfForward) - require.Equal(t, addr, p.addr) - require.Equal(t, 0, p.maxRecycleCount) - }) -} - -func Test_coreOperationExecutor_executeOperation(t *testing.T) { - ctx := context.TODO() - - t.Run("correct result", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - p := new(testExecutionEntity) - req := newRawPutInfo() - req.setTTL(1) - finErr := errors.New("test error for operation finalizer") - - s := &coreOperationExecutor{ - pre: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct params computer arguments", func(t *testing.T) { - require.Equal(t, computableParams{}, *items[0].(*computableParams)) - require.Equal(t, req, items[1].(transport.MetaInfo)) - }) - }, - }, - fin: &testExecutionEntity{ - f: func(items ...interface{}) { - par := items[0].(operationParams) - require.Equal(t, req, par.metaInfo) - require.Equal(t, p, par.itemHandler) - }, - err: finErr, - }, - loc: new(testExecutionEntity), - } - - require.EqualError(t, - s.executeOperation(ctx, req, p), - finErr.Error(), - ) - }) - - t.Run("zero ttl", func(t *testing.T) { - p := new(testExecutionEntity) - req := newRawPutInfo() - finErr := errors.New("test error for operation finalizer") - - s := &coreOperationExecutor{ - loc: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, p, 
items[1]) - }, - err: finErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, p), - finErr.Error(), - ) - }) - }) -} - -func Test_localStoreExecutor(t *testing.T) { - ctx := context.TODO() - addr := testObjectAddress(t) - - t.Run("put", func(t *testing.T) { - epoch := uint64(100) - obj := new(Object) - putErr := errors.New("test error for put") - - ls := &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct local store put params", func(t *testing.T) { - v, ok := items[0].(context.Context).Value(localstore.StoreEpochValue).(uint64) - require.True(t, ok) - require.Equal(t, epoch, v) - - require.Equal(t, obj, items[1].(*Object)) - }) - }, - } - - s := &localStoreExecutor{ - epochRecv: &testExecutionEntity{ - res: epoch, - }, - localStore: ls, - } - - require.NoError(t, s.putObject(ctx, obj)) - - ls.err = putErr - - require.EqualError(t, - s.putObject(ctx, obj), - errPutLocal.Error(), - ) - }) - - t.Run("get", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - getErr := errors.New("test error for get") - - ls := &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct local store get params", func(t *testing.T) { - require.Equal(t, addr, items[0].(Address)) - }) - }, - err: getErr, - } - - s := &localStoreExecutor{ - localStore: ls, - } - - res, err := s.getObject(ctx, addr) - require.EqualError(t, err, getErr.Error()) - require.Nil(t, res) - - ls.err = errors.Wrap(bucket.ErrNotFound, "wrap message") - - res, err = s.getObject(ctx, addr) - require.EqualError(t, err, errIncompleteOperation.Error()) - require.Nil(t, res) - }) - - t.Run("success", func(t *testing.T) { - obj := new(Object) - - s := &localStoreExecutor{ - localStore: &testExecutionEntity{ - res: obj, - }, - } - - res, err := s.getObject(ctx, addr) - require.NoError(t, err) - require.Equal(t, obj, res) - }) - }) - - t.Run("head", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - headErr := errors.New("test error for head") - - ls := &testExecutionEntity{ - err: headErr, - } - - s := &localStoreExecutor{ - localStore: ls, - } - - res, err := s.headObject(ctx, addr) - require.EqualError(t, err, headErr.Error()) - require.Nil(t, res) - - ls.err = errors.Wrap(bucket.ErrNotFound, "wrap message") - - res, err = s.headObject(ctx, addr) - require.EqualError(t, err, errIncompleteOperation.Error()) - require.Nil(t, res) - }) - - t.Run("success", func(t *testing.T) { - obj := new(Object) - - s := &localStoreExecutor{ - localStore: &testExecutionEntity{ - res: &Meta{Object: obj}, - }, - } - - res, err := s.headObject(ctx, addr) - require.NoError(t, err) - require.Equal(t, obj, res) - }) - }) - - t.Run("get range", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - rngErr := errors.New("test error for range reader") - - s := &localStoreExecutor{ - localStore: &testExecutionEntity{ - err: rngErr, - }, - } - - res, err := s.getRange(ctx, addr, Range{}) - require.EqualError(t, err, rngErr.Error()) - require.Empty(t, res) - }) - - t.Run("success", func(t *testing.T) { - rng := Range{Offset: 1, Length: 1} - - d := testData(t, 10) - - s := &localStoreExecutor{ - localStore: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct local store pread params", func(t *testing.T) { - require.Equal(t, addr, items[0].(Address)) - require.Equal(t, rng, items[1].(Range)) - }) - }, - res: d, - }, - } - - res, err := s.getRange(ctx, addr, rng) - require.NoError(t, err) - require.Equal(t, d, res) - }) - }) - - t.Run("get range hash", func(t *testing.T) { - 
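// Note: getHashes reads each requested range via localStore.PRead, salts the chunk with
// the configured salitor (hash.SaltXOR in the success case below) and hashes it, so the
// expected value per range is hash.Sum(hash.SaltXOR(chunk, salt)); a read failure is
// wrapped as "could not read <i> range data" (emRangeReadFail), as the error case checks.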
t.Run("empty range list", func(t *testing.T) { - s := &localStoreExecutor{ - localStore: new(testExecutionEntity), - } - - res, err := s.getHashes(ctx, addr, nil, nil) - require.NoError(t, err) - require.Empty(t, res) - }) - - t.Run("error", func(t *testing.T) { - rhErr := errors.New("test error for range hasher") - - s := &localStoreExecutor{ - localStore: &testExecutionEntity{ - err: rhErr, - }, - } - - res, err := s.getHashes(ctx, addr, make([]Range, 1), nil) - require.EqualError(t, err, errors.Wrapf(rhErr, emRangeReadFail, 1).Error()) - require.Empty(t, res) - }) - - t.Run("success", func(t *testing.T) { - rngs := []Range{ - {Offset: 0, Length: 0}, - {Offset: 1, Length: 1}, - } - - d := testData(t, 64) - salt := testData(t, 20) - - callNum := 0 - - s := &localStoreExecutor{ - salitor: hash.SaltXOR, - localStore: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct local store pread params", func(t *testing.T) { - require.Equal(t, addr, items[0].(Address)) - require.Equal(t, rngs[callNum], items[1].(Range)) - callNum++ - }) - }, - res: d, - }, - } - - res, err := s.getHashes(ctx, addr, rngs, salt) - require.NoError(t, err) - require.Len(t, res, len(rngs)) - for i := range rngs { - require.Equal(t, hash.Sum(hash.SaltXOR(d, salt)), res[i]) - } - }) - }) -} - -func Test_coreHandler_HandleResult(t *testing.T) { - ctx := context.TODO() - node := testNode(t, 1) - - t.Run("error", func(t *testing.T) { - handled := false - err := errors.New("") - - s := &coreHandler{ - traverser: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct traverser params", func(t *testing.T) { - require.Equal(t, node, items[0].(multiaddr.Multiaddr)) - require.False(t, items[1].(bool)) - }) - }, - }, - itemHandler: &testExecutionEntity{ - f: func(items ...interface{}) { - handled = true - }, - }, - resLogger: new(coreResultLogger), - } - - s.HandleResult(ctx, node, nil, err) - - require.False(t, handled) - }) - - t.Run("success", func(t *testing.T) { - handled := false - res := testData(t, 10) - - s := &coreHandler{ - traverser: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct traverser params", func(t *testing.T) { - require.Equal(t, node, items[0].(multiaddr.Multiaddr)) - require.True(t, items[1].(bool)) - }) - }, - }, - itemHandler: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, res, items[0]) - }, - }, - resLogger: new(coreResultLogger), - } - - s.HandleResult(ctx, node, res, nil) - - require.False(t, handled) - }) -} - -func Test_localOperationExecutor_executeOperation(t *testing.T) { - ctx := context.TODO() - - addr := testObjectAddress(t) - - obj := &Object{ - SystemHeader: SystemHeader{ - ID: addr.ObjectID, - CID: addr.CID, - }, - } - - t.Run("wrong type", func(t *testing.T) { - req := &testExecutionEntity{ - res: object.RequestType(-1), - } - - require.EqualError(t, - new(localOperationExecutor).executeOperation(ctx, req, nil), - errors.Errorf(pmWrongRequestType, req).Error(), - ) - }) - - t.Run("put", func(t *testing.T) { - req := &putRequest{PutRequest: &object.PutRequest{ - R: &object.PutRequest_Header{ - Header: &object.PutRequest_PutHeader{ - Object: obj, - }, - }, - }} - - t.Run("error", func(t *testing.T) { - putErr := errors.New("test error for put") - - s := &localOperationExecutor{ - objStore: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, obj, items[0].(*Object)) - }, - err: putErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - putErr.Error(), - ) - }) 
- - t.Run("success", func(t *testing.T) { - h := &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, *items[0].(*Address)) - }, - } - - s := &localOperationExecutor{ - objStore: new(testExecutionEntity), - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) - - t.Run("get", func(t *testing.T) { - req := newRawGetInfo() - req.setAddress(addr) - - t.Run("error", func(t *testing.T) { - getErr := errors.New("test error for get") - - s := &localOperationExecutor{ - objRecv: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0].(Address)) - }, - err: getErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - getErr.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - h := &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, obj, items[0].(*Object)) - }, - } - - s := &localOperationExecutor{ - objRecv: &testExecutionEntity{ - res: obj, - }, - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) - - t.Run("head", func(t *testing.T) { - req := &transportRequest{serviceRequest: &object.HeadRequest{ - Address: addr, - }} - - t.Run("error", func(t *testing.T) { - headErr := errors.New("test error for head") - - s := &localOperationExecutor{ - headRecv: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0].(Address)) - }, - err: headErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - headErr.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - h := &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, obj, items[0].(*Object)) - }, - } - - s := &localOperationExecutor{ - headRecv: &testExecutionEntity{ - res: obj, - }, - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) - - t.Run("search", func(t *testing.T) { - cid := testObjectAddress(t).CID - testQuery := testData(t, 10) - - req := &transportRequest{serviceRequest: &object.SearchRequest{ - ContainerID: cid, - Query: testQuery, - }} - - t.Run("error", func(t *testing.T) { - searchErr := errors.New("test error for search") - - s := &localOperationExecutor{ - queryImp: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, cid, items[0].(CID)) - require.Equal(t, testQuery, items[1].([]byte)) - require.Equal(t, 1, items[2].(int)) - }, - err: searchErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - searchErr.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - addrList := testAddrList(t, 5) - - h := &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addrList, items[0].([]Address)) - }, - } - - s := &localOperationExecutor{ - queryImp: &testExecutionEntity{ - res: addrList, - }, - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) - - t.Run("get range", func(t *testing.T) { - rng := Range{Offset: 1, Length: 1} - - req := newRawRangeInfo() - req.setAddress(addr) - req.setRange(rng) - - t.Run("error", func(t *testing.T) { - rrErr := errors.New("test error for range reader") - - s := &localOperationExecutor{ - rngReader: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0].(Address)) - require.Equal(t, rng, items[1].(Range)) - }, - err: rrErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - rrErr.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - data := testData(t, 10) - - h := &testExecutionEntity{ - f: func(items ...interface{}) { 
- d, err := ioutil.ReadAll(items[0].(io.Reader)) - require.NoError(t, err) - require.Equal(t, data, d) - }, - } - - s := &localOperationExecutor{ - rngReader: &testExecutionEntity{ - res: data, - }, - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) - - t.Run("get range hash", func(t *testing.T) { - rngs := []Range{ - {Offset: 0, Length: 0}, - {Offset: 1, Length: 1}, - } - - salt := testData(t, 10) - - req := newRawRangeHashInfo() - req.setAddress(addr) - req.setRanges(rngs) - req.setSalt(salt) - - t.Run("error", func(t *testing.T) { - rhErr := errors.New("test error for range hasher") - - s := &localOperationExecutor{ - rngHasher: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0].(Address)) - require.Equal(t, rngs, items[1].([]Range)) - require.Equal(t, salt, items[2].([]byte)) - }, - err: rhErr, - }, - } - - require.EqualError(t, - s.executeOperation(ctx, req, nil), - rhErr.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - hashes := []Hash{ - hash.Sum(testData(t, 10)), - hash.Sum(testData(t, 10)), - } - - h := &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, hashes, items[0].([]Hash)) - }, - } - - s := &localOperationExecutor{ - rngHasher: &testExecutionEntity{ - res: hashes, - }, - } - - require.NoError(t, s.executeOperation(ctx, req, h)) - }) - }) -} - -func Test_coreOperationFinalizer_completeExecution(t *testing.T) { - ctx := context.TODO() - - t.Run("address store failure", func(t *testing.T) { - asErr := errors.New("test error for address store") - - s := &coreOperationFinalizer{ - interceptorPreparer: &testExecutionEntity{ - err: asErr, - }, - } - - require.EqualError(t, s.completeExecution(ctx, operationParams{ - metaInfo: &transportRequest{serviceRequest: new(object.SearchRequest)}, - }), asErr.Error()) - }) - - t.Run("correct execution construction", func(t *testing.T) { - req := &transportRequest{ - serviceRequest: &object.SearchRequest{ - ContainerID: testObjectAddress(t).CID, - Query: testData(t, 10), - QueryVersion: 1, - }, - timeout: 10 * time.Second, - } - - req.SetTTL(10) - - itemHandler := new(testExecutionEntity) - opParams := operationParams{ - computableParams: computableParams{ - addr: testObjectAddress(t), - stopCount: 2, - allowPartialResult: false, - tryPreviousNetMap: false, - selfForward: true, - maxRecycleCount: 7, - }, - metaInfo: req, - itemHandler: itemHandler, - } - - curPl := new(testExecutionEntity) - prevPl := new(testExecutionEntity) - wp := new(testExecutionEntity) - s := &coreOperationFinalizer{ - curPlacementBuilder: curPl, - prevPlacementBuilder: prevPl, - interceptorPreparer: &testExecutionEntity{ - res: func(context.Context, multiaddr.Multiaddr) bool { return true }, - }, - workerPool: wp, - traverseExec: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct traverse executor params", func(t *testing.T) { - p := items[0].(transport.TraverseParams) - - require.True(t, p.ExecutionInterceptor(ctx, nil)) - require.Equal(t, req, p.TransportInfo) - require.Equal(t, wp, p.WorkerPool) - - tr := p.Traverser.(*coreTraverser) - require.Equal(t, opParams.addr, tr.addr) - require.Equal(t, opParams.tryPreviousNetMap, tr.tryPrevNM) - require.Equal(t, curPl, tr.curPlacementBuilder) - require.Equal(t, prevPl, tr.prevPlacementBuilder) - require.Equal(t, opParams.maxRecycleCount, tr.maxRecycleCount) - require.Equal(t, opParams.stopCount, tr.stopCount) - - h := p.Handler.(*coreHandler) - require.Equal(t, tr, h.traverser) - require.Equal(t, itemHandler, 
h.itemHandler) - }) - }, - }, - log: zap.L(), - } - - require.EqualError(t, s.completeExecution(ctx, opParams), errIncompleteOperation.Error()) - }) -} - -func Test_coreInterceptorPreparer_prepareInterceptor(t *testing.T) { - t.Run("address store failure", func(t *testing.T) { - asErr := errors.New("test error for address store") - - s := &coreInterceptorPreparer{ - addressStore: &testExecutionEntity{ - err: asErr, - }, - } - - res, err := s.prepareInterceptor(interceptorItems{}) - require.EqualError(t, err, asErr.Error()) - require.Nil(t, res) - }) - - t.Run("correct interceptor", func(t *testing.T) { - ctx := context.TODO() - selfAddr := testNode(t, 0) - - t.Run("local node", func(t *testing.T) { - req := new(transportRequest) - itemHandler := new(testExecutionEntity) - - localErr := errors.New("test error for local executor") - - p := interceptorItems{ - selfForward: true, - handler: &testExecutionEntity{ - f: func(items ...interface{}) { - t.Run("correct local executor params", func(t *testing.T) { - require.Equal(t, selfAddr, items[0].(multiaddr.Multiaddr)) - require.Nil(t, items[1]) - require.EqualError(t, items[2].(error), localErr.Error()) - }) - }, - }, - metaInfo: req, - itemHandler: itemHandler, - } - - s := &coreInterceptorPreparer{ - localExec: &testExecutionEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0].(transport.MetaInfo)) - require.Equal(t, itemHandler, items[1].(responseItemHandler)) - }, - err: localErr, - }, - addressStore: &testExecutionEntity{ - res: selfAddr, - }, - } - - res, err := s.prepareInterceptor(p) - require.NoError(t, err) - require.False(t, res(ctx, selfAddr)) - }) - - t.Run("remote node", func(t *testing.T) { - node := testNode(t, 1) - remoteNode := testNode(t, 2) - - p := interceptorItems{} - - s := &coreInterceptorPreparer{ - addressStore: &testExecutionEntity{ - res: remoteNode, - }, - } - - res, err := s.prepareInterceptor(p) - require.NoError(t, err) - require.False(t, res(ctx, node)) - }) - }) -} - -// testAddrList returns count random object addresses. 
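Throughout these deleted tests the helpers follow one convention: testAddrList (immediately below) produces a slice of random object addresses, and every test*Entity double embeds the interfaces it has to satisfy, intercepts call arguments through an optional callback, and hands back a canned result or error. A minimal, self-contained sketch of that mock pattern follows; the Greeter interface and all other names in it are invented for illustration and are not part of the codebase.

package mocksketch

// Greeter is a stand-in interface, invented purely for this illustration.
type Greeter interface {
	Greet(name string) (string, error)
}

// testEntity mirrors the style of testExecutionEntity, testFilterEntity and
// testGetEntity: f intercepts arguments, res is the canned result, err is the
// canned error returned as-is when set.
type testEntity struct {
	f   func(...interface{})
	res interface{}
	err error
}

// Compile-time check, mirroring the `var _ X = (*testY)(nil)` assertions used
// in the deleted tests.
var _ Greeter = (*testEntity)(nil)

func (s *testEntity) Greet(name string) (string, error) {
	if s.f != nil {
		s.f(name) // lets the test assert on the arguments it received
	}
	if s.err != nil {
		return "", s.err
	}
	return s.res.(string), nil
}

// Typical usage in a test:
//   ok := &testEntity{res: "hello"}   // success path
//   bad := &testEntity{err: someErr}  // failure path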
-func testAddrList(t *testing.T, count int) (res []Address) { - for i := 0; i < count; i++ { - res = append(res, testObjectAddress(t)) - } - return -} diff --git a/pkg/network/transport/object/grpc/filter.go b/pkg/network/transport/object/grpc/filter.go deleted file mode 100644 index 809b4f588..000000000 --- a/pkg/network/transport/object/grpc/filter.go +++ /dev/null @@ -1,244 +0,0 @@ -package object - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/pkg/errors" -) - -type ( - filterParams struct { - sgInfoRecv storagegroup.InfoReceiver - tsPresChecker tombstonePresenceChecker - maxProcSize uint64 - storageCap uint64 - localStore localstore.Localstore - epochRecv EpochReceiver - verifier verifier.Verifier - - maxPayloadSize uint64 - } - - filterConstructor func(p *filterParams) localstore.FilterFunc - - tombstonePresenceChecker interface { - hasLocalTombstone(addr Address) (bool, error) - } - - coreTSPresChecker struct { - localStore localstore.Localstore - } -) - -const ( - ttlValue = "TTL" -) - -const ( - commonObjectFN = "OBJECTS_OVERALL" - storageGroupFN = "STORAGE_GROUP" - tombstoneOverwriteFN = "TOMBSTONE_OVERWRITE" - objSizeFN = "OBJECT_SIZE" - creationEpochFN = "CREATION_EPOCH" - objIntegrityFN = "OBJECT_INTEGRITY" - payloadSizeFN = "PAYLOAD_SIZE" -) - -var errObjectFilter = errors.New("incoming object has not passed filter") - -var mFilters = map[string]filterConstructor{ - tombstoneOverwriteFN: tombstoneOverwriteFC, - storageGroupFN: storageGroupFC, - creationEpochFN: creationEpochFC, - objIntegrityFN: objectIntegrityFC, - payloadSizeFN: payloadSizeFC, -} - -var mBasicFilters = map[string]filterConstructor{ - objSizeFN: objectSizeFC, -} - -func newIncomingObjectFilter(p *Params) (Filter, error) { - filter, err := newFilter(p, readyObjectsCheckpointFilterName, mFilters) - if err != nil { - return nil, err - } - - return filter, nil -} - -func newFilter(p *Params, name string, m map[string]filterConstructor) (Filter, error) { - filter := localstore.NewFilter(&localstore.FilterParams{ - Name: name, - FilterFunc: localstore.SkippingFilterFunc, - }) - - fp := &filterParams{ - sgInfoRecv: p.SGInfoReceiver, - tsPresChecker: &coreTSPresChecker{localStore: p.LocalStore}, - maxProcSize: p.MaxProcessingSize, - storageCap: p.StorageCapacity, - localStore: p.LocalStore, - epochRecv: p.EpochReceiver, - verifier: p.Verifier, - - maxPayloadSize: p.MaxPayloadSize, - } - - items := make([]*localstore.FilterParams, 0, len(m)) - for fName, fCons := range m { - items = append(items, &localstore.FilterParams{Name: fName, FilterFunc: fCons(fp)}) - } - - f, err := localstore.AllPassIncludingFilter(commonObjectFN, items...) 
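// AllPassIncludingFilter groups the named filters under the OBJECTS_OVERALL
// pipeline: an incoming object is accepted only when every sub-filter passes,
// and the composite is attached to the parent filter below with CodeFail as
// the on-failure outcome.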
- if err != nil { - return nil, err - } - - if err := filter.PutSubFilter(localstore.SubFilterParams{ - PriorityFlag: localstore.PriorityValue, - FilterPipeline: f, - OnFail: localstore.CodeFail, - }); err != nil { - return nil, errors.Wrapf(err, "could not put filter %s in pipeline", f.GetName()) - } - - return filter, nil -} - -func (s *coreTSPresChecker) hasLocalTombstone(addr Address) (bool, error) { - m, err := s.localStore.Meta(addr) - if err != nil { - if errors.Is(errors.Cause(err), bucket.ErrNotFound) { - return false, nil - } - - return false, err - } - - return m.Object.IsTombstone(), nil -} - -func storageGroupFC(p *filterParams) localstore.FilterFunc { - return func(ctx context.Context, meta *Meta) *localstore.FilterResult { - if sgInfo, err := meta.Object.StorageGroup(); err != nil { - return localstore.ResultPass() - } else if group := meta.Object.Group(); len(group) == 0 { - return localstore.ResultFail() - } else if realSGInfo, err := p.sgInfoRecv.GetSGInfo(ctx, meta.Object.SystemHeader.CID, group); err != nil { - return localstore.ResultWithError(localstore.CodeFail, err) - } else if sgInfo.ValidationDataSize != realSGInfo.ValidationDataSize { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ - error: errWrongSGSize, - d: sgSizeDetails(sgInfo.ValidationDataSize, realSGInfo.ValidationDataSize), - }, - ) - } else if !sgInfo.ValidationHash.Equal(realSGInfo.ValidationHash) { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ - error: errWrongSGHash, - d: sgHashDetails(sgInfo.ValidationHash, realSGInfo.ValidationHash), - }, - ) - } - - return localstore.ResultPass() - } -} - -func tombstoneOverwriteFC(p *filterParams) localstore.FilterFunc { - return func(ctx context.Context, meta *Meta) *localstore.FilterResult { - if meta.Object.IsTombstone() { - return localstore.ResultPass() - } else if hasTombstone, err := p.tsPresChecker.hasLocalTombstone(*meta.Object.Address()); err != nil { - return localstore.ResultFail() - } else if hasTombstone { - return localstore.ResultFail() - } - - return localstore.ResultPass() - } -} - -func objectSizeFC(p *filterParams) localstore.FilterFunc { - return func(ctx context.Context, meta *Meta) *localstore.FilterResult { - if need := meta.Object.SystemHeader.PayloadLength; need > p.maxProcSize { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ // // TODO: NSPCC-1048 - error: errProcPayloadSize, - d: maxProcPayloadSizeDetails(p.maxProcSize), - }, - ) - } else if ctx.Value(ttlValue).(uint32) < service.NonForwardingTTL { - if left := p.storageCap - uint64(p.localStore.Size()); need > left { - return localstore.ResultWithError( - localstore.CodeFail, - errLocalStorageOverflow, - ) - } - } - - return localstore.ResultPass() - } -} - -func payloadSizeFC(p *filterParams) localstore.FilterFunc { - return func(ctx context.Context, meta *Meta) *localstore.FilterResult { - if meta.Object.SystemHeader.PayloadLength > p.maxPayloadSize { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ // TODO: NSPCC-1048 - error: errObjectPayloadSize, - d: maxObjectPayloadSizeDetails(p.maxPayloadSize), - }, - ) - } - - return localstore.ResultPass() - } -} - -func creationEpochFC(p *filterParams) localstore.FilterFunc { - return func(_ context.Context, meta *Meta) *localstore.FilterResult { - if current := p.epochRecv.Epoch(); meta.Object.SystemHeader.CreatedAt.Epoch > current { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ // TODO: 
NSPCC-1048 - error: errObjectFromTheFuture, - d: objectCreationEpochDetails(current), - }, - ) - } - - return localstore.ResultPass() - } -} - -func objectIntegrityFC(p *filterParams) localstore.FilterFunc { - return func(ctx context.Context, meta *Meta) *localstore.FilterResult { - if err := p.verifier.Verify(ctx, meta.Object); err != nil { - return localstore.ResultWithError( - localstore.CodeFail, - &detailedError{ - error: errObjectHeadersVerification, - d: objectHeadersVerificationDetails(err), - }, - ) - } - - return localstore.ResultPass() - } -} - -func basicFilter(p *Params) (Filter, error) { - return newFilter(p, allObjectsCheckpointFilterName, mBasicFilters) -} diff --git a/pkg/network/transport/object/grpc/filter_test.go b/pkg/network/transport/object/grpc/filter_test.go deleted file mode 100644 index e3ce2781f..000000000 --- a/pkg/network/transport/object/grpc/filter_test.go +++ /dev/null @@ -1,399 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/bucket" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testFilterEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - localstore.Localstore - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } - - testFilterUnit struct { - obj *Object - exp localstore.FilterCode - } -) - -var ( - _ storagegroup.InfoReceiver = (*testFilterEntity)(nil) - _ verifier.Verifier = (*testFilterEntity)(nil) - _ EpochReceiver = (*testFilterEntity)(nil) - _ localstore.Localstore = (*testFilterEntity)(nil) - _ tombstonePresenceChecker = (*testFilterEntity)(nil) -) - -func (s *testFilterEntity) Meta(addr Address) (*Meta, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Meta), nil -} - -func (s *testFilterEntity) GetSGInfo(ctx context.Context, cid CID, group []ID) (*storagegroup.StorageGroup, error) { - if s.f != nil { - s.f(cid, group) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*storagegroup.StorageGroup), nil -} - -func (s *testFilterEntity) hasLocalTombstone(addr Address) (bool, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return false, s.err - } - return s.res.(bool), nil -} - -func (s *testFilterEntity) Size() int64 { return s.res.(int64) } - -func (s *testFilterEntity) Epoch() uint64 { return s.res.(uint64) } - -func (s *testFilterEntity) Verify(_ context.Context, obj *Object) error { - if s.f != nil { - s.f(obj) - } - return s.err -} - -func Test_creationEpochFC(t *testing.T) { - ctx := context.TODO() - localEpoch := uint64(100) - - ff := creationEpochFC(&filterParams{epochRecv: &testFilterEntity{res: localEpoch}}) - - valid := []Object{ - {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch - 1}}}, - {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch}}}, - } - - invalid := []Object{ - {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch + 1}}}, - {SystemHeader: SystemHeader{CreatedAt: CreationPoint{Epoch: localEpoch + 2}}}, - } - - testFilteringObjects(t, ctx, ff, valid, invalid, nil) -} - -func Test_objectSizeFC(t *testing.T) { - maxProcSize := uint64(100) - - t.Run("forwarding TTL", func(t *testing.T) { - var ( - ctx = context.WithValue(context.TODO(), ttlValue, uint32(service.SingleForwardingTTL)) - ff = objectSizeFC(&filterParams{maxProcSize: maxProcSize}) - ) - - valid := []Object{ - {SystemHeader: SystemHeader{PayloadLength: maxProcSize - 1}}, - {SystemHeader: SystemHeader{PayloadLength: maxProcSize}}, - } - - invalid := []Object{ - {SystemHeader: SystemHeader{PayloadLength: maxProcSize + 1}}, - {SystemHeader: SystemHeader{PayloadLength: maxProcSize + 2}}, - } - - testFilteringObjects(t, ctx, ff, valid, invalid, nil) - }) - - t.Run("non-forwarding TTL", func(t *testing.T) { - var ( - ctx = context.WithValue(context.TODO(), ttlValue, uint32(service.NonForwardingTTL-1)) - objSize = maxProcSize / 2 - ls = &testFilterEntity{res: int64(maxProcSize - objSize)} - ) - - ff := objectSizeFC(&filterParams{ - maxProcSize: maxProcSize, - storageCap: maxProcSize, - localStore: ls, - }) - - valid := []Object{{SystemHeader: SystemHeader{PayloadLength: objSize}}} - invalid := []Object{{SystemHeader: SystemHeader{PayloadLength: objSize + 1}}} - - testFilteringObjects(t, ctx, ff, valid, invalid, nil) - }) -} - -func Test_objectIntegrityFC(t *testing.T) { - var ( - ctx = context.TODO() - valid = &Object{SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}} - invalid = &Object{SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}} - ) - valid.Headers = append(valid.Headers, Header{Value: new(object.Header_PayloadChecksum)}) - - ver := new(testFilterEntity) - ver.f = func(items ...interface{}) { - if 
items[0].(*Object).SystemHeader.ID.Equal(valid.SystemHeader.ID) { - ver.err = nil - } else { - ver.err = errors.New("") - } - } - - ff := objectIntegrityFC(&filterParams{verifier: ver}) - - testFilterFunc(t, ctx, ff, testFilterUnit{obj: valid, exp: localstore.CodePass}) - testFilterFunc(t, ctx, ff, testFilterUnit{obj: invalid, exp: localstore.CodeFail}) -} - -func Test_tombstoneOverwriteFC(t *testing.T) { - var ( - obj1 = Object{ - SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, - Headers: []Header{{Value: new(object.Header_Tombstone)}}, - } - obj2 = Object{ - SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, - } - obj3 = Object{ - SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, - } - obj4 = Object{ - SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, - } - ) - - ts := new(testFilterEntity) - ts.f = func(items ...interface{}) { - addr := items[0].(Address) - if addr.ObjectID.Equal(obj2.SystemHeader.ID) { - ts.res, ts.err = nil, errors.New("") - } else if addr.ObjectID.Equal(obj3.SystemHeader.ID) { - ts.res, ts.err = true, nil - } else { - ts.res, ts.err = false, nil - } - } - - valid := []Object{obj1, obj4} - invalid := []Object{obj2, obj3} - - ff := tombstoneOverwriteFC(&filterParams{tsPresChecker: ts}) - - testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) -} - -func Test_storageGroupFC(t *testing.T) { - var ( - valid, invalid []Object - cid = testObjectAddress(t).CID - sgSize, sgHash = uint64(10), hash.Sum(testData(t, 10)) - - sg = &storagegroup.StorageGroup{ - ValidationDataSize: sgSize, - ValidationHash: sgHash, - } - - sgHeaders = []Header{ - {Value: &object.Header_StorageGroup{StorageGroup: sg}}, - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_StorageGroup}}}, - } - ) - - valid = append(valid, Object{ - SystemHeader: SystemHeader{ - CID: cid, - }, - }) - - valid = append(valid, Object{ - SystemHeader: SystemHeader{ - CID: cid, - }, - Headers: sgHeaders, - }) - - invalid = append(invalid, Object{ - SystemHeader: SystemHeader{ - CID: cid, - }, - Headers: sgHeaders[:1], - }) - - invalid = append(invalid, Object{ - SystemHeader: SystemHeader{ - CID: cid, - }, - Headers: []Header{ - { - Value: &object.Header_StorageGroup{ - StorageGroup: &storagegroup.StorageGroup{ - ValidationDataSize: sg.ValidationDataSize + 1, - }, - }, - }, - { - Value: &object.Header_Link{ - Link: &object.Link{ - Type: object.Link_StorageGroup, - }, - }, - }, - }, - }) - - invalid = append(invalid, Object{ - SystemHeader: SystemHeader{ - CID: cid, - }, - Headers: []Header{ - { - Value: &object.Header_StorageGroup{ - StorageGroup: &storagegroup.StorageGroup{ - ValidationDataSize: sg.ValidationDataSize, - ValidationHash: Hash{1, 2, 3}, - }, - }, - }, - { - Value: &object.Header_Link{ - Link: &object.Link{ - Type: object.Link_StorageGroup, - }, - }, - }, - }, - }) - - sr := &testFilterEntity{ - f: func(items ...interface{}) { - require.Equal(t, cid, items[0]) - }, - res: sg, - } - - ff := storageGroupFC(&filterParams{sgInfoRecv: sr}) - - testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) -} - -func Test_coreTSPresChecker(t *testing.T) { - addr := testObjectAddress(t) - - t.Run("local storage failure", func(t *testing.T) { - ls := &testFilterEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0]) - }, - err: errors.Wrap(bucket.ErrNotFound, "some message"), - } - - s := &coreTSPresChecker{localStore: ls} - - res, err := s.hasLocalTombstone(addr) - require.NoError(t, err) - require.False(t, res) - - 
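// hasLocalTombstone treats a wrapped bucket.ErrNotFound from the local store
// as "no tombstone yet" and reports (false, nil); any other storage error,
// such as lsErr below, is propagated to the caller unchanged.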
lsErr := errors.New("test error for local storage") - ls.err = lsErr - - res, err = s.hasLocalTombstone(addr) - require.EqualError(t, err, lsErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - m := &Meta{Object: new(Object)} - - ls := &testFilterEntity{res: m} - - s := &coreTSPresChecker{localStore: ls} - - res, err := s.hasLocalTombstone(addr) - require.NoError(t, err) - require.False(t, res) - - m.Object.AddHeader(&object.Header{Value: new(object.Header_Tombstone)}) - - res, err = s.hasLocalTombstone(addr) - require.NoError(t, err) - require.True(t, res) - }) -} - -func testFilteringObjects(t *testing.T, ctx context.Context, f localstore.FilterFunc, valid, invalid, ignored []Object) { - units := make([]testFilterUnit, 0, len(valid)+len(invalid)+len(ignored)) - - for i := range valid { - units = append(units, testFilterUnit{ - obj: &valid[i], - exp: localstore.CodePass, - }) - } - - for i := range invalid { - units = append(units, testFilterUnit{ - obj: &invalid[i], - exp: localstore.CodeFail, - }) - } - - for i := range ignored { - units = append(units, testFilterUnit{ - obj: &ignored[i], - exp: localstore.CodeIgnore, - }) - } - - testFilterFunc(t, ctx, f, units...) -} - -func testFilterFunc(t *testing.T, ctx context.Context, f localstore.FilterFunc, units ...testFilterUnit) { - for i := range units { - res := f(ctx, &Meta{Object: units[i].obj}) - require.Equal(t, units[i].exp, res.Code()) - } -} - -func Test_payloadSizeFC(t *testing.T) { - maxPayloadSize := uint64(100) - - valid := []Object{ - {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize - 1}}, - {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize}}, - } - - invalid := []Object{ - {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize + 1}}, - {SystemHeader: SystemHeader{PayloadLength: maxPayloadSize + 2}}, - } - - ff := payloadSizeFC(&filterParams{ - maxPayloadSize: maxPayloadSize, - }) - - testFilteringObjects(t, context.TODO(), ff, valid, invalid, nil) -} diff --git a/pkg/network/transport/object/grpc/get.go b/pkg/network/transport/object/grpc/get.go deleted file mode 100644 index 666721283..000000000 --- a/pkg/network/transport/object/grpc/get.go +++ /dev/null @@ -1,111 +0,0 @@ -package object - -import ( - "bytes" - "io" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - getServerWriter struct { - req *object.GetRequest - - srv object.Service_GetServer - - respPreparer responsePreparer - } -) - -const ( - maxGetPayloadSize = 3584 * 1024 // 3.5 MiB - - emSendObjectHead = "could not send object head" -) - -var _ io.Writer = (*getServerWriter)(nil) - -func (s *objectService) Get(req *object.GetRequest, server object.Service_GetServer) (err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestGet), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestGet, - e: err, - }) - }() - - var r interface{} - - if r, err = s.requestHandler.handleRequest(server.Context(), handleRequestParams{ - request: req, - executor: s, - }); err != nil { - return err - } - - obj := r.(*objectData) - - var payload []byte - payload, obj.Payload = obj.Payload, nil - - resp := makeGetHeaderResponse(obj.Object) - if err = s.respPreparer.prepareResponse(server.Context(), req, resp); err != nil { - return - } - - if err = server.Send(resp); err != nil { - return errors.Wrap(err, emSendObjectHead) - } - - _, err = io.CopyBuffer( - 
&getServerWriter{ - req: req, - srv: server, - respPreparer: s.getChunkPreparer, - }, - io.MultiReader(bytes.NewReader(payload), obj.payload), - make([]byte, maxGetPayloadSize)) - - return err -} - -func splitBytes(data []byte, maxSize int) (result [][]byte) { - l := len(data) - if l == 0 { - return nil - } - - for i := 0; i < l; i += maxSize { - last := i + maxSize - if last > l { - last = l - } - - result = append(result, data[i:last]) - } - - return -} - -func (s *getServerWriter) Write(p []byte) (int, error) { - resp := makeGetChunkResponse(p) - if err := s.respPreparer.prepareResponse(s.srv.Context(), s.req, resp); err != nil { - return 0, err - } - - if err := s.srv.Send(resp); err != nil { - return 0, err - } - - return len(p), nil -} diff --git a/pkg/network/transport/object/grpc/get_test.go b/pkg/network/transport/object/grpc/get_test.go deleted file mode 100644 index 1cea57e7e..000000000 --- a/pkg/network/transport/object/grpc/get_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testGetEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - localstore.Localstore - object.Service_GetServer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var ( - _ object.Service_GetServer = (*testGetEntity)(nil) - _ requestHandler = (*testGetEntity)(nil) - _ responsePreparer = (*testGetEntity)(nil) -) - -func (s *testGetEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testGetEntity) Context() context.Context { return context.TODO() } - -func (s *testGetEntity) Send(r *object.GetResponse) error { - if s.f != nil { - s.f(r) - } - return s.err -} - -func (s *testGetEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func Test_makeGetHeaderResponse(t *testing.T) { - obj := &Object{Payload: testData(t, 10)} - - require.Equal(t, &object.GetResponse{R: &object.GetResponse_Object{Object: obj}}, makeGetHeaderResponse(obj)) -} - -func Test_makeGetChunkResponse(t *testing.T) { - chunk := testData(t, 10) - - require.Equal(t, &object.GetResponse{R: &object.GetResponse_Chunk{Chunk: chunk}}, makeGetChunkResponse(chunk)) -} - -func Test_splitBytes(t *testing.T) { - t.Run("empty data", func(t *testing.T) { - testSplit(t, make([]byte, 0), 0) - testSplit(t, nil, 0) - }) - - t.Run("less size", func(t *testing.T) { - testSplit(t, make([]byte, 10), 20) - }) - - t.Run("equal size", func(t *testing.T) { - testSplit(t, make([]byte, 20), 20) - }) - - t.Run("oversize", func(t *testing.T) { - testSplit(t, make([]byte, 3), 17) - }) -} - -func testSplit(t *testing.T, initData []byte, maxSize int) { - res := splitBytes(initData, maxSize) - restored := make([]byte, 0, len(initData)) - for i := range res { - require.LessOrEqual(t, len(res[i]), maxSize) - restored = append(restored, res[i]...) - } - require.Len(t, restored, len(initData)) - if len(initData) > 0 { - require.Equal(t, initData, restored) - } -} - -func TestObjectService_Get(t *testing.T) { - req := &object.GetRequest{Address: testObjectAddress(t)} - - t.Run("request handler failure", func(t *testing.T) { - hErr := errors.New("test error for request handler") - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testGetEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, req, p.request) - require.Equal(t, s, p.executor) - }) - }, - err: hErr, - } - - require.EqualError(t, s.Get(req, new(testGetEntity)), hErr.Error()) - }) - - t.Run("send object head failure", func(t *testing.T) { - srvErr := errors.New("test error for get server") - - obj := &Object{ - SystemHeader: SystemHeader{ - ID: testObjectAddress(t).ObjectID, - CID: testObjectAddress(t).CID, - }, - } - - s := objectService{ - requestHandler: &testGetEntity{res: &objectData{Object: obj}}, - respPreparer: &testGetEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, makeGetHeaderResponse(obj), items[1]) - }, - res: new(object.GetResponse), - }, - - statusCalculator: newStatusCalculator(), - } - - require.EqualError(t, s.Get(req, &testGetEntity{err: srvErr}), errors.Wrap(srvErr, emSendObjectHead).Error()) - }) - - t.Run("send chunk failure", func(t *testing.T) { - srvErr := errors.New("test error for get server") - payload := testData(t, 10) - - obj := &Object{ - SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}, - Headers: []Header{{ - Value: &object.Header_UserHeader{UserHeader: &UserHeader{Key: "key", Value: "value"}}, - }}, - 
Payload: payload, - } - - headResp := makeGetHeaderResponse(&Object{ - SystemHeader: obj.SystemHeader, - Headers: obj.Headers, - }) - - chunkResp := makeGetChunkResponse(payload) - - callNum := 0 - - respPrep := new(testGetEntity) - respPrep.f = func(items ...interface{}) { - if callNum == 0 { - respPrep.res = headResp - } else { - respPrep.res = chunkResp - } - } - - s := objectService{ - requestHandler: &testGetEntity{res: &objectData{Object: obj}}, - respPreparer: respPrep, - - getChunkPreparer: respPrep, - - statusCalculator: newStatusCalculator(), - } - - srv := new(testGetEntity) - srv.f = func(items ...interface{}) { - t.Run("correct get server params", func(t *testing.T) { - if callNum == 0 { - require.Equal(t, headResp, items[0]) - } else { - require.Equal(t, chunkResp, items[0]) - srv.err = srvErr - } - callNum++ - }) - } - - require.EqualError(t, s.Get(req, srv), srvErr.Error()) - }) - - t.Run("send success", func(t *testing.T) { - s := objectService{ - requestHandler: &testGetEntity{res: &objectData{ - Object: new(Object), - payload: new(emptyReader), - }}, - respPreparer: &testGetEntity{ - res: new(object.GetResponse), - }, - - statusCalculator: newStatusCalculator(), - } - - require.NoError(t, s.Get(req, new(testGetEntity))) - }) -} diff --git a/pkg/network/transport/object/grpc/handler.go b/pkg/network/transport/object/grpc/handler.go deleted file mode 100644 index 9d704239f..000000000 --- a/pkg/network/transport/object/grpc/handler.go +++ /dev/null @@ -1,109 +0,0 @@ -package object - -import ( - "context" - "fmt" - - "github.com/nspcc-dev/neofs-api-go/object" -) - -type ( - // requestHandler is an interface of Object service cross-request handler. - requestHandler interface { - // Handles request by parameter-bound logic. - handleRequest(context.Context, handleRequestParams) (interface{}, error) - } - - handleRequestParams struct { - // Processing request. - request serviceRequest - - // Processing request executor. - executor requestHandleExecutor - } - - // coreRequestHandler is an implementation of requestHandler interface used in Object service production. - coreRequestHandler struct { - // Request preprocessor. - preProc requestPreProcessor - - // Request postprocessor. - postProc requestPostProcessor - } - - // requestHandleExecutor is an interface of universal Object operation executor. - requestHandleExecutor interface { - // Executes actions parameter-bound logic and returns execution result. - executeRequest(context.Context, serviceRequest) (interface{}, error) - } -) - -var _ requestHandler = (*coreRequestHandler)(nil) - -// requestHandler method implementation. -// -// If internal requestPreProcessor returns non-nil error for request argument, it returns. -// Otherwise, requestHandleExecutor argument performs actions. Received error is passed to requestPoistProcessor routine. -// Returned results of requestHandleExecutor are return. -func (s *coreRequestHandler) handleRequest(ctx context.Context, p handleRequestParams) (interface{}, error) { - if err := s.preProc.preProcess(ctx, p.request); err != nil { - return nil, err - } - - res, err := p.executor.executeRequest(ctx, p.request) - - go s.postProc.postProcess(ctx, p.request, err) - - return res, err -} - -// TODO: separate executors for each operation -// requestHandleExecutor method implementation. 
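coreRequestHandler above runs the flow described in its doc comment: pre-process the request, execute it, then hand the outcome to the post-processor asynchronously, while executeRequest (below) is the per-operation dispatcher it typically drives. A condensed sketch of that handler shape follows, assuming nothing beyond the standard library; handlersketch, request, handler and the other names are invented for illustration, not taken from the codebase.

package handlersketch

import "context"

// request stands in for serviceRequest; every name in this sketch is invented.
type request interface{}

type preProcessor interface {
	preProcess(context.Context, request) error
}

type postProcessor interface {
	postProcess(context.Context, request, error)
}

type executor interface {
	execute(context.Context, request) (interface{}, error)
}

type handler struct {
	pre  preProcessor
	post postProcessor
}

// handle mirrors coreRequestHandler.handleRequest: a pre-processing failure
// short-circuits the call; otherwise the executor runs and its error is
// reported to the post-processor on a separate goroutine while the result is
// returned to the caller.
func (h *handler) handle(ctx context.Context, exec executor, req request) (interface{}, error) {
	if err := h.pre.preProcess(ctx, req); err != nil {
		return nil, err
	}

	res, err := exec.execute(ctx, req)

	go h.post.postProcess(ctx, req, err)

	return res, err
}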
-func (s *objectService) executeRequest(ctx context.Context, req serviceRequest) (interface{}, error) { - switch r := req.(type) { - case *object.SearchRequest: - return s.objSearcher.searchObjects(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pSrch.Timeout, - }) - case *putRequest: - addr, err := s.objStorer.putObject(ctx, r) - if err != nil { - return nil, err - } - - resp := makePutResponse(*addr) - if err := s.respPreparer.prepareResponse(ctx, r.PutRequest, resp); err != nil { - return nil, err - } - - return nil, r.srv.SendAndClose(resp) - case *object.DeleteRequest: - return nil, s.objRemover.delete(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pDel.Timeout, - }) - case *object.GetRequest: - return s.objRecv.getObject(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pGet.Timeout, - }) - case *object.HeadRequest: - return s.objRecv.getObject(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pHead.Timeout, - }) - case *GetRangeRequest: - return s.payloadRngRecv.getRangeData(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pRng.Timeout, - }) - case *object.GetRangeHashRequest: - return s.rngRecv.getRange(ctx, &transportRequest{ - serviceRequest: r, - timeout: s.pRng.Timeout, - }) - default: - panic(fmt.Sprintf(pmWrongRequestType, r)) - } -} diff --git a/pkg/network/transport/object/grpc/handler_test.go b/pkg/network/transport/object/grpc/handler_test.go deleted file mode 100644 index ce41776b0..000000000 --- a/pkg/network/transport/object/grpc/handler_test.go +++ /dev/null @@ -1,442 +0,0 @@ -package object - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testHandlerEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - object.Service_PutServer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var ( - _ requestPreProcessor = (*testHandlerEntity)(nil) - _ requestPostProcessor = (*testHandlerEntity)(nil) - _ requestHandleExecutor = (*testHandlerEntity)(nil) - _ objectSearcher = (*testHandlerEntity)(nil) - _ objectStorer = (*testHandlerEntity)(nil) - _ object.Service_PutServer = (*testHandlerEntity)(nil) - _ objectRemover = (*testHandlerEntity)(nil) - _ objectReceiver = (*testHandlerEntity)(nil) - _ objectRangeReceiver = (*testHandlerEntity)(nil) - _ payloadRangeReceiver = (*testHandlerEntity)(nil) - _ responsePreparer = (*testHandlerEntity)(nil) -) - -func (s *testHandlerEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testHandlerEntity) getRangeData(_ context.Context, info transport.RangeInfo, l ...Object) (io.Reader, error) { - if s.f != nil { - s.f(info, l) - } - if s.err != nil { - return nil, s.err - } - return s.res.(io.Reader), nil -} - -func (s *testHandlerEntity) getRange(_ context.Context, r rangeTool) (interface{}, error) { - if s.f != nil { - s.f(r) - } - return s.res, s.err -} - -func (s *testHandlerEntity) getObject(_ context.Context, r ...transport.GetInfo) (*objectData, error) { - if s.f != nil { - s.f(r) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*objectData), nil -} - -func (s *testHandlerEntity) delete(_ context.Context, r deleteInfo) error { - if s.f != nil { - s.f(r) - } - return s.err -} - -func (s *testHandlerEntity) SendAndClose(r *object.PutResponse) error { - if s.f != nil { - s.f(r) - } - return s.err -} - -func (s *testHandlerEntity) putObject(_ context.Context, r transport.PutInfo) (*Address, error) { - if s.f != nil { - s.f(r) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Address), nil -} - -func (s *testHandlerEntity) searchObjects(_ context.Context, r transport.SearchInfo) ([]Address, error) { - if s.f != nil { - s.f(r) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Address), nil -} - -func (s *testHandlerEntity) preProcess(_ context.Context, req serviceRequest) error { - if s.f != nil { - s.f(req) - } - return s.err -} - -func (s *testHandlerEntity) postProcess(_ context.Context, req serviceRequest, e error) { - if s.f != nil { - s.f(req, e) - } -} - -func (s *testHandlerEntity) executeRequest(_ context.Context, req serviceRequest) (interface{}, error) { - if s.f != nil { - s.f(req) - } - return s.res, s.err -} - -func TestCoreRequestHandler_HandleRequest(t *testing.T) { - ctx := context.TODO() - - // create custom serviceRequest - req := new(testHandlerEntity) - - t.Run("pre processor error", func(t *testing.T) { - // create custom error - pErr := errors.New("test error for pre-processor") - - s := &coreRequestHandler{ - preProc: &testHandlerEntity{ - f: func(items ...interface{}) { - t.Run("correct pre processor params", func(t *testing.T) { - require.Equal(t, req, items[0].(serviceRequest)) - }) - }, - err: pErr, // force requestPreProcessor to return pErr - }, - } - - res, err := s.handleRequest(ctx, handleRequestParams{request: req}) - - // ascertain that error returns as expected - require.EqualError(t, err, pErr.Error()) - - // ascertain that nil result returns as expected - require.Nil(t, res) - }) - - t.Run("correct behavior", func(t *testing.T) { - // create custom error - eErr := errors.New("test error for request executor") - - // create custom result - eRes := testData(t, 10) - - // create channel for requestPostProcessor - ch := make(chan 
struct{}) - - executor := &testHandlerEntity{ - f: func(items ...interface{}) { - t.Run("correct executor params", func(t *testing.T) { - require.Equal(t, req, items[0].(serviceRequest)) - }) - }, - res: eRes, // force requestHandleExecutor to return created result - err: eErr, // force requestHandleExecutor to return created error - } - - s := &coreRequestHandler{ - preProc: &testHandlerEntity{ - err: nil, // force requestPreProcessor to return nil error - }, - postProc: &testHandlerEntity{ - f: func(items ...interface{}) { - t.Run("correct pre processor params", func(t *testing.T) { - require.Equal(t, req, items[0].(serviceRequest)) - require.Equal(t, eErr, items[1].(error)) - }) - ch <- struct{}{} // write to channel - }, - }, - } - - res, err := s.handleRequest(ctx, handleRequestParams{ - request: req, - executor: executor, - }) - - // ascertain that results return as expected - require.EqualError(t, err, eErr.Error()) - require.Equal(t, eRes, res) - - <-ch // read from channel - }) -} - -func Test_objectService_executeRequest(t *testing.T) { - ctx := context.TODO() - - t.Run("invalid request", func(t *testing.T) { - req := new(testHandlerEntity) - require.PanicsWithValue(t, fmt.Sprintf(pmWrongRequestType, req), func() { - _, _ = new(objectService).executeRequest(ctx, req) - }) - }) - - t.Run("search request", func(t *testing.T) { - var ( - timeout = 3 * time.Second - req = &object.SearchRequest{ContainerID: testObjectAddress(t).CID} - addrList = testAddrList(t, 3) - ) - - s := &objectService{ - pSrch: OperationParams{Timeout: timeout}, - objSearcher: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, &transportRequest{ - serviceRequest: req, - timeout: timeout, - }, items[0]) - }, - res: addrList, - }, - } - - res, err := s.executeRequest(ctx, req) - require.NoError(t, err) - require.Equal(t, addrList, res) - }) - - t.Run("put request", func(t *testing.T) { - t.Run("storer error", func(t *testing.T) { - sErr := errors.New("test error for object storer") - - req := &putRequest{ - PutRequest: new(object.PutRequest), - srv: new(testHandlerEntity), - timeout: 3 * time.Second, - } - - s := &objectService{ - objStorer: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - }, - err: sErr, - }, - respPreparer: &testHandlerEntity{ - res: serviceResponse(nil), - }, - } - - _, err := s.executeRequest(ctx, req) - require.EqualError(t, err, sErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - addr := testObjectAddress(t) - - srvErr := errors.New("test error for stream server") - - resp := &object.PutResponse{Address: addr} - - pReq := new(object.PutRequest) - - s := &objectService{ - objStorer: &testHandlerEntity{ - res: &addr, - }, - respPreparer: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, pReq, items[0]) - require.Equal(t, makePutResponse(addr), items[1]) - }, - res: resp, - }, - } - - req := &putRequest{ - PutRequest: pReq, - srv: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, resp, items[0]) - }, - err: srvErr, - }, - } - - res, err := s.executeRequest(ctx, req) - require.EqualError(t, err, srvErr.Error()) - require.Nil(t, res) - }) - }) - - t.Run("delete request", func(t *testing.T) { - var ( - timeout = 3 * time.Second - dErr = errors.New("test error for object remover") - req = &object.DeleteRequest{Address: testObjectAddress(t)} - ) - - s := &objectService{ - objRemover: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, &transportRequest{ 
- serviceRequest: req, - timeout: timeout, - }, items[0]) - }, - err: dErr, - }, - pDel: OperationParams{Timeout: timeout}, - } - - res, err := s.executeRequest(ctx, req) - require.EqualError(t, err, dErr.Error()) - require.Nil(t, res) - }) - - t.Run("get request", func(t *testing.T) { - var ( - timeout = 3 * time.Second - obj = &objectData{Object: &Object{Payload: testData(t, 10)}} - req = &object.GetRequest{Address: testObjectAddress(t)} - ) - - s := &objectService{ - objRecv: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, []transport.GetInfo{&transportRequest{ - serviceRequest: req, - timeout: timeout, - }}, items[0]) - }, - res: obj, - }, - pGet: OperationParams{Timeout: timeout}, - } - - res, err := s.executeRequest(ctx, req) - require.NoError(t, err) - require.Equal(t, obj, res) - }) - - t.Run("head request", func(t *testing.T) { - var ( - timeout = 3 * time.Second - hErr = errors.New("test error for head receiver") - req = &object.HeadRequest{Address: testObjectAddress(t)} - ) - - s := &objectService{ - objRecv: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, []transport.GetInfo{&transportRequest{ - serviceRequest: req, - timeout: timeout, - }}, items[0]) - }, - err: hErr, - }, - pHead: OperationParams{Timeout: timeout}, - } - - _, err := s.executeRequest(ctx, req) - require.EqualError(t, err, hErr.Error()) - }) - - t.Run("range requests", func(t *testing.T) { - t.Run("data", func(t *testing.T) { - var ( - timeout = 3 * time.Second - rData = testData(t, 10) - req = &GetRangeRequest{Address: testObjectAddress(t)} - ) - - s := &objectService{ - payloadRngRecv: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, &transportRequest{ - serviceRequest: req, - timeout: timeout, - }, items[0]) - require.Empty(t, items[1]) - }, - res: bytes.NewReader(rData), - }, - pRng: OperationParams{Timeout: timeout}, - } - - res, err := s.executeRequest(ctx, req) - require.NoError(t, err) - d, err := ioutil.ReadAll(res.(io.Reader)) - require.NoError(t, err) - require.Equal(t, rData, d) - }) - - t.Run("hashes", func(t *testing.T) { - var ( - timeout = 3 * time.Second - rErr = errors.New("test error for range receiver") - req = &object.GetRangeHashRequest{Address: testObjectAddress(t)} - ) - - s := &objectService{ - rngRecv: &testHandlerEntity{ - f: func(items ...interface{}) { - require.Equal(t, &transportRequest{ - serviceRequest: req, - timeout: timeout, - }, items[0]) - }, - err: rErr, - }, - pRng: OperationParams{Timeout: timeout}, - } - - _, err := s.executeRequest(ctx, req) - require.EqualError(t, err, rErr.Error()) - }) - }) -} diff --git a/pkg/network/transport/object/grpc/head.go b/pkg/network/transport/object/grpc/head.go deleted file mode 100644 index c5aba3269..000000000 --- a/pkg/network/transport/object/grpc/head.go +++ /dev/null @@ -1,639 +0,0 @@ -package object - -import ( - "context" - "fmt" - "io" - "sync" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - _range "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc/range" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/panjf2000/ants/v2" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - objectData struct { - *Object - payload io.Reader - } - - objectReceiver interface { - 
getObject(context.Context, ...transport.GetInfo) (*objectData, error) - } - - rangeDataReceiver interface { - recvData(context.Context, transport.RangeInfo, io.Writer) error - } - - rangeReaderAccumulator interface { - responseItemHandler - rangeData() io.Reader - } - - rangeRdrAccum struct { - *sync.Once - r io.Reader - } - - straightRangeDataReceiver struct { - executor operationExecutor - } - - coreObjectReceiver struct { - straightObjRecv objectReceiver - childLister objectChildrenLister - ancestralRecv ancestralObjectsReceiver - - log *zap.Logger - } - - straightObjectReceiver struct { - executor operationExecutor - } - - objectRewinder interface { - rewind(context.Context, ...Object) (*Object, error) - } - - payloadPartReceiver interface { - recvPayload(context.Context, []transport.RangeInfo) (io.Reader, error) - } - - corePayloadPartReceiver struct { - rDataRecv rangeDataReceiver - windowController slidingWindowController - } - - slidingWindowController interface { - newWindow() (WorkerPool, error) - } - - simpleWindowController struct { - windowSize int - } - - coreObjectRewinder struct { - transformer transformer.ObjectRestorer - } - - objectAccumulator interface { - responseItemHandler - object() *Object - } - - coreObjAccum struct { - *sync.Once - obj *Object - } - - rawGetInfo struct { - *rawAddrInfo - } - - rawHeadInfo struct { - rawGetInfo - fullHeaders bool - } - - childrenReceiver interface { - getChildren(context.Context, Address, []ID) ([]Object, error) - } - - coreChildrenReceiver struct { - coreObjRecv objectReceiver - timeout time.Duration - } - - payloadRangeReceiver interface { - getRangeData(context.Context, transport.RangeInfo, ...Object) (io.Reader, error) - } - - corePayloadRangeReceiver struct { - chopTable _range.ChopperTable - relRecv _range.RelativeReceiver - payloadRecv payloadPartReceiver - - // Set of errors that won't be converted to errPayloadRangeNotFound - mErr map[error]struct{} - - log *zap.Logger - } - - ancestralObjectsReceiver interface { - getFromChildren(context.Context, Address, []ID, bool) (*objectData, error) - } - - coreAncestralReceiver struct { - childrenRecv childrenReceiver - objRewinder objectRewinder - pRangeRecv payloadRangeReceiver - timeout time.Duration - } - - emptyReader struct{} -) - -const emHeadRecvFail = "could not receive %d of %d object head" - -var ( - childrenNotFound = errors.New("could not find child objects") - errNonAssembly = errors.New("node is not capable to assemble the object") -) - -var ( - _ objectReceiver = (*straightObjectReceiver)(nil) - _ objectReceiver = (*coreObjectReceiver)(nil) - _ objectRewinder = (*coreObjectRewinder)(nil) - _ objectAccumulator = (*coreObjAccum)(nil) - _ transport.HeadInfo = (*transportRequest)(nil) - _ transport.HeadInfo = (*rawHeadInfo)(nil) - _ transport.GetInfo = (*transportRequest)(nil) - _ transport.GetInfo = (*rawGetInfo)(nil) - - _ payloadPartReceiver = (*corePayloadPartReceiver)(nil) - - _ ancestralObjectsReceiver = (*coreAncestralReceiver)(nil) - - _ childrenReceiver = (*coreChildrenReceiver)(nil) - - _ payloadRangeReceiver = (*corePayloadRangeReceiver)(nil) - - _ rangeDataReceiver = (*straightRangeDataReceiver)(nil) - - _ slidingWindowController = (*simpleWindowController)(nil) - - _ io.Reader = (*emptyReader)(nil) - - _ rangeReaderAccumulator = (*rangeRdrAccum)(nil) -) - -func (s *objectService) Head(ctx context.Context, req *object.HeadRequest) (res *object.HeadResponse, err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - 
zap.Stringer("request", object.RequestHead), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestHead, - e: err, - }) - }() - - var r interface{} - - if r, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ - request: req, - executor: s, - }); err != nil { - return - } - - obj := r.(*objectData).Object - if !req.FullHeaders { - obj.Headers = nil - } - - res = makeHeadResponse(obj) - err = s.respPreparer.prepareResponse(ctx, req, res) - - return res, err -} - -func (s *coreObjectReceiver) getObject(ctx context.Context, info ...transport.GetInfo) (*objectData, error) { - var ( - childCount int - children []ID - ) - - obj, err := s.straightObjRecv.getObject(ctx, s.sendingRequest(info[0])) - - if info[0].GetRaw() { - return obj, err - } else if err == nil { - children = obj.Links(object.Link_Child) - if childCount = len(children); childCount <= 0 { - return obj, nil - } - } - - if s.ancestralRecv == nil { - return nil, errNonAssembly - } - - ctx = contextWithValues(ctx, - transformer.PublicSessionToken, info[0].GetSessionToken(), - storagegroup.BearerToken, info[0].GetBearerToken(), - storagegroup.ExtendedHeaders, info[0].ExtendedHeaders(), - ) - - if childCount <= 0 { - if children = s.childLister.children(ctx, info[0].GetAddress()); len(children) == 0 { - return nil, childrenNotFound - } - } - - res, err := s.ancestralRecv.getFromChildren(ctx, info[0].GetAddress(), children, info[0].Type() == object.RequestHead) - if err != nil { - s.log.Error("could not get object from children", - zap.String("error", err.Error()), - ) - - return nil, errIncompleteOperation - } - - return res, nil -} - -func (s *coreObjectReceiver) sendingRequest(src transport.GetInfo) transport.GetInfo { - if s.ancestralRecv == nil || src.GetRaw() { - return src - } - - getInfo := *newRawGetInfo() - getInfo.setTimeout(src.GetTimeout()) - getInfo.setAddress(src.GetAddress()) - getInfo.setRaw(true) - getInfo.setSessionToken(src.GetSessionToken()) - getInfo.setBearerToken(src.GetBearerToken()) - getInfo.setExtendedHeaders(src.ExtendedHeaders()) - getInfo.setTTL( - maxu32( - src.GetTTL(), - service.NonForwardingTTL, - ), - ) - - if src.Type() == object.RequestHead { - headInfo := newRawHeadInfo() - headInfo.setGetInfo(getInfo) - headInfo.setFullHeaders(true) - - return headInfo - } - - return getInfo -} - -func (s *coreAncestralReceiver) getFromChildren(ctx context.Context, addr Address, children []ID, head bool) (*objectData, error) { - var ( - err error - childObjs []Object - res = new(objectData) - ) - - if childObjs, err = s.childrenRecv.getChildren(ctx, addr, children); err != nil { - return nil, err - } else if res.Object, err = s.objRewinder.rewind(ctx, childObjs...); err != nil { - return nil, err - } - - if head { - return res, nil - } - - rngInfo := newRawRangeInfo() - rngInfo.setTTL(service.NonForwardingTTL) - rngInfo.setTimeout(s.timeout) - rngInfo.setAddress(addr) - rngInfo.setSessionToken(tokenFromContext(ctx)) - rngInfo.setBearerToken(bearerFromContext(ctx)) - rngInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) - rngInfo.setRange(Range{ - Length: res.SystemHeader.PayloadLength, - }) - - res.payload, err = s.pRangeRecv.getRangeData(ctx, rngInfo, childObjs...) 
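// The reassembled header is returned right away, while the payload is exposed
// as a lazy reader produced by a range request spanning the restored object's
// full payload length; the bytes themselves are not buffered at this point.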
- - return res, err -} - -func (s *corePayloadRangeReceiver) getRangeData(ctx context.Context, info transport.RangeInfo, selection ...Object) (res io.Reader, err error) { - defer func() { - if err != nil { - if _, ok := s.mErr[errors.Cause(err)]; !ok { - s.log.Error("get payload range data failure", - zap.String("error", err.Error()), - ) - - err = errPayloadRangeNotFound - } - } - }() - - var ( - chopper RangeChopper - addr = info.GetAddress() - ) - - chopper, err = s.chopTable.GetChopper(addr, _range.RCCharybdis) - if err != nil || !chopper.Closed() { - if len(selection) == 0 { - if chopper, err = s.chopTable.GetChopper(addr, _range.RCScylla); err != nil { - if chopper, err = _range.NewScylla(&_range.ChopperParams{ - RelativeReceiver: s.relRecv, - Addr: addr, - }); err != nil { - return - } - } - } else { - rs := make([]RangeDescriptor, 0, len(selection)) - for i := range selection { - rs = append(rs, RangeDescriptor{ - Size: int64(selection[i].SystemHeader.PayloadLength), - Addr: *selection[i].Address(), - - LeftBound: i == 0, - RightBound: i == len(selection)-1, - }) - } - - if chopper, err = _range.NewCharybdis(&_range.CharybdisParams{ - Addr: addr, - ReadySelection: rs, - }); err != nil { - return - } - } - } - - _ = s.chopTable.PutChopper(addr, chopper) - - r := info.GetRange() - - ctx = contextWithValues(ctx, - transformer.PublicSessionToken, info.GetSessionToken(), - storagegroup.BearerToken, info.GetBearerToken(), - storagegroup.ExtendedHeaders, info.ExtendedHeaders(), - ) - - var rList []RangeDescriptor - - if rList, err = chopper.Chop(ctx, int64(r.Length), int64(r.Offset), true); err != nil { - return - } - - return s.payloadRecv.recvPayload(ctx, newRangeInfoList(info, rList)) -} - -func newRangeInfoList(src transport.RangeInfo, rList []RangeDescriptor) []transport.RangeInfo { - var infoList []transport.RangeInfo - if l := len(rList); l == 1 && src.GetAddress().Equal(&rList[0].Addr) { - infoList = []transport.RangeInfo{src} - } else { - infoList = make([]transport.RangeInfo, 0, l) - for i := range rList { - rngInfo := newRawRangeInfo() - - rngInfo.setTTL(src.GetTTL()) - rngInfo.setTimeout(src.GetTimeout()) - rngInfo.setAddress(rList[i].Addr) - rngInfo.setSessionToken(src.GetSessionToken()) - rngInfo.setBearerToken(src.GetBearerToken()) - rngInfo.setExtendedHeaders(src.ExtendedHeaders()) - rngInfo.setRange(Range{ - Offset: uint64(rList[i].Offset), - Length: uint64(rList[i].Size), - }) - - infoList = append(infoList, rngInfo) - } - } - - return infoList -} - -func (s *corePayloadPartReceiver) recvPayload(ctx context.Context, rList []transport.RangeInfo) (io.Reader, error) { - pool, err := s.windowController.newWindow() - if err != nil { - return nil, err - } - - var ( - readers = make([]io.Reader, 0, len(rList)) - writers = make([]*io.PipeWriter, 0, len(rList)) - ) - - for range rList { - r, w := io.Pipe() - readers = append(readers, r) - writers = append(writers, w) - } - - ctx, cancel := context.WithCancel(ctx) - - go func() { - for i := range rList { - select { - case <-ctx.Done(): - return - default: - } - - rd, w := rList[i], writers[i] - - if err := pool.Submit(func() { - err := s.rDataRecv.recvData(ctx, rd, w) - if err != nil { - cancel() - } - _ = w.CloseWithError(err) - }); err != nil { - _ = w.CloseWithError(err) - - cancel() - - break - } - } - }() - - return io.MultiReader(readers...), nil -} - -func (s *simpleWindowController) newWindow() (WorkerPool, error) { return ants.NewPool(s.windowSize) } - -func (s *straightRangeDataReceiver) recvData(ctx context.Context, 
info transport.RangeInfo, w io.Writer) error { - rAccum := newRangeReaderAccumulator() - err := s.executor.executeOperation(ctx, info, rAccum) - - if err == nil { - _, err = io.Copy(w, rAccum.rangeData()) - } - - return err -} - -func maxu32(a, b uint32) uint32 { - if a > b { - return a - } - - return b -} - -func (s *straightObjectReceiver) getObject(ctx context.Context, info ...transport.GetInfo) (*objectData, error) { - accum := newObjectAccumulator() - if err := s.executor.executeOperation(ctx, info[0], accum); err != nil { - return nil, err - } - - return &objectData{ - Object: accum.object(), - payload: new(emptyReader), - }, nil -} - -func (s *coreChildrenReceiver) getChildren(ctx context.Context, parent Address, children []ID) ([]Object, error) { - objList := make([]Object, 0, len(children)) - - headInfo := newRawHeadInfo() - headInfo.setTTL(service.NonForwardingTTL) - headInfo.setTimeout(s.timeout) - headInfo.setFullHeaders(true) - headInfo.setSessionToken(tokenFromContext(ctx)) - headInfo.setBearerToken(bearerFromContext(ctx)) - headInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) - - for i := range children { - headInfo.setAddress(Address{ - ObjectID: children[i], - CID: parent.CID, - }) - - obj, err := s.coreObjRecv.getObject(ctx, headInfo) - if err != nil { - return nil, errors.Errorf(emHeadRecvFail, i+1, len(children)) - } - - objList = append(objList, *obj.Object) - } - - return transformer.GetChain(objList...) -} - -func tokenFromContext(ctx context.Context) service.SessionToken { - if v, ok := ctx.Value(transformer.PublicSessionToken).(service.SessionToken); ok { - return v - } - - return nil -} - -func bearerFromContext(ctx context.Context) service.BearerToken { - if v, ok := ctx.Value(storagegroup.BearerToken).(service.BearerToken); ok { - return v - } - - return nil -} - -func extendedHeadersFromContext(ctx context.Context) []service.ExtendedHeader { - if v, ok := ctx.Value(storagegroup.ExtendedHeaders).([]service.ExtendedHeader); ok { - return v - } - - return nil -} - -func (s *coreObjectRewinder) rewind(ctx context.Context, objs ...Object) (*Object, error) { - objList, err := s.transformer.Restore(ctx, objs...) 
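// Restore folds the child objects back into their ancestor chain; the first
// element of the restored list is treated as the original object and is what
// rewind hands back after the error check below.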
- if err != nil { - return nil, err - } - - return &objList[0], nil -} - -func (s *coreObjAccum) handleItem(v interface{}) { s.Do(func() { s.obj = v.(*Object) }) } - -func (s *coreObjAccum) object() *Object { return s.obj } - -func newObjectAccumulator() objectAccumulator { return &coreObjAccum{Once: new(sync.Once)} } - -func (s *rawGetInfo) getAddrInfo() *rawAddrInfo { - return s.rawAddrInfo -} - -func (s *rawGetInfo) setAddrInfo(v *rawAddrInfo) { - s.rawAddrInfo = v - s.setType(object.RequestGet) -} - -func newRawGetInfo() *rawGetInfo { - res := new(rawGetInfo) - - res.setAddrInfo(newRawAddressInfo()) - - return res -} - -func (s rawHeadInfo) GetFullHeaders() bool { - return s.fullHeaders -} - -func (s *rawHeadInfo) setFullHeaders(v bool) { - s.fullHeaders = v -} - -func (s rawHeadInfo) getGetInfo() rawGetInfo { - return s.rawGetInfo -} - -func (s *rawHeadInfo) setGetInfo(v rawGetInfo) { - s.rawGetInfo = v - s.setType(object.RequestHead) -} - -func newRawHeadInfo() *rawHeadInfo { - res := new(rawHeadInfo) - - res.setGetInfo(*newRawGetInfo()) - - return res -} - -func (s *transportRequest) GetAddress() Address { - switch t := s.serviceRequest.(type) { - case *object.HeadRequest: - return t.Address - case *GetRangeRequest: - return t.Address - case *object.GetRangeHashRequest: - return t.Address - case *object.DeleteRequest: - return t.Address - case *object.GetRequest: - return t.Address - default: - panic(fmt.Sprintf(pmWrongRequestType, t)) - } -} - -func (s *transportRequest) GetFullHeaders() bool { - return s.serviceRequest.(*object.HeadRequest).GetFullHeaders() -} - -func (s *transportRequest) Raw() bool { - return s.serviceRequest.GetRaw() -} - -func (s *emptyReader) Read([]byte) (int, error) { return 0, io.EOF } - -func newRangeReaderAccumulator() rangeReaderAccumulator { return &rangeRdrAccum{Once: new(sync.Once)} } - -func (s *rangeRdrAccum) rangeData() io.Reader { return s.r } - -func (s *rangeRdrAccum) handleItem(r interface{}) { s.Do(func() { s.r = r.(io.Reader) }) } diff --git a/pkg/network/transport/object/grpc/head_test.go b/pkg/network/transport/object/grpc/head_test.go deleted file mode 100644 index 50da24341..000000000 --- a/pkg/network/transport/object/grpc/head_test.go +++ /dev/null @@ -1,595 +0,0 @@ -package object - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testHeadEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - transformer.ObjectRestorer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var ( - _ ancestralObjectsReceiver = (*testHeadEntity)(nil) - _ objectChildrenLister = (*testHeadEntity)(nil) - _ objectReceiver = (*testHeadEntity)(nil) - _ requestHandler = (*testHeadEntity)(nil) - _ operationExecutor = (*testHeadEntity)(nil) - _ objectRewinder = (*testHeadEntity)(nil) - _ transformer.ObjectRestorer = (*testHeadEntity)(nil) - _ responsePreparer = (*testHeadEntity)(nil) -) - -func (s *testHeadEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testHeadEntity) getFromChildren(ctx context.Context, addr Address, ids []ID, h bool) (*objectData, error) { - if s.f != nil { - s.f(addr, ids, h, ctx) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*objectData), nil -} - -func (s *testHeadEntity) Restore(_ context.Context, objs ...Object) ([]Object, error) { - if s.f != nil { - s.f(objs) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Object), nil -} - -func (s *testHeadEntity) rewind(ctx context.Context, objs ...Object) (*Object, error) { - if s.f != nil { - s.f(objs) - } - return s.res.(*Object), s.err -} - -func (s *testHeadEntity) executeOperation(_ context.Context, i transport.MetaInfo, h responseItemHandler) error { - if s.f != nil { - s.f(i, h) - } - return s.err -} - -func (s *testHeadEntity) children(ctx context.Context, addr Address) []ID { - if s.f != nil { - s.f(addr, ctx) - } - return s.res.([]ID) -} - -func (s *testHeadEntity) getObject(_ context.Context, p ...transport.GetInfo) (*objectData, error) { - if s.f != nil { - s.f(p) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*objectData), nil -} - -func (s *testHeadEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func Test_transportRequest_HeadInfo(t *testing.T) { - t.Run("address", func(t *testing.T) { - t.Run("valid request", func(t *testing.T) { - addr := testObjectAddress(t) - - reqs := []transportRequest{ - {serviceRequest: &object.HeadRequest{Address: addr}}, - {serviceRequest: &object.GetRequest{Address: addr}}, - {serviceRequest: &GetRangeRequest{Address: addr}}, - {serviceRequest: &object.GetRangeHashRequest{Address: addr}}, - {serviceRequest: &object.DeleteRequest{Address: addr}}, - } - - for i := range reqs { - require.Equal(t, addr, reqs[i].GetAddress()) - } - }) - - t.Run("unknown request", func(t *testing.T) { - req := new(object.SearchRequest) - - r := &transportRequest{ - serviceRequest: req, - } - - require.PanicsWithValue(t, fmt.Sprintf(pmWrongRequestType, req), func() { - _ = r.GetAddress() - }) - }) - }) - - t.Run("full headers", func(t *testing.T) { - r := &transportRequest{ - serviceRequest: &object.HeadRequest{ - FullHeaders: true, - }, - } - - require.True(t, r.GetFullHeaders()) - }) - - t.Run("raw", func(t *testing.T) { - hReq := new(object.HeadRequest) - hReq.SetRaw(true) - - r := &transportRequest{ - serviceRequest: hReq, - } - require.True(t, r.Raw()) - - hReq.SetRaw(false) - require.False(t, r.Raw()) - }) -} - -func Test_rawHeadInfo(t *testing.T) { - t.Run("address", func(t *testing.T) { - addr := testObjectAddress(t) - - r := newRawHeadInfo() - r.setAddress(addr) - - require.Equal(t, addr, r.GetAddress()) - }) - - t.Run("full headers", func(t *testing.T) { - r := newRawHeadInfo() - r.setFullHeaders(true) - - require.True(t, r.GetFullHeaders()) - }) -} - -func Test_coreObjAccum(t *testing.T) { - t.Run("new", func(t 
*testing.T) { - s := newObjectAccumulator() - v := s.(*coreObjAccum) - require.Nil(t, v.obj) - require.NotNil(t, v.Once) - }) - - t.Run("handle/object", func(t *testing.T) { - obj1 := new(Object) - - s := newObjectAccumulator() - - // add first object - s.handleItem(obj1) - - // ascertain that the object was added - require.Equal(t, obj1, s.object()) - - obj2 := new(Object) - - // add second object - s.handleItem(obj2) - - // ascertain that second object was ignored - require.Equal(t, obj1, s.object()) - }) -} - -func Test_objectService_Head(t *testing.T) { - ctx := context.TODO() - - t.Run("request handler error", func(t *testing.T) { - // create custom error for test - rhErr := errors.New("test error for request handler") - - // create custom request for test - req := new(object.HeadRequest) - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, s, p.executor) - require.Equal(t, req, p.request) - }) - }, - err: rhErr, // force requestHandler to return rhErr - } - - res, err := s.Head(ctx, req) - require.EqualError(t, err, rhErr.Error()) - require.Nil(t, res) - }) - - t.Run("correct results", func(t *testing.T) { - obj := &objectData{Object: new(Object)} - - resp := &object.HeadResponse{Object: obj.Object} - - req := new(object.HeadRequest) - - s := &objectService{ - requestHandler: &testHeadEntity{ - res: obj, // force request handler to return obj - }, - respPreparer: &testHeadEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, makeHeadResponse(obj.Object), items[1]) - }, - res: resp, - }, - - statusCalculator: newStatusCalculator(), - } - - res, err := s.Head(ctx, new(object.HeadRequest)) - require.NoError(t, err) - require.Equal(t, resp, res) - }) -} - -func Test_coreHeadReceiver_head(t *testing.T) { - ctx := context.TODO() - - t.Run("raw handling", func(t *testing.T) { - // create custom head info for test - hInfo := newRawHeadInfo() - hInfo.setRaw(true) - - // create custom error for test - srErr := errors.New("test error for straight object receiver") - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - err: srErr, // force straightObjectReceiver to return srErr - }, - } - - _, err := s.getObject(ctx, hInfo) - // ascertain that straightObjectReceiver result returns in raw case as expected - require.EqualError(t, err, srErr.Error()) - }) - - t.Run("straight receive of non-linking object", func(t *testing.T) { - // create custom head info for test - hInfo := newRawHeadInfo() - - // create object w/o children for test - obj := &objectData{Object: new(Object)} - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct straight receiver params", func(t *testing.T) { - require.Equal(t, []transport.GetInfo{hInfo}, items[0]) - }) - }, - res: obj, - }, - } - - res, err := s.getObject(ctx, hInfo) - require.NoError(t, err) - require.Equal(t, obj, res) - }) - - t.Run("linking object/non-assembly", func(t *testing.T) { - // create custom head info for test - hInfo := newRawHeadInfo() - - // create object w/ children for test - obj := &objectData{ - Object: &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child}}}}}, - } - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - res: obj, // force straightObjectReceiver to return
obj - }, - ancestralRecv: nil, // make component to be non-assembly - } - - res, err := s.getObject(ctx, hInfo) - require.EqualError(t, err, errNonAssembly.Error()) - require.Nil(t, res) - }) - - t.Run("children search failure", func(t *testing.T) { - addr := testObjectAddress(t) - - hInfo := newRawHeadInfo() - hInfo.setAddress(addr) - hInfo.setSessionToken(new(service.Token)) - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - err: errors.New(""), // force straightObjectReceiver to return non-empty error - }, - childLister: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct child lister params", func(t *testing.T) { - require.Equal(t, addr, items[0]) - require.Equal(t, - hInfo.GetSessionToken(), - items[1].(context.Context).Value(transformer.PublicSessionToken), - ) - }) - }, - res: make([]ID, 0), // force objectChildren lister to return empty list - }, - ancestralRecv: new(testHeadEntity), - } - - res, err := s.getObject(ctx, hInfo) - require.EqualError(t, err, childrenNotFound.Error()) - require.Nil(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - var ( - childCount = 5 - rErr = errors.New("test error for rewinding receiver") - children = make([]ID, 0, childCount) - ) - - for i := 0; i < childCount; i++ { - id := testObjectAddress(t).ObjectID - children = append(children, id) - } - - // create custom head info - hInfo := newRawHeadInfo() - hInfo.setTTL(5) - hInfo.setTimeout(3 * time.Second) - hInfo.setAddress(testObjectAddress(t)) - hInfo.setSessionToken(new(service.Token)) - - t.Run("error/children from straight receiver", func(t *testing.T) { - obj := &objectData{Object: new(Object)} - - for i := range children { - // add child reference to object - obj.Headers = append(obj.Headers, Header{ - Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: children[i]}}, - }) - } - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - res: obj, // force straight receiver to return obj - }, - ancestralRecv: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct rewinding receiver", func(t *testing.T) { - require.Equal(t, hInfo.GetAddress(), items[0]) - require.Equal(t, children, items[1]) - require.True(t, items[2].(bool)) - require.Equal(t, - hInfo.GetSessionToken(), - items[3].(context.Context).Value(transformer.PublicSessionToken), - ) - }) - }, - err: rErr, // force rewinding receiver to return rErr - }, - log: zap.L(), - } - - res, err := s.getObject(ctx, hInfo) - require.EqualError(t, err, errIncompleteOperation.Error()) - require.Nil(t, res) - }) - - t.Run("success/children from child lister", func(t *testing.T) { - obj := &objectData{Object: new(Object)} - - s := &coreObjectReceiver{ - straightObjRecv: &testHeadEntity{ - err: errors.New(""), // force straight receiver to return non-nil error - }, - ancestralRecv: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct rewinding receiver", func(t *testing.T) { - require.Equal(t, hInfo.GetAddress(), items[0]) - require.Equal(t, children, items[1]) - require.True(t, items[2].(bool)) - }) - }, - res: obj, // force rewinding receiver to return obj - }, - childLister: &testHeadEntity{ - res: children, // force objectChildrenLister to return particular list - }, - } - - res, err := s.getObject(ctx, hInfo) - require.NoError(t, err, rErr.Error()) - require.Equal(t, obj, res) - }) - }) -} - -func Test_straightHeadReceiver_head(t *testing.T) { - ctx := context.TODO() - - hInfo := newRawHeadInfo() - hInfo.setFullHeaders(true) - - t.Run("executor 
error", func(t *testing.T) { - exErr := errors.New("test error for operation executor") - - s := &straightObjectReceiver{ - executor: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct operation executor params", func(t *testing.T) { - require.Equal(t, hInfo, items[0]) - _ = items[1].(objectAccumulator) - }) - }, - err: exErr, // force operationExecutor to return exErr - }, - } - - _, err := s.getObject(ctx, hInfo) - require.EqualError(t, err, exErr.Error()) - - hInfo = newRawHeadInfo() - hInfo.setFullHeaders(true) - - _, err = s.getObject(ctx, hInfo) - require.EqualError(t, err, exErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - obj := &objectData{Object: new(Object), payload: new(emptyReader)} - - s := &straightObjectReceiver{ - executor: &testHeadEntity{ - f: func(items ...interface{}) { - items[1].(objectAccumulator).handleItem(obj.Object) - }, - }, - } - - res, err := s.getObject(ctx, hInfo) - require.NoError(t, err) - require.Equal(t, obj, res) - }) -} - -func Test_coreObjectRewinder_rewind(t *testing.T) { - ctx := context.TODO() - - t.Run("transformer failure", func(t *testing.T) { - tErr := errors.New("test error for object transformer") - objs := []Object{*new(Object), *new(Object)} - - s := &coreObjectRewinder{ - transformer: &testHeadEntity{ - f: func(items ...interface{}) { - t.Run("correct transformer params", func(t *testing.T) { - require.Equal(t, objs, items[0]) - }) - }, - err: tErr, // force transformer to return tErr - }, - } - - res, err := s.rewind(ctx, objs...) - require.EqualError(t, err, tErr.Error()) - require.Empty(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - objs := []Object{ - {SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}}, - {SystemHeader: SystemHeader{ID: testObjectAddress(t).ObjectID}}, - } - - s := &coreObjectRewinder{ - transformer: &testHeadEntity{ - res: objs, // force transformer to return objs - }, - } - - res, err := s.rewind(ctx, objs...) 
- require.NoError(t, err) - require.Equal(t, &objs[0], res) - }) -} - -func Test_coreObjectReceiver_sendingRequest(t *testing.T) { - t.Run("non-assembly", func(t *testing.T) { - src := &transportRequest{serviceRequest: new(object.GetRequest)} - // ascertain that request not changed if node is non-assembled - require.Equal(t, src, new(coreObjectReceiver).sendingRequest(src)) - }) - - t.Run("assembly", func(t *testing.T) { - s := &coreObjectReceiver{ancestralRecv: new(testHeadEntity)} - - t.Run("raw request", func(t *testing.T) { - src := newRawGetInfo() - src.setRaw(true) - // ascertain that request not changed if request is raw - require.Equal(t, src, s.sendingRequest(src)) - }) - - t.Run("non-raw request", func(t *testing.T) { - getInfo := *newRawGetInfo() - getInfo.setTTL(uint32(5)) - getInfo.setTimeout(3 * time.Second) - getInfo.setAddress(testObjectAddress(t)) - getInfo.setRaw(false) - getInfo.setSessionToken(new(service.Token)) - - t.Run("get", func(t *testing.T) { - res := s.sendingRequest(getInfo) - require.Equal(t, getInfo.GetTimeout(), res.GetTimeout()) - require.Equal(t, getInfo.GetAddress(), res.GetAddress()) - require.Equal(t, getInfo.GetTTL(), res.GetTTL()) - require.Equal(t, getInfo.GetSessionToken(), res.GetSessionToken()) - require.True(t, res.GetRaw()) - - t.Run("zero ttl", func(t *testing.T) { - res := s.sendingRequest(newRawGetInfo()) - require.Equal(t, uint32(service.NonForwardingTTL), res.GetTTL()) - }) - }) - - t.Run("head", func(t *testing.T) { - hInfo := newRawHeadInfo() - hInfo.setGetInfo(getInfo) - hInfo.setFullHeaders(false) - - res := s.sendingRequest(hInfo) - require.Equal(t, getInfo.GetTimeout(), res.GetTimeout()) - require.Equal(t, getInfo.GetAddress(), res.GetAddress()) - require.Equal(t, getInfo.GetTTL(), res.GetTTL()) - require.Equal(t, getInfo.GetSessionToken(), res.GetSessionToken()) - require.True(t, res.GetRaw()) - require.True(t, res.(transport.HeadInfo).GetFullHeaders()) - }) - }) - }) -} diff --git a/pkg/network/transport/object/grpc/implementations.go b/pkg/network/transport/object/grpc/implementations.go deleted file mode 100644 index c63f3519b..000000000 --- a/pkg/network/transport/object/grpc/implementations.go +++ /dev/null @@ -1,32 +0,0 @@ -package object - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/pkg/errors" -) - -type ( - remoteService struct { - ps peers.Interface - } -) - -// NewRemoteService is a remote service controller's constructor. 
-func NewRemoteService(ps peers.Interface) RemoteService { - return &remoteService{ - ps: ps, - } -} - -func (rs remoteService) Remote(ctx context.Context, addr multiaddr.Multiaddr) (object.ServiceClient, error) { - con, err := rs.ps.GRPCConnection(ctx, addr) - if err != nil { - return nil, errors.Wrapf(err, "remoteService.Remote failed on GRPCConnection to %s", addr) - } - - return object.NewServiceClient(con), nil -} diff --git a/pkg/network/transport/object/grpc/listing.go b/pkg/network/transport/object/grpc/listing.go deleted file mode 100644 index 8d3f7c866..000000000 --- a/pkg/network/transport/object/grpc/listing.go +++ /dev/null @@ -1,276 +0,0 @@ -package object - -import ( - "context" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - v1 "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - objectChildrenLister interface { - children(context.Context, Address) []ID - } - - coreChildrenLister struct { - queryFn relationQueryFunc - objSearcher objectSearcher - log *zap.Logger - timeout time.Duration - } - - relationQueryFunc func(Address) ([]byte, error) - - rawSearchInfo struct { - *rawMetaInfo - cid CID - query []byte - } - - neighborReceiver struct { - firstChildQueryFn relationQueryFunc - leftNeighborQueryFn relationQueryFunc - rightNeighborQueryFn relationQueryFunc - rangeDescRecv selectiveRangeReceiver - } - - selectiveRangeReceiver interface { - rangeDescriptor(context.Context, Address, relationQueryFunc) (RangeDescriptor, error) - } - - selectiveRangeRecv struct { - executor transport.SelectiveContainerExecutor - } -) - -const ( - lmQueryMarshalFail = "marshal search query failure" - lmListFail = "searching inside children listing failure" -) - -var errRelationNotFound = errors.New("relation not found") - -func (s *neighborReceiver) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { - if res, err := s.rangeDescRecv.rangeDescriptor(ctx, addr, s.firstChildQueryFn); err == nil { - return res, nil - } - - return s.rangeDescRecv.rangeDescriptor(ctx, addr, nil) -} - -func (s *neighborReceiver) Neighbor(ctx context.Context, addr Address, left bool) (res RangeDescriptor, err error) { - if left { - res, err = s.rangeDescRecv.rangeDescriptor(ctx, addr, s.leftNeighborQueryFn) - } else { - res, err = s.rangeDescRecv.rangeDescriptor(ctx, addr, s.rightNeighborQueryFn) - } - - return -} - -func (s *selectiveRangeRecv) rangeDescriptor(ctx context.Context, addr Address, fn relationQueryFunc) (res RangeDescriptor, err error) { - b := false - - p := &transport.HeadParams{ - GetParams: transport.GetParams{ - SelectiveParams: transport.SelectiveParams{ - CID: addr.CID, - ServeLocal: true, - TTL: service.SingleForwardingTTL, - Token: tokenFromContext(ctx), - Bearer: bearerFromContext(ctx), - - ExtendedHeaders: extendedHeadersFromContext(ctx), - }, - Handler: func(_ multiaddr.Multiaddr, obj *Object) { - res.Addr = *obj.Address() - res.Offset = 0 - res.Size = int64(obj.SystemHeader.PayloadLength) - - sameID := res.Addr.ObjectID.Equal(addr.ObjectID) - bound := boundaryChild(obj) - res.LeftBound = sameID || bound == boundBoth || bound == boundLeft - res.RightBound = sameID || bound == boundBoth || bound == boundRight - - b = true - }, - }, - FullHeaders: true, - } - - if fn != nil { - if p.Query, err = fn(addr); err != nil { - return - } - } else { - p.IDList = 
[]ID{addr.ObjectID} - } - - if err = s.executor.Head(ctx, p); err != nil { - return - } else if !b { - err = errRelationNotFound - } - - return res, err -} - -const ( - boundBoth = iota - boundLeft - boundRight - boundMid -) - -func boundaryChild(obj *Object) (res int) { - splitInd, _ := obj.LastHeader(object.HeaderType(object.TransformHdr)) - if splitInd < 0 { - return - } - - for i := len(obj.Headers) - 1; i > splitInd; i-- { - hVal := obj.Headers[i].GetValue() - if hVal == nil { - continue - } - - hLink, ok := hVal.(*object.Header_Link) - if !ok || hLink == nil || hLink.Link == nil { - continue - } - - linkType := hLink.Link.GetType() - if linkType != object.Link_Previous && linkType != object.Link_Next { - continue - } - - res = boundMid - - if hLink.Link.ID.Empty() { - if linkType == object.Link_Next { - res = boundRight - } else if linkType == object.Link_Previous { - res = boundLeft - } - - return - } - } - - return res -} - -func firstChildQueryFunc(addr Address) ([]byte, error) { - return (&v1.Query{ - Filters: append(parentFilters(addr), QueryFilter{ - Type: v1.Filter_Exact, - Name: KeyPrev, - Value: ID{}.String(), - }), - }).Marshal() -} - -func leftNeighborQueryFunc(addr Address) ([]byte, error) { - return idQueryFunc(KeyNext, addr.ObjectID) -} - -func rightNeighborQueryFunc(addr Address) ([]byte, error) { - return idQueryFunc(KeyPrev, addr.ObjectID) -} - -func idQueryFunc(key string, id ID) ([]byte, error) { - return (&v1.Query{Filters: []QueryFilter{ - { - Type: v1.Filter_Exact, - Name: key, - Value: id.String(), - }, - }}).Marshal() -} - -func coreChildrenQueryFunc(addr Address) ([]byte, error) { - return (&v1.Query{Filters: parentFilters(addr)}).Marshal() -} - -func (s *coreChildrenLister) children(ctx context.Context, parent Address) []ID { - query, err := s.queryFn(parent) - if err != nil { - s.log.Error(lmQueryMarshalFail, zap.Error(err)) - return nil - } - - sInfo := newRawSearchInfo() - sInfo.setTTL(service.NonForwardingTTL) - sInfo.setTimeout(s.timeout) - sInfo.setCID(parent.CID) - sInfo.setQuery(query) - sInfo.setSessionToken(tokenFromContext(ctx)) - sInfo.setBearerToken(bearerFromContext(ctx)) - sInfo.setExtendedHeaders(extendedHeadersFromContext(ctx)) - - children, err := s.objSearcher.searchObjects(ctx, sInfo) - if err != nil { - s.log.Error(lmListFail, zap.Error(err)) - return nil - } - - res := make([]ID, 0, len(children)) - for i := range children { - res = append(res, children[i].ObjectID) - } - - return res -} - -func (s *rawSearchInfo) GetCID() CID { - return s.cid -} - -func (s *rawSearchInfo) setCID(v CID) { - s.cid = v -} - -func (s *rawSearchInfo) GetQuery() []byte { - return s.query -} - -func (s *rawSearchInfo) setQuery(v []byte) { - s.query = v -} - -func (s *rawSearchInfo) getMetaInfo() *rawMetaInfo { - return s.rawMetaInfo -} - -func (s *rawSearchInfo) setMetaInfo(v *rawMetaInfo) { - s.rawMetaInfo = v - s.setType(object.RequestSearch) -} - -func newRawSearchInfo() *rawSearchInfo { - res := new(rawSearchInfo) - - res.setMetaInfo(newRawMetaInfo()) - - return res -} - -func parentFilters(addr Address) []QueryFilter { - return []QueryFilter{ - { - Type: v1.Filter_Exact, - Name: transport.KeyHasParent, - }, - { - Type: v1.Filter_Exact, - Name: transport.KeyParent, - Value: addr.ObjectID.String(), - }, - } -} diff --git a/pkg/network/transport/object/grpc/listing_test.go b/pkg/network/transport/object/grpc/listing_test.go deleted file mode 100644 index 614237af0..000000000 --- a/pkg/network/transport/object/grpc/listing_test.go +++ /dev/null @@ -1,512 
+0,0 @@ -package object - -import ( - "context" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/query" - v1 "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testListingEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - transport.SelectiveContainerExecutor - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var ( - _ objectSearcher = (*testListingEntity)(nil) - _ selectiveRangeReceiver = (*testListingEntity)(nil) - - _ transport.SelectiveContainerExecutor = (*testListingEntity)(nil) -) - -func (s *testListingEntity) rangeDescriptor(_ context.Context, a Address, f relationQueryFunc) (RangeDescriptor, error) { - if s.f != nil { - s.f(a, f) - } - if s.err != nil { - return RangeDescriptor{}, s.err - } - return s.res.(RangeDescriptor), nil -} - -func (s *testListingEntity) Head(_ context.Context, p *transport.HeadParams) error { - if s.f != nil { - s.f(p) - } - return s.err -} - -func (s *testListingEntity) searchObjects(ctx context.Context, i transport.SearchInfo) ([]Address, error) { - if s.f != nil { - s.f(i) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Address), nil -} - -func Test_rawSeachInfo(t *testing.T) { - t.Run("TTL", func(t *testing.T) { - ttl := uint32(3) - - r := newRawSearchInfo() - r.setTTL(ttl) - - require.Equal(t, ttl, r.GetTTL()) - }) - - t.Run("timeout", func(t *testing.T) { - timeout := 3 * time.Second - - r := newRawSearchInfo() - r.setTimeout(timeout) - - require.Equal(t, timeout, r.GetTimeout()) - }) - - t.Run("CID", func(t *testing.T) { - cid := testObjectAddress(t).CID - - r := newRawSearchInfo() - r.setCID(cid) - - require.Equal(t, cid, r.GetCID()) - }) - - t.Run("query", func(t *testing.T) { - query := testData(t, 10) - - r := newRawSearchInfo() - r.setQuery(query) - - require.Equal(t, query, r.GetQuery()) - }) -} - -func Test_coreChildrenQueryFunc(t *testing.T) { - t.Run("correct query composition", func(t *testing.T) { - // create custom address for test - addr := testObjectAddress(t) - - res, err := coreChildrenQueryFunc(addr) - require.NoError(t, err) - - // unmarshal query - q := v1.Query{} - require.NoError(t, q.Unmarshal(res)) - - // ascertain that filter list composed correctly - require.Len(t, q.Filters, 2) - - require.Contains(t, q.Filters, QueryFilter{ - Type: v1.Filter_Exact, - Name: transport.KeyHasParent, - }) - - require.Contains(t, q.Filters, QueryFilter{ - Type: v1.Filter_Exact, - Name: transport.KeyParent, - Value: addr.ObjectID.String(), - }) - }) -} - -func Test_coreChildrenLister_children(t *testing.T) { - ctx := context.TODO() - addr := testObjectAddress(t) - - t.Run("query function failure", func(t *testing.T) { - s := &coreChildrenLister{ - queryFn: func(v Address) ([]byte, error) { - t.Run("correct query function params", func(t *testing.T) { - require.Equal(t, addr, v) - }) - return nil, errors.New("") // force 
relationQueryFunc to return some non-nil error - }, - log: test.NewLogger(false), - } - - require.Empty(t, s.children(ctx, addr)) - }) - - t.Run("object searcher failure", func(t *testing.T) { - // create custom timeout for test - sErr := errors.New("test error for object searcher") - // create custom timeout for test - timeout := 3 * time.Second - // create custom query for test - query := testData(t, 10) - - s := &coreChildrenLister{ - queryFn: func(v Address) ([]byte, error) { - return query, nil // force relationQueryFunc to return created query - }, - objSearcher: &testListingEntity{ - f: func(items ...interface{}) { - t.Run("correct object searcher params", func(t *testing.T) { - p := items[0].(transport.SearchInfo) - require.Equal(t, timeout, p.GetTimeout()) - require.Equal(t, query, p.GetQuery()) - require.Equal(t, addr.CID, p.GetCID()) - require.Equal(t, uint32(service.NonForwardingTTL), p.GetTTL()) - }) - }, - err: sErr, // force objectSearcher to return sErr - }, - log: test.NewLogger(false), - timeout: timeout, - } - - require.Empty(t, s.children(ctx, addr)) - }) - - t.Run("correct result", func(t *testing.T) { - // create custom child list - addrList := testAddrList(t, 5) - idList := make([]ID, 0, len(addrList)) - for i := range addrList { - idList = append(idList, addrList[i].ObjectID) - } - - s := &coreChildrenLister{ - queryFn: func(address Address) ([]byte, error) { - return nil, nil // force relationQueryFunc to return nil error - }, - objSearcher: &testListingEntity{ - res: addrList, - }, - } - - require.Equal(t, idList, s.children(ctx, addr)) - }) -} - -func Test_queryGenerators(t *testing.T) { - t.Run("object ID", func(t *testing.T) { - var ( - q = new(query.Query) - key = "key for test" - id = testObjectAddress(t).ObjectID - ) - - res, err := idQueryFunc(key, id) - require.NoError(t, err) - - require.NoError(t, q.Unmarshal(res)) - require.Len(t, q.Filters, 1) - - require.Equal(t, query.Filter{ - Type: v1.Filter_Exact, - Name: key, - Value: id.String(), - }, q.Filters[0]) - }) - - t.Run("left neighbor", func(t *testing.T) { - var ( - q = new(query.Query) - addr = testObjectAddress(t) - ) - - res, err := leftNeighborQueryFunc(addr) - require.NoError(t, err) - - require.NoError(t, q.Unmarshal(res)) - require.Len(t, q.Filters, 1) - - require.Equal(t, query.Filter{ - Type: v1.Filter_Exact, - Name: KeyNext, - Value: addr.ObjectID.String(), - }, q.Filters[0]) - }) - - t.Run("right neighbor", func(t *testing.T) { - var ( - q = new(query.Query) - addr = testObjectAddress(t) - ) - - res, err := rightNeighborQueryFunc(addr) - require.NoError(t, err) - - require.NoError(t, q.Unmarshal(res)) - require.Len(t, q.Filters, 1) - - require.Equal(t, query.Filter{ - Type: v1.Filter_Exact, - Name: KeyPrev, - Value: addr.ObjectID.String(), - }, q.Filters[0]) - }) - - t.Run("first child", func(t *testing.T) { - var ( - q = new(query.Query) - addr = testObjectAddress(t) - ) - - res, err := firstChildQueryFunc(addr) - require.NoError(t, err) - - require.NoError(t, q.Unmarshal(res)) - require.Len(t, q.Filters, 3) - - require.Contains(t, q.Filters, query.Filter{ - Type: v1.Filter_Exact, - Name: transport.KeyHasParent, - }) - require.Contains(t, q.Filters, query.Filter{ - Type: v1.Filter_Exact, - Name: transport.KeyParent, - Value: addr.ObjectID.String(), - }) - require.Contains(t, q.Filters, query.Filter{ - Type: v1.Filter_Exact, - Name: KeyPrev, - Value: ID{}.String(), - }) - }) -} - -func Test_selectiveRangeRecv(t *testing.T) { - ctx := context.TODO() - addr := testObjectAddress(t) - - 
t.Run("query function failure", func(t *testing.T) { - qfErr := errors.New("test error for query function") - _, err := new(selectiveRangeRecv).rangeDescriptor(ctx, testObjectAddress(t), func(Address) ([]byte, error) { - return nil, qfErr - }) - require.EqualError(t, err, qfErr.Error()) - }) - - t.Run("correct executor params", func(t *testing.T) { - t.Run("w/ query function", func(t *testing.T) { - qBytes := testData(t, 10) - - s := &selectiveRangeRecv{ - executor: &testListingEntity{ - f: func(items ...interface{}) { - p := items[0].(*transport.HeadParams) - require.Equal(t, addr.CID, p.CID) - require.True(t, p.ServeLocal) - require.Equal(t, uint32(service.SingleForwardingTTL), p.TTL) - require.True(t, p.FullHeaders) - require.Equal(t, qBytes, p.Query) - require.Empty(t, p.IDList) - }, - }, - } - - _, _ = s.rangeDescriptor(ctx, addr, func(Address) ([]byte, error) { return qBytes, nil }) - }) - - t.Run("w/o query function", func(t *testing.T) { - s := &selectiveRangeRecv{ - executor: &testListingEntity{ - f: func(items ...interface{}) { - p := items[0].(*transport.HeadParams) - require.Equal(t, addr.CID, p.CID) - require.True(t, p.ServeLocal) - require.Equal(t, uint32(service.SingleForwardingTTL), p.TTL) - require.True(t, p.FullHeaders) - require.Empty(t, p.Query) - require.Equal(t, []ID{addr.ObjectID}, p.IDList) - }, - }, - } - - _, _ = s.rangeDescriptor(ctx, addr, nil) - }) - }) - - t.Run("correct result", func(t *testing.T) { - t.Run("failure", func(t *testing.T) { - t.Run("executor failure", func(t *testing.T) { - exErr := errors.New("test error for executor") - - s := &selectiveRangeRecv{ - executor: &testListingEntity{ - err: exErr, - }, - } - - _, err := s.rangeDescriptor(ctx, addr, nil) - require.EqualError(t, err, exErr.Error()) - }) - - t.Run("not found", func(t *testing.T) { - s := &selectiveRangeRecv{ - executor: new(testListingEntity), - } - - _, err := s.rangeDescriptor(ctx, addr, nil) - require.EqualError(t, err, errRelationNotFound.Error()) - }) - }) - - t.Run("success", func(t *testing.T) { - foundAddr := testObjectAddress(t) - - obj := &Object{ - SystemHeader: SystemHeader{ - PayloadLength: 100, - ID: foundAddr.ObjectID, - CID: foundAddr.CID, - }, - } - - s := &selectiveRangeRecv{ - executor: &testListingEntity{ - SelectiveContainerExecutor: nil, - f: func(items ...interface{}) { - p := items[0].(*transport.HeadParams) - p.Handler(nil, obj) - }, - }, - } - - res, err := s.rangeDescriptor(ctx, addr, nil) - require.NoError(t, err) - require.Equal(t, RangeDescriptor{ - Size: int64(obj.SystemHeader.PayloadLength), - Offset: 0, - Addr: foundAddr, - - LeftBound: true, - RightBound: true, - }, res) - }) - }) -} - -func Test_neighborReceiver(t *testing.T) { - ctx := context.TODO() - addr := testObjectAddress(t) - - t.Run("neighbor", func(t *testing.T) { - t.Run("correct internal logic", func(t *testing.T) { - rightCalled, leftCalled := false, false - - s := &neighborReceiver{ - leftNeighborQueryFn: func(a Address) ([]byte, error) { - require.Equal(t, addr, a) - leftCalled = true - return nil, nil - }, - rightNeighborQueryFn: func(a Address) ([]byte, error) { - require.Equal(t, addr, a) - rightCalled = true - return nil, nil - }, - rangeDescRecv: &testListingEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0]) - _, _ = items[1].(relationQueryFunc)(addr) - }, - err: errors.New(""), - }, - } - - _, _ = s.Neighbor(ctx, addr, true) - require.False(t, rightCalled) - require.True(t, leftCalled) - - leftCalled = false - - _, _ = s.Neighbor(ctx, addr, false) - 
require.False(t, leftCalled) - require.True(t, rightCalled) - }) - - t.Run("correct result", func(t *testing.T) { - rErr := errors.New("test error for range receiver") - - rngRecv := &testListingEntity{err: rErr} - s := &neighborReceiver{rangeDescRecv: rngRecv} - - _, err := s.Neighbor(ctx, addr, false) - require.EqualError(t, err, rErr.Error()) - - rngRecv.err = errRelationNotFound - - _, err = s.Neighbor(ctx, addr, false) - require.EqualError(t, err, errRelationNotFound.Error()) - - rd := RangeDescriptor{Size: 1, Offset: 2, Addr: addr} - rngRecv.res, rngRecv.err = rd, nil - - res, err := s.Neighbor(ctx, addr, false) - require.NoError(t, err) - require.Equal(t, rd, res) - }) - }) - - t.Run("base", func(t *testing.T) { - rd := RangeDescriptor{Size: 1, Offset: 2, Addr: addr} - - t.Run("first child exists", func(t *testing.T) { - called := false - - s := &neighborReceiver{ - firstChildQueryFn: func(a Address) ([]byte, error) { - require.Equal(t, addr, a) - called = true - return nil, nil - }, - rangeDescRecv: &testListingEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0]) - _, _ = items[1].(relationQueryFunc)(addr) - }, - res: rd, - }, - } - - res, err := s.Base(ctx, addr) - require.NoError(t, err) - require.Equal(t, rd, res) - require.True(t, called) - }) - - t.Run("first child doesn't exist", func(t *testing.T) { - called := false - - recv := &testListingEntity{err: errors.New("")} - - recv.f = func(...interface{}) { - if called { - recv.res, recv.err = rd, nil - } - called = true - } - - s := &neighborReceiver{rangeDescRecv: recv} - - res, err := s.Base(ctx, addr) - require.NoError(t, err) - require.Equal(t, rd, res) - }) - }) -} diff --git a/pkg/network/transport/object/grpc/postprocessor.go b/pkg/network/transport/object/grpc/postprocessor.go deleted file mode 100644 index 52d474d4c..000000000 --- a/pkg/network/transport/object/grpc/postprocessor.go +++ /dev/null @@ -1,47 +0,0 @@ -package object - -import ( - "context" -) - -type ( - // requestPostProcessor is an interface of RPC call outcome handler. - requestPostProcessor interface { - // Performs actions based on the outcome of request processing. - postProcess(context.Context, serviceRequest, error) - } - - // complexPostProcessor is an implementation of requestPostProcessor interface. - complexPostProcessor struct { - // Sequence of requestPostProcessor instances. - list []requestPostProcessor - } -) - -var _ requestPostProcessor = (*complexPostProcessor)(nil) - -// requestPostProcessor method implementation. -// -// Panics with pmEmptyServiceRequest on nil request argument. -// -// Passes request through the sequence of requestPostProcessor instances. -// -// Warn: adding instance to list itself provoke endless recursion. -func (s *complexPostProcessor) postProcess(ctx context.Context, req serviceRequest, e error) { - if req == nil { - panic(pmEmptyServiceRequest) - } - - for i := range s.list { - s.list[i].postProcess(ctx, req, e) - } -} - -// Creates requestPostProcessor based on Params. -// -// Uses complexPostProcessor instance as a result implementation. 
-func newPostProcessor() requestPostProcessor { - return &complexPostProcessor{ - list: []requestPostProcessor{}, - } -} diff --git a/pkg/network/transport/object/grpc/postprocessor_test.go b/pkg/network/transport/object/grpc/postprocessor_test.go deleted file mode 100644 index ad48fb85c..000000000 --- a/pkg/network/transport/object/grpc/postprocessor_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testPostProcessorEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var _ requestPostProcessor = (*testPostProcessorEntity)(nil) - -func (s *testPostProcessorEntity) postProcess(_ context.Context, req serviceRequest, e error) { - if s.f != nil { - s.f(req, e) - } -} - -func TestComplexPostProcessor_PostProcess(t *testing.T) { - ctx := context.TODO() - - t.Run("empty request argument", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - // ascertain that nil request causes panic - new(complexPostProcessor).postProcess(ctx, nil, nil) - }) - }) - - t.Run("correct behavior", func(t *testing.T) { - // create serviceRequest instance. - req := new(testPostProcessorEntity) - - // create custom error - pErr := errors.New("test error for post processor") - - // create list of post processors - postProcCount := 10 - postProcessors := make([]requestPostProcessor, 0, postProcCount) - - postProcessorCalls := make([]struct{}, 0, postProcCount) - - for i := 0; i < postProcCount; i++ { - postProcessors = append(postProcessors, &testPostProcessorEntity{ - f: func(items ...interface{}) { - t.Run("correct arguments", func(t *testing.T) { - postProcessorCalls = append(postProcessorCalls, struct{}{}) - }) - }, - }) - } - - s := &complexPostProcessor{list: postProcessors} - - s.postProcess(ctx, req, pErr) - - // ascertain all internal requestPostProcessor instances were called - require.Len(t, postProcessorCalls, postProcCount) - }) -} - -func Test_newPostProcessor(t *testing.T) { - res := newPostProcessor() - - pp := res.(*complexPostProcessor) - require.Len(t, pp.list, 0) -} diff --git a/pkg/network/transport/object/grpc/preprocessor.go b/pkg/network/transport/object/grpc/preprocessor.go deleted file mode 100644 index 7b3291dfb..000000000 --- a/pkg/network/transport/object/grpc/preprocessor.go +++ /dev/null @@ -1,160 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - - "github.com/nspcc-dev/neofs-api-go/service" - "go.uber.org/zap" -) - -type ( - // requestPreProcessor is an interface of Object service request installer. - requestPreProcessor interface { - // Performs preliminary request validation and preparation. - preProcess(context.Context, serviceRequest) error - } - - // complexPreProcessor is an implementation of requestPreProcessor interface. - complexPreProcessor struct { - // Sequence of requestPreProcessor instances. 
- list []requestPreProcessor - } - - signingPreProcessor struct { - preProc requestPreProcessor - key *ecdsa.PrivateKey - - log *zap.Logger - } -) - -const pmEmptyServiceRequest = "empty service request" - -var ( - _ requestPreProcessor = (*signingPreProcessor)(nil) - _ requestPreProcessor = (*complexPreProcessor)(nil) -) - -// requestPreProcessor method implementation. -// -// Passes request through internal requestPreProcessor. -// If internal requestPreProcessor returns non-nil error, this error returns. -// Returns result of signRequest function. -func (s *signingPreProcessor) preProcess(ctx context.Context, req serviceRequest) (err error) { - if err = s.preProc.preProcess(ctx, req); err != nil { - return - } else if err = signRequest(s.key, req); err != nil { - s.log.Error("could not re-sign request", - zap.Error(err), - ) - err = errReSigning - } - - return -} - -// requestPreProcessor method implementation. -// -// Panics with pmEmptyServiceRequest on nil request argument. -// -// Passes request through the sequence of requestPreProcessor instances. -// Any non-nil error returned by some instance returns. -// -// Warn: adding instance to list itself provoke endless recursion. -func (s *complexPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { - if req == nil { - panic(pmEmptyServiceRequest) - } - - for i := range s.list { - if err := s.list[i].preProcess(ctx, req); err != nil { - return err - } - } - - return nil -} - -// Creates requestPreProcessor based on Params. -// -// Uses complexPreProcessor instance as a result implementation. -// -// Adds to next preprocessors to list: -// * verifyPreProcessor; -// * ttlPreProcessor; -// * epochPreProcessor, if CheckEpochSync flag is set in params. -// * aclPreProcessor, if CheckAcl flag is set in params. 
-func newPreProcessor(p *Params) requestPreProcessor { - preProcList := make([]requestPreProcessor, 0) - - if p.CheckACL { - preProcList = append(preProcList, &aclPreProcessor{ - log: p.Logger, - - aclInfoReceiver: p.aclInfoReceiver, - - reqActionCalc: p.requestActionCalculator, - - localStore: p.LocalStore, - - extACLSource: p.ExtendedACLSource, - - bearerVerifier: &complexBearerVerifier{ - items: []bearerTokenVerifier{ - &bearerActualityVerifier{ - epochRecv: p.EpochReceiver, - }, - new(bearerSignatureVerifier), - &bearerOwnershipVerifier{ - cnrStorage: p.ContainerStorage, - }, - }, - }, - }) - } - - preProcList = append(preProcList, - &verifyPreProcessor{ - fVerify: requestVerifyFunc, - }, - - &ttlPreProcessor{ - staticCond: []service.TTLCondition{ - validTTLCondition, - }, - condPreps: []ttlConditionPreparer{ - &coreTTLCondPreparer{ - curAffChecker: &corePlacementUtil{ - prevNetMap: false, - localAddrStore: p.AddressStore, - placementBuilder: p.Placer, - log: p.Logger, - }, - prevAffChecker: &corePlacementUtil{ - prevNetMap: true, - localAddrStore: p.AddressStore, - placementBuilder: p.Placer, - log: p.Logger, - }, - }, - }, - fProc: processTTLConditions, - }, - - &tokenPreProcessor{ - staticVerifier: newComplexTokenVerifier( - &tokenEpochsVerifier{ - epochRecv: p.EpochReceiver, - }, - ), - }, - - new(decTTLPreProcessor), - ) - - return &signingPreProcessor{ - preProc: &complexPreProcessor{list: preProcList}, - key: p.Key, - } -} diff --git a/pkg/network/transport/object/grpc/preprocessor_test.go b/pkg/network/transport/object/grpc/preprocessor_test.go deleted file mode 100644 index bdadc95ff..000000000 --- a/pkg/network/transport/object/grpc/preprocessor_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testPreProcessorEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - Placer - storage.AddressStoreComponent - EpochReceiver - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var _ requestPreProcessor = (*testPreProcessorEntity)(nil) - -func (s *testPreProcessorEntity) preProcess(_ context.Context, req serviceRequest) error { - if s.f != nil { - s.f(req) - } - return s.err -} - -func TestSigningPreProcessor_preProcess(t *testing.T) { - ctx := context.TODO() - - req := new(object.SearchRequest) - - t.Run("internal pre-processor error", func(t *testing.T) { - ppErr := errors.New("test error for pre-processor") - - s := &signingPreProcessor{ - preProc: &testPreProcessorEntity{ - f: func(items ...interface{}) { - t.Run("correct internal pre-processor params", func(t *testing.T) { - require.Equal(t, req, items[0].(serviceRequest)) - }) - }, - err: ppErr, - }, - } - - require.EqualError(t, s.preProcess(ctx, req), ppErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - key := test.DecodeKey(0) - - exp := signRequest(key, req) - - s := &signingPreProcessor{ - preProc: new(testPreProcessorEntity), - key: key, - } - - require.Equal(t, exp, s.preProcess(ctx, req)) - }) -} - -func TestComplexPreProcessor_PreProcess(t *testing.T) { - ctx := context.TODO() - - t.Run("empty request argument", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - // ascertain that nil request causes panic - _ = new(complexPreProcessor).preProcess(ctx, nil) - }) - }) - - // create serviceRequest instance. - req := new(testPreProcessorEntity) - - t.Run("empty list", func(t *testing.T) { - require.NoError(t, new(complexPreProcessor).preProcess(ctx, req)) - }) - - t.Run("non-empty list", func(t *testing.T) { - firstCalled := false - p1 := &testPreProcessorEntity{ - f: func(items ...interface{}) { - t.Run("correct nested pre processor params", func(t *testing.T) { - require.Equal(t, req, items[0].(serviceRequest)) - }) - - firstCalled = true // mark first requestPreProcessor call - }, - err: nil, // force requestPreProcessor to return nil error - } - - // create custom error - pErr := errors.New("pre processor error for test") - p2 := &testPreProcessorEntity{ - err: pErr, // force second requestPreProcessor to return created error - } - - thirdCalled := false - p3 := &testPreProcessorEntity{ - f: func(_ ...interface{}) { - thirdCalled = true // mark third requestPreProcessor call - }, - err: nil, // force requestPreProcessor to return nil error - } - - // create complex requestPreProcessor - p := &complexPreProcessor{ - list: []requestPreProcessor{p1, p2, p3}, // order is important - } - - // ascertain error returns as expected - require.EqualError(t, - p.preProcess(ctx, req), - pErr.Error(), - ) - - // ascertain first requestPreProcessor was called - require.True(t, firstCalled) - - // ascertain first requestPreProcessor was not called - require.False(t, thirdCalled) - }) -} diff --git a/pkg/network/transport/object/grpc/put.go b/pkg/network/transport/object/grpc/put.go deleted file mode 100644 index 404445f4c..000000000 --- a/pkg/network/transport/object/grpc/put.go +++ /dev/null @@ -1,424 +0,0 @@ -package object - -import ( - "context" - "io" - "sync" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - 
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - objectStorer interface { - putObject(context.Context, transport.PutInfo) (*Address, error) - } - - bifurcatingObjectStorer struct { - straightStorer objectStorer - tokenStorer objectStorer - } - - receivingObjectStorer struct { - straightStorer objectStorer - vPayload verifier.Verifier - } - - filteringObjectStorer struct { - filter Filter - objStorer objectStorer - } - - tokenObjectStorer struct { - tokenStore session.PrivateTokenStore - objStorer objectStorer - } - - transformingObjectStorer struct { - transformer transformer.Transformer - objStorer objectStorer - - // Set of errors that won't be converted to errTransformer - mErr map[error]struct{} - } - - straightObjectStorer struct { - executor operationExecutor - } - - putRequest struct { - *object.PutRequest - srv object.Service_PutServer - timeout time.Duration - } - - addressAccumulator interface { - responseItemHandler - address() *Address - } - - coreAddrAccum struct { - *sync.Once - addr *Address - } - - rawPutInfo struct { - *rawMetaInfo - obj *Object - r io.Reader - copyNum uint32 - } - - putStreamReader struct { - tail []byte - srv object.Service_PutServer - } -) - -type transformerHandlerErr struct { - error -} - -var ( - errObjectExpected = errors.New("missing object") - errChunkExpected = errors.New("empty chunk received") -) - -var ( - errMissingOwnerKeys = errors.New("missing owner keys") - errBrokenToken = errors.New("broken token structure") - errNilToken = errors.New("missing session token") - errWrongTokenAddress = errors.New("wrong object address in token") -) - -var errTransformer = errors.New("could not transform the object") - -func (s *objectService) Put(srv object.Service_PutServer) (err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestPut), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestPut, - e: err, - }) - }() - - var req *object.PutRequest - - if req, err = recvPutHeaderMsg(srv); err != nil { - return - } - - _, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ - request: &putRequest{ - PutRequest: req, - srv: srv, - }, - executor: s, - }) - - return err -} - -func (s *bifurcatingObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { - if withTokenFromOwner(info) { - return s.tokenStorer.putObject(ctx, info) - } - - return s.straightStorer.putObject(ctx, info) -} - -func withTokenFromOwner(src service.SessionTokenSource) bool { - if src == nil { - return false - } - - token := src.GetSessionToken() - if token == nil { - return false - } - - signedReq, ok := src.(service.SignKeyPairSource) - if !ok { - return false - } - - signKeyPairs := signedReq.GetSignKeyPairs() - if len(signKeyPairs) == 0 { - return false - } - - firstKey := signKeyPairs[0].GetPublicKey() - if firstKey == nil { - return false - } - - reqOwner, err := refs.NewOwnerID(firstKey) - if err != nil { - return false - } - - return reqOwner.Equal(token.GetOwnerID()) -} - -func (s *tokenObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { - token := info.GetSessionToken() - - key := session.PrivateTokenKey{} - key.SetOwnerID(token.GetOwnerID()) - key.SetTokenID(token.GetID()) - - pToken, 
err := s.tokenStore.Fetch(key) - if err != nil { - return nil, &detailedError{ - error: errTokenRetrieval, - d: privateTokenRecvDetails(token.GetID(), token.GetOwnerID()), - } - } - - return s.objStorer.putObject( - contextWithValues(ctx, - transformer.PrivateSessionToken, pToken, - transformer.PublicSessionToken, token, - storagegroup.BearerToken, info.GetBearerToken(), - storagegroup.ExtendedHeaders, info.ExtendedHeaders(), - ), - info, - ) -} - -func (s *filteringObjectStorer) putObject(ctx context.Context, info transport.PutInfo) (*Address, error) { - if res := s.filter.Pass( - contextWithValues(ctx, ttlValue, info.GetTTL()), - &Meta{Object: info.GetHead()}, - ); res.Code() != localstore.CodePass { - if err := res.Err(); err != nil { - return nil, err - } - - return nil, errObjectFilter - } - - return s.objStorer.putObject(ctx, info) -} - -func (s *receivingObjectStorer) putObject(ctx context.Context, src transport.PutInfo) (*Address, error) { - obj := src.GetHead() - obj.Payload = make([]byte, obj.SystemHeader.PayloadLength) - - if _, err := io.ReadFull(src.Payload(), obj.Payload); err != nil && err != io.EOF { - if errors.Is(err, io.ErrUnexpectedEOF) { - err = transformer.ErrPayloadEOF - } - - return nil, err - } else if err = s.vPayload.Verify(ctx, obj); err != nil { - return nil, errPayloadChecksum - } - - putInfo := newRawPutInfo() - putInfo.setTimeout(src.GetTimeout()) - putInfo.setTTL(src.GetTTL()) - putInfo.setCopiesNumber(src.CopiesNumber()) - putInfo.setHead(obj) - putInfo.setSessionToken(src.GetSessionToken()) - putInfo.setBearerToken(src.GetBearerToken()) - putInfo.setExtendedHeaders(src.ExtendedHeaders()) - - return s.straightStorer.putObject(ctx, putInfo) -} - -func (s *transformingObjectStorer) putObject(ctx context.Context, src transport.PutInfo) (res *Address, err error) { - var ( - ttl = src.GetTTL() - timeout = src.GetTimeout() - copyNum = src.CopiesNumber() - token = src.GetSessionToken() - bearer = src.GetBearerToken() - extHdrs = src.ExtendedHeaders() - ) - - err = s.transformer.Transform(ctx, - transformer.ProcUnit{ - Head: src.GetHead(), - Payload: src.Payload(), - }, func(ctx context.Context, unit transformer.ProcUnit) error { - res = unit.Head.Address() - - putInfo := newRawPutInfo() - putInfo.setHead(unit.Head) - putInfo.setPayload(unit.Payload) - putInfo.setTimeout(timeout) - putInfo.setTTL(ttl) - putInfo.setCopiesNumber(copyNum) - putInfo.setSessionToken(token) - putInfo.setBearerToken(bearer) - putInfo.setExtendedHeaders(extHdrs) - - _, err := s.objStorer.putObject(ctx, putInfo) - if err != nil { - err = &transformerHandlerErr{ - error: err, - } - } - return err - }, - ) - - if e := errors.Cause(err); e != nil { - if v, ok := e.(*transformerHandlerErr); ok { - err = v.error - } else if _, ok := s.mErr[e]; !ok { - err = errTransformer - } - } - - return res, err -} - -func (s *putStreamReader) Read(p []byte) (n int, err error) { - if s.srv == nil { - return 0, io.EOF - } - - n += copy(p, s.tail) - if n > 0 { - s.tail = s.tail[n:] - return - } - - var msg *object.PutRequest - - if msg, err = s.srv.Recv(); err != nil { - return - } - - chunk := msg.GetChunk() - if len(chunk) == 0 { - return 0, errChunkExpected - } - - r := copy(p, chunk) - - s.tail = chunk[r:] - - n += r - - return -} - -func (s *straightObjectStorer) putObject(ctx context.Context, pInfo transport.PutInfo) (*Address, error) { - addrAccum := newAddressAccumulator() - if err := s.executor.executeOperation(ctx, pInfo, addrAccum); err != nil { - return nil, err - } - - return addrAccum.address(), 
nil -} - -func recvPutHeaderMsg(srv object.Service_PutServer) (*object.PutRequest, error) { - req, err := srv.Recv() - if err != nil { - return nil, err - } else if req == nil { - return nil, errHeaderExpected - } else if h := req.GetHeader(); h == nil { - return nil, errHeaderExpected - } else if h.GetObject() == nil { - return nil, errObjectExpected - } - - return req, nil -} - -func contextWithValues(parentCtx context.Context, items ...interface{}) context.Context { - fCtx := parentCtx - for i := 0; i < len(items); i += 2 { - fCtx = context.WithValue(fCtx, items[i], items[i+1]) - } - - return fCtx -} - -func (s *putRequest) GetTimeout() time.Duration { return s.timeout } - -func (s *putRequest) GetHead() *Object { return s.GetHeader().GetObject() } - -func (s *putRequest) CopiesNumber() uint32 { - h := s.GetHeader() - if h == nil { - return 0 - } - - return h.GetCopiesNumber() -} - -func (s *putRequest) Payload() io.Reader { - return &putStreamReader{ - srv: s.srv, - } -} - -func (s *rawPutInfo) GetHead() *Object { - return s.obj -} - -func (s *rawPutInfo) setHead(obj *Object) { - s.obj = obj -} - -func (s *rawPutInfo) Payload() io.Reader { - return s.r -} - -func (s *rawPutInfo) setPayload(r io.Reader) { - s.r = r -} - -func (s *rawPutInfo) CopiesNumber() uint32 { - return s.copyNum -} - -func (s *rawPutInfo) setCopiesNumber(v uint32) { - s.copyNum = v -} - -func (s *rawPutInfo) getMetaInfo() *rawMetaInfo { - return s.rawMetaInfo -} - -func (s *rawPutInfo) setMetaInfo(v *rawMetaInfo) { - s.rawMetaInfo = v - s.setType(object.RequestPut) -} - -func newRawPutInfo() *rawPutInfo { - res := new(rawPutInfo) - - res.setMetaInfo(newRawMetaInfo()) - - return res -} - -func (s *coreAddrAccum) handleItem(item interface{}) { s.Do(func() { s.addr = item.(*Address) }) } - -func (s *coreAddrAccum) address() *Address { return s.addr } - -func newAddressAccumulator() addressAccumulator { return &coreAddrAccum{Once: new(sync.Once)} } diff --git a/pkg/network/transport/object/grpc/put_test.go b/pkg/network/transport/object/grpc/put_test.go deleted file mode 100644 index 1bdd58803..000000000 --- a/pkg/network/transport/object/grpc/put_test.go +++ /dev/null @@ -1,957 +0,0 @@ -package object - -import ( - "bytes" - "context" - "io" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testPutEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - object.Service_PutServer - transport.PutInfo - Filter - session.PrivateTokenStore - transport.SelectiveContainerExecutor - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var ( - _ object.Service_PutServer = (*testPutEntity)(nil) - _ requestHandler = (*testPutEntity)(nil) - _ objectStorer = (*testPutEntity)(nil) - _ transport.PutInfo = (*testPutEntity)(nil) - _ Filter = (*testPutEntity)(nil) - _ operationExecutor = (*testPutEntity)(nil) - _ session.PrivateTokenStore = (*testPutEntity)(nil) - _ EpochReceiver = (*testPutEntity)(nil) - _ transformer.Transformer = (*testPutEntity)(nil) -) - -func (s *testPutEntity) Verify(_ context.Context, obj *Object) error { - if s.f != nil { - s.f(obj) - } - return s.err -} - -func (s *testPutEntity) Transform(_ context.Context, u transformer.ProcUnit, h ...transformer.ProcUnitHandler) error { - if s.f != nil { - s.f(u, h) - } - return s.err -} - -func (s *testPutEntity) verify(_ context.Context, token *session.Token, obj *Object) error { - if s.f != nil { - s.f(token, obj) - } - return s.err -} - -func (s *testPutEntity) Epoch() uint64 { return s.res.(uint64) } - -func (s *testPutEntity) Direct(ctx context.Context, objs ...Object) ([]Object, error) { - if s.f != nil { - s.f(ctx, objs) - } - return s.res.([]Object), s.err -} - -func (s *testPutEntity) Fetch(id session.PrivateTokenKey) (session.PrivateToken, error) { - if s.f != nil { - s.f(id) - } - if s.err != nil { - return nil, s.err - } - return s.res.(session.PrivateToken), nil -} - -func (s *testPutEntity) executeOperation(_ context.Context, m transport.MetaInfo, h responseItemHandler) error { - if s.f != nil { - s.f(m, h) - } - return s.err -} - -func (s *testPutEntity) Pass(ctx context.Context, m *Meta) *localstore.FilterResult { - if s.f != nil { - s.f(ctx, m) - } - items := s.res.([]interface{}) - return items[0].(*localstore.FilterResult) -} - -func (s *testPutEntity) GetTTL() uint32 { return s.res.(uint32) } - -func (s *testPutEntity) GetToken() *session.Token { return s.res.(*session.Token) } - -func (s *testPutEntity) GetHead() *Object { return s.res.(*Object) } - -func (s *testPutEntity) putObject(ctx context.Context, p transport.PutInfo) (*Address, error) { - if s.f != nil { - s.f(p, ctx) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*Address), nil -} - -func (s *testPutEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func (s *testPutEntity) Recv() (*object.PutRequest, error) { - if s.f != nil { - s.f() - } - if s.err != nil { - return nil, s.err - } else if s.res == nil { - return nil, nil - } - return s.res.(*object.PutRequest), nil -} - -func (s *testPutEntity) Context() context.Context { return context.TODO() } - -func Test_objectService_Put(t *testing.T) { - - t.Run("stream error", func(t *testing.T) { - // create custom error for test - psErr := errors.New("test error for put stream server") - - s := &testPutEntity{ - err: psErr, // force server to return psErr - } - - srv := &objectService{ - statusCalculator: newStatusCalculator(), - } - - // ascertain that error returns as expected - require.EqualError(t, - srv.Put(s), - psErr.Error(), - ) - }) - - t.Run("request handling", func(t *testing.T) { - // create custom request for test - req := &object.PutRequest{R: &object.PutRequest_Header{ - Header: &object.PutRequest_PutHeader{ - Object: new(Object), - }, - }} - - // create custom error for test - hErr := errors.New("test error for request handler") - - srv := &testPutEntity{ - res: req, // force server to return req - } - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = 
&testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, s, p.executor) - require.Equal(t, &putRequest{ - PutRequest: req, - srv: srv, - }, p.request) - }) - }, - err: hErr, // force requestHandler to return hErr - } - - // ascertain that error returns as expected - require.EqualError(t, - s.Put(srv), - hErr.Error(), - ) - }) -} - -func Test_straightObjectStorer_putObject(t *testing.T) { - ctx := context.TODO() - - t.Run("executor error", func(t *testing.T) { - // create custom error for test - exErr := errors.New("test error for operation executor") - - // create custom meta info for test - info := new(testPutEntity) - - s := &straightObjectStorer{ - executor: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct operation executor params", func(t *testing.T) { - require.Equal(t, info, items[0]) - acc := items[1].(*coreAddrAccum) - require.NotNil(t, acc.Once) - }) - }, - err: exErr, - }, - } - - _, err := s.putObject(ctx, info) - - // ascertain that error returns as expected - require.EqualError(t, err, exErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - addr := testObjectAddress(t) - - s := &straightObjectStorer{ - executor: &testPutEntity{ - f: func(items ...interface{}) { - // add address to accumulator - items[1].(addressAccumulator).handleItem(&addr) - }, - }, - } - - res, err := s.putObject(ctx, new(testPutEntity)) - require.NoError(t, err) - - // ascertain that result returns as expected - require.Equal(t, &addr, res) - }) -} - -func Test_recvPutHeaderMsg(t *testing.T) { - t.Run("server error", func(t *testing.T) { - // create custom error for test - srvErr := errors.New("test error for put server") - - srv := &testPutEntity{ - err: srvErr, // force put server to return srvErr - } - - res, err := recvPutHeaderMsg(srv) - - // ascertain that error returns as expected - require.EqualError(t, err, srvErr.Error()) - require.Nil(t, res) - }) - - t.Run("empty message", func(t *testing.T) { - srv := &testPutEntity{ - res: nil, // force put server to return nil, nil - } - - res, err := recvPutHeaderMsg(srv) - - // ascertain that error returns as expected - require.EqualError(t, err, errHeaderExpected.Error()) - require.Nil(t, res) - }) - - t.Run("empty put header in message", func(t *testing.T) { - srv := &testPutEntity{ - res: new(object.PutRequest), // force put server to return message w/o put header - } - - res, err := recvPutHeaderMsg(srv) - - // ascertain that error returns as expected - require.EqualError(t, err, object.ErrHeaderExpected.Error()) - require.Nil(t, res) - }) - - t.Run("empty object in put header", func(t *testing.T) { - srv := &testPutEntity{ - res: object.MakePutRequestHeader(nil), // force put server to return message w/ nil object - } - - res, err := recvPutHeaderMsg(srv) - - // ascertain that error returns as expected - require.EqualError(t, err, errObjectExpected.Error()) - require.Nil(t, res) - }) -} - -func Test_putRequest(t *testing.T) { - t.Run("timeout", func(t *testing.T) { - timeout := 3 * time.Second - - req := &putRequest{ - timeout: timeout, - } - - // ascertain that timeout returns as expected - require.Equal(t, timeout, req.GetTimeout()) - }) - - t.Run("head", func(t *testing.T) { - // create custom object for test - obj := new(Object) - - req := &putRequest{ - PutRequest: object.MakePutRequestHeader(obj), // wrap object to test message - } - - // ascertain that head returns as expected - require.Equal(t, obj, 
req.GetHead()) - }) - - t.Run("payload", func(t *testing.T) { - req := &putRequest{ - srv: new(testPutEntity), - } - - require.Equal(t, &putStreamReader{srv: req.srv}, req.Payload()) - }) - - t.Run("copies number", func(t *testing.T) { - cn := uint32(5) - - req := &putRequest{ - PutRequest: &object.PutRequest{ - R: &object.PutRequest_Header{ - Header: &object.PutRequest_PutHeader{ - CopiesNumber: cn, - }, - }, - }, - } - - require.Equal(t, cn, req.CopiesNumber()) - }) -} - -func Test_coreAddrAccum(t *testing.T) { - t.Run("new", func(t *testing.T) { - s := newAddressAccumulator() - // ascertain that type is correct and Once entity initialize - require.NotNil(t, s.(*coreAddrAccum).Once) - }) - - t.Run("address", func(t *testing.T) { - addr := testObjectAddress(t) - - s := &coreAddrAccum{addr: &addr} - - // ascertain that address returns as expected - require.Equal(t, &addr, s.address()) - }) - - t.Run("handle", func(t *testing.T) { - addr := testObjectAddress(t) - - s := newAddressAccumulator() - - s.handleItem(&addr) - - // ascertain that address saved - require.Equal(t, &addr, s.address()) - - // create another address for test - addr2 := testObjectAddress(t) - - s.handleItem(&addr2) - - // ascertain that second address is ignored - require.Equal(t, &addr, s.address()) - }) -} - -func Test_rawPutInfo(t *testing.T) { - t.Run("TTL", func(t *testing.T) { - ttl := uint32(3) - - s := newRawPutInfo() - s.setTTL(ttl) - - require.Equal(t, ttl, s.GetTTL()) - }) - - t.Run("head", func(t *testing.T) { - obj := new(Object) - - s := newRawPutInfo() - s.setHead(obj) - - require.Equal(t, obj, s.GetHead()) - }) - - t.Run("payload", func(t *testing.T) { - // ascertain that nil chunk returns as expected - r := bytes.NewBuffer(nil) - - req := newRawPutInfo() - req.setPayload(r) - - require.Equal(t, r, req.Payload()) - }) - - t.Run("token", func(t *testing.T) { - // ascertain that nil token returns as expected - require.Nil(t, newRawPutInfo().GetSessionToken()) - }) - - t.Run("copies number", func(t *testing.T) { - cn := uint32(100) - - s := newRawPutInfo() - s.setCopiesNumber(cn) - - require.Equal(t, cn, s.CopiesNumber()) - }) -} - -func Test_contextWithValues(t *testing.T) { - k1, k2 := "key 1", "key2" - v1, v2 := "value 1", "value 2" - - ctx := contextWithValues(context.TODO(), k1, v1, k2, v2) - - // ascertain that all values added - require.Equal(t, v1, ctx.Value(k1)) - require.Equal(t, v2, ctx.Value(k2)) -} - -func Test_bifurcatingObjectStorer(t *testing.T) { - ctx := context.TODO() - - // create custom error for test - sErr := errors.New("test error for object storer") - - t.Run("w/ token", func(t *testing.T) { - // create custom request w/ token - sk := test.DecodeKey(0) - - owner, err := refs.NewOwnerID(&sk.PublicKey) - require.NoError(t, err) - - token := new(service.Token) - token.SetOwnerID(owner) - - req := &putRequest{ - PutRequest: object.MakePutRequestHeader(new(Object)), - } - req.SetToken(token) - require.NoError(t, requestSignFunc(sk, req)) - - s := &bifurcatingObjectStorer{ - tokenStorer: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct token storer params", func(t *testing.T) { - require.Equal(t, req, items[0]) - }) - }, - err: sErr, // force token storer to return sErr - }, - } - - _, err = s.putObject(ctx, req) - require.EqualError(t, err, sErr.Error()) - }) - - t.Run("w/o token", func(t *testing.T) { - // create custom request w/o token - req := newRawPutInfo() - require.Nil(t, req.GetSessionToken()) - - s := &bifurcatingObjectStorer{ - straightStorer: &testPutEntity{ 
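// Test_coreAddrAccum above exercises a write-once accumulator: thanks to sync.Once,
// only the first address handed to handleItem is kept and later ones are ignored.
// A minimal sketch of that idea with hypothetical names (resultAccum is
// illustrative, not the original type):

package accumsketch

import "sync"

// resultAccum keeps only the first value passed to handle; later calls are no-ops.
type resultAccum struct {
	once sync.Once
	val  interface{}
}

func (a *resultAccum) handle(v interface{}) {
	a.once.Do(func() { a.val = v })
}

func (a *resultAccum) value() interface{} { return a.val }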
- f: func(items ...interface{}) { - t.Run("correct token storer params", func(t *testing.T) { - require.Equal(t, req, items[0]) - }) - }, - err: sErr, // force token storer to return sErr - }, - } - - _, err := s.putObject(ctx, req) - require.EqualError(t, err, sErr.Error()) - }) -} - -func TestWithTokenFromOwner(t *testing.T) { - // nil request - require.False(t, withTokenFromOwner(nil)) - - // create test request - req := &putRequest{ - PutRequest: new(object.PutRequest), - } - - // w/o session token - require.Nil(t, req.GetSessionToken()) - require.False(t, withTokenFromOwner(req)) - - // create test session token and add it to request - token := new(service.Token) - req.SetToken(token) - - // w/o signatures - require.False(t, withTokenFromOwner(req)) - - // create test public key - pk := &test.DecodeKey(0).PublicKey - - // add key-signature pair - req.AddSignKey(nil, pk) - - // wrong token owner - require.False(t, withTokenFromOwner(req)) - - // set correct token owner - owner, err := refs.NewOwnerID(pk) - require.NoError(t, err) - - token.SetOwnerID(owner) - - require.True(t, withTokenFromOwner(req)) -} - -func Test_tokenObjectStorer(t *testing.T) { - ctx := context.TODO() - - token := new(service.Token) - token.SetID(session.TokenID{1, 2, 3}) - token.SetSignature(testData(t, 10)) - - // create custom request w/ token and object for test - req := newRawPutInfo() - req.setSessionToken(token) - req.setHead(&Object{ - Payload: testData(t, 10), - }) - - t.Run("token store failure", func(t *testing.T) { - s := &tokenObjectStorer{ - tokenStore: &testPutEntity{ - err: errors.New(""), // force token store to return a non-nil error - }, - } - - _, err := s.putObject(ctx, req) - require.EqualError(t, err, errTokenRetrieval.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - addr := testObjectAddress(t) - - pToken, err := session.NewPrivateToken(0) - require.NoError(t, err) - - s := &tokenObjectStorer{ - tokenStore: &testPutEntity{ - res: pToken, - }, - objStorer: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct object storer params", func(t *testing.T) { - require.Equal(t, req, items[0]) - ctx := items[1].(context.Context) - require.Equal(t, pToken, ctx.Value(transformer.PrivateSessionToken)) - require.Equal(t, token, ctx.Value(transformer.PublicSessionToken)) - }) - }, - res: &addr, - }, - } - - res, err := s.putObject(ctx, req) - require.NoError(t, err) - require.Equal(t, addr, *res) - }) -} - -func Test_filteringObjectStorer(t *testing.T) { - ctx := context.TODO() - - t.Run("filter failure", func(t *testing.T) { - var ( - ttl = uint32(5) - obj = &Object{Payload: testData(t, 10)} - ) - - req := newRawPutInfo() - req.setHead(obj) - req.setTTL(ttl) - - s := &filteringObjectStorer{ - filter: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct filter params", func(t *testing.T) { - require.Equal(t, &Meta{Object: obj}, items[1]) - ctx := items[0].(context.Context) - require.Equal(t, ttl, ctx.Value(ttlValue)) - }) - }, - res: []interface{}{localstore.ResultFail()}, - }, - } - - _, err := s.putObject(ctx, req) - require.EqualError(t, err, errObjectFilter.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - req := newRawPutInfo() - req.setHead(&Object{ - Payload: testData(t, 10), - }) - - addr := testObjectAddress(t) - - s := &filteringObjectStorer{ - filter: &testPutEntity{ - res: []interface{}{localstore.ResultPass()}, - }, - objStorer: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct object storer params", func(t 
*testing.T) { - require.Equal(t, req, items[0]) - }) - }, - res: &addr, - }, - } - - res, err := s.putObject(ctx, req) - require.NoError(t, err) - require.Equal(t, &addr, res) - }) -} - -func Test_receivingObjectStorer(t *testing.T) { - ctx := context.TODO() - - t.Run("cut payload", func(t *testing.T) { - payload := testData(t, 10) - - req := newRawPutInfo() - req.setHead(&Object{ - SystemHeader: SystemHeader{ - PayloadLength: uint64(len(payload)) + 1, - }, - }) - req.setPayload(bytes.NewBuffer(payload)) - - _, err := new(receivingObjectStorer).putObject(ctx, req) - require.EqualError(t, err, transformer.ErrPayloadEOF.Error()) - }) - - t.Run("payload verification failure", func(t *testing.T) { - vErr := errors.New("payload verification error for test") - - req := newRawPutInfo() - req.setHead(&Object{ - Payload: testData(t, 10), - }) - - s := &receivingObjectStorer{ - vPayload: &testPutEntity{ - f: func(items ...interface{}) { - require.Equal(t, req.obj, items[0]) - }, - err: vErr, - }, - } - - _, err := s.putObject(ctx, req) - - require.EqualError(t, err, errPayloadChecksum.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - var ( - cn = uint32(10) - ttl = uint32(5) - timeout = 3 * time.Second - payload = testData(t, 10) - addr = testObjectAddress(t) - ) - - obj := &Object{ - SystemHeader: SystemHeader{ - PayloadLength: uint64(len(payload)), - ID: addr.ObjectID, - CID: addr.CID, - }, - } - - req := newRawPutInfo() - req.setHead(obj) - req.setPayload(bytes.NewBuffer(payload)) - req.setTimeout(timeout) - req.setTTL(ttl) - req.setCopiesNumber(cn) - req.setSessionToken(new(service.Token)) - - s := &receivingObjectStorer{ - straightStorer: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct straight storer params", func(t *testing.T) { - exp := newRawPutInfo() - exp.setHead(obj) - exp.setTimeout(timeout) - exp.setTTL(ttl) - exp.setCopiesNumber(cn) - exp.setSessionToken(req.GetSessionToken()) - - require.Equal(t, exp, items[0]) - }) - }, - res: &addr, - }, - vPayload: new(testPutEntity), - } - - res, err := s.putObject(ctx, req) - require.NoError(t, err) - require.Equal(t, &addr, res) - }) -} - -func Test_transformingObjectStorer(t *testing.T) { - ctx := context.TODO() - - t.Run("correct behavior", func(t *testing.T) { - var ( - tErr = errors.New("test error for transformer") - addr = testObjectAddress(t) - obj = &Object{ - SystemHeader: SystemHeader{ - ID: addr.ObjectID, - CID: addr.CID, - }, - Payload: testData(t, 10), - } - ) - - req := newRawPutInfo() - req.setHead(obj) - req.setPayload(bytes.NewBuffer(obj.Payload)) - req.setTimeout(3 * time.Second) - req.setTTL(5) - req.setCopiesNumber(100) - req.setSessionToken(new(service.Token)) - - tr := &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct transformer params", func(t *testing.T) { - require.Equal(t, transformer.ProcUnit{ - Head: req.obj, - Payload: req.r, - }, items[0]) - fns := items[1].([]transformer.ProcUnitHandler) - require.Len(t, fns, 1) - _ = fns[0](ctx, transformer.ProcUnit{ - Head: req.obj, - Payload: req.r, - }) - }) - }, - } - - s := &transformingObjectStorer{ - transformer: tr, - objStorer: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct object storer params", func(t *testing.T) { - exp := newRawPutInfo() - exp.setHead(req.GetHead()) - exp.setPayload(req.Payload()) - exp.setTimeout(req.GetTimeout()) - exp.setTTL(req.GetTTL()) - exp.setCopiesNumber(req.CopiesNumber()) - exp.setSessionToken(req.GetSessionToken()) - - require.Equal(t, exp, items[0]) - }) - }, - err: 
errors.New(""), - }, - mErr: map[error]struct{}{ - tErr: {}, - }, - } - - res, err := s.putObject(ctx, req) - require.NoError(t, err) - require.Equal(t, &addr, res) - - tr.err = tErr - - _, err = s.putObject(ctx, req) - require.EqualError(t, err, tErr.Error()) - - tr.err = errors.New("some other error") - - _, err = s.putObject(ctx, req) - require.EqualError(t, err, errTransformer.Error()) - - e := &transformerHandlerErr{ - error: errors.New("transformer handler error"), - } - - tr.err = e - - _, err = s.putObject(ctx, req) - require.EqualError(t, err, e.error.Error()) - }) -} - -func Test_putStreamReader(t *testing.T) { - t.Run("empty server", func(t *testing.T) { - s := new(putStreamReader) - n, err := s.Read(make([]byte, 1)) - require.EqualError(t, err, io.EOF.Error()) - require.Zero(t, n) - }) - - t.Run("fail presence", func(t *testing.T) { - initTail := testData(t, 10) - - s := putStreamReader{ - tail: initTail, - srv: new(testPutEntity), - } - - buf := make([]byte, len(s.tail)/2) - - n, err := s.Read(buf) - require.NoError(t, err) - require.Equal(t, len(buf), n) - require.Equal(t, buf, initTail[:n]) - require.Equal(t, initTail[n:], s.tail) - }) - - t.Run("receive message failure", func(t *testing.T) { - t.Run("stream problem", func(t *testing.T) { - srvErr := errors.New("test error for stream server") - - s := &putStreamReader{ - srv: &testPutEntity{ - err: srvErr, - }, - } - - n, err := s.Read(make([]byte, 1)) - require.EqualError(t, err, srvErr.Error()) - require.Zero(t, n) - }) - - t.Run("incorrect chunk", func(t *testing.T) { - t.Run("empty data", func(t *testing.T) { - s := &putStreamReader{ - srv: &testPutEntity{ - res: object.MakePutRequestChunk(make([]byte, 0)), - }, - } - - n, err := s.Read(make([]byte, 1)) - require.EqualError(t, err, errChunkExpected.Error()) - require.Zero(t, n) - }) - - t.Run("wrong message type", func(t *testing.T) { - s := &putStreamReader{ - srv: &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - }, - } - - n, err := s.Read(make([]byte, 1)) - require.EqualError(t, err, errChunkExpected.Error()) - require.Zero(t, n) - }) - }) - }) - - t.Run("correct read", func(t *testing.T) { - chunk := testData(t, 10) - buf := make([]byte, len(chunk)/2) - - s := &putStreamReader{ - srv: &testPutEntity{ - res: object.MakePutRequestChunk(chunk), - }, - } - - n, err := s.Read(buf) - require.NoError(t, err) - require.Equal(t, chunk[:n], buf) - require.Equal(t, chunk[n:], s.tail) - }) - - t.Run("ful read", func(t *testing.T) { - var ( - callNum = 0 - chunk1, chunk2 = testData(t, 100), testData(t, 88) - ) - - srv := new(testPutEntity) - srv.f = func(items ...interface{}) { - if callNum == 0 { - srv.res = object.MakePutRequestChunk(chunk1) - } else if callNum == 1 { - srv.res = object.MakePutRequestChunk(chunk2) - } else { - srv.res, srv.err = 0, io.EOF - } - callNum++ - } - - s := &putStreamReader{ - srv: srv, - } - - var ( - n int - err error - res = make([]byte, 0) - buf = make([]byte, 10) - ) - - for err != io.EOF { - n, err = s.Read(buf) - res = append(res, buf[:n]...) 
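// The surrounding test drains a reader that serves any unread remainder of the
// previously received chunk before asking the stream for the next one. A minimal,
// self-contained sketch of that tail-buffering io.Reader over a generic chunk
// source (chunkSource/chunkReader are hypothetical names):

package streamsketch

import "io"

// chunkSource yields payload chunks, returning io.EOF when the stream ends.
type chunkSource interface {
	Next() ([]byte, error)
}

// chunkReader adapts a chunked stream to io.Reader, buffering leftovers in tail.
type chunkReader struct {
	src  chunkSource
	tail []byte // unread remainder of the previously received chunk
}

func (r *chunkReader) Read(p []byte) (int, error) {
	// Serve buffered bytes first.
	if n := copy(p, r.tail); n > 0 {
		r.tail = r.tail[n:]
		return n, nil
	}
	chunk, err := r.src.Next()
	if err != nil {
		return 0, err
	}
	n := copy(p, chunk)
	r.tail = chunk[n:] // keep what did not fit for the next Read call
	return n, nil
}

var _ io.Reader = (*chunkReader)(nil)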
- } - - require.Equal(t, append(chunk1, chunk2...), res) - }) -} diff --git a/pkg/network/transport/object/grpc/query.go b/pkg/network/transport/object/grpc/query.go deleted file mode 100644 index 12dea3ee2..000000000 --- a/pkg/network/transport/object/grpc/query.go +++ /dev/null @@ -1,234 +0,0 @@ -package object - -import ( - "context" - "fmt" - "regexp" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - queryVersionController struct { - m map[int]localQueryImposer - } - - coreQueryImposer struct { - fCreator filterCreator - lsLister localstore.Iterator - - log *zap.Logger - } - - filterCreator interface { - createFilter(query.Query) Filter - } - - coreFilterCreator struct{} -) - -const ( - queryFilterName = "QUERY_FILTER" - - pmUndefinedFilterType = "undefined filter type %d" -) - -var errUnsupportedQueryVersion = errors.New("unsupported query version number") - -var errSearchQueryUnmarshal = errors.New("query unmarshal failure") - -var errLocalQueryImpose = errors.New("local query imposing failure") - -var ( - _ filterCreator = (*coreFilterCreator)(nil) - _ localQueryImposer = (*queryVersionController)(nil) - _ localQueryImposer = (*coreQueryImposer)(nil) -) - -func (s *queryVersionController) imposeQuery(ctx context.Context, c CID, d []byte, v int) ([]Address, error) { - imp := s.m[v] - if imp == nil { - return nil, errUnsupportedQueryVersion - } - - return imp.imposeQuery(ctx, c, d, v) -} - -func (s *coreQueryImposer) imposeQuery(ctx context.Context, cid CID, qData []byte, _ int) (res []Address, err error) { - defer func() { - switch err { - case nil, errSearchQueryUnmarshal: - default: - s.log.Error("local query imposing failure", - zap.String("error", err.Error()), - ) - - err = errLocalQueryImpose - } - }() - - var q query.Query - - if err = q.Unmarshal(qData); err != nil { - s.log.Error("could not unmarshal search query", - zap.String("error", err.Error()), - ) - - return nil, errSearchQueryUnmarshal - } else if err = mouldQuery(cid, &q); err != nil { - return - } - - err = s.lsLister.Iterate( - s.fCreator.createFilter(q), - func(meta *Meta) (stop bool) { - res = append(res, Address{ - CID: meta.Object.SystemHeader.CID, - ObjectID: meta.Object.SystemHeader.ID, - }) - return - }, - ) - - return res, err -} - -func (s *coreFilterCreator) createFilter(q query.Query) Filter { - f, err := localstore.AllPassIncludingFilter(queryFilterName, &localstore.FilterParams{ - FilterFunc: func(_ context.Context, o *Meta) *localstore.FilterResult { - if !imposeQuery(q, o.Object) { - return localstore.ResultFail() - } - return localstore.ResultPass() - }, - }) - if err != nil { - panic(err) // TODO: test panic occasion - } - - return f -} - -func mouldQuery(cid CID, q *query.Query) error { - var ( - withCID bool - cidStr = cid.String() - ) - - for i := range q.Filters { - if q.Filters[i].Name == KeyCID { - if q.Filters[i].Value != cidStr { - return errInvalidCIDFilter - } - - withCID = true - } - } - - if !withCID { - q.Filters = append(q.Filters, QueryFilter{ - Type: query.Filter_Exact, - Name: KeyCID, - Value: cidStr, - }) - } - - return nil -} - -func imposeQuery(q query.Query, o *Object) bool { - fs := make(map[string]*QueryFilter) - - for i := range q.Filters { - switch q.Filters[i].Name { - case transport.KeyTombstone: - if !o.IsTombstone() { - 
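// mouldQuery above pins a search query to a single container: an existing CID
// filter with a different value is rejected, and a missing CID filter is appended
// as an exact match. A minimal sketch of that rule over simplified types
// (filter/ensureCID/errCIDMismatch are hypothetical, not the neofs-api-go types):

package mouldsketch

import "errors"

type filter struct {
	Name  string
	Value string
}

var errCIDMismatch = errors.New("query CID filter does not match the container")

// ensureCID returns the filters pinned to cid, appending an exact CID filter if
// none is present and rejecting one that names a different container.
func ensureCID(cid string, filters []filter) ([]filter, error) {
	for i := range filters {
		if filters[i].Name == "CID" {
			if filters[i].Value != cid {
				return nil, errCIDMismatch
			}
			return filters, nil // already pinned to the right container
		}
	}
	return append(filters, filter{Name: "CID", Value: cid}), nil
}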
return false - } - default: - fs[q.Filters[i].Name] = &q.Filters[i] - } - } - - if !filterSystemHeader(fs, &o.SystemHeader) { - return false - } - - orphan := true - - for i := range o.Headers { - var key, value string - - switch h := o.Headers[i].Value.(type) { - case *object.Header_Link: - switch h.Link.Type { - case object.Link_Parent: - delete(fs, transport.KeyHasParent) - key = transport.KeyParent - orphan = false - case object.Link_Previous: - key = KeyPrev - case object.Link_Next: - key = KeyNext - case object.Link_Child: - if _, ok := fs[transport.KeyNoChildren]; ok { - return false - } - - key = KeyChild - default: - continue - } - - value = h.Link.ID.String() - case *object.Header_UserHeader: - key, value = h.UserHeader.Key, h.UserHeader.Value - case *object.Header_StorageGroup: - key = transport.KeyStorageGroup - default: - continue - } - - if !applyFilter(fs, key, value) { - return false - } - } - - if _, ok := fs[KeyRootObject]; ok && orphan { // we think that object without parents is a root or user's object - delete(fs, KeyRootObject) - } - - delete(fs, transport.KeyNoChildren) - - return len(fs) == 0 -} - -func filterSystemHeader(fs map[string]*QueryFilter, sysHead *SystemHeader) bool { - return applyFilter(fs, KeyID, sysHead.ID.String()) && - applyFilter(fs, KeyCID, sysHead.CID.String()) && - applyFilter(fs, KeyOwnerID, sysHead.OwnerID.String()) -} - -func applyFilter(fs map[string]*QueryFilter, key, value string) bool { - f := fs[key] - if f == nil { - return true - } - - delete(fs, key) - - switch f.Type { - case query.Filter_Exact: - return value == f.Value - case query.Filter_Regex: - regex, err := regexp.Compile(f.Value) - return err == nil && regex.MatchString(value) - default: - panic(fmt.Sprintf(pmUndefinedFilterType, f.Type)) - } -} diff --git a/pkg/network/transport/object/grpc/query_test.go b/pkg/network/transport/object/grpc/query_test.go deleted file mode 100644 index 8e1a17dda..000000000 --- a/pkg/network/transport/object/grpc/query_test.go +++ /dev/null @@ -1,828 +0,0 @@ -package object - -import ( - "context" - "fmt" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testQueryEntity struct { - // Set of interfaces which testQueryEntity must implement, but some methods from those does not call. - Filter - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
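// applyFilter above uses match-and-consume semantics: the filter for a key is
// removed from the map as soon as it is inspected, and imposeQuery reports a match
// only when the map ends up empty. A minimal sketch of the per-key check
// (exactOrRegex/matchAndConsume are hypothetical names):

package filtersketch

import "regexp"

// exactOrRegex is a simplified stand-in for a query filter value.
type exactOrRegex struct {
	Exact bool   // true: compare literally; false: treat Value as a regular expression
	Value string
}

// matchAndConsume removes the filter for key (if any) and reports whether the
// given value satisfies it; a missing key always passes.
func matchAndConsume(fs map[string]exactOrRegex, key, value string) bool {
	f, ok := fs[key]
	if !ok {
		return true // no constraint on this key
	}
	delete(fs, key) // consumed regardless of the outcome, mirroring applyFilter
	if f.Exact {
		return value == f.Value
	}
	re, err := regexp.Compile(f.Value)
	return err == nil && re.MatchString(value)
}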
- err error - } -) - -var ( - _ filterCreator = (*testQueryEntity)(nil) - _ localQueryImposer = (*testQueryEntity)(nil) -) - -func (s *testQueryEntity) imposeQuery(_ context.Context, c CID, q []byte, v int) ([]Address, error) { - if s.f != nil { - s.f(c, q, v) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]Address), nil -} - -func (s *testQueryEntity) createFilter(p query.Query) Filter { - if s.f != nil { - s.f(p) - } - return s -} - -func (s *testQueryEntity) Iterate(p Filter, h localstore.MetaHandler) error { - if s.f != nil { - s.f(p) - } - if s.err != nil { - return s.err - } - for _, item := range s.res.([]localstore.ListItem) { - h(&item.ObjectMeta) - } - return nil -} - -func Test_queryVersionController_imposeQuery(t *testing.T) { - ctx := context.TODO() - cid := testObjectAddress(t).CID - - t.Run("unsupported version", func(t *testing.T) { - qImp := &queryVersionController{ - m: make(map[int]localQueryImposer), - } - - res, err := qImp.imposeQuery(ctx, cid, nil, 1) - require.EqualError(t, err, errUnsupportedQueryVersion.Error()) - require.Empty(t, res) - }) - - t.Run("correct imposer choose", func(t *testing.T) { - m := make(map[int]localQueryImposer) - qData := testData(t, 10) - - qImp := &queryVersionController{m: m} - - m[0] = &testQueryEntity{ - f: func(items ...interface{}) { - t.Run("correct imposer params", func(t *testing.T) { - require.Equal(t, cid, items[0].(CID)) - require.Equal(t, qData, items[1].([]byte)) - require.Equal(t, 0, items[2].(int)) - }) - }, - err: errors.New(""), // just to prevent panic - } - - _, _ = qImp.imposeQuery(ctx, cid, qData, 0) - }) - - t.Run("correct imposer result", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - m := make(map[int]localQueryImposer) - qImp := &queryVersionController{m: m} - - impErr := errors.New("test error for query imposer") - - m[0] = &testQueryEntity{ - err: impErr, // force localQueryImposer to return impErr - } - - res, err := qImp.imposeQuery(ctx, cid, nil, 0) - - // ascertain that error returns as expected - require.EqualError(t, err, impErr.Error()) - // ascertain that result is empty - require.Empty(t, res) - - // create test address list - addrList := testAddrList(t, 5) - - m[1] = &testQueryEntity{ - res: addrList, // force localQueryImposer to return addrList - } - - res, err = qImp.imposeQuery(ctx, cid, nil, 1) - require.NoError(t, err) - - // ascertain that result returns as expected - require.Equal(t, addrList, res) - }) - }) -} - -func Test_coreQueryImposer_imposeQuery(t *testing.T) { - v := 1 - ctx := context.TODO() - cid := testObjectAddress(t).CID - log := zap.L() - - t.Run("query unmarshal failure", func(t *testing.T) { - var ( - qErr error - data []byte - ) - - // create invalid query binary representation - for { - data = testData(t, 1024) - if qErr = new(query.Query).Unmarshal(data); qErr != nil { - break - } - } - - s := &coreQueryImposer{ - log: zap.L(), - } - - // trying to impose invalid query data - res, err := s.imposeQuery(ctx, cid, data, v) - - // ascertain that reached error exactly like in unmarshal - require.EqualError(t, err, errSearchQueryUnmarshal.Error()) - - // ascertain that empty result returned - require.Nil(t, res) - }) - - t.Run("mould query failure", func(t *testing.T) { - // create testQuery with CID filter with value other than cid - testQuery := &query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String() + "1"}}} - - // try to mould this testQuery - mErr := mouldQuery(cid, testQuery) - - // ascertain that 
testQuery mould failed - require.Error(t, mErr) - - // ascertain that testQuery marshals normally - d, err := testQuery.Marshal() - require.NoError(t, err) - - s := &coreQueryImposer{ - log: log, - } - - // try to impose testQuery - res, err := s.imposeQuery(ctx, cid, d, v) - - // ascertain that impose fails with same error as mould - require.EqualError(t, err, errLocalQueryImpose.Error()) - - // ascertain that result is empty - require.Nil(t, res) - }) - - t.Run("local store listing", func(t *testing.T) { - // create testQuery and object which matches to it - testQuery, obj := testFullObjectWithQuery(t) - - // ascertain testQuery marshals normally - qBytes, err := testQuery.Marshal() - require.NoError(t, err) - - t.Run("listing error", func(t *testing.T) { - // create new error for test - lsErr := errors.New("test error of local store listing") - - // create test query imposer with mocked always failing lister - qImposer := &coreQueryImposer{ - fCreator: new(coreFilterCreator), - lsLister: &testQueryEntity{err: lsErr}, - log: log, - } - - // try to impose testQuery - list, err := qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) - - // ascertain that impose fails same error as lister - require.EqualError(t, err, errLocalQueryImpose.Error()) - - // ascertain that result is empty - require.Empty(t, list) - }) - - t.Run("correct parameter", func(t *testing.T) { - // create new mocked filter creator - fc := new(testQueryEntity) - fc.res = fc - - // create testQuery imposer - qImposer := &coreQueryImposer{ - fCreator: fc, - lsLister: &testQueryEntity{ - f: func(p ...interface{}) { - // intercept lister arguments - // ascertain that argument is as expected - require.Equal(t, fc, p[0].(Filter)) - }, - err: errors.New(""), - }, - log: log, - } - - _, _ = qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) - }) - - t.Run("correct result", func(t *testing.T) { - // create list of random address items - addrList := testAddrList(t, 10) - items := make([]localstore.ListItem, 0, len(addrList)) - for i := range addrList { - items = append(items, localstore.ListItem{ - ObjectMeta: Meta{ - Object: &Object{ - SystemHeader: SystemHeader{ - ID: addrList[i].ObjectID, - CID: addrList[i].CID, - }, - }, - }, - }) - } - - // create imposer with mocked lister - qImposer := &coreQueryImposer{ - fCreator: new(coreFilterCreator), - lsLister: &testQueryEntity{res: items}, - } - - // try to impose testQuery - list, err := qImposer.imposeQuery(ctx, obj.SystemHeader.CID, qBytes, v) - - // ascertain that imposing finished normally - require.NoError(t, err) - - // ascertain that resulting list size as expected - require.Len(t, list, len(addrList)) - - // ascertain that all source items are presented in result - for i := range addrList { - require.Contains(t, list, addrList[i]) - } - }) - }) -} - -func Test_coreFilterCreator_createFilter(t *testing.T) { - ctx := context.TODO() - fCreator := new(coreFilterCreator) - - t.Run("composing correct filter", func(t *testing.T) { - var f Filter - - // ascertain filter creation does not panic - require.NotPanics(t, func() { f = fCreator.createFilter(query.Query{}) }) - - // ascertain that created filter is not empty - require.NotNil(t, f) - - // ascertain that created filter has expected name - require.Equal(t, queryFilterName, f.GetName()) - }) - - t.Run("passage on matching query", func(t *testing.T) { - // create testQuery and object which matches to it - testQuery, obj := testFullObjectWithQuery(t) - - // create filter for testQuery and pass object to it - res := 
fCreator.createFilter(testQuery).Pass(ctx, &Meta{Object: obj}) - - // ascertain that filter is passed - require.Equal(t, localstore.CodePass, res.Code()) - }) - - t.Run("failure on mismatching query", func(t *testing.T) { - testQuery, obj := testFullObjectWithQuery(t) - obj.SystemHeader.ID[0]++ - require.False(t, imposeQuery(testQuery, obj)) - - res := fCreator.createFilter(testQuery).Pass(ctx, &Meta{Object: obj}) - - require.Equal(t, localstore.CodeFail, res.Code()) - }) -} - -func Test_mouldQuery(t *testing.T) { - cid := testObjectAddress(t).CID - - t.Run("invalid CID filter", func(t *testing.T) { - // create query with CID filter with other than cid value - query := &query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String() + "1"}}} - - // try to mould this query for cid - err := mouldQuery(cid, query) - - // ascertain wrong CID value is not allowed - require.EqualError(t, err, errInvalidCIDFilter.Error()) - }) - - t.Run("correct CID filter", func(t *testing.T) { - // create testQuery with CID filter with cid value - cidF := QueryFilter{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()} - testQuery := &query.Query{Filters: []QueryFilter{cidF}} - - // ascertain mould is processed - require.NoError(t, mouldQuery(cid, testQuery)) - - // ascertain filter is still in testQuery - require.Contains(t, testQuery.Filters, cidF) - }) - - t.Run("missing CID filter", func(t *testing.T) { - // create CID filter with cid value - expF := QueryFilter{Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()} - - // create empty testQuery - testQuery := new(query.Query) - - // ascertain mould is processed - require.NoError(t, mouldQuery(cid, testQuery)) - - // ascertain exact CID filter added to testQuery - require.Contains(t, testQuery.Filters, expF) - }) -} - -func Test_applyFilter(t *testing.T) { - k, v := "key", "value" - - t.Run("empty map", func(t *testing.T) { - // ascertain than applyFilter always return true on empty filter map - require.True(t, applyFilter(nil, k, v)) - }) - - t.Run("passage on missing key", func(t *testing.T) { - t.Run("exact", func(t *testing.T) { - require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v + "1"}}, k+"1", v)) - }) - - t.Run("regex", func(t *testing.T) { - require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "1"}}, k+"1", v)) - }) - }) - - t.Run("passage on key presence and matching value", func(t *testing.T) { - t.Run("exact", func(t *testing.T) { - require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v}}, k, v)) - }) - - t.Run("regex", func(t *testing.T) { - require.True(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "|" + v + "1"}}, k, v)) - }) - }) - - t.Run("failure on key presence and mismatching value", func(t *testing.T) { - t.Run("exact", func(t *testing.T) { - require.False(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Exact, Value: v + "1"}}, k, v)) - }) - - t.Run("regex", func(t *testing.T) { - require.False(t, applyFilter(map[string]*QueryFilter{k: {Type: query.Filter_Regex, Value: v + "&" + v + "1"}}, k, v)) - }) - }) - - t.Run("key removes from filter map", func(t *testing.T) { - // create filter map with several elements - m := map[string]*QueryFilter{ - k: {Type: query.Filter_Exact, Value: v}, - k + "1": {Type: query.Filter_Exact, Value: v}, - } - - // save initial len - initLen := len(m) - - // apply filter with key from filter map - 
applyFilter(m, k, v) - - // ascertain exactly key was removed from filter map - require.Len(t, m, initLen-1) - - // ascertain this is exactly applyFilter argument - _, ok := m[k] - require.False(t, ok) - }) - - t.Run("panic on unknown filter type", func(t *testing.T) { - // create filter type other than FilterExact and FilterRegex - fType := query.Filter_Exact + query.Filter_Regex + 1 - require.NotEqual(t, query.Filter_Exact, fType) - require.NotEqual(t, query.Filter_Regex, fType) - - // ascertain applyFilter does not process this type but panic - require.PanicsWithValue(t, - fmt.Sprintf(pmUndefinedFilterType, fType), - func() { applyFilter(map[string]*QueryFilter{k: {Type: fType}}, k, v) }, - ) - }) -} - -func Test_imposeQuery(t *testing.T) { - t.Run("tombstone filter", func(t *testing.T) { - // create testQuery with only tombstone filter - testQuery := query.Query{Filters: []QueryFilter{{Name: transport.KeyTombstone}}} - - // create object which is not a tombstone - obj := new(Object) - - testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { - // adding tombstone header makes object to satisfy tombstone testQuery - obj.Headers = append(obj.Headers, Header{Value: new(object.Header_Tombstone)}) - }) - }) - - t.Run("system header", func(t *testing.T) { - addr := testObjectAddress(t) - cid, oid, ownerID := addr.CID, addr.ObjectID, OwnerID{3} - - // create testQuery with system header filters - testQuery := query.Query{Filters: []QueryFilter{ - {Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()}, - {Type: query.Filter_Exact, Name: KeyID, Value: oid.String()}, - {Type: query.Filter_Exact, Name: KeyOwnerID, Value: ownerID.String()}, - }} - - // fn sets system header fields values to ones from filters - fn := func(t *testing.T, obj *Object) { obj.SystemHeader = SystemHeader{CID: cid, ID: oid, OwnerID: ownerID} } - - // create object with empty system header fields - obj := new(Object) - testQueryMatch(t, testQuery, obj, fn) - - // create object with CID from filters - sysHdr := SystemHeader{CID: cid} - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with OID from filters - sysHdr.CID = CID{} - sysHdr.ID = oid - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with OwnerID from filters - sysHdr.ID = ID{} - sysHdr.OwnerID = ownerID - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with CID and OwnerID from filters - sysHdr.CID = cid - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with OID and OwnerID from filters - sysHdr.CID = CID{} - sysHdr.ID = oid - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with OID and OwnerID from filters - sysHdr.ID = oid - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - - // create object with CID and OID from filters - sysHdr.CID = cid - sysHdr.OwnerID = OwnerID{} - obj = &Object{SystemHeader: sysHdr} - testQueryMatch(t, testQuery, obj, fn) - }) - - t.Run("no children filter", func(t *testing.T) { - // create testQuery with only orphan filter - testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyNoChildren}}} - - // create object with child relation - obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child}}}}} - - testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { - 
// child relation removal makes object to satisfy orphan testQuery - obj.Headers = nil - }) - }) - - t.Run("has parent filter", func(t *testing.T) { - // create testQuery with parent relation filter - testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyHasParent}}} - - // create object w/o parent - obj := new(Object) - - testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { - // adding parent relation makes object to satisfy parent testQuery - obj.Headers = append(obj.Headers, Header{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent}}}) - }) - }) - - t.Run("root object filter", func(t *testing.T) { - // create testQuery with only root filter - testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: KeyRootObject}}} - - // create object with parent relation - obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent}}}}} - - testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { - // parent removal makes object to satisfy root testQuery - obj.Headers = nil - }) - }) - - t.Run("link value filters", func(t *testing.T) { - t.Run("parent", func(t *testing.T) { - testLinkQuery(t, transport.KeyParent, object.Link_Parent) - }) - - t.Run("child", func(t *testing.T) { - testLinkQuery(t, KeyChild, object.Link_Child) - }) - - t.Run("previous", func(t *testing.T) { - testLinkQuery(t, KeyPrev, object.Link_Previous) - }) - - t.Run("next", func(t *testing.T) { - testLinkQuery(t, KeyNext, object.Link_Next) - }) - - t.Run("other", func(t *testing.T) { - // create not usable link type - linkKey := object.Link_Parent + object.Link_Child + object.Link_Next + object.Link_Previous - - // add some usable link to testQuery - par := ID{1, 2, 3} - testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyParent, Value: par.String()}}} - - // ascertain that undefined link type has no affect on testQuery imposing - require.True(t, imposeQuery(testQuery, &Object{ - Headers: []Header{ - {Value: &object.Header_Link{Link: &object.Link{Type: linkKey}}}, - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent, ID: par}}}, - }, - })) - }) - }) - - t.Run("user header filter", func(t *testing.T) { - // user header key-value pair - k, v := "header", "value" - - // query with user header filter - query := query.Query{Filters: []QueryFilter{{ - Type: query.Filter_Exact, - Name: k, - Value: v, - }}} - - // create user header with same key and different value - hdr := &UserHeader{Key: k, Value: v + "1"} - - // create object with this user header - obj := &Object{Headers: []Header{{Value: &object.Header_UserHeader{UserHeader: hdr}}}} - - testQueryMatch(t, query, obj, func(t *testing.T, obj *Object) { - // correcting value to one from filter makes object to satisfy query - hdr.Value = v - }) - }) - - t.Run("storage group filter", func(t *testing.T) { - // create testQuery with only storage group filter - testQuery := query.Query{Filters: []QueryFilter{{Type: query.Filter_Exact, Name: transport.KeyStorageGroup}}} - - // create object w/o storage group header - obj := new(Object) - - testQueryMatch(t, testQuery, obj, func(t *testing.T, obj *Object) { - // adding storage group headers make object to satisfy testQuery - obj.Headers = append(obj.Headers, Header{Value: &object.Header_StorageGroup{StorageGroup: new(storagegroup.StorageGroup)}}) - }) - }) -} - -func Test_filterSystemHeader(t *testing.T) { - var ( - ownerID1, 
ownerID2 = OwnerID{1}, OwnerID{2} - addr1, addr2 = testObjectAddress(t), testObjectAddress(t) - cid1, cid2 = addr1.CID, addr2.CID - oid1, oid2 = addr1.ObjectID, addr2.ObjectID - sysHdr = SystemHeader{ID: oid1, OwnerID: ownerID1, CID: cid1} - ) - require.NotEqual(t, ownerID1, ownerID2) - require.NotEqual(t, cid1, cid2) - require.NotEqual(t, oid1, oid2) - - t.Run("empty filter map", func(t *testing.T) { - // ascertain that any system header satisfies to empty (nil) filter map - require.True(t, filterSystemHeader(nil, &sysHdr)) - }) - - t.Run("missing of some of the fields", func(t *testing.T) { - // create filter map for system header - m := sysHeaderFilterMap(sysHdr) - - // copy system header for initial values saving - h := sysHdr - - // change CID - h.CID = cid2 - - // ascertain filter failure - require.False(t, filterSystemHeader(m, &h)) - - // remove CID from filter map - delete(m, KeyCID) - - // ascertain filter passage - require.True(t, filterSystemHeader(m, &h)) - - m = sysHeaderFilterMap(sysHdr) - h = sysHdr - - // change OwnerID - h.OwnerID = ownerID2 - - // ascertain filter failure - require.False(t, filterSystemHeader(m, &h)) - - // remove OwnerID from filter map - delete(m, KeyOwnerID) - - // ascertain filter passage - require.True(t, filterSystemHeader(m, &h)) - - m = sysHeaderFilterMap(sysHdr) - h = sysHdr - - // change ObjectID - h.ID = oid2 - - // ascertain filter failure - require.False(t, filterSystemHeader(m, &h)) - - // remove ObjectID from filter map - delete(m, KeyID) - - // ascertain filter passage - require.True(t, filterSystemHeader(m, &h)) - }) - - t.Run("valid fields passage", func(t *testing.T) { - require.True(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &sysHdr)) - }) - - t.Run("mismatching values failure", func(t *testing.T) { - h := sysHdr - - // make CID value not matching - h.CID = cid2 - - require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) - - h = sysHdr - - // make ObjectID value not matching - h.ID = oid2 - - require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) - - h = sysHdr - - // make OwnerID value not matching - h.OwnerID = ownerID2 - - require.False(t, filterSystemHeader(sysHeaderFilterMap(sysHdr), &h)) - }) -} - -// testQueryMatch imposes passed query to passed object for tests. -// Passed object should not match to passed query. -// Passed function must mutate object so that becomes query matching. -func testQueryMatch(t *testing.T, q query.Query, obj *Object, fn func(*testing.T, *Object)) { - require.False(t, imposeQuery(q, obj)) - fn(t, obj) - require.True(t, imposeQuery(q, obj)) -} - -// testLinkQuery tests correctness of imposing query with link filters. -// Inits object with value different from one from filter. Then uses testQueryMatch with correcting value func. 
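// testQueryMatch above captures a small "mutate to match" helper pattern: assert
// that the object does not satisfy the query, apply a fix-up, and assert that it
// now does. A generic sketch of the same shape (requireBecomesTrue is a
// hypothetical helper, not part of this repository):

package matchsketch

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// requireBecomesTrue checks that pred is initially false, lets mutate fix the
// state it closes over, and then checks that pred has become true.
func requireBecomesTrue(t *testing.T, pred func() bool, mutate func()) {
	require.False(t, pred())
	mutate()
	require.True(t, pred())
}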
-func testLinkQuery(t *testing.T, key string, lt object.Link_Type) { - // create new relation link - relative, err := refs.NewObjectID() - require.NoError(t, err) - - // create another relation link - wrongRelative := relative - for wrongRelative.Equal(relative) { - wrongRelative, err = refs.NewObjectID() - require.NoError(t, err) - } - - // create query with relation filter - query := query.Query{Filters: []QueryFilter{{ - Type: query.Filter_Exact, - Name: key, - Value: relative.String(), - }}} - - // create link with relation different from one from filter - link := &object.Link{Type: lt, ID: wrongRelative} - // create object with this link - obj := &Object{Headers: []Header{{Value: &object.Header_Link{Link: link}}}} - testQueryMatch(t, query, obj, func(t *testing.T, object *Object) { - // changing link value to one from filter make object to satisfy relation query - link.ID = relative - }) -} - -// sysHeaderFilterMap creates filter map for passed system header. -func sysHeaderFilterMap(hdr SystemHeader) map[string]*QueryFilter { - return map[string]*QueryFilter{ - KeyCID: { - Type: query.Filter_Exact, - Name: KeyCID, - Value: hdr.CID.String(), - }, - KeyOwnerID: { - Type: query.Filter_Exact, - Name: KeyOwnerID, - Value: hdr.OwnerID.String(), - }, - KeyID: { - Type: query.Filter_Exact, - Name: KeyID, - Value: hdr.ID.String(), - }, - } -} - -// testFullObjectWithQuery creates query with set of permissible filters and object matching to this query. -func testFullObjectWithQuery(t *testing.T) (query.Query, *Object) { - addr := testObjectAddress(t) - selfID, cid := addr.ObjectID, addr.CID - - ownerID := OwnerID{} - copy(ownerID[:], testData(t, refs.OwnerIDSize)) - - addrList := testAddrList(t, 4) - - parID, childID, nextID, prevID := addrList[0].ObjectID, addrList[1].ObjectID, addrList[2].ObjectID, addrList[3].ObjectID - - query := query.Query{Filters: []QueryFilter{ - {Type: query.Filter_Exact, Name: transport.KeyParent, Value: parID.String()}, - {Type: query.Filter_Exact, Name: KeyPrev, Value: prevID.String()}, - {Type: query.Filter_Exact, Name: KeyNext, Value: nextID.String()}, - {Type: query.Filter_Exact, Name: KeyChild, Value: childID.String()}, - {Type: query.Filter_Exact, Name: KeyOwnerID, Value: ownerID.String()}, - {Type: query.Filter_Exact, Name: KeyID, Value: selfID.String()}, - {Type: query.Filter_Exact, Name: KeyCID, Value: cid.String()}, - {Type: query.Filter_Exact, Name: transport.KeyStorageGroup}, - {Type: query.Filter_Exact, Name: transport.KeyTombstone}, - {Type: query.Filter_Exact, Name: transport.KeyHasParent}, - }} - - obj := &Object{ - SystemHeader: SystemHeader{ - ID: selfID, - OwnerID: ownerID, - CID: cid, - }, - Headers: []Header{ - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Parent, ID: parID}}}, - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Previous, ID: prevID}}}, - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Next, ID: nextID}}}, - {Value: &object.Header_Link{Link: &object.Link{Type: object.Link_Child, ID: childID}}}, - {Value: &object.Header_StorageGroup{StorageGroup: new(storagegroup.StorageGroup)}}, - {Value: &object.Header_Tombstone{Tombstone: new(object.Tombstone)}}, - }, - } - - require.True(t, imposeQuery(query, obj)) - - return query, obj -} diff --git a/pkg/network/transport/object/grpc/range/range.go b/pkg/network/transport/object/grpc/range/range.go deleted file mode 100644 index 864888ff2..000000000 --- a/pkg/network/transport/object/grpc/range/range.go +++ /dev/null @@ -1,459 +0,0 @@ 
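The range.go file removed below implements payload range chopping: a requested
[offset, offset+length) span of an object is resolved into sub-ranges of the child
objects that hold the payload parts. A minimal, self-contained sketch of that
selection step, under simplified assumptions (part, slice and selectRange are
hypothetical names; the real chopper additionally walks object relations to
discover the parts):

package rangesketch

import "errors"

// part is one contiguous piece of the payload, held by a child object.
type part struct {
	ID   string // identifier of the child object holding this piece
	Size int64
}

// slice is a sub-range of a single part.
type slice struct {
	ID     string
	Offset int64 // offset within the part
	Length int64
}

var errOutOfRange = errors.New("requested range is out of bounds")

// selectRange picks the sub-ranges of contiguous parts covering [offset, offset+length).
func selectRange(parts []part, offset, length int64) ([]slice, error) {
	if offset < 0 || length < 0 {
		return nil, errOutOfRange
	}
	var res []slice
	for _, p := range parts {
		if length == 0 {
			break
		}
		if offset >= p.Size {
			offset -= p.Size // skip whole parts before the range starts
			continue
		}
		take := p.Size - offset
		if take > length {
			take = length
		}
		res = append(res, slice{ID: p.ID, Offset: offset, Length: take})
		offset = 0
		length -= take
	}
	if length > 0 {
		return nil, errOutOfRange // the parts did not cover the requested range
	}
	return res, nil
}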
-package _range - -import ( - "context" - "io" - "sync" - - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/pkg/errors" -) - -type ( - // Address is a type alias of - // Address from refs package of neofs-api-go. - Address = refs.Address - - // ChopperTable is an interface of RangeChopper storage. - ChopperTable interface { - PutChopper(addr Address, chopper RangeChopper) error - GetChopper(addr Address, rc RCType) (RangeChopper, error) - } - - // RangeChopper is an interface of the chooper of object payload range. - RangeChopper interface { - GetType() RCType - GetAddress() Address - Closed() bool - Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) - } - - // RelativeReceiver is an interface of object relations controller. - RelativeReceiver interface { - Base(ctx context.Context, addr Address) (RangeDescriptor, error) - Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) - } - - // ChildLister is an interface of object children info storage. - ChildLister interface { - List(ctx context.Context, parent Address) ([]RangeDescriptor, error) - } - - // RangeDescriptor groups the information about object payload range. - RangeDescriptor struct { - Size int64 - Offset int64 - Addr Address - - LeftBound bool - RightBound bool - } - - chopCache struct { - rangeList []RangeDescriptor - } - - chopper struct { - *sync.RWMutex - ct RCType - addr Address - nr RelativeReceiver - cacheOffset int64 - cache *chopCache - } - - // ChopperParams groups the parameters of Scylla chopper. - ChopperParams struct { - RelativeReceiver RelativeReceiver - Addr Address - } - - charybdis struct { - skr *chopper - cl ChildLister - } - - // CharybdisParams groups the parameters of Charybdis chopper. - CharybdisParams struct { - Addr Address - ChildLister ChildLister - - ReadySelection []RangeDescriptor - } - - // RCType is an enumeration of object payload range chopper types. - RCType int - - chopperTable struct { - *sync.RWMutex - items map[RCType]map[string]RangeChopper - } -) - -const ( - // RCScylla is an RCType of payload range post-pouncing chopper. - RCScylla RCType = iota - - // RCCharybdis is an RCType of payload range pre-pouncing chopper. - RCCharybdis -) - -var errNilRelativeReceiver = errors.New("relative receiver is nil") - -var errEmptyObjectID = errors.New("object ID is empty") - -var errNilChildLister = errors.New("child lister is nil") - -var errNotFound = errors.New("object range chopper not found") - -var errInvalidBound = errors.New("invalid payload bounds") - -// NewChopperTable is a RangeChopper storage constructor. -func NewChopperTable() ChopperTable { - return &chopperTable{ - new(sync.RWMutex), - make(map[RCType]map[string]RangeChopper), - } -} - -// NewScylla constructs object payload range chopper that collects parts of a range on the go. -func NewScylla(p *ChopperParams) (RangeChopper, error) { - if p.RelativeReceiver == nil { - return nil, errNilRelativeReceiver - } - - if p.Addr.ObjectID.Empty() { - return nil, errEmptyObjectID - } - - return &chopper{ - RWMutex: new(sync.RWMutex), - ct: RCScylla, - nr: p.RelativeReceiver, - addr: p.Addr, - cache: &chopCache{ - rangeList: make([]RangeDescriptor, 0), - }, - }, nil -} - -// NewCharybdis constructs object payload range that pre-collects all parts of the range. 
-func NewCharybdis(p *CharybdisParams) (RangeChopper, error) { - if p.ChildLister == nil && len(p.ReadySelection) == 0 { - return nil, errNilChildLister - } - - if p.Addr.ObjectID.Empty() { - return nil, errEmptyObjectID - } - - cache := new(chopCache) - - if len(p.ReadySelection) > 0 { - cache.rangeList = p.ReadySelection - } - - return &charybdis{ - skr: &chopper{ - RWMutex: new(sync.RWMutex), - ct: RCCharybdis, - addr: p.Addr, - cache: cache, - }, - cl: p.ChildLister, - }, nil -} - -func (ct *chopperTable) PutChopper(addr Address, chopper RangeChopper) error { - ct.Lock() - defer ct.Unlock() - - sAddr := addr.String() - chopperType := chopper.GetType() - - m, ok := ct.items[chopperType] - if !ok { - m = make(map[string]RangeChopper) - } - - if _, ok := m[sAddr]; !ok { - m[sAddr] = chopper - } - - ct.items[chopperType] = m - - return nil -} - -func (ct *chopperTable) GetChopper(addr Address, rc RCType) (RangeChopper, error) { - ct.Lock() - defer ct.Unlock() - - choppers, ok := ct.items[rc] - if !ok { - return nil, errNotFound - } - - chp, ok := choppers[addr.String()] - if !ok { - return nil, errNotFound - } - - return chp, nil -} - -func (c charybdis) GetAddress() Address { - return c.skr.addr -} - -func (c charybdis) GetType() RCType { - return c.skr.ct -} - -func (c charybdis) Closed() bool { - return len(c.skr.cache.rangeList) > 0 -} - -func (c *charybdis) devour(ctx context.Context) error { - if len(c.skr.cache.rangeList) == 0 { - rngs, err := c.cl.List(ctx, c.skr.addr) - if err != nil { - return errors.Wrap(err, "charybdis.pounce faild on children list") - } - - if ln := len(rngs); ln > 0 { - rngs[0].LeftBound = true - rngs[ln-1].RightBound = true - } - - c.skr.cache.rangeList = rngs - } - - return nil -} - -func (c *charybdis) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { - if err := c.devour(ctx); err != nil { - return nil, errors.Wrap(err, "charybdis.Chop failed on devour") - } - - return c.skr.Chop(ctx, length, offset, fromStart) -} - -func (sc *chopCache) Size() (res int64) { - for i := range sc.rangeList { - res += sc.rangeList[i].Size - } - - return -} - -func (sc *chopCache) touchStart() bool { - return len(sc.rangeList) > 0 && sc.rangeList[0].LeftBound -} - -func (sc *chopCache) touchEnd() bool { - ln := len(sc.rangeList) - - return ln > 0 && sc.rangeList[ln-1].RightBound -} - -func min(a, b int64) int64 { - if a < b { - return a - } - - return b -} - -func (sc *chopCache) Chop(offset, size int64) ([]RangeDescriptor, error) { - if offset*size < 0 { - return nil, errInvalidBound - } - - if offset+size > sc.Size() { - return nil, localstore.ErrOutOfRange - } - - var ( - off int64 - res = make([]RangeDescriptor, 0) - ind int - firstOffset int64 - ) - - for i := range sc.rangeList { - diff := offset - off - if diff > sc.rangeList[i].Size { - off += sc.rangeList[i].Size - continue - } else if diff < sc.rangeList[i].Size { - ind = i - firstOffset = diff - break - } - - ind = i + 1 - - break - } - - var ( - r RangeDescriptor - num int64 - ) - - for i := ind; num < size; i++ { - cut := min(size-num, sc.rangeList[i].Size-firstOffset) - r = RangeDescriptor{ - Size: cut, - Addr: sc.rangeList[i].Addr, - - LeftBound: sc.rangeList[i].LeftBound, - RightBound: sc.rangeList[i].RightBound, - } - - if i == ind { - r.Offset = firstOffset - firstOffset = 0 - } - - if cut == size-num { - r.Size = cut - } - - res = append(res, r) - - num += cut - } - - return res, nil -} - -func (c *chopper) GetAddress() Address { - return c.addr -} - -func (c 
*chopper) GetType() RCType { - return c.ct -} - -func (c *chopper) Closed() bool { - return c.cache.touchStart() && c.cache.touchEnd() -} - -func (c *chopper) pounce(ctx context.Context, off int64, set bool) error { - if len(c.cache.rangeList) == 0 { - child, err := c.nr.Base(ctx, c.addr) - if err != nil { - return errors.Wrap(err, "chopper.pounce failed on cache init") - } - - c.cache.rangeList = []RangeDescriptor{child} - } - - oldOff := c.cacheOffset - - defer func() { - if !set { - c.cacheOffset = oldOff - } - }() - - var ( - cacheSize = c.cache.Size() - v = c.cacheOffset + off - ) - - switch { - case v >= 0 && v <= cacheSize: - c.cacheOffset = v - return nil - case v < 0 && c.cache.touchStart(): - c.cacheOffset = 0 - return io.EOF - case v > cacheSize && c.cache.touchEnd(): - c.cacheOffset = cacheSize - return io.EOF - } - - var ( - alloc, written int64 - toLeft = v < 0 - procAddr Address - fPush = func(r RangeDescriptor) { - if toLeft { - c.cache.rangeList = append([]RangeDescriptor{r}, c.cache.rangeList...) - return - } - c.cache.rangeList = append(c.cache.rangeList, r) - } - ) - - if toLeft { - alloc = -v - procAddr = c.cache.rangeList[0].Addr - c.cacheOffset -= cacheSize - } else { - alloc = v - cacheSize - procAddr = c.cache.rangeList[len(c.cache.rangeList)-1].Addr - c.cacheOffset += cacheSize - } - - for written < alloc { - rng, err := c.nr.Neighbor(ctx, procAddr, toLeft) - if err != nil { - return errors.Wrap(err, "chopper.pounce failed on get neighbor") - } - - if diff := alloc - written; diff < rng.Size { - if toLeft { - rng.Offset = rng.Size - diff - } - - c.cacheOffset += diff - - fPush(rng) - - break - } - - c.cacheOffset += rng.Size - fPush(rng) - - written += rng.Size - - if written < alloc && - (rng.LeftBound && toLeft || rng.RightBound && !toLeft) { - return localstore.ErrOutOfRange - } - - procAddr = rng.Addr - } - - return nil -} - -func (c *chopper) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { - c.Lock() - defer c.Unlock() - - if fromStart { - if err := c.pounce(ctx, -(1 << 63), true); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce to start") - } - } - - if err := c.pounce(ctx, offset, true); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce with set") - } - - if c.cache.Size()-c.cacheOffset < length { - if err := c.pounce(ctx, length, false); err != nil && err != io.EOF { - return nil, errors.Wrap(err, "chopper.Chop failed on chopper.pounce") - } - } - - return c.cache.Chop(c.cacheOffset, length) -} diff --git a/pkg/network/transport/object/grpc/range/range_test.go b/pkg/network/transport/object/grpc/range/range_test.go deleted file mode 100644 index 82ec3d32f..000000000 --- a/pkg/network/transport/object/grpc/range/range_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package _range - -import ( - "context" - "crypto/rand" - "io" - "sync" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - addressSet struct { - *sync.RWMutex - items []RangeDescriptor - data [][]byte - } - - testReader struct { - pr object.PositionReader - ct ChopperTable - } -) - -func (r testReader) Read(ctx context.Context, rd RangeDescriptor, rc RCType) ([]byte, error) { - chopper, err := r.ct.GetChopper(rd.Addr, rc) - if err != nil { - return nil, errors.Wrap(err, "testReader.Read failed on get range chopper") - } - - rngs, 
err := chopper.Chop(ctx, rd.Size, rd.Offset, true) - if err != nil { - return nil, errors.Wrap(err, "testReader.Read failed on chopper.Chop") - } - - var sz int64 - for i := range rngs { - sz += rngs[i].Size - } - - res := make([]byte, 0, sz) - - for i := range rngs { - data, err := r.pr.PRead(ctx, rngs[i].Addr, object.Range{ - Offset: uint64(rngs[i].Offset), - Length: uint64(rngs[i].Size), - }) - if err != nil { - return nil, errors.Wrapf(err, "testReader.Read failed on PRead of range #%d", i) - } - - res = append(res, data...) - } - - return res, nil -} - -func (as addressSet) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) { - as.RLock() - defer as.RUnlock() - - for i := range as.items { - if as.items[i].Addr.CID.Equal(addr.CID) && as.items[i].Addr.ObjectID.Equal(addr.ObjectID) { - return as.data[i][rng.Offset : rng.Offset+rng.Length], nil - } - } - - return nil, errors.New("pread failed") -} - -func (as addressSet) List(ctx context.Context, parent Address) ([]RangeDescriptor, error) { - return as.items, nil -} - -func (as addressSet) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { - return as.items[0], nil -} - -func (as addressSet) Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) { - as.Lock() - defer as.Unlock() - - ind := -1 - for i := range as.items { - if as.items[i].Addr.CID.Equal(addr.CID) && as.items[i].Addr.ObjectID.Equal(addr.ObjectID) { - ind = i - break - } - } - - if ind == -1 { - return RangeDescriptor{}, errors.New("range not found") - } - - if left { - if ind > 0 { - ind-- - } else { - return RangeDescriptor{}, io.EOF - } - } else { - if ind < len(as.items)-1 { - ind++ - } else { - return RangeDescriptor{}, io.EOF - } - } - - return as.items[ind], nil -} - -func newTestNeighbor(rngs []RangeDescriptor, data [][]byte) *addressSet { - return &addressSet{ - RWMutex: new(sync.RWMutex), - items: rngs, - data: data, - } -} - -func rangeSize(rngs []RangeDescriptor) (res int64) { - for i := range rngs { - res += rngs[i].Size - } - return -} - -func TestScylla(t *testing.T) { - var ( - cid = [refs.CIDSize]byte{1} - rngs = make([]RangeDescriptor, 0, 10) - pieceSize int64 = 100 - pieceCount int64 = 99 - fullSize = pieceCount * pieceSize - ) - - for i := int64(0); i < pieceCount; i++ { - oid, err := refs.NewObjectID() - require.NoError(t, err) - - rngs = append(rngs, RangeDescriptor{ - Size: pieceSize, - Offset: 0, - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - LeftBound: i == 0, - RightBound: i == pieceCount-1, - }) - } - - oid, err := refs.NewObjectID() - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - t.Run("Zero values in scylla notch/chop", func(t *testing.T) { - scylla, err := NewScylla(&ChopperParams{ - RelativeReceiver: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - res, err := scylla.Chop(ctx, 0, 0, true) - require.NoError(t, err) - require.Len(t, res, 0) - }) - - t.Run("Common scylla operations in both directions", func(t *testing.T) { - var ( - off = fullSize / 2 - length = fullSize / 4 - ) - - scylla, err := NewScylla(&ChopperParams{ - RelativeReceiver: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - choppedCount := int((length-1)/pieceSize + 1) - - if pieceCount > 1 && off%pieceSize > 0 { - choppedCount++ - } - - res, err := scylla.Chop(ctx, fullSize, 0, true) - require.NoError(t, err) - 
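// choppedCount above estimates how many child objects a chopped range should
// produce. For equal-size children the exact count is
// ceil((offset%pieceSize + length) / pieceSize); the expression used in these
// tests (ceil(length/pieceSize), plus one for an unaligned start) agrees with
// it for the sizes chosen here. A standalone sanity check of that arithmetic
// (expectedPieces is a hypothetical helper, not part of the package):
package main

import "fmt"

func expectedPieces(pieceSize, off, length int64) int64 {
	if length == 0 {
		return 0
	}
	rem := off % pieceSize // where the range starts inside its first piece
	return (rem + length + pieceSize - 1) / pieceSize
}

func main() {
	fmt.Println(expectedPieces(100, 0, 9900))    // 99: the whole test object
	fmt.Println(expectedPieces(100, 4950, 2475)) // 26: matches choppedCount in the test
	fmt.Println(expectedPieces(100, 200, 100))   // 1: exactly one child
}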
require.Len(t, res, int(pieceCount)) - require.Equal(t, rangeSize(res), fullSize) - require.Equal(t, res, rngs) - - res, err = scylla.Chop(ctx, length, off, true) - require.NoError(t, err) - require.Len(t, res, choppedCount) - - for i := int64(0); i < int64(choppedCount); i++ { - require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/2+i].Addr.ObjectID) - } - - require.Equal(t, rangeSize(res), length) - - res, err = scylla.Chop(ctx, length, -length, false) - require.NoError(t, err) - require.Len(t, res, choppedCount) - - for i := int64(0); i < int64(choppedCount); i++ { - require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/4+i].Addr.ObjectID) - } - - require.Equal(t, rangeSize(res), length) - }) - - t.Run("Border scylla Chop", func(t *testing.T) { - var ( - err error - res []RangeDescriptor - ) - - scylla, err := NewScylla(&ChopperParams{ - RelativeReceiver: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - res, err = scylla.Chop(ctx, fullSize, 0, false) - require.NoError(t, err) - require.Equal(t, res, rngs) - - res, err = scylla.Chop(ctx, fullSize, -100, false) - require.NoError(t, err) - require.Equal(t, res, rngs) - - res, err = scylla.Chop(ctx, fullSize, 1, false) - require.Error(t, err) - - res, err = scylla.Chop(ctx, fullSize, -fullSize, false) - require.NoError(t, err) - require.Equal(t, rangeSize(res), fullSize) - }) -} - -func TestCharybdis(t *testing.T) { - var ( - cid = [refs.CIDSize]byte{1} - rngs = make([]RangeDescriptor, 0, 10) - pieceSize int64 = 100 - pieceCount int64 = 99 - fullSize = pieceCount * pieceSize - data = make([]byte, fullSize) - dataChunks = make([][]byte, 0, pieceCount) - ) - - _, err := rand.Read(data) - require.NoError(t, err) - - for i := int64(0); i < pieceCount; i++ { - oid, err := refs.NewObjectID() - require.NoError(t, err) - - dataChunks = append(dataChunks, data[i*pieceSize:(i+1)*pieceSize]) - - rngs = append(rngs, RangeDescriptor{ - Size: pieceSize, - Offset: 0, - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - } - - oid, err := refs.NewObjectID() - require.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - t.Run("Zero values in scylla notch/chop", func(t *testing.T) { - charybdis, err := NewCharybdis(&CharybdisParams{ - ChildLister: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - res, err := charybdis.Chop(ctx, 0, 0, false) - require.NoError(t, err) - require.Len(t, res, 0) - }) - - t.Run("Common charybdis operations in both directions", func(t *testing.T) { - var ( - off = fullSize / 2 - length = fullSize / 4 - ) - - charybdis, err := NewCharybdis(&CharybdisParams{ - ChildLister: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - choppedCount := int((length-1)/pieceSize + 1) - - if pieceCount > 1 && off%pieceSize > 0 { - choppedCount++ - } - - res, err := charybdis.Chop(ctx, fullSize, 0, false) - require.NoError(t, err) - require.Len(t, res, int(pieceCount)) - require.Equal(t, rangeSize(res), fullSize) - require.Equal(t, res, rngs) - - res, err = charybdis.Chop(ctx, length, off, false) - require.NoError(t, err) - require.Len(t, res, choppedCount) - - for i := int64(0); i < int64(choppedCount); i++ { - require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/2+i].Addr.ObjectID) - } - - require.Equal(t, rangeSize(res), length) - - res, err = charybdis.Chop(ctx, length, -length, false) - require.NoError(t, 
err) - require.Len(t, res, choppedCount) - - for i := int64(0); i < int64(choppedCount); i++ { - require.Equal(t, res[i].Addr.ObjectID, rngs[pieceCount/4+i].Addr.ObjectID) - } - - require.Equal(t, rangeSize(res), length) - }) - - t.Run("Border charybdis Chop", func(t *testing.T) { - var ( - err error - res []RangeDescriptor - ) - - charybdis, err := NewCharybdis(&CharybdisParams{ - ChildLister: newTestNeighbor(rngs, nil), - Addr: Address{ - ObjectID: oid, - CID: cid, - }, - }) - require.NoError(t, err) - - res, err = charybdis.Chop(ctx, fullSize, 0, false) - require.NoError(t, err) - require.Equal(t, res, rngs) - - res, err = charybdis.Chop(ctx, fullSize, -100, false) - require.NoError(t, err) - require.Equal(t, res, rngs) - - res, err = charybdis.Chop(ctx, fullSize, 1, false) - require.Error(t, err) - - res, err = charybdis.Chop(ctx, fullSize, -fullSize, false) - require.NoError(t, err) - require.Equal(t, rangeSize(res), fullSize) - }) -} diff --git a/pkg/network/transport/object/grpc/ranges.go b/pkg/network/transport/object/grpc/ranges.go deleted file mode 100644 index 68196dfea..000000000 --- a/pkg/network/transport/object/grpc/ranges.go +++ /dev/null @@ -1,467 +0,0 @@ -package object - -import ( - "context" - "io" - "sync" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - _range "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc/range" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // Range is a type alias of - // Range from object package of neofs-api-go. - Range = object.Range - - // RangeDescriptor is a type alias of - // RangeDescriptor from objio package. - RangeDescriptor = _range.RangeDescriptor - - // RangeChopper is a type alias of - // RangeChopper from objio package. - RangeChopper = _range.RangeChopper - - // GetRangeRequest is a type alias of - // GetRangeRequest from object package of neofs-api-go. - GetRangeRequest = object.GetRangeRequest - - // GetRangeResponse is a type alias of - // GetRangeResponse from object package of neofs-api-go. - GetRangeResponse = object.GetRangeResponse - - // GetRangeHashRequest is a type alias of - // GetRangeResponse from object package of neofs-api-go. - GetRangeHashRequest = object.GetRangeHashRequest - - // GetRangeHashResponse is a type alias of - // GetRangeHashResponse from object package of neofs-api-go. 
- GetRangeHashResponse = object.GetRangeHashResponse - - objectRangeReceiver interface { - getRange(context.Context, rangeTool) (interface{}, error) - } - - rangeTool interface { - transport.RangeHashInfo - budOff(*RangeDescriptor) rangeTool - handler() rangeItemAccumulator - } - - rawRangeInfo struct { - *rawAddrInfo - rng Range - } - - rawRangeHashInfo struct { - *rawAddrInfo - rngList []Range - salt []byte - } - - coreRangeReceiver struct { - rngRevealer rangeRevealer - straightRngRecv objectRangeReceiver - - // Set of errors that won't be converted into errPayloadRangeNotFound - mErr map[error]struct{} - - log *zap.Logger - } - - straightRangeReceiver struct { - executor operationExecutor - } - - singleItemHandler struct { - *sync.Once - item interface{} - } - - rangeItemAccumulator interface { - responseItemHandler - collect() (interface{}, error) - } - - rangeHashAccum struct { - concat bool - h []Hash - } - - rangeRevealer interface { - reveal(context.Context, *RangeDescriptor) ([]RangeDescriptor, error) - } - - coreRngRevealer struct { - relativeRecv _range.RelativeReceiver - chopTable _range.ChopperTable - } - - getRangeServerWriter struct { - req *GetRangeRequest - - srv object.Service_GetRangeServer - - respPreparer responsePreparer - } -) - -const ( - emGetRangeFail = "could get object range #%d part #%d" - emRangeRevealFail = "could not reveal object range #%d" - emRangeCollect = "could not collect result of object range #%d" -) - -var errRangeReveal = errors.New("could not reveal payload range") - -func (s *objectService) GetRange(req *GetRangeRequest, srv object.Service_GetRangeServer) (err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestRange), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestRange, - e: err, - }) - }() - - var r interface{} - - if r, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ - request: req, - executor: s, - }); err == nil { - _, err = io.CopyBuffer( - &getRangeServerWriter{ - req: req, - srv: srv, - respPreparer: s.rangeChunkPreparer, - }, - r.(io.Reader), - make([]byte, maxGetPayloadSize), - ) - } - - return err -} - -func (s *objectService) GetRangeHash(ctx context.Context, req *GetRangeHashRequest) (res *GetRangeHashResponse, err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestRangeHash), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestRangeHash, - e: err, - }) - }() - - var r interface{} - - if r, err = s.requestHandler.handleRequest(ctx, handleRequestParams{ - request: req, - executor: s, - }); err != nil { - return - } - - res = makeRangeHashResponse(r.([]Hash)) - err = s.respPreparer.prepareResponse(ctx, req, res) - - return -} - -func (s *coreRangeReceiver) getRange(ctx context.Context, rt rangeTool) (res interface{}, err error) { - defer func() { - if err != nil { - if _, ok := s.mErr[errors.Cause(err)]; !ok { - s.log.Error("get range failure", - zap.String("error", err.Error()), - ) - - err = errPayloadRangeNotFound - } - } - }() - - var ( - subRngSet []RangeDescriptor - rngSet = rt.GetRanges() - addr = rt.GetAddress() - handler = rt.handler() - ) - - for i := range rngSet { - rd := RangeDescriptor{ - Size: int64(rngSet[i].Length), - Offset: int64(rngSet[i].Offset), - Addr: addr, - } - - if rt.GetTTL() < 
service.NonForwardingTTL { - subRngSet = []RangeDescriptor{rd} - } else if subRngSet, err = s.rngRevealer.reveal(ctx, &rd); err != nil { - return nil, errors.Wrapf(err, emRangeRevealFail, i+1) - } else if len(subRngSet) == 0 { - return nil, errRangeReveal - } - - subRangeTool := rt.budOff(&rd) - subHandler := subRangeTool.handler() - - for j := range subRngSet { - tool := subRangeTool.budOff(&subRngSet[j]) - - if subRngSet[j].Addr.Equal(&addr) { - res, err = s.straightRngRecv.getRange(ctx, tool) - } else { - res, err = s.getRange(ctx, tool) - } - - if err != nil { - return nil, errors.Wrapf(err, emGetRangeFail, i+1, j+1) - } - - subHandler.handleItem(res) - } - - rngRes, err := subHandler.collect() - if err != nil { - return nil, errors.Wrapf(err, emRangeCollect, i+1) - } - - handler.handleItem(rngRes) - } - - return handler.collect() -} - -func (s *straightRangeReceiver) getRange(ctx context.Context, rt rangeTool) (interface{}, error) { - handler := newSingleItemHandler() - if err := s.executor.executeOperation(ctx, rt, handler); err != nil { - return nil, err - } - - return handler.collect() -} - -func (s *coreRngRevealer) reveal(ctx context.Context, r *RangeDescriptor) ([]RangeDescriptor, error) { - chopper, err := s.getChopper(r.Addr) - if err != nil { - return nil, err - } - - return chopper.Chop(ctx, r.Size, r.Offset, true) -} - -func (s *coreRngRevealer) getChopper(addr Address) (res RangeChopper, err error) { - if res, err = s.chopTable.GetChopper(addr, _range.RCCharybdis); err == nil && res.Closed() { - return - } else if res, err = s.chopTable.GetChopper(addr, _range.RCScylla); err == nil { - return - } else if res, err = _range.NewScylla(&_range.ChopperParams{ - RelativeReceiver: s.relativeRecv, - Addr: addr, - }); err != nil { - return nil, err - } - - _ = s.chopTable.PutChopper(addr, res) - - return -} - -func loopData(data []byte, size, off int64) []byte { - if len(data) == 0 { - return make([]byte, 0) - } - - res := make([]byte, 0, size) - - var ( - cut int64 - tail = data[off%int64(len(data)):] - ) - - for added := int64(0); added < size; added += cut { - cut = min(int64(len(tail)), size-added) - res = append(res, tail[:cut]...) - tail = data - } - - return res -} - -func min(a, b int64) int64 { - if a < b { - return a - } - - return b -} - -func newSingleItemHandler() rangeItemAccumulator { return &singleItemHandler{Once: new(sync.Once)} } - -func (s *singleItemHandler) handleItem(item interface{}) { s.Do(func() { s.item = item }) } - -func (s *singleItemHandler) collect() (interface{}, error) { return s.item, nil } - -func (s *rangeHashAccum) handleItem(h interface{}) { - if v, ok := h.(Hash); ok { - s.h = append(s.h, v) - return - } - - s.h = append(s.h, h.([]Hash)...) 
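// loopData above treats its input as a ring buffer: it returns `size` bytes of
// `data` read cyclically starting at `off`. rawRangeHashInfo.budOff (below)
// uses it to shift the request salt to a sub-range's offset. A reduced
// standalone sketch of the same idea (cyclicSlice is a hypothetical name and
// uses modular indexing rather than loopData's tail-copy loop):
package main

import "fmt"

func cyclicSlice(data []byte, n, off int64) []byte {
	if len(data) == 0 {
		return nil
	}
	res := make([]byte, 0, n)
	for i := int64(0); i < n; i++ {
		res = append(res, data[(off+i)%int64(len(data))])
	}
	return res
}

func main() {
	salt := []byte("0123456789")
	fmt.Println(string(cyclicSlice(salt, 4, 3)))  // "3456"
	fmt.Println(string(cyclicSlice(salt, 12, 8))) // "890123456789": wraps past the end
}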
-} - -func (s *rangeHashAccum) collect() (interface{}, error) { - if s.concat { - return hash.Concat(s.h) - } - - return s.h, nil -} - -func (s *rawRangeHashInfo) GetRanges() []Range { - return s.rngList -} - -func (s *rawRangeHashInfo) setRanges(v []Range) { - s.rngList = v -} - -func (s *rawRangeHashInfo) GetSalt() []byte { - return s.salt -} - -func (s *rawRangeHashInfo) setSalt(v []byte) { - s.salt = v -} - -func (s *rawRangeHashInfo) getAddrInfo() *rawAddrInfo { - return s.rawAddrInfo -} - -func (s *rawRangeHashInfo) setAddrInfo(v *rawAddrInfo) { - s.rawAddrInfo = v - s.setType(object.RequestRangeHash) -} - -func newRawRangeHashInfo() *rawRangeHashInfo { - res := new(rawRangeHashInfo) - - res.setAddrInfo(newRawAddressInfo()) - - return res -} - -func (s *rawRangeHashInfo) budOff(r *RangeDescriptor) rangeTool { - res := newRawRangeHashInfo() - - res.setMetaInfo(s.getMetaInfo()) - res.setAddress(r.Addr) - res.setRanges([]Range{ - { - Offset: uint64(r.Offset), - Length: uint64(r.Size), - }, - }) - res.setSalt(loopData(s.salt, int64(len(s.salt)), r.Offset)) - res.setSessionToken(s.GetSessionToken()) - res.setBearerToken(s.GetBearerToken()) - res.setExtendedHeaders(s.ExtendedHeaders()) - - return res -} - -func (s *rawRangeHashInfo) handler() rangeItemAccumulator { return &rangeHashAccum{concat: true} } - -func (s *transportRequest) GetRanges() []Range { - return s.serviceRequest.(*object.GetRangeHashRequest).Ranges -} - -func (s *transportRequest) GetSalt() []byte { - return s.serviceRequest.(*object.GetRangeHashRequest).Salt -} - -func (s *transportRequest) budOff(rd *RangeDescriptor) rangeTool { - res := newRawRangeHashInfo() - - res.setTTL(s.GetTTL()) - res.setTimeout(s.GetTimeout()) - res.setAddress(rd.Addr) - res.setRanges([]Range{ - { - Offset: uint64(rd.Offset), - Length: uint64(rd.Size), - }, - }) - res.setSalt(s.serviceRequest.(*object.GetRangeHashRequest).GetSalt()) - res.setSessionToken(s.GetSessionToken()) - res.setBearerToken(s.GetBearerToken()) - res.setExtendedHeaders(s.ExtendedHeaders()) - - return res -} - -func (s *transportRequest) handler() rangeItemAccumulator { return new(rangeHashAccum) } - -func (s *getRangeServerWriter) Write(p []byte) (int, error) { - resp := makeRangeResponse(p) - if err := s.respPreparer.prepareResponse(s.srv.Context(), s.req, resp); err != nil { - return 0, err - } - - if err := s.srv.Send(resp); err != nil { - return 0, err - } - - return len(p), nil -} - -func (s *rawRangeInfo) GetRange() Range { - return s.rng -} - -func (s *rawRangeInfo) setRange(rng Range) { - s.rng = rng -} - -func (s *rawRangeInfo) getAddrInfo() *rawAddrInfo { - return s.rawAddrInfo -} - -func (s *rawRangeInfo) setAddrInfo(v *rawAddrInfo) { - s.rawAddrInfo = v - s.setType(object.RequestRange) -} - -func newRawRangeInfo() *rawRangeInfo { - res := new(rawRangeInfo) - - res.setAddrInfo(newRawAddressInfo()) - - return res -} - -func (s *transportRequest) GetRange() Range { - return s.serviceRequest.(*GetRangeRequest).Range -} diff --git a/pkg/network/transport/object/grpc/ranges_test.go b/pkg/network/transport/object/grpc/ranges_test.go deleted file mode 100644 index 065846be3..000000000 --- a/pkg/network/transport/object/grpc/ranges_test.go +++ /dev/null @@ -1,778 +0,0 @@ -package object - -import ( - "bytes" - "context" - "testing" - "time" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - _range "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc/range" - 
"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testRangeEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - RangeChopper - object.Service_GetRangeServer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var ( - _ _range.RelativeReceiver = (*testRangeEntity)(nil) - _ RangeChopper = (*testRangeEntity)(nil) - _ operationExecutor = (*testRangeEntity)(nil) - _ requestHandler = (*testRangeEntity)(nil) - _ rangeRevealer = (*testRangeEntity)(nil) - _ objectRangeReceiver = (*testRangeEntity)(nil) - _ object.Service_GetRangeServer = (*testRangeEntity)(nil) - _ responsePreparer = (*testRangeEntity)(nil) -) - -func (s *testRangeEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testRangeEntity) Context() context.Context { return context.TODO() } - -func (s *testRangeEntity) Send(r *GetRangeResponse) error { - if s.f != nil { - s.f(r) - } - return s.err -} - -func (s *testRangeEntity) getRange(_ context.Context, t rangeTool) (interface{}, error) { - if s.f != nil { - s.f(t) - } - return s.res, s.err -} - -func (s *testRangeEntity) reveal(_ context.Context, r *RangeDescriptor) ([]RangeDescriptor, error) { - if s.f != nil { - s.f(r) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]RangeDescriptor), nil -} - -func (s *testRangeEntity) Base(ctx context.Context, addr Address) (RangeDescriptor, error) { - if s.f != nil { - s.f(addr) - } - if s.err != nil { - return RangeDescriptor{}, s.err - } - return s.res.(RangeDescriptor), nil -} - -func (s *testRangeEntity) Neighbor(ctx context.Context, addr Address, left bool) (RangeDescriptor, error) { - if s.f != nil { - s.f(addr, left) - } - if s.err != nil { - return RangeDescriptor{}, s.err - } - return s.res.(RangeDescriptor), nil -} - -func (s *testRangeEntity) Chop(ctx context.Context, length, offset int64, fromStart bool) ([]RangeDescriptor, error) { - if s.f != nil { - s.f(length, offset, fromStart) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]RangeDescriptor), nil -} - -func (s *testRangeEntity) Closed() bool { return s.res.(bool) } - -func (s *testRangeEntity) PutChopper(addr Address, chopper RangeChopper) error { - if s.f != nil { - s.f(addr, chopper) - } - return s.err -} - -func (s *testRangeEntity) GetChopper(addr Address, rc _range.RCType) (RangeChopper, error) { - if s.f != nil { - s.f(addr, rc) - } - if s.err != nil { - return nil, s.err - } - return s.res.(RangeChopper), nil -} - -func (s *testRangeEntity) executeOperation(_ context.Context, i transport.MetaInfo, h responseItemHandler) error { - if s.f != nil { - s.f(i, h) - } - return s.err -} - -func (s *testRangeEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func Test_objectService_GetRange(t *testing.T) { - req := &GetRangeRequest{Address: testObjectAddress(t)} - - 
t.Run("request handler error", func(t *testing.T) { - rhErr := errors.New("test error for request handler") - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, s, p.executor) - require.Equal(t, req, p.request) - }) - }, - err: rhErr, // force requestHandler to return rhErr - } - - // ascertain that error returns as expected - require.EqualError(t, s.GetRange(req, new(testRangeEntity)), rhErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - fragment := testData(t, 10) - - resp := &GetRangeResponse{Fragment: fragment} - - s := objectService{ - requestHandler: &testRangeEntity{ - res: bytes.NewReader(fragment), // force requestHandler to return fragment - }, - rangeChunkPreparer: &testRangeEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, makeRangeResponse(fragment), items[1]) - }, - res: resp, - }, - - statusCalculator: newStatusCalculator(), - } - - srv := &testRangeEntity{ - f: func(items ...interface{}) { - require.Equal(t, resp, items[0]) - }, - } - - require.NoError(t, s.GetRange(req, srv)) - }) -} - -func Test_objectService_GetRangeHash(t *testing.T) { - ctx := context.TODO() - - req := &GetRangeHashRequest{Address: testObjectAddress(t)} - - t.Run("request handler error", func(t *testing.T) { - rhErr := errors.New("test error for request handler") - - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct request handler params", func(t *testing.T) { - p := items[0].(handleRequestParams) - require.Equal(t, s, p.executor) - require.Equal(t, req, p.request) - }) - }, - err: rhErr, // force requestHandler to return rhErr - } - - // ascertain that error returns as expected - res, err := s.GetRangeHash(ctx, req) - require.EqualError(t, err, rhErr.Error()) - require.Nil(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - hCount := 5 - hashes := make([]Hash, 0, hCount) - - for i := 0; i < hCount; i++ { - hashes = append(hashes, hash.Sum(testData(t, 10))) - } - - s := objectService{ - requestHandler: &testRangeEntity{ - res: hashes, // force requestHandler to return fragments - }, - respPreparer: &testRangeEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, makeRangeHashResponse(hashes), items[1]) - }, - res: &GetRangeHashResponse{Hashes: hashes}, - }, - - statusCalculator: newStatusCalculator(), - } - - res, err := s.GetRangeHash(ctx, req) - require.NoError(t, err) - require.Equal(t, hashes, res.Hashes) - }) -} - -func Test_coreRangeReceiver(t *testing.T) { - ctx := context.TODO() - log := zap.L() - - t.Run("range reveal failure", func(t *testing.T) { - revErr := errors.New("test error for range revealer") - - rt := newRawRangeHashInfo() - rt.setTTL(service.NonForwardingTTL) - rt.setAddress(testObjectAddress(t)) - rt.setRanges([]Range{ - { - Offset: 1, - Length: 2, - }, - }) - - revealer := &testRangeEntity{ - f: func(items ...interface{}) { - require.Equal(t, &RangeDescriptor{ - Size: int64(rt.rngList[0].Length), - Offset: int64(rt.rngList[0].Offset), - Addr: rt.addr, - }, items[0]) - }, - err: revErr, - } - - s := &coreRangeReceiver{ - rngRevealer: revealer, - log: log, - } - - res, err := s.getRange(ctx, rt) - require.EqualError(t, err, 
errPayloadRangeNotFound.Error()) - require.Nil(t, res) - - revealer.err = nil - revealer.res = make([]RangeDescriptor, 0) - - res, err = s.getRange(ctx, rt) - require.EqualError(t, err, errPayloadRangeNotFound.Error()) - require.Nil(t, res) - }) - - t.Run("get sub range failure", func(t *testing.T) { - gErr := errors.New("test error for get range") - - rt := newRawRangeHashInfo() - rt.setTTL(service.NonForwardingTTL) - rt.setAddress(testObjectAddress(t)) - rt.setRanges([]Range{ - { - Offset: 1, - Length: 2, - }, - }) - - revealer := &testRangeEntity{ - res: []RangeDescriptor{{Size: 3, Offset: 4, Addr: testObjectAddress(t)}}, - } - - called := false - revealer.f = func(items ...interface{}) { - if called { - revealer.err = gErr - return - } - called = true - } - - s := &coreRangeReceiver{ - rngRevealer: revealer, - log: log, - } - - res, err := s.getRange(ctx, rt) - require.EqualError(t, err, errPayloadRangeNotFound.Error()) - require.Nil(t, res) - }) - - t.Run("non-forwarding behavior", func(t *testing.T) { - rt := newRawRangeHashInfo() - rt.setTTL(service.NonForwardingTTL - 1) - rt.setAddress(testObjectAddress(t)) - rt.setRanges([]Range{ - { - Offset: 1, - Length: 2, - }, - }) - - rd := RangeDescriptor{ - Size: int64(rt.rngList[0].Length), - Offset: int64(rt.rngList[0].Offset), - Addr: rt.addr, - } - - d := hash.Sum(testData(t, 10)) - - s := &coreRangeReceiver{ - straightRngRecv: &testRangeEntity{ - f: func(items ...interface{}) { - require.Equal(t, rt.budOff(&rd), items[0]) - }, - res: d, - }, - } - - res, err := s.getRange(ctx, rt) - require.NoError(t, err) - require.Equal(t, d, res) - }) - - t.Run("correct result concat", func(t *testing.T) { - rt := newRawRangeHashInfo() - rt.setTTL(service.NonForwardingTTL) - rt.setRanges([]Range{ - {}, - }) - - revealer := new(testRangeEntity) - revCalled := false - revealer.f = func(items ...interface{}) { - if revCalled { - revealer.res = []RangeDescriptor{items[0].(RangeDescriptor)} - } else { - revealer.res = make([]RangeDescriptor, 2) - } - revCalled = true - } - - h1, h2 := hash.Sum(testData(t, 10)), hash.Sum(testData(t, 10)) - - recvCalled := false - receiver := new(testRangeEntity) - receiver.f = func(...interface{}) { - if recvCalled { - receiver.res = h2 - } else { - receiver.res = h1 - } - recvCalled = true - } - - s := &coreRangeReceiver{ - rngRevealer: revealer, - straightRngRecv: receiver, - } - - exp, err := hash.Concat([]Hash{h1, h2}) - require.NoError(t, err) - - res, err := s.getRange(ctx, rt) - require.NoError(t, err) - require.Equal(t, exp, res) - }) -} - -func Test_straightRangeReceiver_getRange(t *testing.T) { - ctx := context.TODO() - - req := new(transportRequest) - - t.Run("executor error", func(t *testing.T) { - exErr := errors.New("test error for executor") - - s := &straightRangeReceiver{ - executor: &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct executor params", func(t *testing.T) { - require.Equal(t, req, items[0]) - require.Equal(t, newSingleItemHandler(), items[1]) - }) - }, - err: exErr, // force operationExecutor to return exErr - }, - } - - res, err := s.getRange(ctx, req) - require.EqualError(t, err, exErr.Error()) - require.Nil(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - v := testData(t, 10) - - s := &straightRangeReceiver{ - executor: &testRangeEntity{ - f: func(items ...interface{}) { - items[1].(rangeItemAccumulator).handleItem(v) - }, - err: nil, // force operationExecutor to return nil error - }, - } - - res, err := s.getRange(ctx, req) - require.NoError(t, err) - 
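// straightRangeReceiver collects its single result through the sync.Once-based
// singleItemHandler defined in ranges.go above: the first handleItem call wins
// and later calls are ignored. A reduced standalone sketch of that one-shot
// holder (onceValue is a hypothetical name for this illustration):
package main

import (
	"fmt"
	"sync"
)

type onceValue struct {
	once sync.Once
	item interface{}
}

func (o *onceValue) handleItem(v interface{}) { o.once.Do(func() { o.item = v }) }

func (o *onceValue) collect() interface{} { return o.item }

func main() {
	h := new(onceValue)
	h.handleItem("first")
	h.handleItem("second")   // ignored: the value was already set
	fmt.Println(h.collect()) // first
}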
require.Equal(t, v, res) - }) -} - -func Test_coreRngRevealer_reveal(t *testing.T) { - ctx := context.TODO() - - rd := RangeDescriptor{ - Size: 5, - Offset: 6, - Addr: testObjectAddress(t), - } - - t.Run("charybdis chopper presence", func(t *testing.T) { - cErr := errors.New("test error for charybdis") - - s := &coreRngRevealer{ - chopTable: &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct chopper table params", func(t *testing.T) { - require.Equal(t, rd.Addr, items[0]) - require.Equal(t, _range.RCCharybdis, items[1]) - }) - }, - res: &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct chopper params", func(t *testing.T) { - require.Equal(t, rd.Size, items[0]) - require.Equal(t, rd.Offset, items[1]) - require.True(t, items[2].(bool)) - }) - }, - res: true, // close chopper - err: cErr, // force RangeChopper to return cErr - }, - }, - } - - res, err := s.reveal(ctx, &rd) - require.EqualError(t, err, cErr.Error()) - require.Empty(t, res) - }) - - t.Run("scylla chopper presence", func(t *testing.T) { - scErr := errors.New("test error for scylla") - - scylla := &testRangeEntity{ - err: scErr, // force RangeChopper to return scErr - } - - ct := new(testRangeEntity) - - ct.f = func(items ...interface{}) { - if items[1].(_range.RCType) == _range.RCCharybdis { - ct.err = errors.New("") - } else { - ct.res = scylla - ct.err = nil - } - } - - s := &coreRngRevealer{ - chopTable: ct, - } - - res, err := s.reveal(ctx, &rd) - require.EqualError(t, err, scErr.Error()) - require.Empty(t, res) - }) - - t.Run("new scylla", func(t *testing.T) { - t.Run("error", func(t *testing.T) { - s := &coreRngRevealer{ - relativeRecv: nil, // pass empty relation receiver to fail constructor - chopTable: &testRangeEntity{ - err: errors.New(""), // force ChopperTable to return non-nil error - }, - } - - res, err := s.reveal(ctx, &rd) - require.Error(t, err) - require.Nil(t, res) - }) - - t.Run("success", func(t *testing.T) { - rrErr := errors.New("test error for relative receiver") - - relRecv := &testRangeEntity{ - err: rrErr, // force relative receiver to return rrErr - } - - scylla, err := _range.NewScylla(&_range.ChopperParams{ - RelativeReceiver: relRecv, - Addr: rd.Addr, - }) - require.NoError(t, err) - - callNum := 0 - - s := &coreRngRevealer{ - relativeRecv: relRecv, - chopTable: &testRangeEntity{ - f: func(items ...interface{}) { - t.Run("correct put chopper params", func(t *testing.T) { - if callNum >= 2 { - require.Equal(t, rd.Addr, items[0]) - require.Equal(t, scylla, items[1]) - } - }) - }, - err: errors.New(""), // force ChopperTable to return non-nil error - }, - } - - expRes, expErr := scylla.Chop(ctx, rd.Size, rd.Offset, true) - require.Error(t, expErr) - - res, err := s.reveal(ctx, &rd) - require.EqualError(t, err, expErr.Error()) - require.Equal(t, expRes, res) - }) - }) -} - -func Test_transportRequest_rangeTool(t *testing.T) { - t.Run("get ranges", func(t *testing.T) { - rngs := []Range{ - {Offset: 1, Length: 2}, - {Offset: 3, Length: 4}, - } - - reqs := []transportRequest{ - {serviceRequest: &GetRangeHashRequest{Ranges: rngs}}, - } - - for i := range reqs { - require.Equal(t, reqs[i].GetRanges(), rngs) - } - }) - - t.Run("bud off", func(t *testing.T) { - var ( - timeout = 6 * time.Second - ttl = uint32(16) - rd = RangeDescriptor{ - Size: 1, - Offset: 2, - Addr: testObjectAddress(t), - } - ) - - t.Run("get range hash request", func(t *testing.T) { - salt := testData(t, 10) - - r := &GetRangeHashRequest{Salt: salt} - r.SetToken(new(service.Token)) - - req := 
&transportRequest{ - serviceRequest: r, - timeout: timeout, - } - req.SetTTL(ttl) - - tool := req.budOff(&rd).(transport.RangeHashInfo) - - require.Equal(t, timeout, tool.GetTimeout()) - require.Equal(t, ttl, tool.GetTTL()) - require.Equal(t, rd.Addr, tool.GetAddress()) - require.Equal(t, []Range{{Offset: uint64(rd.Offset), Length: uint64(rd.Size)}}, tool.GetRanges()) - require.Equal(t, salt, tool.GetSalt()) - require.Equal(t, r.GetSessionToken(), tool.GetSessionToken()) - }) - }) - - t.Run("handler", func(t *testing.T) { - t.Run("get range request", func(t *testing.T) { - req := &transportRequest{serviceRequest: new(GetRangeHashRequest)} - handler := req.handler() - require.Equal(t, new(rangeHashAccum), handler) - }) - }) -} - -func Test_rawRangeHashInfo(t *testing.T) { - t.Run("get ranges", func(t *testing.T) { - rngs := []Range{ - {Offset: 1, Length: 2}, - {Offset: 3, Length: 4}, - } - - r := newRawRangeHashInfo() - r.setRanges(rngs) - - require.Equal(t, rngs, r.GetRanges()) - }) - - t.Run("handler", func(t *testing.T) { - require.Equal(t, - &rangeHashAccum{concat: true}, - newRawRangeHashInfo().handler(), - ) - }) - - t.Run("bud off", func(t *testing.T) { - var ( - ttl = uint32(12) - timeout = 7 * time.Hour - ) - - r := newRawRangeHashInfo() - r.setTTL(ttl) - r.setTimeout(timeout) - r.setSalt(testData(t, 20)) - r.setSessionToken(new(service.Token)) - - rd := RangeDescriptor{ - Size: 120, - Offset: 71, - Addr: testObjectAddress(t), - } - - tool := r.budOff(&rd) - - require.Equal(t, ttl, tool.GetTTL()) - require.Equal(t, timeout, tool.GetTimeout()) - require.Equal(t, rd.Addr, tool.GetAddress()) - require.Equal(t, []Range{{Offset: uint64(rd.Offset), Length: uint64(rd.Size)}}, tool.GetRanges()) - require.Equal(t, r.GetSessionToken(), tool.GetSessionToken()) - require.Equal(t, - loopData(r.salt, int64(len(r.salt)), rd.Offset), - tool.(transport.RangeHashInfo).GetSalt(), - ) - }) -} - -func Test_rawRangeInfo(t *testing.T) { - t.Run("get ranges", func(t *testing.T) { - rng := Range{Offset: 1, Length: 2} - - r := newRawRangeInfo() - r.setRange(rng) - - require.Equal(t, rng, r.GetRange()) - }) -} - -func Test_loopSalt(t *testing.T) { - t.Run("empty data", func(t *testing.T) { - require.Empty(t, loopData(nil, 20, 10)) - require.Empty(t, loopData(make([]byte, 0), 20, 10)) - }) - - t.Run("data part", func(t *testing.T) { - var ( - off, size int64 = 10, 20 - d = testData(t, 40) - ) - require.Equal(t, d[off:off+size], loopData(d, size, off)) - }) - - t.Run("with recycle", func(t *testing.T) { - var ( - d = testData(t, 40) - off = int64(len(d) / 2) - size = 2 * off - ) - - require.Equal(t, - append(d[off:], d[:size-off]...), - loopData(d, size, off), - ) - }) -} - -func Test_rangeHashAccum(t *testing.T) { - t.Run("handle item", func(t *testing.T) { - s := &rangeHashAccum{ - h: []Hash{hash.Sum(testData(t, 10))}, - } - - h := hash.Sum(testData(t, 10)) - - exp := append(s.h, h) - - s.handleItem(h) - - require.Equal(t, exp, s.h) - - exp = append(s.h, s.h...) 
- - s.handleItem(s.h) - - require.Equal(t, exp, s.h) - }) - - t.Run("collect", func(t *testing.T) { - hashes := []Hash{hash.Sum(testData(t, 10)), hash.Sum(testData(t, 10))} - - t.Run("w/ concat", func(t *testing.T) { - s := &rangeHashAccum{ - concat: true, - h: hashes, - } - - expRes, expErr := hash.Concat(hashes) - - res, err := s.collect() - - require.Equal(t, expRes, res) - require.Equal(t, expErr, err) - }) - - t.Run("w/o concat", func(t *testing.T) { - s := &rangeHashAccum{ - concat: false, - h: hashes, - } - - res, err := s.collect() - require.NoError(t, err) - require.Equal(t, hashes, res) - }) - }) -} diff --git a/pkg/network/transport/object/grpc/response.go b/pkg/network/transport/object/grpc/response.go deleted file mode 100644 index 8e08941f8..000000000 --- a/pkg/network/transport/object/grpc/response.go +++ /dev/null @@ -1,144 +0,0 @@ -package object - -import ( - "context" - - eacl "github.com/nspcc-dev/neofs-api-go/acl/extended" - "github.com/nspcc-dev/neofs-api-go/object" - eaclstorage "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" -) - -type ( - serviceResponse interface { - SetEpoch(uint64) - } - - responsePreparer interface { - prepareResponse(context.Context, serviceRequest, serviceResponse) error - } - - epochResponsePreparer struct { - epochRecv EpochReceiver - } -) - -type complexResponsePreparer struct { - items []responsePreparer -} - -type aclResponsePreparer struct { - eaclSrc eaclstorage.Storage - - aclInfoReceiver aclInfoReceiver - - reqActCalc requestActionCalculator -} - -type headersFromObject struct { - obj *Object -} - -var ( - _ responsePreparer = (*epochResponsePreparer)(nil) -) - -func (s headersFromObject) getHeaders() (*Object, bool) { - return s.obj, true -} - -func (s complexResponsePreparer) prepareResponse(ctx context.Context, req serviceRequest, resp serviceResponse) error { - for i := range s.items { - if err := s.items[i].prepareResponse(ctx, req, resp); err != nil { - return err - } - } - - return nil -} - -func (s *epochResponsePreparer) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - resp.SetEpoch(s.epochRecv.Epoch()) - - return nil -} - -func (s *aclResponsePreparer) prepareResponse(ctx context.Context, req serviceRequest, resp serviceResponse) error { - aclInfo, err := s.aclInfoReceiver.getACLInfo(ctx, req) - if err != nil { - return errAccessDenied - } else if !aclInfo.checkBearer && !aclInfo.checkExtended { - return nil - } - - var obj *Object - - switch r := resp.(type) { - case *object.GetResponse: - obj = r.GetObject() - case *object.HeadResponse: - obj = r.GetObject() - case interface { - GetObject() *Object - }: - obj = r.GetObject() - } - - if obj == nil { - return nil - } - - // FIXME: do not check request headers. - // At this stage request is already validated, but action calculator will check it again. 
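// (Annotation on the block that follows: requestActionParams gathers the eACL
// source, the already-validated request, the headers of the object being
// returned, and the requester's access group; when the bearer token has to be
// honored, the bearer-carried eACL table replaces the container's stored one;
// the calculated action must be eacl.ActionAllow, otherwise the response is
// rejected with errAccessDenied.)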
- p := requestActionParams{ - eaclSrc: s.eaclSrc, - request: req, - objHdrSrc: headersFromObject{ - obj: obj, - }, - group: aclInfo.targetInfo.group, - } - - if aclInfo.checkBearer { - p.eaclSrc = eaclFromBearer{ - bearer: req.GetBearerToken(), - } - } - - if action := s.reqActCalc.calculateRequestAction(ctx, p); action != eacl.ActionAllow { - return errAccessDenied - } - - return nil -} - -func makeDeleteResponse() *object.DeleteResponse { - return new(object.DeleteResponse) -} - -func makeRangeHashResponse(v []Hash) *GetRangeHashResponse { - return &GetRangeHashResponse{Hashes: v} -} - -func makeRangeResponse(v []byte) *GetRangeResponse { - return &GetRangeResponse{Fragment: v} -} - -func makeSearchResponse(v []Address) *object.SearchResponse { - return &object.SearchResponse{Addresses: v} -} - -func makeHeadResponse(v *Object) *object.HeadResponse { - return &object.HeadResponse{Object: v} -} - -func makePutResponse(v Address) *object.PutResponse { - return &object.PutResponse{Address: v} -} - -func makeGetHeaderResponse(v *Object) *object.GetResponse { - return &object.GetResponse{R: &object.GetResponse_Object{Object: v}} -} - -func makeGetChunkResponse(v []byte) *object.GetResponse { - return &object.GetResponse{R: &object.GetResponse_Chunk{Chunk: v}} -} diff --git a/pkg/network/transport/object/grpc/response_test.go b/pkg/network/transport/object/grpc/response_test.go deleted file mode 100644 index 5057029ab..000000000 --- a/pkg/network/transport/object/grpc/response_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/stretchr/testify/require" -) - -func TestEpochResponsePreparer(t *testing.T) { - epoch := uint64(33) - - s := &epochResponsePreparer{ - epochRecv: &testPutEntity{res: epoch}, - } - - ctx := context.TODO() - - t.Run("get", func(t *testing.T) { - t.Run("head", func(t *testing.T) { - obj := &Object{ - SystemHeader: SystemHeader{ - ID: testObjectAddress(t).ObjectID, - CID: testObjectAddress(t).CID, - }, - } - - resp := makeGetHeaderResponse(obj) - - require.NoError(t, s.prepareResponse(ctx, new(object.GetRequest), resp)) - - require.Equal(t, obj, resp.GetObject()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - t.Run("chunk", func(t *testing.T) { - chunk := testData(t, 10) - - resp := makeGetChunkResponse(chunk) - - require.NoError(t, s.prepareResponse(ctx, new(object.GetRequest), resp)) - - require.Equal(t, chunk, resp.GetChunk()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - }) - - t.Run("put", func(t *testing.T) { - addr := testObjectAddress(t) - - resp := makePutResponse(addr) - require.NoError(t, s.prepareResponse(ctx, new(object.PutRequest), resp)) - - require.Equal(t, addr, resp.GetAddress()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - t.Run("head", func(t *testing.T) { - obj := &Object{ - SystemHeader: SystemHeader{ - PayloadLength: 7, - ID: testObjectAddress(t).ObjectID, - CID: testObjectAddress(t).CID, - }, - } - - resp := makeHeadResponse(obj) - require.NoError(t, s.prepareResponse(ctx, new(object.HeadRequest), resp)) - - require.Equal(t, obj, resp.GetObject()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - t.Run("search", func(t *testing.T) { - addrList := testAddrList(t, 5) - - resp := makeSearchResponse(addrList) - require.NoError(t, s.prepareResponse(ctx, new(object.SearchRequest), resp)) - - require.Equal(t, addrList, resp.GetAddresses()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - 
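// complexResponsePreparer (response.go above) runs its preparers in order and
// stops at the first error; epochResponsePreparer, exercised by this test, is
// one such element that stamps the current epoch on every outgoing response.
// A reduced sketch of the same chaining idea, with hypothetical types
// (response, preparer, epochStamper, chain) standing in for the package's own:
package main

import "fmt"

type response struct{ epoch uint64 }

type preparer interface {
	prepare(r *response) error
}

type epochStamper struct{ epoch func() uint64 }

func (s epochStamper) prepare(r *response) error {
	r.epoch = s.epoch() // every response carries the node's current epoch
	return nil
}

type chain []preparer

func (c chain) prepare(r *response) error {
	for _, p := range c {
		if err := p.prepare(r); err != nil {
			return err // first failure aborts the rest of the chain
		}
	}
	return nil
}

func main() {
	c := chain{epochStamper{epoch: func() uint64 { return 33 }}}
	r := new(response)
	if err := c.prepare(r); err == nil {
		fmt.Println(r.epoch) // 33
	}
}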
t.Run("range", func(t *testing.T) { - data := testData(t, 10) - - resp := makeRangeResponse(data) - require.NoError(t, s.prepareResponse(ctx, new(GetRangeRequest), resp)) - - require.Equal(t, data, resp.GetFragment()) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - t.Run("range hash", func(t *testing.T) { - hashes := []Hash{ - hash.Sum(testData(t, 10)), - hash.Sum(testData(t, 10)), - } - - resp := makeRangeHashResponse(hashes) - require.NoError(t, s.prepareResponse(ctx, new(object.GetRangeHashRequest), resp)) - - require.Equal(t, hashes, resp.Hashes) - require.Equal(t, epoch, resp.GetEpoch()) - }) - - t.Run("delete", func(t *testing.T) { - resp := makeDeleteResponse() - require.NoError(t, s.prepareResponse(ctx, new(object.DeleteRequest), resp)) - - require.IsType(t, new(object.DeleteResponse), resp) - require.Equal(t, epoch, resp.GetEpoch()) - }) -} diff --git a/pkg/network/transport/object/grpc/search.go b/pkg/network/transport/object/grpc/search.go deleted file mode 100644 index cb6b35d27..000000000 --- a/pkg/network/transport/object/grpc/search.go +++ /dev/null @@ -1,169 +0,0 @@ -package object - -import ( - "context" - "sync" - - "github.com/nspcc-dev/neofs-api-go/object" - v1 "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "go.uber.org/zap" -) - -// QueryFilter is a type alias of -// Filter from query package of neofs-api-go. -type QueryFilter = v1.Filter - -const ( - // KeyChild is a filter key to child link. - KeyChild = "CHILD" - - // KeyPrev is a filter key to previous link. - KeyPrev = "PREV" - - // KeyNext is a filter key to next link. - KeyNext = "NEXT" - - // KeyID is a filter key to object ID. - KeyID = "ID" - - // KeyCID is a filter key to container ID. - KeyCID = "CID" - - // KeyOwnerID is a filter key to owner ID. - KeyOwnerID = "OWNERID" - - // KeyRootObject is a filter key to objects w/o parent links. - KeyRootObject = "ROOT_OBJECT" -) - -type ( - objectSearcher interface { - searchObjects(context.Context, transport.SearchInfo) ([]Address, error) - } - - coreObjectSearcher struct { - executor operationExecutor - } - - // objectAddressSet is and interface of object address set. - objectAddressSet interface { - responseItemHandler - - // list returns all elements of set. - list() []Address - } - - // coreObjAddrSet is and implementation of objectAddressSet interface used in Object service production. - coreObjAddrSet struct { - // Read-write mutex for race protection. - *sync.RWMutex - - // Storing element of set. 
- items []Address - } -) - -var addrPerMsg = int64(maxGetPayloadSize / new(Address).Size()) - -var ( - _ transport.SearchInfo = (*transportRequest)(nil) - _ objectSearcher = (*coreObjectSearcher)(nil) - _ objectAddressSet = (*coreObjAddrSet)(nil) -) - -func (s *transportRequest) GetCID() CID { return s.serviceRequest.(*object.SearchRequest).CID() } - -func (s *transportRequest) GetQuery() []byte { - return s.serviceRequest.(*object.SearchRequest).GetQuery() -} - -func (s *objectService) Search(req *object.SearchRequest, srv object.Service_SearchServer) (err error) { - defer func() { - if r := recover(); r != nil { - s.log.Error(panicLogMsg, - zap.Stringer("request", object.RequestSearch), - zap.Any("reason", r), - ) - - err = errServerPanic - } - - err = s.statusCalculator.make(requestError{ - t: object.RequestSearch, - e: err, - }) - }() - - var r interface{} - - if r, err = s.requestHandler.handleRequest(srv.Context(), handleRequestParams{ - request: req, - executor: s, - }); err != nil { - return err - } - - addrList := r.([]Address) - - for { - cut := min(int64(len(addrList)), addrPerMsg) - - resp := makeSearchResponse(addrList[:cut]) - if err = s.respPreparer.prepareResponse(srv.Context(), req, resp); err != nil { - return - } - - if err = srv.Send(resp); err != nil { - return - } - - addrList = addrList[cut:] - if len(addrList) == 0 { - break - } - } - - return err -} - -func (s *coreObjectSearcher) searchObjects(ctx context.Context, sInfo transport.SearchInfo) ([]Address, error) { - addrSet := newUniqueAddressAccumulator() - if err := s.executor.executeOperation(ctx, sInfo, addrSet); err != nil { - return nil, err - } - - return addrSet.list(), nil -} - -func newUniqueAddressAccumulator() objectAddressSet { - return &coreObjAddrSet{ - RWMutex: new(sync.RWMutex), - items: make([]Address, 0, 10), - } -} - -func (s *coreObjAddrSet) handleItem(v interface{}) { - addrList := v.([]Address) - - s.Lock() - -loop: - for i := range addrList { - for j := range s.items { - if s.items[j].Equal(&addrList[i]) { - continue loop - } - } - s.items = append(s.items, addrList[i]) - } - - s.Unlock() -} - -func (s *coreObjAddrSet) list() []Address { - s.RLock() - defer s.RUnlock() - - return s.items -} diff --git a/pkg/network/transport/object/grpc/search_test.go b/pkg/network/transport/object/grpc/search_test.go deleted file mode 100644 index bd1e73679..000000000 --- a/pkg/network/transport/object/grpc/search_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - v1 "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testSearchEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - object.Service_SearchServer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -var ( - _ requestHandler = (*testSearchEntity)(nil) - _ operationExecutor = (*testSearchEntity)(nil) - _ responsePreparer = (*testSearchEntity)(nil) - - _ object.Service_SearchServer = (*testSearchEntity)(nil) -) - -func (s *testSearchEntity) prepareResponse(_ context.Context, req serviceRequest, resp serviceResponse) error { - if s.f != nil { - s.f(req, resp) - } - return s.err -} - -func (s *testSearchEntity) Send(r *object.SearchResponse) error { - if s.f != nil { - s.f(r) - } - return s.err -} - -func (s *testSearchEntity) Context() context.Context { return context.TODO() } - -func (s *testSearchEntity) executeOperation(_ context.Context, p transport.MetaInfo, h responseItemHandler) error { - if s.f != nil { - s.f(p, h) - } - return s.err -} - -func (s *testSearchEntity) handleRequest(_ context.Context, p handleRequestParams) (interface{}, error) { - if s.f != nil { - s.f(p) - } - return s.res, s.err -} - -func TestSearchVerify(t *testing.T) { - t.Run("KeyNoChildren", func(t *testing.T) { - var ( - q = v1.Query{ - Filters: []QueryFilter{ - { - Type: v1.Filter_Exact, - Name: transport.KeyNoChildren, - }, - }, - } - obj = new(Object) - ) - require.True(t, imposeQuery(q, obj)) - - obj.Headers = append(obj.Headers, Header{Value: &object.Header_Link{ - Link: &object.Link{ - Type: object.Link_Child, - }, - }}) - require.False(t, imposeQuery(q, obj)) - }) -} - -func Test_coreObjAddrSet(t *testing.T) { - // create address accumulator - acc := newUniqueAddressAccumulator() - require.NotNil(t, acc) - - // check type correctness - v, ok := acc.(*coreObjAddrSet) - require.True(t, ok) - - // check fields initialization - require.NotNil(t, v.items) - require.NotNil(t, v.RWMutex) - - t.Run("add/list", func(t *testing.T) { - // ascertain that initial list is empty - require.Empty(t, acc.list()) - - // add first set of addresses - addrList1 := testAddrList(t, 5) - acc.handleItem(addrList1) - - // ascertain that list is equal to added list - require.Equal(t, addrList1, acc.list()) - - // add more addresses - addrList2 := testAddrList(t, 5) - acc.handleItem(addrList2) - - twoLists := append(addrList1, addrList2...) 
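// coreObjAddrSet (search.go above) accumulates search results and drops
// duplicates, which is what the assertions just below verify by handing it the
// same address list twice. A reduced sketch of such a set, guarded by a mutex;
// uniqueSet is a hypothetical name, and it deduplicates through a map keyed by
// string rather than the linear Address.Equal scan used in the real code:
package main

import (
	"fmt"
	"sync"
)

type uniqueSet struct {
	mu    sync.RWMutex
	seen  map[string]struct{}
	items []string
}

func newUniqueSet() *uniqueSet {
	return &uniqueSet{seen: make(map[string]struct{})}
}

func (s *uniqueSet) add(batch []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, it := range batch {
		if _, ok := s.seen[it]; ok {
			continue // already accumulated, keep the set duplicate-free
		}
		s.seen[it] = struct{}{}
		s.items = append(s.items, it)
	}
}

func (s *uniqueSet) list() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.items
}

func main() {
	s := newUniqueSet()
	s.add([]string{"a", "b"})
	s.add([]string{"b", "c"}) // "b" is ignored the second time
	fmt.Println(s.list())     // [a b c]
}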
- - // ascertain that list is a concatenation of added lists - require.Equal(t, twoLists, acc.list()) - - // add second list again - acc.handleItem(addrList2) - - // ascertain that list have not changed after adding existing elements - require.Equal(t, twoLists, acc.list()) - }) -} - -func TestObjectService_Search(t *testing.T) { - req := &object.SearchRequest{ - ContainerID: testObjectAddress(t).CID, - Query: testData(t, 10), - } - - addrList := testAddrList(t, int(addrPerMsg)+5) - - t.Run("request handler failure", func(t *testing.T) { - rhErr := errors.New("test error for request handler") - s := &objectService{ - statusCalculator: newStatusCalculator(), - } - - s.requestHandler = &testSearchEntity{ - f: func(items ...interface{}) { - p := items[0].(handleRequestParams) - require.Equal(t, req, p.request) - require.Equal(t, s, p.executor) - }, - err: rhErr, - } - - require.EqualError(t, s.Search(req, new(testSearchEntity)), rhErr.Error()) - }) - - t.Run("server error", func(t *testing.T) { - srvErr := errors.New("test error for search server") - - resp := &object.SearchResponse{Addresses: addrList[:addrPerMsg]} - - s := &objectService{ - requestHandler: &testSearchEntity{ - res: addrList, - }, - respPreparer: &testSearchEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, makeSearchResponse(addrList[:addrPerMsg]), items[1]) - }, - res: resp, - }, - - statusCalculator: newStatusCalculator(), - } - - srv := &testSearchEntity{ - f: func(items ...interface{}) { - require.Equal(t, resp, items[0]) - }, - err: srvErr, // force server to return srvErr - } - - require.EqualError(t, s.Search(req, srv), srvErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - handler := &testSearchEntity{res: make([]Address, 0)} - - off := 0 - - var resp *object.SearchResponse - - s := &objectService{ - requestHandler: handler, - respPreparer: &testSearchEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - resp = items[1].(*object.SearchResponse) - sz := len(resp.GetAddresses()) - require.Equal(t, makeSearchResponse(addrList[off:off+sz]), items[1]) - off += sz - }, - }, - - statusCalculator: newStatusCalculator(), - } - - srv := &testSearchEntity{ - f: func(items ...interface{}) { - require.Equal(t, resp, items[0]) - }, - } - - require.NoError(t, s.Search(req, srv)) - - handler.res = addrList - - require.NoError(t, s.Search(req, srv)) - }) -} - -func Test_coreObjectSearcher(t *testing.T) { - ctx := context.TODO() - - req := newRawSearchInfo() - req.setQuery(testData(t, 10)) - - t.Run("operation executor failure", func(t *testing.T) { - execErr := errors.New("test error for operation executor") - - s := &coreObjectSearcher{ - executor: &testSearchEntity{ - f: func(items ...interface{}) { - require.Equal(t, req, items[0]) - require.Equal(t, newUniqueAddressAccumulator(), items[1]) - }, - err: execErr, - }, - } - - res, err := s.searchObjects(ctx, req) - require.EqualError(t, err, execErr.Error()) - require.Empty(t, res) - }) - - t.Run("correct result", func(t *testing.T) { - addrList := testAddrList(t, 5) - - s := &coreObjectSearcher{ - executor: &testSearchEntity{ - f: func(items ...interface{}) { - items[1].(responseItemHandler).handleItem(addrList) - }, - }, - } - - res, err := s.searchObjects(ctx, req) - require.NoError(t, err) - require.Equal(t, addrList, res) - }) -} diff --git a/pkg/network/transport/object/grpc/service.go b/pkg/network/transport/object/grpc/service.go deleted file mode 100644 index 142a9ee9e..000000000 --- 
a/pkg/network/transport/object/grpc/service.go +++ /dev/null @@ -1,674 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - "math" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - eaclstorage "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/extended/storage" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" - "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - _range "github.com/nspcc-dev/neofs-node/pkg/network/transport/object/grpc/range" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - storage2 "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - storagegroup2 "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/panjf2000/ants/v2" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // CID is a type alias of - // CID from refs package of neofs-api-go. - CID = refs.CID - - // Object is a type alias of - // Object from object package of neofs-api-go. - Object = object.Object - - // ID is a type alias of - // ObjectID from refs package of neofs-api-go. - ID = refs.ObjectID - - // OwnerID is a type alias of - // OwnerID from refs package of neofs-api-go. - OwnerID = refs.OwnerID - - // Address is a type alias of - // Address from refs package of neofs-api-go. - Address = refs.Address - - // Hash is a type alias of - // Hash from hash package of neofs-api-go. - Hash = hash.Hash - - // Meta is a type alias of - // ObjectMeta from localstore package. - Meta = localstore.ObjectMeta - - // Filter is a type alias of - // FilterPipeline from localstore package. - Filter = localstore.FilterPipeline - - // Header is a type alias of - // Header from object package of neofs-api-go. - Header = object.Header - - // UserHeader is a type alias of - // UserHeader from object package of neofs-api-go. - UserHeader = object.UserHeader - - // SystemHeader is a type alias of - // SystemHeader from object package of neofs-api-go. - SystemHeader = object.SystemHeader - - // CreationPoint is a type alias of - // CreationPoint from object package of neofs-api-go. - CreationPoint = object.CreationPoint - - // Service is an interface of the server of Object service. - Service interface { - grpc.Service - CapacityMeter - object.ServiceServer - } - - // CapacityMeter is an interface of node storage capacity meter. - CapacityMeter interface { - RelativeAvailableCap() float64 - AbsoluteAvailableCap() uint64 - } - - // EpochReceiver is an interface of the container of epoch number with read access. - EpochReceiver interface { - Epoch() uint64 - } - - // RemoteService is an interface of Object service client constructor. 
- RemoteService interface { - Remote(context.Context, multiaddr.Multiaddr) (object.ServiceClient, error) - } - - // Placer is an interface of placement component. - Placer interface { - IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) - GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) - } - - // WorkerPool is an interface of go-routing pool. - WorkerPool interface { - Submit(func()) error - } - - // Salitor is a salting slice function. - Salitor func(data []byte, salt []byte) []byte - - serviceRequest interface { - object.Request - service.RequestData - service.SignKeyPairAccumulator - service.SignKeyPairSource - - SetToken(*service.Token) - - SetBearer(*service.BearerTokenMsg) - - SetHeaders([]service.RequestExtendedHeader_KV) - } - - NetmapClient = wrapper.Wrapper - - // Params groups the parameters of Object service server's constructor. - Params struct { - CheckACL bool - - Assembly bool - - WindowSize int - - MaxProcessingSize uint64 - StorageCapacity uint64 - PoolSize int - Salitor Salitor - LocalStore localstore.Localstore - Placer Placer - ObjectRestorer transformer.ObjectRestorer - RemoteService RemoteService - AddressStore storage2.AddressStoreComponent - Logger *zap.Logger - TokenStore session.PrivateTokenStore - EpochReceiver EpochReceiver - - PlacementWrapper *placement.PlacementWrapper - - DialTimeout time.Duration - - Key *ecdsa.PrivateKey - - PutParams OperationParams - GetParams OperationParams - DeleteParams OperationParams - HeadParams OperationParams - SearchParams OperationParams - RangeParams OperationParams - RangeHashParams OperationParams - - headRecv objectReceiver - - Verifier verifier.Verifier - - Transformer transformer.Transformer - - MaxPayloadSize uint64 - - // ACL pre-processor params - ContainerStorage storage.Storage - NetmapClient *NetmapClient - - SGInfoReceiver storagegroup.InfoReceiver - - ExtendedACLSource eaclstorage.Storage - - requestActionCalculator requestActionCalculator - - targetFinder RequestTargeter - - aclInfoReceiver aclInfoReceiver - } - - // OperationParams groups the parameters of particular object operation. 
- OperationParams struct { - Timeout time.Duration - LogErrors bool - } - - objectService struct { - ls localstore.Localstore - storageCap uint64 - - executor transport.SelectiveContainerExecutor - - pPut OperationParams - pGet OperationParams - pDel OperationParams - pHead OperationParams - pSrch OperationParams - pRng OperationParams - pRngHash OperationParams - - log *zap.Logger - - requestHandler requestHandler - - objSearcher objectSearcher - objRecv objectReceiver - objStorer objectStorer - objRemover objectRemover - rngRecv objectRangeReceiver - - payloadRngRecv payloadRangeReceiver - - respPreparer responsePreparer - - getChunkPreparer responsePreparer - rangeChunkPreparer responsePreparer - - statusCalculator *statusCalculator - } -) - -const ( - defaultDialTimeout = 5 * time.Second - defaultPutTimeout = time.Second - defaultGetTimeout = time.Second - defaultDeleteTimeout = time.Second - defaultHeadTimeout = time.Second - defaultSearchTimeout = time.Second - defaultRangeTimeout = time.Second - defaultRangeHashTimeout = time.Second - - defaultPoolSize = 10 - - readyObjectsCheckpointFilterName = "READY_OBJECTS_PUT_CHECKPOINT" - allObjectsCheckpointFilterName = "ALL_OBJECTS_PUT_CHECKPOINT" -) - -var ( - errEmptyTokenStore = errors.New("objectService.New failed: key store not provided") - errEmptyPlacer = errors.New("objectService.New failed: placer not provided") - errEmptyTransformer = errors.New("objectService.New failed: transformer pipeline not provided") - errEmptyGRPC = errors.New("objectService.New failed: gRPC connector not provided") - errEmptyAddress = errors.New("objectService.New failed: address store not provided") - errEmptyLogger = errors.New("objectService.New failed: logger not provided") - errEmptyEpochReceiver = errors.New("objectService.New failed: epoch receiver not provided") - errEmptyLocalStore = errors.New("new local client failed: localstore passed") - errEmptyPrivateKey = errors.New("objectService.New failed: private key not provided") - errEmptyVerifier = errors.New("objectService.New failed: object verifier not provided") - errEmptyACLHelper = errors.New("objectService.New failed: ACL helper not provided") - errEmptyCnrLister = errors.New("objectService.New failed: container lister not provided") - errEmptySGInfoRecv = errors.New("objectService.New failed: SG info receiver not provided") - errInvalidCIDFilter = errors.New("invalid CID filter") - errTokenRetrieval = errors.New("objectService.Put failed on token retrieval") - errHeaderExpected = errors.New("expected header as a first message in stream") -) - -var requestSignFunc = service.SignRequestData - -var requestVerifyFunc = libgrpc.VerifyRequestWithSignatures - -// New is an Object service server's constructor. 
-func New(p *Params) (Service, error) { - if p.PutParams.Timeout <= 0 { - p.PutParams.Timeout = defaultPutTimeout - } - - if p.GetParams.Timeout <= 0 { - p.GetParams.Timeout = defaultGetTimeout - } - - if p.DeleteParams.Timeout <= 0 { - p.DeleteParams.Timeout = defaultDeleteTimeout - } - - if p.HeadParams.Timeout <= 0 { - p.HeadParams.Timeout = defaultHeadTimeout - } - - if p.SearchParams.Timeout <= 0 { - p.SearchParams.Timeout = defaultSearchTimeout - } - - if p.RangeParams.Timeout <= 0 { - p.RangeParams.Timeout = defaultRangeTimeout - } - - if p.RangeHashParams.Timeout <= 0 { - p.RangeHashParams.Timeout = defaultRangeHashTimeout - } - - if p.DialTimeout <= 0 { - p.DialTimeout = defaultDialTimeout - } - - if p.PoolSize <= 0 { - p.PoolSize = defaultPoolSize - } - - switch { - case p.TokenStore == nil: - return nil, errEmptyTokenStore - case p.Placer == nil: - return nil, errEmptyPlacer - case p.LocalStore == nil: - return nil, errEmptyLocalStore - case (p.ObjectRestorer == nil || p.Transformer == nil) && p.Assembly: - return nil, errEmptyTransformer - case p.RemoteService == nil: - return nil, errEmptyGRPC - case p.AddressStore == nil: - return nil, errEmptyAddress - case p.Logger == nil: - return nil, errEmptyLogger - case p.EpochReceiver == nil: - return nil, errEmptyEpochReceiver - case p.Key == nil: - return nil, errEmptyPrivateKey - case p.Verifier == nil: - return nil, errEmptyVerifier - case p.NetmapClient == nil: - return nil, contract.ErrNilWrapper - case p.PlacementWrapper == nil: - return nil, errEmptyCnrLister - case p.ContainerStorage == nil: - return nil, storage.ErrNilStorage - case p.SGInfoReceiver == nil: - return nil, errEmptySGInfoRecv - case p.ExtendedACLSource == nil: - return nil, eaclstorage.ErrNilStorage - } - - pool, err := ants.NewPool(p.PoolSize) - if err != nil { - return nil, errors.Wrap(err, "objectService.New failed: could not create worker pool") - } - - if p.MaxProcessingSize <= 0 { - p.MaxProcessingSize = math.MaxUint64 - } - - if p.StorageCapacity <= 0 { - p.StorageCapacity = math.MaxUint64 - } - - epochRespPreparer := &epochResponsePreparer{ - epochRecv: p.EpochReceiver, - } - - p.targetFinder = &targetFinder{ - log: p.Logger, - irKeysRecv: p.NetmapClient, - cnrLister: p.PlacementWrapper, - cnrStorage: p.ContainerStorage, - } - - p.requestActionCalculator = &reqActionCalc{ - storage: p.ExtendedACLSource, - - log: p.Logger, - } - - p.aclInfoReceiver = aclInfoReceiver{ - cnrStorage: p.ContainerStorage, - - targetFinder: p.targetFinder, - } - - srv := &objectService{ - ls: p.LocalStore, - log: p.Logger, - pPut: p.PutParams, - pGet: p.GetParams, - pDel: p.DeleteParams, - pHead: p.HeadParams, - pSrch: p.SearchParams, - pRng: p.RangeParams, - pRngHash: p.RangeHashParams, - storageCap: p.StorageCapacity, - - requestHandler: &coreRequestHandler{ - preProc: newPreProcessor(p), - postProc: newPostProcessor(), - }, - - respPreparer: &complexResponsePreparer{ - items: []responsePreparer{ - epochRespPreparer, - &aclResponsePreparer{ - aclInfoReceiver: p.aclInfoReceiver, - - reqActCalc: p.requestActionCalculator, - - eaclSrc: p.ExtendedACLSource, - }, - }, - }, - - getChunkPreparer: epochRespPreparer, - - rangeChunkPreparer: epochRespPreparer, - - statusCalculator: serviceStatusCalculator(), - } - - tr, err := NewMultiTransport(MultiTransportParams{ - AddressStore: p.AddressStore, - EpochReceiver: p.EpochReceiver, - RemoteService: p.RemoteService, - Logger: p.Logger, - Key: p.Key, - PutTimeout: p.PutParams.Timeout, - GetTimeout: p.GetParams.Timeout, - HeadTimeout: 
p.HeadParams.Timeout, - SearchTimeout: p.SearchParams.Timeout, - RangeHashTimeout: p.RangeHashParams.Timeout, - DialTimeout: p.DialTimeout, - - PrivateTokenStore: p.TokenStore, - }) - if err != nil { - return nil, err - } - - exec, err := transport.NewContainerTraverseExecutor(tr) - if err != nil { - return nil, err - } - - srv.executor, err = transport.NewObjectContainerHandler(transport.ObjectContainerHandlerParams{ - NodeLister: p.PlacementWrapper, - Executor: exec, - Logger: p.Logger, - }) - if err != nil { - return nil, err - } - - local := &localStoreExecutor{ - salitor: p.Salitor, - epochRecv: p.EpochReceiver, - localStore: p.LocalStore, - } - - qvc := &queryVersionController{ - m: make(map[int]localQueryImposer), - } - - qvc.m[1] = &coreQueryImposer{ - fCreator: new(coreFilterCreator), - lsLister: p.LocalStore, - log: p.Logger, - } - - localExec := &localOperationExecutor{ - objRecv: local, - headRecv: local, - objStore: local, - queryImp: qvc, - rngReader: local, - rngHasher: local, - } - - opExec := &coreOperationExecutor{ - pre: new(coreExecParamsComp), - fin: &coreOperationFinalizer{ - curPlacementBuilder: &corePlacementUtil{ - prevNetMap: false, - placementBuilder: p.Placer, - log: p.Logger, - }, - prevPlacementBuilder: &corePlacementUtil{ - prevNetMap: true, - placementBuilder: p.Placer, - log: p.Logger, - }, - interceptorPreparer: &coreInterceptorPreparer{ - localExec: localExec, - addressStore: p.AddressStore, - }, - workerPool: pool, - traverseExec: exec, - resLogger: &coreResultLogger{ - mLog: requestLogMap(p), - log: p.Logger, - }, - log: p.Logger, - }, - loc: localExec, - } - - srv.objSearcher = &coreObjectSearcher{ - executor: opExec, - } - - childLister := &coreChildrenLister{ - queryFn: coreChildrenQueryFunc, - objSearcher: srv.objSearcher, - log: p.Logger, - timeout: p.SearchParams.Timeout, - } - - childrenRecv := &coreChildrenReceiver{ - timeout: p.HeadParams.Timeout, - } - - chopperTable := _range.NewChopperTable() - - relRecv := &neighborReceiver{ - firstChildQueryFn: firstChildQueryFunc, - leftNeighborQueryFn: leftNeighborQueryFunc, - rightNeighborQueryFn: rightNeighborQueryFunc, - rangeDescRecv: &selectiveRangeRecv{executor: srv.executor}, - } - - straightObjRecv := &straightObjectReceiver{ - executor: opExec, - } - - rngRecv := &corePayloadRangeReceiver{ - chopTable: chopperTable, - relRecv: relRecv, - payloadRecv: &corePayloadPartReceiver{ - rDataRecv: &straightRangeDataReceiver{ - executor: opExec, - }, - windowController: &simpleWindowController{ - windowSize: p.WindowSize, - }, - }, - mErr: map[error]struct{}{ - localstore.ErrOutOfRange: {}, - }, - log: p.Logger, - } - - coreObjRecv := &coreObjectReceiver{ - straightObjRecv: straightObjRecv, - childLister: childLister, - ancestralRecv: &coreAncestralReceiver{ - childrenRecv: childrenRecv, - objRewinder: &coreObjectRewinder{ - transformer: p.ObjectRestorer, - }, - pRangeRecv: rngRecv, - }, - log: p.Logger, - } - childrenRecv.coreObjRecv = coreObjRecv - srv.objRecv = coreObjRecv - srv.payloadRngRecv = rngRecv - - if !p.Assembly { - coreObjRecv.ancestralRecv, coreObjRecv.childLister = nil, nil - } - - p.headRecv = srv.objRecv - - filter, err := newIncomingObjectFilter(p) - if err != nil { - return nil, err - } - - straightStorer := &straightObjectStorer{ - executor: opExec, - } - - bf, err := basicFilter(p) - if err != nil { - return nil, err - } - - transformerObjStorer := &transformingObjectStorer{ - transformer: p.Transformer, - objStorer: straightStorer, - mErr: map[error]struct{}{ - 
transformer.ErrInvalidSGLinking: {}, - - storagegroup2.ErrIncompleteSGInfo: {}, - }, - } - - srv.objStorer = &filteringObjectStorer{ - filter: bf, - objStorer: &bifurcatingObjectStorer{ - straightStorer: &filteringObjectStorer{ - filter: filter, - objStorer: &receivingObjectStorer{ - straightStorer: straightStorer, - vPayload: storage2.NewPayloadVerifier(), - }, - }, - tokenStorer: &tokenObjectStorer{ - tokenStore: p.TokenStore, - objStorer: transformerObjStorer, - }, - }, - } - - srv.objRemover = &coreObjRemover{ - delPrep: &coreDelPreparer{ - childLister: childLister, - }, - straightRem: &straightObjRemover{ - tombCreator: new(coreTombCreator), - objStorer: transformerObjStorer, - }, - tokenStore: p.TokenStore, - mErr: map[error]struct{}{}, - log: p.Logger, - } - - srv.rngRecv = &coreRangeReceiver{ - rngRevealer: &coreRngRevealer{ - relativeRecv: relRecv, - chopTable: chopperTable, - }, - straightRngRecv: &straightRangeReceiver{ - executor: opExec, - }, - mErr: map[error]struct{}{ - localstore.ErrOutOfRange: {}, - }, - log: p.Logger, - } - - return srv, nil -} - -func requestLogMap(p *Params) map[object.RequestType]struct{} { - m := make(map[object.RequestType]struct{}) - - if p.PutParams.LogErrors { - m[object.RequestPut] = struct{}{} - } - - if p.GetParams.LogErrors { - m[object.RequestGet] = struct{}{} - } - - if p.HeadParams.LogErrors { - m[object.RequestHead] = struct{}{} - } - - if p.SearchParams.LogErrors { - m[object.RequestSearch] = struct{}{} - } - - if p.RangeParams.LogErrors { - m[object.RequestRange] = struct{}{} - } - - if p.RangeHashParams.LogErrors { - m[object.RequestRangeHash] = struct{}{} - } - - return m -} - -func (s *objectService) Name() string { return "Object Service" } - -func (s *objectService) Register(g *grpc.Server) { object.RegisterServiceServer(g, s) } diff --git a/pkg/network/transport/object/grpc/status.go b/pkg/network/transport/object/grpc/status.go deleted file mode 100644 index 212ab27a2..000000000 --- a/pkg/network/transport/object/grpc/status.go +++ /dev/null @@ -1,902 +0,0 @@ -package object - -import ( - "fmt" - "sync" - - "github.com/golang/protobuf/proto" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - "github.com/pkg/errors" - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// group of value for status error construction. -type statusInfo struct { - // status code - c codes.Code - // error message - m string - // error details - d []proto.Message -} - -type requestError struct { - // type of request - t object.RequestType - // request handler error - e error -} - -// error implementation used for details attaching. 
-type detailedError struct { - error - - d []proto.Message -} - -type statusCalculator struct { - *sync.RWMutex - - common map[error]*statusInfo - - custom map[requestError]*statusInfo -} - -const panicLogMsg = "rpc handler caused panic" - -const msgServerPanic = "panic occurred during request processing" - -var errServerPanic = errors.New("panic on call handler") - -const msgUnauthenticated = "request does not have valid authentication credentials for the operation" - -var errUnauthenticated = errors.New("unauthenticated request") - -const msgReSigning = "server could not re-sign request" - -var errReSigning = errors.New("could not re-sign request") - -const msgInvalidTTL = "invalid TTL value" - -var errInvalidTTL = errors.New("invalid TTL value") - -const ( - msgNotLocalContainer = "server is not presented in container" - descNotLocalContainer = "server is outside container" -) - -var errNotLocalContainer = errors.New("not local container") - -const msgContainerAffiliationProblem = "server could not check container affiliation" - -var errContainerAffiliationProblem = errors.New("could not check container affiliation") - -const ( - msgContainerNotFound = "container not found" - descContainerNotFound = "handling a non-existent container" -) - -var errContainerNotFound = errors.New("container not found") - -const msgPlacementProblem = "there were problems building the placement vector on the server" - -var errPlacementProblem = errors.New("could not traverse over container") - -const msgOverloaded = "system resource overloaded" - -var errOverloaded = errors.New("system resource overloaded") - -const msgAccessDenied = "access to requested operation is denied" - -var errAccessDenied = errors.New("access denied") - -const msgPutMessageProblem = "invalid message type" - -var msgPutNilObject = "object is null" - -const msgCutObjectPayload = "lack of object payload data" - -const ( - msgMissingTokenKeys = "missing public keys in token" - msgBrokenToken = "token structure failed verification" - msgTokenObjectID = "missing object ID in token" -) - -const msgProcPayloadSize = "max payload size of processing object overflow" - -var errProcPayloadSize = errors.New("max processing object payload size overflow") - -const msgObjectCreationEpoch = "invalid creation epoch of object" - -var errObjectFromTheFuture = errors.New("object from the future") - -const msgObjectPayloadSize = "max object payload size overflow" - -var errObjectPayloadSize = errors.New("max object payload size overflow") - -const msgLocalStorageOverflow = "not enough space in local storage" - -var errLocalStorageOverflow = errors.New("local storage overflow") - -const msgPayloadChecksum = "invalid payload checksum" - -var errPayloadChecksum = errors.New("invalid payload checksum") - -const msgObjectHeadersVerification = "object headers failed verification" - -var errObjectHeadersVerification = errors.New("object headers failed verification") - -const msgForwardPutObject = "forward object failure" - -const msgPutLocalFailure = "local object put failure" - -var errPutLocal = errors.New("local object put failure") - -const msgPrivateTokenRecv = "private token receive failure" - -const msgInvalidSGLinking = "invalid storage group headers" - -const msgIncompleteSGInfo = "collect storage group info failure" - -const msgTransformationFailure = "object preparation failure" - -const msgWrongSGSize = "wrong storage group size" - -var errWrongSGSize = errors.New("wrong storage group size") - -const msgWrongSGHash = "wrong storage group 
homomorphic hash" - -var errWrongSGHash = errors.New("wrong storage group homomorphic hash") - -const msgObjectNotFound = "object not found" - -const msgObjectHeaderNotFound = "object header not found" - -const msgNonAssembly = "assembly option is not enabled on the server" - -const msgPayloadOutOfRange = "range is out of object payload bounds" - -const msgPayloadRangeNotFound = "object payload range not found" - -var errPayloadRangeNotFound = errors.New("object payload range not found") - -const msgMissingToken = "missing token in request" - -const msgPutTombstone = "could not store tombstone" - -const msgDeletePrepare = "delete information preparation failure" - -var errDeletePrepare = errors.New("delete information preparation failure") - -const msgQueryVersion = "unsupported query version" - -const msgSearchQueryUnmarshal = "query unmarshal failure" - -const msgLocalQueryImpose = "local query imposing failure" - -var mStatusCommon = map[error]*statusInfo{ - // RPC implementation recovered panic - errServerPanic: { - c: codes.Internal, - m: msgServerPanic, - }, - // Request authentication credentials problem - errUnauthenticated: { - c: codes.Unauthenticated, - m: msgUnauthenticated, - d: requestAuthDetails(), - }, - // Request re-signing problem - errReSigning: { - c: codes.Internal, - m: msgReSigning, - }, - // Invalid request TTL - errInvalidTTL: { - c: codes.InvalidArgument, - m: msgInvalidTTL, - d: invalidTTLDetails(), - }, - // Container affiliation check problem - errContainerAffiliationProblem: { - c: codes.Internal, - m: msgContainerAffiliationProblem, - }, - // Server is outside container - errNotLocalContainer: { - c: codes.FailedPrecondition, - m: msgNotLocalContainer, - d: containerAbsenceDetails(), - }, - // Container not found in storage - errContainerNotFound: { - c: codes.NotFound, - m: msgContainerNotFound, - }, - // Container placement build problem - errPlacementProblem: { - c: codes.Internal, - m: msgPlacementProblem, - }, - // System resource overloaded - errOverloaded: { - c: codes.Unavailable, - m: msgOverloaded, - }, - // Access violations - errAccessDenied: { - c: codes.PermissionDenied, - m: msgAccessDenied, - }, - // Maximum processing payload size overflow - errProcPayloadSize: { - c: codes.FailedPrecondition, - m: msgProcPayloadSize, - d: nil, // TODO: NSPCC-1048 - }, -} - -var mStatusCustom = map[requestError]*statusInfo{ - // Invalid first message in Put client stream - { - t: object.RequestPut, - e: errHeaderExpected, - }: { - c: codes.InvalidArgument, - m: msgPutMessageProblem, - d: putFirstMessageDetails(), - }, - // Nil object in Put request - { - t: object.RequestPut, - e: errObjectExpected, - }: { - c: codes.InvalidArgument, - m: msgPutNilObject, - d: putNilObjectDetails(), - }, - // Lack of object payload data - { - t: object.RequestPut, - e: transformer.ErrPayloadEOF, - }: { - c: codes.InvalidArgument, - m: msgCutObjectPayload, - d: payloadSizeDetails(), - }, - // Lack of public keys in the token - { - t: object.RequestPut, - e: errMissingOwnerKeys, - }: { - c: codes.PermissionDenied, - m: msgMissingTokenKeys, - d: tokenKeysDetails(), - }, - // Broken token structure - { - t: object.RequestPut, - e: errBrokenToken, - }: { - c: codes.PermissionDenied, - m: msgBrokenToken, - }, - // Missing object ID in token - { - t: object.RequestPut, - e: errWrongTokenAddress, - }: { - c: codes.PermissionDenied, - m: msgTokenObjectID, - d: tokenOIDDetails(), - }, - // Invalid after-first message in stream - { - t: object.RequestPut, - e: errChunkExpected, - }: { 
- c: codes.InvalidArgument, - m: msgPutMessageProblem, - d: putChunkMessageDetails(), - }, - { - t: object.RequestPut, - e: errObjectFromTheFuture, - }: { - c: codes.FailedPrecondition, - m: msgObjectCreationEpoch, - d: nil, // TODO: NSPCC-1048 - }, - { - t: object.RequestPut, - e: errObjectPayloadSize, - }: { - c: codes.FailedPrecondition, - m: msgObjectPayloadSize, - d: nil, // TODO: NSPCC-1048 - }, - { - t: object.RequestPut, - e: errLocalStorageOverflow, - }: { - c: codes.Unavailable, - m: msgLocalStorageOverflow, - d: localStorageOverflowDetails(), - }, - { - t: object.RequestPut, - e: errPayloadChecksum, - }: { - c: codes.InvalidArgument, - m: msgPayloadChecksum, - d: payloadChecksumHeaderDetails(), - }, - { - t: object.RequestPut, - e: errObjectHeadersVerification, - }: { - c: codes.InvalidArgument, - m: msgObjectHeadersVerification, - }, - { - t: object.RequestPut, - e: errIncompleteOperation, - }: { - c: codes.Unavailable, - m: msgForwardPutObject, - }, - { - t: object.RequestPut, - e: errPutLocal, - }: { - c: codes.Internal, - m: msgPutLocalFailure, - }, - { - t: object.RequestPut, - e: errTokenRetrieval, - }: { - c: codes.Aborted, - m: msgPrivateTokenRecv, - }, - { - t: object.RequestPut, - e: transformer.ErrInvalidSGLinking, - }: { - c: codes.InvalidArgument, - m: msgInvalidSGLinking, - d: sgLinkingDetails(), - }, - { - t: object.RequestPut, - e: storagegroup.ErrIncompleteSGInfo, - }: { - c: codes.NotFound, - m: msgIncompleteSGInfo, - }, - { - t: object.RequestPut, - e: errTransformer, - }: { - c: codes.Internal, - m: msgTransformationFailure, - }, - { - t: object.RequestPut, - e: errWrongSGSize, - }: { - c: codes.InvalidArgument, - m: msgWrongSGSize, - }, - { - t: object.RequestPut, - e: errWrongSGHash, - }: { - c: codes.InvalidArgument, - m: msgWrongSGHash, - }, - { - t: object.RequestGet, - e: errIncompleteOperation, - }: { - c: codes.NotFound, - m: msgObjectNotFound, - }, - { - t: object.RequestHead, - e: errIncompleteOperation, - }: { - c: codes.NotFound, - m: msgObjectHeaderNotFound, - }, - { - t: object.RequestGet, - e: errNonAssembly, - }: { - c: codes.Unimplemented, - m: msgNonAssembly, - }, - { - t: object.RequestHead, - e: errNonAssembly, - }: { - c: codes.Unimplemented, - m: msgNonAssembly, - }, - { - t: object.RequestGet, - e: childrenNotFound, - }: { - c: codes.NotFound, - m: msgObjectNotFound, - }, - { - t: object.RequestHead, - e: childrenNotFound, - }: { - c: codes.NotFound, - m: msgObjectHeaderNotFound, - }, - { - t: object.RequestRange, - e: localstore.ErrOutOfRange, - }: { - c: codes.OutOfRange, - m: msgPayloadOutOfRange, - }, - { - t: object.RequestRange, - e: errPayloadRangeNotFound, - }: { - c: codes.NotFound, - m: msgPayloadRangeNotFound, - }, - { - t: object.RequestDelete, - e: errNilToken, - }: { - c: codes.InvalidArgument, - m: msgMissingToken, - d: missingTokenDetails(), - }, - { - t: object.RequestDelete, - e: errMissingOwnerKeys, - }: { - c: codes.PermissionDenied, - m: msgMissingTokenKeys, - d: tokenKeysDetails(), - }, - { - t: object.RequestDelete, - e: errBrokenToken, - }: { - c: codes.PermissionDenied, - m: msgBrokenToken, - }, - { - t: object.RequestDelete, - e: errWrongTokenAddress, - }: { - c: codes.PermissionDenied, - m: msgTokenObjectID, - d: tokenOIDDetails(), - }, - { - t: object.RequestDelete, - e: errTokenRetrieval, - }: { - c: codes.Aborted, - m: msgPrivateTokenRecv, - }, - { - t: object.RequestDelete, - e: errIncompleteOperation, - }: { - c: codes.Unavailable, - m: msgPutTombstone, - }, - { - t: object.RequestDelete, - e: 
errDeletePrepare, - }: { - c: codes.Internal, - m: msgDeletePrepare, - }, - { - t: object.RequestSearch, - e: errUnsupportedQueryVersion, - }: { - c: codes.Unimplemented, - m: msgQueryVersion, - }, - { - t: object.RequestSearch, - e: errSearchQueryUnmarshal, - }: { - c: codes.InvalidArgument, - m: msgSearchQueryUnmarshal, - }, - { - t: object.RequestSearch, - e: errLocalQueryImpose, - }: { - c: codes.Internal, - m: msgLocalQueryImpose, - }, - { - t: object.RequestRangeHash, - e: errPayloadRangeNotFound, - }: { - c: codes.NotFound, - m: msgPayloadRangeNotFound, - }, - { - t: object.RequestRangeHash, - e: localstore.ErrOutOfRange, - }: { - c: codes.OutOfRange, - m: msgPayloadOutOfRange, - }, -} - -func serviceStatusCalculator() *statusCalculator { - s := newStatusCalculator() - - for k, v := range mStatusCommon { - s.addCommon(k, v) - } - - for k, v := range mStatusCustom { - s.addCustom(k, v) - } - - return s -} - -func statusError(v *statusInfo) (bool, error) { - st, err := status.New(v.c, v.m).WithDetails(v.d...) - if err != nil { - return false, nil - } - - return true, st.Err() -} - -func (s *statusCalculator) addCommon(k error, v *statusInfo) { - s.Lock() - s.common[k] = v - s.Unlock() -} - -func (s *statusCalculator) addCustom(k requestError, v *statusInfo) { - s.Lock() - s.custom[k] = v - s.Unlock() -} - -func (s *statusCalculator) make(e requestError) error { - s.RLock() - defer s.RUnlock() - - var ( - ok bool - v *statusInfo - d []proto.Message - err = errors.Cause(e.e) - ) - - if v, ok := err.(*detailedError); ok { - d = v.d - err = v.error - } else if v, ok := err.(detailedError); ok { - d = v.d - err = v.error - } - - if v, ok = s.common[err]; !ok { - if v, ok = s.custom[requestError{ - t: e.t, - e: err, - }]; !ok { - return e.e - } - } - - vv := *v - - vv.d = append(vv.d, d...) 
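// vv is a copy of the matched statusInfo, so the request-specific details peeled
// from a detailedError above are appended to the copy rather than to the entries
// stored in s.common/s.custom; if statusError cannot attach the details to a gRPC
// status, the original handler error is returned unchanged below.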
- - if ok, res := statusError(&vv); ok { - return res - } - - return e.e -} - -func newStatusCalculator() *statusCalculator { - return &statusCalculator{ - RWMutex: new(sync.RWMutex), - common: make(map[error]*statusInfo), - custom: make(map[requestError]*statusInfo), - } -} - -func requestAuthDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "Signatures", - Description: "should be formed according to VerificationHeader signing", - }, - }, - }, - } -} - -func invalidTTLDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "TTL", - Description: "should greater or equal than NonForwardingTTL", - }, - }, - }, - } -} - -func containerAbsenceDetails() []proto.Message { - return []proto.Message{ - &errdetails.PreconditionFailure{ - Violations: []*errdetails.PreconditionFailure_Violation{ - { - Type: "container options", - Subject: "container nodes", - Description: "server node should be presented container", - }, - }, - }, - } -} - -func containerDetails(cid CID, desc string) []proto.Message { - return []proto.Message{ - &errdetails.ResourceInfo{ - ResourceType: "container", - ResourceName: cid.String(), - Description: desc, - }, - } -} - -func putFirstMessageDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R", - Description: "should be PutRequest_Header", - }, - }, - }, - } -} - -func putChunkMessageDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R", - Description: "should be PutRequest_Chunk", - }, - { - Field: "R.Chunk", - Description: "should not be empty", - }, - }, - }, - } -} - -func putNilObjectDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object", - Description: "should not be null", - }, - }, - }, - } -} - -func payloadSizeDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.SystemHeader.PayloadLength", - Description: "should be equal to the sum of the sizes of the streaming payload chunks", - }, - }, - }, - } -} - -func tokenKeysDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Token.PublicKeys", - Description: "should be non-empty list of marshaled ecdsa public keys", - }, - }, - }, - } -} - -func tokenOIDDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Token.ObjectID", - Description: "should contain requested object", - }, - }, - }, - } -} - -func maxProcPayloadSizeDetails(sz uint64) []proto.Message { - return []proto.Message{ - &errdetails.PreconditionFailure{ - Violations: []*errdetails.PreconditionFailure_Violation{ - { - Type: "object requirements", - Subject: "max processing payload size", - Description: fmt.Sprintf("should not be greater than %d bytes", sz), - }, - }, - }, - } -} - -func objectCreationEpochDetails(e uint64) []proto.Message { - return []proto.Message{ - &errdetails.PreconditionFailure{ - Violations: 
[]*errdetails.PreconditionFailure_Violation{ - { - Type: "object requirements", - Subject: "creation epoch", - Description: fmt.Sprintf("should not be greater than %d", e), - }, - }, - }, - } -} - -func maxObjectPayloadSizeDetails(sz uint64) []proto.Message { - return []proto.Message{ - &errdetails.PreconditionFailure{ - Violations: []*errdetails.PreconditionFailure_Violation{ - { - Type: "object requirements", - Subject: "max object payload size", - Description: fmt.Sprintf("should not be greater than %d bytes", sz), - }, - }, - }, - } -} - -func localStorageOverflowDetails() []proto.Message { - return []proto.Message{ - &errdetails.ResourceInfo{ - ResourceType: "local storage", - ResourceName: "disk storage", - Description: "not enough space", - }, - } -} - -func payloadChecksumHeaderDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.Headers", - Description: "should contain correct payload checksum header", - }, - }, - }, - } -} - -func objectHeadersVerificationDetails(e error) []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.Headers", - Description: e.Error(), - }, - }, - }, - } -} - -func privateTokenRecvDetails(id session.TokenID, owner OwnerID) []proto.Message { - return []proto.Message{ - &errdetails.ResourceInfo{ - ResourceType: "private token", - ResourceName: id.String(), - Owner: owner.String(), - Description: "problems with getting a private token", - }, - } -} - -func sgLinkingDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.Headers", - Description: "should not contain Header_StorageGroup and Link_StorageGroup or should contain both", - }, - }, - }, - } -} - -func sgSizeDetails(exp, act uint64) []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.Headers", - Description: fmt.Sprintf("wrong storage group size: expected %d, collected %d", exp, act), - }, - }, - }, - } -} - -func sgHashDetails(exp, act Hash) []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "R.Object.Headers", - Description: fmt.Sprintf("wrong storage group hash: expected %s, collected %s", exp, act), - }, - }, - }, - } -} - -func missingTokenDetails() []proto.Message { - return []proto.Message{ - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequest_FieldViolation{ - { - Field: "Token", - Description: "should not be null", - }, - }, - }, - } -} diff --git a/pkg/network/transport/object/grpc/status_test.go b/pkg/network/transport/object/grpc/status_test.go deleted file mode 100644 index d6c32cdbe..000000000 --- a/pkg/network/transport/object/grpc/status_test.go +++ /dev/null @@ -1,1210 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/golang/protobuf/proto" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport/storagegroup" - testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - 
"github.com/pkg/errors" - "github.com/stretchr/testify/require" - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type testPanickingHandler struct{} - -func (*testPanickingHandler) handleRequest(context.Context, handleRequestParams) (interface{}, error) { - panic("panicking handler") -} - -func TestStatusCalculator(t *testing.T) { - t.Run("unknown error", func(t *testing.T) { - e := errors.New("error for test") - - s := newStatusCalculator() - - require.Equal(t, e, s.make(requestError{ - e: e, - })) - }) - - t.Run("common error", func(t *testing.T) { - v := &statusInfo{ - c: codes.Aborted, - m: "test error message", - d: []proto.Message{ - &errdetails.ResourceInfo{ - ResourceType: "type", - ResourceName: "name", - Owner: "owner", - Description: "description", - }, - }, - } - - s := newStatusCalculator() - - e := errors.New("error for test") - - s.addCommon(e, v) - - ok, err := statusError(v) - require.True(t, ok) - - require.Equal(t, - err, - s.make(requestError{ - e: e, - }), - ) - }) - - t.Run("custom error", func(t *testing.T) { - var ( - c1, c2 = codes.Aborted, codes.AlreadyExists - t1, t2 = object.RequestPut, object.RequestGet - e1, e2 = errors.New("test error 1"), errors.New("test error 2") - m1, m2 = "message 1", "message 2" - ) - - s := newStatusCalculator() - - s1 := &statusInfo{ - c: c1, - m: m1, - } - - re1 := requestError{ - t: t1, - e: e1, - } - - s.addCustom(re1, s1) - - s2 := &statusInfo{ - c: c2, - m: m2, - } - - r2 := requestError{ - t: t2, - e: e2, - } - - s.addCustom(r2, s2) - - ok, err1 := statusError(s1) - require.True(t, ok) - - ok, err2 := statusError(s2) - require.True(t, ok) - - require.Equal(t, - err1, - s.make(re1), - ) - - require.Equal(t, - err2, - s.make(r2), - ) - }) -} - -func testStatusCommon(t *testing.T, h requestHandler, c codes.Code, m string, d []interface{}) { - ctx := context.TODO() - - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - errPut := s.Put(&testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - }) - - errGet := s.Get(new(object.GetRequest), new(testGetEntity)) - - _, errHead := s.Head(ctx, new(object.HeadRequest)) - - _, errDelete := s.Head(ctx, new(object.HeadRequest)) - - errRange := s.GetRange(new(GetRangeRequest), new(testRangeEntity)) - - _, errRangeHash := s.GetRangeHash(ctx, new(object.GetRangeHashRequest)) - - errSearch := s.Search(new(object.SearchRequest), new(testSearchEntity)) - - errs := []error{ - errPut, - errGet, - errHead, - errRange, - errRangeHash, - errSearch, - errDelete, - } - - for _, err := range errs { - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, c, st.Code()) - require.Equal(t, m, st.Message()) - require.Equal(t, d, st.Details()) - } -} - -func TestStatusCommon(t *testing.T) { - t.Run("handler panic", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - new(testPanickingHandler), - codes.Internal, - msgServerPanic, - ds, - ) - }) - - t.Run("request authentication", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range requestAuthDetails() { - ds = append(ds, d) - } - - testStatusCommon(t, - &testPutEntity{ - err: errUnauthenticated, - }, - codes.Unauthenticated, - msgUnauthenticated, - ds, - ) - }) - - t.Run("re-signing problem", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errReSigning, - }, - codes.Internal, - 
msgReSigning, - ds, - ) - }) - - t.Run("invalid TTL", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range invalidTTLDetails() { - ds = append(ds, d) - } - - testStatusCommon(t, - &testPutEntity{ - err: errInvalidTTL, - }, - codes.InvalidArgument, - msgInvalidTTL, - ds, - ) - }) - - t.Run("container affiliation problem", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errContainerAffiliationProblem, - }, - codes.Internal, - msgContainerAffiliationProblem, - ds, - ) - }) - - t.Run("container not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errContainerNotFound, - }, - codes.NotFound, - msgContainerNotFound, - ds, - ) - }) - - t.Run("server is missing in container", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range containerAbsenceDetails() { - ds = append(ds, d) - } - - testStatusCommon(t, - &testPutEntity{ - err: errNotLocalContainer, - }, - codes.FailedPrecondition, - msgNotLocalContainer, - ds, - ) - }) - - t.Run("placement problem", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errPlacementProblem, - }, - codes.Internal, - msgPlacementProblem, - ds, - ) - }) - - t.Run("system resource overloaded", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errOverloaded, - }, - codes.Unavailable, - msgOverloaded, - ds, - ) - }) - - t.Run("access denied", func(t *testing.T) { - ds := make([]interface{}, 0) - - testStatusCommon(t, - &testPutEntity{ - err: errAccessDenied, - }, - codes.PermissionDenied, - msgAccessDenied, - ds, - ) - }) - - t.Run("max processing payload size overflow", func(t *testing.T) { - maxSz := uint64(100) - - ds := make([]interface{}, 0) - - for _, d := range maxProcPayloadSizeDetails(maxSz) { - ds = append(ds, d) - } - - testStatusCommon(t, - &testPutEntity{ - err: &detailedError{ - error: errProcPayloadSize, - d: maxProcPayloadSizeDetails(maxSz), - }, - }, - codes.FailedPrecondition, - msgProcPayloadSize, - ds, - ) - }) -} - -func testStatusPut(t *testing.T, h requestHandler, srv object.Service_PutServer, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - err := s.Put(srv) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusPut(t *testing.T) { - t.Run("invalid first message type", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range putFirstMessageDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestChunk(nil), - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgPutMessageProblem, - } - - testStatusPut(t, nil, srv, info, ds) - }) - - t.Run("invalid first message type", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range putNilObjectDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(nil), - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgPutNilObject, - } - - testStatusPut(t, nil, srv, info, ds) - }) - - t.Run("invalid first message type", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range payloadSizeDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: 
object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: transformer.ErrPayloadEOF, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgCutObjectPayload, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("token w/o public keys", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range tokenKeysDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errMissingOwnerKeys, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgMissingTokenKeys, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("broken token", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errBrokenToken, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgBrokenToken, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("missing object in token", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range tokenOIDDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errWrongTokenAddress, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgTokenObjectID, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("object from future", func(t *testing.T) { - e := uint64(3) - - ds := make([]interface{}, 0) - - for _, d := range objectCreationEpochDetails(e) { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errObjectFromTheFuture, - d: objectCreationEpochDetails(e), - }, - } - - info := statusInfo{ - c: codes.FailedPrecondition, - m: msgObjectCreationEpoch, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("max object payload size", func(t *testing.T) { - sz := uint64(3) - - ds := make([]interface{}, 0) - - for _, d := range maxObjectPayloadSizeDetails(sz) { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errObjectPayloadSize, - d: maxObjectPayloadSizeDetails(sz), - }, - } - - info := statusInfo{ - c: codes.FailedPrecondition, - m: msgObjectPayloadSize, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("local storage overflow", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range localStorageOverflowDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errLocalStorageOverflow, - } - - info := statusInfo{ - c: codes.Unavailable, - m: msgLocalStorageOverflow, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("invalid payload checksum", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range payloadChecksumHeaderDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errPayloadChecksum, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgPayloadChecksum, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("invalid object header structure", func(t *testing.T) { - e := errors.New("test error") - - ds := make([]interface{}, 0) - - for _, d := range objectHeadersVerificationDetails(e) { - ds = append(ds, d) - } - - srv := 
&testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errObjectHeadersVerification, - d: objectHeadersVerificationDetails(e), - }, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgObjectHeadersVerification, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("put generated object failure", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errIncompleteOperation, - } - - info := statusInfo{ - c: codes.Unavailable, - m: msgForwardPutObject, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("private token receive failure", func(t *testing.T) { - owner := OwnerID{1, 2, 3} - tokenID := session.TokenID{4, 5, 6} - - ds := make([]interface{}, 0) - - for _, d := range privateTokenRecvDetails(tokenID, owner) { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errTokenRetrieval, - d: privateTokenRecvDetails(tokenID, owner), - }, - } - - info := statusInfo{ - c: codes.Aborted, - m: msgPrivateTokenRecv, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("invalid SG headers", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range sgLinkingDetails() { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: transformer.ErrInvalidSGLinking, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgInvalidSGLinking, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("incomplete SG info", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: storagegroup.ErrIncompleteSGInfo, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgIncompleteSGInfo, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("object transformation failure", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: errTransformer, - } - - info := statusInfo{ - c: codes.Internal, - m: msgTransformationFailure, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("wrong SG size", func(t *testing.T) { - var exp, act uint64 = 1, 2 - - ds := make([]interface{}, 0) - - for _, d := range sgSizeDetails(exp, act) { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errWrongSGSize, - d: sgSizeDetails(exp, act), - }, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgWrongSGSize, - } - - testStatusPut(t, h, srv, info, ds) - }) - - t.Run("wrong SG size", func(t *testing.T) { - var exp, act = Hash{1}, Hash{2} - - ds := make([]interface{}, 0) - - for _, d := range sgHashDetails(exp, act) { - ds = append(ds, d) - } - - srv := &testPutEntity{ - res: object.MakePutRequestHeader(new(Object)), - } - - h := &testPutEntity{ - err: &detailedError{ - error: errWrongSGHash, - d: sgHashDetails(exp, act), - }, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgWrongSGHash, - } - - testStatusPut(t, h, srv, info, ds) - }) -} - -func testStatusGet(t *testing.T, h requestHandler, srv object.Service_GetServer, info statusInfo, d []interface{}) { - s := &objectService{ - 
log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - err := s.Get(new(object.GetRequest), srv) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusGet(t *testing.T) { - t.Run("object not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testGetEntity) - - h := &testGetEntity{ - err: errIncompleteOperation, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgObjectNotFound, - } - - testStatusGet(t, h, srv, info, ds) - }) - - t.Run("non-assembly", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testGetEntity) - - h := &testGetEntity{ - err: errNonAssembly, - } - - info := statusInfo{ - c: codes.Unimplemented, - m: msgNonAssembly, - } - - testStatusGet(t, h, srv, info, ds) - }) - - t.Run("children not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testGetEntity) - - h := &testGetEntity{ - err: childrenNotFound, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgObjectNotFound, - } - - testStatusGet(t, h, srv, info, ds) - }) -} - -func testStatusHead(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - _, err := s.Head(context.TODO(), new(object.HeadRequest)) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusHead(t *testing.T) { - t.Run("object not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errIncompleteOperation, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgObjectHeaderNotFound, - } - - testStatusHead(t, h, info, ds) - }) - - t.Run("non-assembly", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errNonAssembly, - } - - info := statusInfo{ - c: codes.Unimplemented, - m: msgNonAssembly, - } - - testStatusHead(t, h, info, ds) - }) - - t.Run("children not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: childrenNotFound, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgObjectHeaderNotFound, - } - - testStatusHead(t, h, info, ds) - }) -} - -func testStatusGetRange(t *testing.T, h requestHandler, srv object.Service_GetRangeServer, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - err := s.GetRange(new(GetRangeRequest), srv) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusGetRange(t *testing.T) { - t.Run("payload range is out of bounds", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testRangeEntity) - - h := &testRangeEntity{ - err: localstore.ErrOutOfRange, - } - - info := statusInfo{ - c: codes.OutOfRange, - m: msgPayloadOutOfRange, - } - - testStatusGetRange(t, h, srv, info, ds) - }) - - t.Run("payload range not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testRangeEntity) - - h := &testRangeEntity{ - err: errPayloadRangeNotFound, - } - - info := statusInfo{ - c: 
codes.NotFound, - m: msgPayloadRangeNotFound, - } - - testStatusGetRange(t, h, srv, info, ds) - }) -} - -func testStatusDelete(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - _, err := s.Delete(context.TODO(), new(object.DeleteRequest)) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusDelete(t *testing.T) { - t.Run("missing token", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range missingTokenDetails() { - ds = append(ds, d) - } - - h := &testHeadEntity{ - err: errNilToken, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgMissingToken, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("missing public keys in token", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range tokenKeysDetails() { - ds = append(ds, d) - } - - h := &testHeadEntity{ - err: errMissingOwnerKeys, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgMissingTokenKeys, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("broken token structure", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errBrokenToken, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgBrokenToken, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("missing object ID in token", func(t *testing.T) { - ds := make([]interface{}, 0) - - for _, d := range tokenOIDDetails() { - ds = append(ds, d) - } - - h := &testHeadEntity{ - err: errWrongTokenAddress, - } - - info := statusInfo{ - c: codes.PermissionDenied, - m: msgTokenObjectID, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("private token receive", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errTokenRetrieval, - } - - info := statusInfo{ - c: codes.Aborted, - m: msgPrivateTokenRecv, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("incomplete tombstone put", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errIncompleteOperation, - } - - info := statusInfo{ - c: codes.Unavailable, - m: msgPutTombstone, - } - - testStatusDelete(t, h, info, ds) - }) - - t.Run("delete preparation failure", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testHeadEntity{ - err: errDeletePrepare, - } - - info := statusInfo{ - c: codes.Internal, - m: msgDeletePrepare, - } - - testStatusDelete(t, h, info, ds) - }) -} - -func testStatusSearch(t *testing.T, h requestHandler, srv object.Service_SearchServer, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - err := s.Search(new(object.SearchRequest), srv) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusSearch(t *testing.T) { - t.Run("unsupported query version", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testSearchEntity) - - h := &testSearchEntity{ - err: errUnsupportedQueryVersion, - } - - info := statusInfo{ - c: codes.Unimplemented, - m: msgQueryVersion, - } - - testStatusSearch(t, h, srv, info, ds) - }) - - t.Run("query unmarshal failure", func(t 
*testing.T) { - ds := make([]interface{}, 0) - - srv := new(testSearchEntity) - - h := &testSearchEntity{ - err: errSearchQueryUnmarshal, - } - - info := statusInfo{ - c: codes.InvalidArgument, - m: msgSearchQueryUnmarshal, - } - - testStatusSearch(t, h, srv, info, ds) - }) - - t.Run("query imposing problems", func(t *testing.T) { - ds := make([]interface{}, 0) - - srv := new(testSearchEntity) - - h := &testSearchEntity{ - err: errLocalQueryImpose, - } - - info := statusInfo{ - c: codes.Internal, - m: msgLocalQueryImpose, - } - - testStatusSearch(t, h, srv, info, ds) - }) -} - -func testStatusGetRangeHash(t *testing.T, h requestHandler, info statusInfo, d []interface{}) { - s := &objectService{ - log: testlogger.NewLogger(false), - requestHandler: h, - statusCalculator: serviceStatusCalculator(), - } - - _, err := s.GetRangeHash(context.TODO(), new(object.GetRangeHashRequest)) - - st, ok := status.FromError(err) - require.True(t, ok) - - require.Equal(t, info.c, st.Code()) - require.Equal(t, info.m, st.Message()) - require.Equal(t, d, st.Details()) -} - -func TestStatusGetRangeHash(t *testing.T) { - t.Run("payload range not found", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testRangeEntity{ - err: errPayloadRangeNotFound, - } - - info := statusInfo{ - c: codes.NotFound, - m: msgPayloadRangeNotFound, - } - - testStatusGetRangeHash(t, h, info, ds) - }) - - t.Run("range out-of-bounds", func(t *testing.T) { - ds := make([]interface{}, 0) - - h := &testRangeEntity{ - err: localstore.ErrOutOfRange, - } - - info := statusInfo{ - c: codes.OutOfRange, - m: msgPayloadOutOfRange, - } - - testStatusGetRangeHash(t, h, info, ds) - }) -} diff --git a/pkg/network/transport/object/grpc/token.go b/pkg/network/transport/object/grpc/token.go deleted file mode 100644 index 989a8cfd7..000000000 --- a/pkg/network/transport/object/grpc/token.go +++ /dev/null @@ -1,105 +0,0 @@ -package object - -import ( - "context" - "crypto/ecdsa" - - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/services/id" - "github.com/pkg/errors" -) - -type sessionTokenVerifier interface { - verifySessionToken(context.Context, service.SessionToken) error -} - -type complexTokenVerifier struct { - verifiers []sessionTokenVerifier -} - -type tokenSignatureVerifier struct { - ownerKeys []*ecdsa.PublicKey -} - -type tokenEpochsVerifier struct { - epochRecv EpochReceiver -} - -type tokenPreProcessor struct { - staticVerifier sessionTokenVerifier -} - -var errCreatedAfterExpiration = errors.New("creation epoch number is greater than expired one") - -var errTokenExpired = errors.New("token is expired") - -var errForbiddenSpawn = errors.New("request spawn is forbidden") - -func (s tokenPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { - token := req.GetSessionToken() - if token == nil { - return nil - } - - if !allowedSpawn(token.GetVerb(), req.Type()) { - return errForbiddenSpawn - } - - if err := id.VerifyKey(token); err != nil { - return err - } - - ownerKeyBytes := token.GetOwnerKey() - - verifier := newComplexTokenVerifier( - s.staticVerifier, - &tokenSignatureVerifier{ - ownerKeys: []*ecdsa.PublicKey{ - crypto.UnmarshalPublicKey(ownerKeyBytes), - }, - }, - ) - - return verifier.verifySessionToken(ctx, token) -} - -func newComplexTokenVerifier(verifiers ...sessionTokenVerifier) sessionTokenVerifier { - return &complexTokenVerifier{ - verifiers: verifiers, - } -} - -func (s complexTokenVerifier) verifySessionToken(ctx 
context.Context, token service.SessionToken) error { - for i := range s.verifiers { - if s.verifiers[i] == nil { - continue - } else if err := s.verifiers[i].verifySessionToken(ctx, token); err != nil { - return err - } - } - - return nil -} - -func (s tokenSignatureVerifier) verifySessionToken(ctx context.Context, token service.SessionToken) error { - verifiedToken := service.NewVerifiedSessionToken(token) - - for i := range s.ownerKeys { - if err := service.VerifySignatureWithKey(s.ownerKeys[i], verifiedToken); err != nil { - return err - } - } - - return nil -} - -func (s tokenEpochsVerifier) verifySessionToken(ctx context.Context, token service.SessionToken) error { - if expired := token.ExpirationEpoch(); token.CreationEpoch() > expired { - return errCreatedAfterExpiration - } else if s.epochRecv.Epoch() > expired { - return errTokenExpired - } - - return nil -} diff --git a/pkg/network/transport/object/grpc/token_test.go b/pkg/network/transport/object/grpc/token_test.go deleted file mode 100644 index d26bf3fb5..000000000 --- a/pkg/network/transport/object/grpc/token_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package object - -import ( - "context" - "errors" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/stretchr/testify/require" -) - -// Entity for mocking interfaces. -// Implementation of any interface intercepts arguments via f (if not nil). -// If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. -type testTokenEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
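The epoch checks above are the heart of session token lifetime validation. The following standalone sketch mirrors tokenEpochsVerifier.verifySessionToken with a hypothetical minimal token type (the real service.SessionToken exposes CreationEpoch/ExpirationEpoch accessors): a token is rejected if it was created after its expiration epoch, or if the current epoch has already passed expiration.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical minimal token for illustration only; the real
// service.SessionToken carries more fields and accessor methods.
type token struct{ creation, expiration uint64 }

var (
	errCreatedAfterExpiration = errors.New("creation epoch number is greater than expired one")
	errTokenExpired           = errors.New("token is expired")
)

// checkEpochs mirrors tokenEpochsVerifier.verifySessionToken: the token is
// valid only if creation <= expiration and the current epoch has not yet
// passed the expiration epoch.
func checkEpochs(t token, currentEpoch uint64) error {
	if t.creation > t.expiration {
		return errCreatedAfterExpiration
	}
	if currentEpoch > t.expiration {
		return errTokenExpired
	}
	return nil
}

func main() {
	fmt.Println(checkEpochs(token{creation: 1, expiration: 5}, 3)) // <nil>
	fmt.Println(checkEpochs(token{creation: 1, expiration: 5}, 6)) // token is expired
	fmt.Println(checkEpochs(token{creation: 7, expiration: 5}, 3)) // creation epoch number is greater than expired one
}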
- err error -} - -func (s testTokenEntity) Epoch() uint64 { - return s.res.(uint64) -} - -func (s testTokenEntity) verifySessionToken(_ context.Context, token service.SessionToken) error { - if s.f != nil { - s.f(token) - } - return s.err -} - -func TestTokenPreProcessor(t *testing.T) { - ctx := context.TODO() - - t.Run("nil token", func(t *testing.T) { - var req serviceRequest = new(object.PutRequest) - require.Nil(t, req.GetSessionToken()) - - s := new(tokenPreProcessor) - - require.NoError(t, s.preProcess(ctx, req)) - }) - - t.Run("forbidden spawn", func(t *testing.T) { - token := new(service.Token) - - req := new(object.PutRequest) - req.SetToken(token) - - token.SetVerb(service.Token_Info_Get) - - s := new(tokenPreProcessor) - - require.EqualError(t, s.preProcess(ctx, req), errForbiddenSpawn.Error()) - }) - - t.Run("owner key verifier failure", func(t *testing.T) { - ownerKey := &test.DecodeKey(0).PublicKey - - ownerID, err := refs.NewOwnerID(ownerKey) - require.NoError(t, err) - - token := new(service.Token) - token.SetOwnerID(ownerID) - - ownerKeyBytes := crypto.MarshalPublicKey(ownerKey) - ownerKeyBytes[0]++ - token.SetOwnerKey(ownerKeyBytes) - - req := new(object.PutRequest) - req.SetToken(token) - - s := new(tokenPreProcessor) - - require.Error(t, s.preProcess(ctx, req)) - }) - - t.Run("static verifier error", func(t *testing.T) { - vErr := errors.New("test error for static verifier") - - ownerKey := &test.DecodeKey(0).PublicKey - - ownerID, err := refs.NewOwnerID(ownerKey) - require.NoError(t, err) - - token := new(service.Token) - token.SetOwnerID(ownerID) - token.SetOwnerKey(crypto.MarshalPublicKey(ownerKey)) - - req := new(object.PutRequest) - req.SetToken(token) - - s := &tokenPreProcessor{ - staticVerifier: &testTokenEntity{ - f: func(items ...interface{}) { - require.Equal(t, token, items[0]) - }, - err: vErr, - }, - } - - require.EqualError(t, s.preProcess(ctx, req), vErr.Error()) - }) -} - -func TestTokenEpochsVerifier(t *testing.T) { - ctx := context.TODO() - - t.Run("created after expiration", func(t *testing.T) { - token := new(service.Token) - token.SetExpirationEpoch(1) - token.SetCreationEpoch(token.ExpirationEpoch() + 1) - - s := new(tokenEpochsVerifier) - - require.EqualError(t, s.verifySessionToken(ctx, token), errCreatedAfterExpiration.Error()) - }) - - t.Run("expired token", func(t *testing.T) { - token := new(service.Token) - token.SetExpirationEpoch(1) - - s := &tokenEpochsVerifier{ - epochRecv: &testTokenEntity{ - res: token.ExpirationEpoch() + 1, - }, - } - - require.EqualError(t, s.verifySessionToken(ctx, token), errTokenExpired.Error()) - }) - - t.Run("valid token", func(t *testing.T) { - token := new(service.Token) - token.SetCreationEpoch(1) - token.SetExpirationEpoch(token.CreationEpoch() + 1) - - s := &tokenEpochsVerifier{ - epochRecv: &testTokenEntity{ - res: token.ExpirationEpoch() - 1, - }, - } - - require.NoError(t, s.verifySessionToken(ctx, token)) - }) -} diff --git a/pkg/network/transport/object/grpc/transport_implementations.go b/pkg/network/transport/object/grpc/transport_implementations.go deleted file mode 100644 index 6666c76c3..000000000 --- a/pkg/network/transport/object/grpc/transport_implementations.go +++ /dev/null @@ -1,743 +0,0 @@ -package object - -import ( - "bytes" - "context" - "crypto/ecdsa" - "fmt" - "io" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - crypto 
"github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // MultiTransportParams groups the parameters for object transport component's constructor. - MultiTransportParams struct { - AddressStore storage.AddressStoreComponent - EpochReceiver EpochReceiver - RemoteService RemoteService - Logger *zap.Logger - Key *ecdsa.PrivateKey - PutTimeout time.Duration - GetTimeout time.Duration - HeadTimeout time.Duration - SearchTimeout time.Duration - RangeHashTimeout time.Duration - DialTimeout time.Duration - - PrivateTokenStore session.PrivateTokenStore - } - - transportComponent struct { - reqSender requestSender - - resTracker resultTracker - - getCaller remoteProcessCaller - putCaller remoteProcessCaller - headCaller remoteProcessCaller - rangeCaller remoteProcessCaller - rangeHashCaller remoteProcessCaller - searchCaller remoteProcessCaller - } - - requestSender interface { - sendRequest(context.Context, sendParams) (interface{}, error) - } - - sendParams struct { - req transport.MetaInfo - node multiaddr.Multiaddr - handler remoteProcessCaller - } - - clientInfo struct { - sc object.ServiceClient - key *ecdsa.PublicKey - } - - remoteProcessCaller interface { - call(context.Context, serviceRequest, *clientInfo) (interface{}, error) - } - - getCaller struct { - } - - putCaller struct { - } - - headCaller struct { - } - - rangeCaller struct { - } - - rangeHashCaller struct { - } - - searchCaller struct { - } - - coreRequestSender struct { - requestPrep transportRequestPreparer - addressStore storage.AddressStoreComponent - remoteService RemoteService - - putTimeout time.Duration - getTimeout time.Duration - searchTimeout time.Duration - headTimeout time.Duration - rangeHashTimeout time.Duration - dialTimeout time.Duration - } - - signingFunc func(*ecdsa.PrivateKey, service.RequestSignedData) error - - coreRequestPreparer struct { - epochRecv EpochReceiver - key *ecdsa.PrivateKey - signingFunc signingFunc - - privateTokenStore session.PrivateTokenSource - } - - transportRequestPreparer interface { - prepareRequest(transport.MetaInfo) (serviceRequest, error) - } - - transportRequest struct { - serviceRequest - timeout time.Duration - } - - putRequestSequence struct { - *object.PutRequest - chunks []*object.PutRequest - } - - rawMetaInfo struct { - raw bool - ttl uint32 - timeout time.Duration - token service.SessionToken - rt object.RequestType - bearer service.BearerToken - extHdrs []service.ExtendedHeader - } - - rawAddrInfo struct { - *rawMetaInfo - addr Address - } -) - -const ( - minRemoteRequestTimeout = 5 * time.Second - minDialTimeout = 500 * time.Millisecond -) - -const pmWrongRequestType = "unknown type: %T" - -var ( - _ serviceRequest = (*putRequestSequence)(nil) - _ transport.MetaInfo = (*transportRequest)(nil) - _ requestSender = (*coreRequestSender)(nil) - _ transport.ObjectTransport = (*transportComponent)(nil) - _ transportRequestPreparer = (*coreRequestPreparer)(nil) - _ transport.MetaInfo = (*rawMetaInfo)(nil) - _ transport.AddressInfo = (*rawAddrInfo)(nil) - - _ remoteProcessCaller = (*getCaller)(nil) - _ remoteProcessCaller = (*putCaller)(nil) - _ remoteProcessCaller = (*headCaller)(nil) - _ remoteProcessCaller = (*searchCaller)(nil) - _ remoteProcessCaller = (*rangeCaller)(nil) - _ remoteProcessCaller = (*rangeHashCaller)(nil) -) - -func newRawMetaInfo() *rawMetaInfo { - return 
new(rawMetaInfo) -} - -func (s *rawMetaInfo) GetTTL() uint32 { - return s.ttl -} - -func (s *rawMetaInfo) setTTL(ttl uint32) { - s.ttl = ttl -} - -func (s *rawMetaInfo) GetTimeout() time.Duration { - return s.timeout -} - -func (s *rawMetaInfo) setTimeout(dur time.Duration) { - s.timeout = dur -} - -func (s *rawMetaInfo) GetSessionToken() service.SessionToken { - return s.token -} - -func (s *rawMetaInfo) setSessionToken(token service.SessionToken) { - s.token = token -} - -func (s *rawMetaInfo) GetBearerToken() service.BearerToken { - return s.bearer -} - -func (s *rawMetaInfo) setBearerToken(token service.BearerToken) { - s.bearer = token -} - -func (s *rawMetaInfo) ExtendedHeaders() []service.ExtendedHeader { - return s.extHdrs -} - -func (s *rawMetaInfo) setExtendedHeaders(v []service.ExtendedHeader) { - s.extHdrs = v -} - -func (s *rawMetaInfo) GetRaw() bool { - return s.raw -} - -func (s *rawMetaInfo) setRaw(raw bool) { - s.raw = raw -} - -func (s *rawMetaInfo) Type() object.RequestType { - return s.rt -} - -func (s *rawMetaInfo) setType(rt object.RequestType) { - s.rt = rt -} - -func (s *rawAddrInfo) GetAddress() Address { - return s.addr -} - -func (s *rawAddrInfo) setAddress(addr Address) { - s.addr = addr -} - -func (s *rawAddrInfo) getMetaInfo() *rawMetaInfo { - return s.rawMetaInfo -} - -func (s *rawAddrInfo) setMetaInfo(v *rawMetaInfo) { - s.rawMetaInfo = v -} - -func newRawAddressInfo() *rawAddrInfo { - res := new(rawAddrInfo) - - res.setMetaInfo(newRawMetaInfo()) - - return res -} - -func (s *transportRequest) GetTimeout() time.Duration { return s.timeout } - -func (s *transportComponent) Transport(ctx context.Context, p transport.ObjectTransportParams) { - res, err := s.sendRequest(ctx, p.TransportInfo, p.TargetNode) - p.ResultHandler.HandleResult(ctx, p.TargetNode, res, err) - - go s.resTracker.trackResult(ctx, resultItems{ - requestType: p.TransportInfo.Type(), - node: p.TargetNode, - satisfactory: err == nil, - }) -} - -func (s *transportComponent) sendRequest(ctx context.Context, reqInfo transport.MetaInfo, node multiaddr.Multiaddr) (interface{}, error) { - p := sendParams{ - req: reqInfo, - node: node, - } - - switch reqInfo.Type() { - case object.RequestSearch: - p.handler = s.searchCaller - case object.RequestPut: - p.handler = s.putCaller - case object.RequestHead: - p.handler = s.headCaller - case object.RequestGet: - p.handler = s.getCaller - case object.RequestRangeHash: - p.handler = s.rangeHashCaller - case object.RequestRange: - p.handler = s.rangeCaller - default: - panic(fmt.Sprintf(pmWrongRequestType, reqInfo)) - } - - return s.reqSender.sendRequest(ctx, p) -} - -func (s *searchCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - cSearch, err := c.sc.Search(ctx, r.(*object.SearchRequest)) - if err != nil { - return nil, err - } - - res := make([]Address, 0) - - for { - r, err := cSearch.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, err - } - - res = append(res, r.Addresses...) 
- } - - return res, nil -} - -func (s *rangeHashCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - resp, err := c.sc.GetRangeHash(ctx, r.(*object.GetRangeHashRequest)) - if err != nil { - return nil, err - } - - return resp.Hashes, nil -} - -func (s *rangeCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - req := r.(*GetRangeRequest) - - resp, err := c.sc.GetRange(ctx, req) - if err != nil { - return nil, err - } - - data := make([]byte, 0, req.Range.Length) - - for { - resp, err := resp.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, err - } - - data = append(data, resp.Fragment...) - } - - return bytes.NewReader(data), nil -} - -func (s *headCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - resp, err := c.sc.Head(ctx, r.(*object.HeadRequest)) - if err != nil { - return nil, err - } - - return resp.Object, nil -} - -func (s *getCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - getClient, err := c.sc.Get(ctx, r.(*object.GetRequest)) - if err != nil { - return nil, err - } - - resp, err := getClient.Recv() - if err != nil { - return nil, err - } - - obj := resp.GetObject() - - if resp.NotFull() { - obj.Payload = make([]byte, 0, obj.SystemHeader.PayloadLength) - - for { - resp, err := getClient.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, errors.Wrap(err, "get object received error") - } - - obj.Payload = append(obj.Payload, resp.GetChunk()...) - } - } - - return obj, nil -} - -func (s *putCaller) call(ctx context.Context, r serviceRequest, c *clientInfo) (interface{}, error) { - putClient, err := c.sc.Put(ctx) - if err != nil { - return nil, err - } - - req := r.(*putRequestSequence) - - if err := putClient.Send(req.PutRequest); err != nil { - return nil, err - } - - for i := range req.chunks { - if err := putClient.Send(req.chunks[i]); err != nil { - return nil, err - } - } - - resp, err := putClient.CloseAndRecv() - if err != nil { - return nil, err - } - - return &resp.Address, nil -} - -func (s *coreRequestPreparer) prepareRequest(req transport.MetaInfo) (serviceRequest, error) { - var ( - signed bool - tr *transportRequest - r serviceRequest - ) - - if tr, signed = req.(*transportRequest); signed { - r = tr.serviceRequest - } else { - switch req.Type() { - case object.RequestSearch: - r = prepareSearchRequest(req.(transport.SearchInfo)) - case object.RequestPut: - r = preparePutRequest(req.(transport.PutInfo)) - case object.RequestGet: - r = prepareGetRequest(req.(transport.GetInfo)) - case object.RequestHead: - r = prepareHeadRequest(req.(transport.HeadInfo)) - case object.RequestRange: - r = prepareRangeRequest(req.(transport.RangeInfo)) - case object.RequestRangeHash: - r = prepareRangeHashRequest(req.(transport.RangeHashInfo)) - default: - panic(fmt.Sprintf(pmWrongRequestType, req)) - } - } - - r.SetTTL(req.GetTTL()) - r.SetEpoch(s.epochRecv.Epoch()) - r.SetRaw(req.GetRaw()) - r.SetBearer( - toBearerMessage( - req.GetBearerToken(), - ), - ) - r.SetHeaders( - toExtendedHeaderMessages( - req.ExtendedHeaders(), - ), - ) - - if signed { - return r, nil - } - - key := s.key - - if token := req.GetSessionToken(); token != nil { - /* FIXME: here we must determine whether the node is trusted, - and if so, sign the request with a session key. - In current implementation trusted node may lose its reputation - in case of sending user requests in a nonexistent session. 
- */ - r.SetToken(toTokenMessage(token)) - - privateTokenKey := session.PrivateTokenKey{} - privateTokenKey.SetTokenID(token.GetID()) - privateTokenKey.SetOwnerID(token.GetOwnerID()) - - pToken, err := s.privateTokenStore.Fetch(privateTokenKey) - if err == nil { - if err := signRequest(pToken.PrivateKey(), r); err != nil { - return nil, err - } - } - } - - return r, signRequest(key, r) -} - -func toTokenMessage(token service.SessionToken) *service.Token { - if token == nil { - return nil - } else if v, ok := token.(*service.Token); ok { - return v - } - - res := new(service.Token) - - res.SetID(token.GetID()) - res.SetOwnerID(token.GetOwnerID()) - res.SetVerb(token.GetVerb()) - res.SetAddress(token.GetAddress()) - res.SetCreationEpoch(token.CreationEpoch()) - res.SetExpirationEpoch(token.ExpirationEpoch()) - res.SetSessionKey(token.GetSessionKey()) - res.SetSignature(token.GetSignature()) - - return res -} - -func toBearerMessage(token service.BearerToken) *service.BearerTokenMsg { - if token == nil { - return nil - } else if v, ok := token.(*service.BearerTokenMsg); ok { - return v - } - - res := new(service.BearerTokenMsg) - - res.SetACLRules(token.GetACLRules()) - res.SetOwnerID(token.GetOwnerID()) - res.SetExpirationEpoch(token.ExpirationEpoch()) - res.SetOwnerKey(token.GetOwnerKey()) - res.SetSignature(token.GetSignature()) - - return res -} - -func toExtendedHeaderMessages(hs []service.ExtendedHeader) []service.RequestExtendedHeader_KV { - res := make([]service.RequestExtendedHeader_KV, 0, len(hs)) - - for i := range hs { - if hs[i] == nil { - continue - } - - h := service.RequestExtendedHeader_KV{} - h.SetK(hs[i].Key()) - h.SetV(hs[i].Value()) - - res = append(res, h) - } - - return res -} - -func signRequest(key *ecdsa.PrivateKey, req serviceRequest) error { - signKeys := req.GetSignKeyPairs() - ln := len(signKeys) - - // TODO: public key bytes can be stored in struct once - if ln > 0 && bytes.Equal( - crypto.MarshalPublicKey(signKeys[ln-1].GetPublicKey()), - crypto.MarshalPublicKey(&key.PublicKey), - ) { - return nil - } - - return requestSignFunc(key, req) -} - -// TODO: write docs, write tests. 
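signRequest above avoids double-signing: if the last key pair in the request's signature chain already belongs to this node, no new signature is attached. A minimal sketch of that check, with plain byte slices standing in for marshaled public keys:

package main

import (
	"bytes"
	"fmt"
)

// alreadySignedBy reports whether the last signature in the chain was produced
// with ownPub; signRequest uses the same comparison (on marshaled public keys)
// to skip re-signing a request the node has already signed.
func alreadySignedBy(signKeys [][]byte, ownPub []byte) bool {
	n := len(signKeys)
	return n > 0 && bytes.Equal(signKeys[n-1], ownPub)
}

func main() {
	own := []byte{0x02, 0xaa}
	other := []byte{0x03, 0xbb}

	fmt.Println(alreadySignedBy([][]byte{other, own}, own)) // true  -> skip signing
	fmt.Println(alreadySignedBy([][]byte{own, other}, own)) // false -> sign again
	fmt.Println(alreadySignedBy(nil, own))                  // false -> sign
}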
-func prepareSearchRequest(req transport.SearchInfo) serviceRequest { - return &object.SearchRequest{ - ContainerID: req.GetCID(), - Query: req.GetQuery(), - QueryVersion: 1, - } -} - -func prepareGetRequest(req transport.GetInfo) serviceRequest { - return &object.GetRequest{ - Address: req.GetAddress(), - } -} - -func prepareHeadRequest(req transport.HeadInfo) serviceRequest { - return &object.HeadRequest{ - Address: req.GetAddress(), - FullHeaders: req.GetFullHeaders(), - } -} - -func preparePutRequest(req transport.PutInfo) serviceRequest { - obj := req.GetHead() - chunks := splitBytes(obj.Payload, maxGetPayloadSize) - - // copy object to save payload of initial object unchanged - nObj := new(Object) - *nObj = *obj - nObj.Payload = nil - - res := &putRequestSequence{ - PutRequest: object.MakePutRequestHeader(nObj), - chunks: make([]*object.PutRequest, 0, len(chunks)), - } - - // TODO: think about chunk messages signing - for i := range chunks { - res.chunks = append(res.chunks, object.MakePutRequestChunk(chunks[i])) - } - - return res -} - -func prepareRangeHashRequest(req transport.RangeHashInfo) serviceRequest { - return &object.GetRangeHashRequest{ - Address: req.GetAddress(), - Ranges: req.GetRanges(), - Salt: req.GetSalt(), - } -} - -func prepareRangeRequest(req transport.RangeInfo) serviceRequest { - return &GetRangeRequest{ - Address: req.GetAddress(), - Range: req.GetRange(), - } -} - -// TODO: write docs, write tests. -func (s *coreRequestSender) defaultTimeout(req transport.MetaInfo) time.Duration { - switch req.Type() { - case object.RequestSearch: - return s.searchTimeout - case object.RequestPut: - return s.putTimeout - case object.RequestGet: - return s.getTimeout - case object.RequestHead: - return s.headTimeout - case object.RequestRangeHash: - return s.rangeHashTimeout - } - - return minRemoteRequestTimeout -} - -// TODO: write docs, write tests. -func (s *coreRequestSender) sendRequest(ctx context.Context, p sendParams) (interface{}, error) { - var err error - - if p.node == nil { - if p.node, err = s.addressStore.SelfAddr(); err != nil { - return nil, err - } - } - - timeout := p.req.GetTimeout() - if timeout <= 0 { - timeout = s.defaultTimeout(p.req) - } - - r, err := s.requestPrep.prepareRequest(p.req) - if err != nil { - return nil, err - } - - dialCtx, cancel := context.WithTimeout(ctx, s.dialTimeout) - - c, err := s.remoteService.Remote(dialCtx, p.node) - - cancel() - - if err != nil { - return nil, err - } - - ctx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - - return p.handler.call(ctx, r, &clientInfo{ - sc: c, - key: s.addressStore.PublicKey(p.node), - }) -} - -// NewMultiTransport is an object transport component's constructor. 
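preparePutRequest splits the object payload into chunk messages that follow the header message on the Put stream. A self-contained sketch of that chunking, with a hypothetical splitPayload helper standing in for splitBytes and an arbitrary chunk size:

package main

import "fmt"

// splitPayload mirrors the chunking done in preparePutRequest: the payload is
// cut into pieces of at most maxChunk bytes, each of which becomes one
// streamed chunk message after the header message.
func splitPayload(payload []byte, maxChunk int) [][]byte {
	chunks := make([][]byte, 0, len(payload)/maxChunk+1)
	for len(payload) > 0 {
		n := maxChunk
		if len(payload) < n {
			n = len(payload)
		}
		chunks = append(chunks, payload[:n])
		payload = payload[n:]
	}
	return chunks
}

func main() {
	chunks := splitPayload(make([]byte, 10), 4)
	for i, c := range chunks {
		fmt.Printf("chunk %d: %d bytes\n", i, len(c)) // 4, 4, 2 bytes
	}
}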
-func NewMultiTransport(p MultiTransportParams) (transport.ObjectTransport, error) { - switch { - case p.RemoteService == nil: - return nil, errEmptyGRPC - case p.AddressStore == nil: - return nil, errEmptyAddress - case p.Logger == nil: - return nil, errEmptyLogger - case p.EpochReceiver == nil: - return nil, errEmptyEpochReceiver - case p.Key == nil: - return nil, errEmptyPrivateKey - case p.PrivateTokenStore == nil: - return nil, errEmptyTokenStore - } - - if p.PutTimeout <= 0 { - p.PutTimeout = minRemoteRequestTimeout - } - - if p.GetTimeout <= 0 { - p.GetTimeout = minRemoteRequestTimeout - } - - if p.HeadTimeout <= 0 { - p.HeadTimeout = minRemoteRequestTimeout - } - - if p.SearchTimeout <= 0 { - p.SearchTimeout = minRemoteRequestTimeout - } - - if p.RangeHashTimeout <= 0 { - p.RangeHashTimeout = minRemoteRequestTimeout - } - - if p.DialTimeout <= 0 { - p.DialTimeout = minDialTimeout - } - - return &transportComponent{ - reqSender: &coreRequestSender{ - requestPrep: &coreRequestPreparer{ - epochRecv: p.EpochReceiver, - key: p.Key, - signingFunc: requestSignFunc, - - privateTokenStore: p.PrivateTokenStore, - }, - addressStore: p.AddressStore, - remoteService: p.RemoteService, - putTimeout: p.PutTimeout, - getTimeout: p.GetTimeout, - searchTimeout: p.SearchTimeout, - headTimeout: p.HeadTimeout, - rangeHashTimeout: p.RangeHashTimeout, - dialTimeout: p.DialTimeout, - }, - resTracker: &idleResultTracker{}, - getCaller: &getCaller{}, - putCaller: &putCaller{}, - headCaller: &headCaller{}, - rangeCaller: &rangeCaller{}, - rangeHashCaller: &rangeHashCaller{}, - searchCaller: &searchCaller{}, - }, nil -} diff --git a/pkg/network/transport/object/grpc/transport_test.go b/pkg/network/transport/object/grpc/transport_test.go deleted file mode 100644 index 74ae2899a..000000000 --- a/pkg/network/transport/object/grpc/transport_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testTransportEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - object.ServiceClient - object.Service_PutClient - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
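NewMultiTransport falls back to minimum timeouts whenever a caller leaves a duration unset. A small sketch of that defaulting for two of the fields; the real constructor applies the same rule to every per-request timeout:

package main

import (
	"fmt"
	"time"
)

const (
	minRemoteRequestTimeout = 5 * time.Second
	minDialTimeout          = 500 * time.Millisecond
)

// normalizeTimeouts mirrors the defaulting performed by NewMultiTransport:
// any non-positive timeout is replaced by a sane minimum, so a zero-value
// MultiTransportParams still yields a usable component.
func normalizeTimeouts(put, dial time.Duration) (time.Duration, time.Duration) {
	if put <= 0 {
		put = minRemoteRequestTimeout
	}
	if dial <= 0 {
		dial = minDialTimeout
	}
	return put, dial
}

func main() {
	put, dial := normalizeTimeouts(0, -1)
	fmt.Println(put, dial) // 5s 500ms
}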
- err error - } -) - -var ( - _ object.ServiceClient = (*testTransportEntity)(nil) - _ object.Service_PutClient = (*testTransportEntity)(nil) -) - -func (s *testTransportEntity) Send(*object.PutRequest) error { return s.err } - -func (s *testTransportEntity) CloseAndRecv() (*object.PutResponse, error) { - if s.err != nil { - return nil, s.err - } - return s.res.(*object.PutResponse), nil -} - -func (s *testTransportEntity) Put(ctx context.Context, opts ...grpc.CallOption) (object.Service_PutClient, error) { - if s.err != nil { - return nil, s.err - } - return s.res.(object.Service_PutClient), nil -} - -func Test_putHandler(t *testing.T) { - ctx := context.TODO() - - t.Run("return type correctness", func(t *testing.T) { - addr := new(Address) - *addr = testObjectAddress(t) - - srvClient := &testTransportEntity{ - res: &testTransportEntity{ - res: &object.PutResponse{ - Address: *addr, - }, - }, - } - - putC := &putCaller{} - - res, err := putC.call(ctx, &putRequestSequence{PutRequest: new(object.PutRequest)}, &clientInfo{ - sc: srvClient, - }) - require.NoError(t, err) - - // ascertain that value returns as expected - require.Equal(t, addr, res) - }) -} diff --git a/pkg/network/transport/object/grpc/traverse.go b/pkg/network/transport/object/grpc/traverse.go deleted file mode 100644 index 1f4c272f3..000000000 --- a/pkg/network/transport/object/grpc/traverse.go +++ /dev/null @@ -1,186 +0,0 @@ -package object - -import ( - "context" - "sync" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" -) - -type ( - containerTraverser interface { - transport.Traverser - add(multiaddr.Multiaddr, bool) - done(multiaddr.Multiaddr) bool - finished() bool - close() - Err() error - } - - placementBuilder interface { - buildPlacement(context.Context, Address, ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) - } - - traverseParams struct { - tryPrevNM bool - addr Address - curPlacementBuilder placementBuilder - prevPlacementBuilder placementBuilder - maxRecycleCount int - stopCount int - } - - coreTraverser struct { - closed bool - - usePrevNM bool - - recycleNum int - - *sync.RWMutex - traverseParams - failed []multiaddr.Multiaddr - mDone map[string]struct{} - err error - } -) - -var ( - _ placementBuilder = (*corePlacementUtil)(nil) - _ containerTraverser = (*coreTraverser)(nil) -) - -func (s *coreTraverser) Next(ctx context.Context) []multiaddr.Multiaddr { - if s.isClosed() || s.finished() { - return nil - } - - s.Lock() - defer s.Unlock() - - return s.next(ctx) -} - -func minInt(a, b int) int { - if a < b { - return a - } - - return b -} - -func (s *coreTraverser) next(ctx context.Context) (nodes []multiaddr.Multiaddr) { - defer func() { - if s.stopCount == 0 { - s.stopCount = len(nodes) - } - - if s.stopCount > 0 { - nodes = nodes[:minInt( - s.stopCount-len(s.mDone), - len(nodes), - )] - } - }() - - var placeBuilder = s.curPlacementBuilder - if s.usePrevNM { - placeBuilder = s.prevPlacementBuilder - } - - nodes, s.err = placeBuilder.buildPlacement(ctx, s.addr, s.failed...) - if errors.Is(errors.Cause(s.err), container.ErrNotFound) { - return - } - - for i := 0; i < len(nodes); i++ { - if _, ok := s.mDone[nodes[i].String()]; ok { - nodes = append(nodes[:i], nodes[i+1:]...) 
- i-- - } - - continue - } - - if len(nodes) == 0 { - if !s.usePrevNM && s.tryPrevNM { - s.usePrevNM = true - return s.next(ctx) - } - - if s.recycleNum < s.maxRecycleCount { - s.reset() - return s.next(ctx) - } - } - - return nodes -} - -func (s *coreTraverser) reset() { - s.usePrevNM = false - s.failed = s.failed[:0] - s.recycleNum++ -} - -func (s *coreTraverser) add(node multiaddr.Multiaddr, ok bool) { - s.Lock() - if ok { - s.mDone[node.String()] = struct{}{} - } else { - s.failed = append(s.failed, node) - } - s.Unlock() -} - -func (s *coreTraverser) done(node multiaddr.Multiaddr) bool { - s.RLock() - _, ok := s.mDone[node.String()] - s.RUnlock() - - return ok -} - -func (s *coreTraverser) close() { - s.Lock() - s.closed = true - s.Unlock() -} - -func (s *coreTraverser) isClosed() bool { - s.RLock() - defer s.RUnlock() - - return s.closed -} - -func (s *coreTraverser) finished() bool { - s.RLock() - defer s.RUnlock() - - return s.stopCount > 0 && len(s.mDone) >= s.stopCount -} - -func (s *coreTraverser) Err() error { - s.RLock() - defer s.RUnlock() - - return s.err -} - -func newContainerTraverser(p *traverseParams) containerTraverser { - return &coreTraverser{ - RWMutex: new(sync.RWMutex), - traverseParams: *p, - failed: make([]multiaddr.Multiaddr, 0), - mDone: make(map[string]struct{}), - } -} - -func (s *corePlacementUtil) buildPlacement(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - return s.placementBuilder.GetNodes(ctx, addr, s.prevNetMap, excl...) -} diff --git a/pkg/network/transport/object/grpc/traverse_test.go b/pkg/network/transport/object/grpc/traverse_test.go deleted file mode 100644 index 9a6a5cdbb..000000000 --- a/pkg/network/transport/object/grpc/traverse_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package object - -import ( - "context" - "strconv" - "sync" - "testing" - - "github.com/multiformats/go-multiaddr" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testTraverseEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - Placer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
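The traverser above tracks successful and failed nodes separately: failures are excluded from the next placement query, and traversal finishes once stopCount nodes have confirmed. A stripped-down sketch of that bookkeeping with plain strings instead of multiaddrs and without the locking:

package main

import "fmt"

// traversalState is a minimal sketch of coreTraverser's bookkeeping: nodes
// that answered successfully go into done, failures are remembered so the
// next placement query can exclude them, and traversal finishes once
// stopCount nodes are done.
type traversalState struct {
	done      map[string]struct{}
	failed    []string
	stopCount int
}

func (s *traversalState) add(node string, ok bool) {
	if ok {
		s.done[node] = struct{}{}
	} else {
		s.failed = append(s.failed, node)
	}
}

func (s *traversalState) finished() bool {
	return s.stopCount > 0 && len(s.done) >= s.stopCount
}

func main() {
	s := &traversalState{done: make(map[string]struct{}), stopCount: 2}
	s.add("/ip4/10.0.0.1/tcp/8080", true)
	s.add("/ip4/10.0.0.2/tcp/8080", false)
	fmt.Println(s.finished()) // false: only one node confirmed
	s.add("/ip4/10.0.0.3/tcp/8080", true)
	fmt.Println(s.finished()) // true: stopCount reached
}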
- err error - } -) - -var ( - _ Placer = (*testTraverseEntity)(nil) - _ placementBuilder = (*testTraverseEntity)(nil) -) - -func (s *testTraverseEntity) GetNodes(ctx context.Context, a Address, p bool, e ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - if s.f != nil { - s.f(a, p, e) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]multiaddr.Multiaddr), nil -} - -func (s *testTraverseEntity) buildPlacement(_ context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - if s.f != nil { - s.f(addr, excl) - } - if s.err != nil { - return nil, s.err - } - return s.res.([]multiaddr.Multiaddr), nil -} - -func Test_coreCnrAffChecker_buildPlacement(t *testing.T) { - ctx := context.TODO() - addr := testObjectAddress(t) - nodes := testNodeList(t, 2) - - t.Run("correct placer params", func(t *testing.T) { - s := &corePlacementUtil{ - prevNetMap: true, - placementBuilder: &testTraverseEntity{ - f: func(items ...interface{}) { - require.Equal(t, addr, items[0].(Address)) - require.True(t, items[1].(bool)) - require.Equal(t, nodes, items[2].([]multiaddr.Multiaddr)) - }, - err: errors.New(""), // just to prevent panic - }, - log: zap.L(), - } - - s.buildPlacement(ctx, addr, nodes...) - }) - - t.Run("correct result", func(t *testing.T) { - t.Run("placer error", func(t *testing.T) { - s := &corePlacementUtil{ - placementBuilder: &testTraverseEntity{ - err: errors.New(""), // force Placer to return some error - }, - log: zap.L(), - } - - res, err := s.buildPlacement(ctx, addr) - require.Error(t, err) - require.Empty(t, res) - }) - - t.Run("placer success", func(t *testing.T) { - s := &corePlacementUtil{ - placementBuilder: &testTraverseEntity{ - res: nodes, // force Placer to return nodes - }, - log: zap.L(), - } - - res, err := s.buildPlacement(ctx, addr) - require.NoError(t, err) - require.Equal(t, nodes, res) - }) - }) -} - -func Test_coreTraverser(t *testing.T) { - ctx := context.TODO() - - t.Run("new", func(t *testing.T) { - addr := testObjectAddress(t) - pl := new(testTraverseEntity) - - v := newContainerTraverser(&traverseParams{ - tryPrevNM: true, - addr: addr, - curPlacementBuilder: pl, - prevPlacementBuilder: pl, - maxRecycleCount: 10, - }) - - res := v.(*coreTraverser) - - require.NotNil(t, res.RWMutex) - require.Equal(t, addr, res.addr) - require.True(t, res.tryPrevNM) - require.False(t, res.usePrevNM) - require.NotNil(t, res.mDone) - require.Empty(t, res.mDone) - require.Empty(t, res.failed) - require.Equal(t, 10, res.maxRecycleCount) - require.Equal(t, pl, res.curPlacementBuilder) - require.Equal(t, pl, res.prevPlacementBuilder) - require.Equal(t, 0, res.stopCount) - }) - - t.Run("close", func(t *testing.T) { - v := newContainerTraverser(&traverseParams{ - curPlacementBuilder: &testTraverseEntity{ - res: make([]multiaddr.Multiaddr, 1), - }, - }) - - v.close() - - require.Empty(t, v.Next(ctx)) - require.True(t, v.(*coreTraverser).isClosed()) - }) - - t.Run("done", func(t *testing.T) { - nodes := testNodeList(t, 3) - v := newContainerTraverser(&traverseParams{}) - - v.add(nodes[0], true) - require.True(t, v.done(nodes[0])) - - v.add(nodes[1], false) - require.False(t, v.done(nodes[1])) - - require.False(t, v.done(nodes[2])) - }) - - t.Run("finished", func(t *testing.T) { - - t.Run("zero stop count", func(t *testing.T) { - containerTraverser := &coreTraverser{ - RWMutex: new(sync.RWMutex), - traverseParams: traverseParams{stopCount: 0}, - } - require.False(t, containerTraverser.finished()) - }) - - t.Run("positive stop count", func(t 
*testing.T) { - containerTraverser := &coreTraverser{ - RWMutex: new(sync.RWMutex), - mDone: make(map[string]struct{}), - traverseParams: traverseParams{stopCount: 3}, - } - - for i := 0; i < containerTraverser.stopCount-1; i++ { - containerTraverser.mDone[strconv.Itoa(i)] = struct{}{} - } - - require.False(t, containerTraverser.finished()) - - containerTraverser.mDone["last node"] = struct{}{} - - require.True(t, containerTraverser.finished()) - }) - }) - - t.Run("add result", func(t *testing.T) { - mAddr := testNode(t, 0) - - containerTraverser := &coreTraverser{ - RWMutex: new(sync.RWMutex), - mDone: make(map[string]struct{}), - } - - containerTraverser.add(mAddr, true) - _, ok := containerTraverser.mDone[mAddr.String()] - require.True(t, ok) - - containerTraverser.add(mAddr, false) - require.Contains(t, containerTraverser.failed, mAddr) - }) - - t.Run("reset", func(t *testing.T) { - initRecycleNum := 1 - - s := &coreTraverser{ - failed: testNodeList(t, 1), - usePrevNM: true, - recycleNum: initRecycleNum, - } - - s.reset() - - require.Empty(t, s.failed) - require.False(t, s.usePrevNM) - require.Equal(t, initRecycleNum+1, s.recycleNum) - }) - - t.Run("next", func(t *testing.T) { - - t.Run("exclude done nodes from result", func(t *testing.T) { - nodes := testNodeList(t, 5) - done := make([]multiaddr.Multiaddr, 2) - copy(done, nodes) - - pl := &testTraverseEntity{res: nodes} - tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) - - for i := range done { - tr.add(done[i], true) - } - - res := tr.Next(ctx) - for i := range done { - require.NotContains(t, res, done[i]) - } - - }) - - t.Run("stop count initialization", func(t *testing.T) { - nodes := testNodeList(t, 5) - - pl := &testTraverseEntity{res: nodes} - - tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) - - _ = tr.Next(ctx) - require.Equal(t, len(nodes), tr.(*coreTraverser).stopCount) - }) - - t.Run("all nodes are done", func(t *testing.T) { - nodes := testNodeList(t, 5) - pl := &testTraverseEntity{res: nodes} - tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) - - require.Equal(t, nodes, tr.Next(ctx)) - - for i := range nodes { - tr.add(nodes[i], true) - } - - require.Empty(t, tr.Next(ctx)) - }) - - t.Run("failed nodes accounting", func(t *testing.T) { - nodes := testNodeList(t, 5) - failed := nodes[:len(nodes)-2] - _ = failed - addr := testObjectAddress(t) - - pl := &testTraverseEntity{ - f: func(items ...interface{}) { - t.Run("correct placer params", func(t *testing.T) { - require.Equal(t, addr, items[0].(Address)) - require.Equal(t, failed, items[1].([]multiaddr.Multiaddr)) - }) - }, - res: nodes, - } - - tr := newContainerTraverser(&traverseParams{ - addr: addr, - curPlacementBuilder: pl, - }) - - for i := range failed { - tr.add(failed[i], false) - } - - _ = tr.Next(ctx) - }) - - t.Run("placement build failure", func(t *testing.T) { - - t.Run("forbid previous network map", func(t *testing.T) { - pl := &testTraverseEntity{res: make([]multiaddr.Multiaddr, 0)} - - tr := newContainerTraverser(&traverseParams{curPlacementBuilder: pl}) - - require.Empty(t, tr.Next(ctx)) - }) - - t.Run("allow previous network map", func(t *testing.T) { - - t.Run("failure", func(t *testing.T) { - pl := &testTraverseEntity{ - res: make([]multiaddr.Multiaddr, 0), - } - - tr := newContainerTraverser(&traverseParams{ - tryPrevNM: true, - curPlacementBuilder: pl, - prevPlacementBuilder: pl, - }) - - require.Empty(t, tr.Next(ctx)) - }) - - t.Run("success", func(t *testing.T) { - nodes := 
testNodeList(t, 5) - - tr := newContainerTraverser(&traverseParams{ - tryPrevNM: true, - curPlacementBuilder: &testTraverseEntity{ - res: make([]multiaddr.Multiaddr, 0), - }, - prevPlacementBuilder: &testTraverseEntity{ - res: nodes, - }, - }) - - require.Equal(t, nodes, tr.Next(ctx)) - }) - }) - - t.Run("recycle", func(t *testing.T) { - recycleCount := 5 - - curNetMapCallCounter, prevNetMapCallCounter := 0, 0 - - tr := newContainerTraverser(&traverseParams{ - tryPrevNM: true, - curPlacementBuilder: &testTraverseEntity{ - f: func(items ...interface{}) { - curNetMapCallCounter++ - }, - res: make([]multiaddr.Multiaddr, 0), - }, - prevPlacementBuilder: &testTraverseEntity{ - f: func(items ...interface{}) { - prevNetMapCallCounter++ - }, - res: make([]multiaddr.Multiaddr, 0), - }, - maxRecycleCount: recycleCount, - }) - - _ = tr.Next(ctx) - require.Equal(t, recycleCount+1, prevNetMapCallCounter) - require.Equal(t, recycleCount+1, curNetMapCallCounter) - }) - }) - }) -} - -func testNodeList(t *testing.T, count int) (res []multiaddr.Multiaddr) { - for i := 0; i < count; i++ { - res = append(res, testNode(t, i)) - } - return -} diff --git a/pkg/network/transport/object/grpc/ttl.go b/pkg/network/transport/object/grpc/ttl.go deleted file mode 100644 index 031b90285..000000000 --- a/pkg/network/transport/object/grpc/ttl.go +++ /dev/null @@ -1,211 +0,0 @@ -package object - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // ttlPreProcessor is an implementation of requestPreProcessor interface used in Object service production. - ttlPreProcessor struct { - // List of static TTL conditions. - staticCond []service.TTLCondition - - // List of TTL condition constructors. - condPreps []ttlConditionPreparer - - // Processing function. - fProc func(service.TTLSource, ...service.TTLCondition) error - } - - // ttlConditionPreparer is an interface of TTL condition constructor. - ttlConditionPreparer interface { - // prepareTTLCondition creates TTL condition instance based on passed request. - prepareTTLCondition(context.Context, object.Request) service.TTLCondition - } - - // coreTTLCondPreparer is an implementation of ttlConditionPreparer interface used in Object service production. - coreTTLCondPreparer struct { - curAffChecker containerAffiliationChecker - prevAffChecker containerAffiliationChecker - } - - containerAffiliationResult int - - // containerAffiliationChecker is an interface of container membership validator. - containerAffiliationChecker interface { - // Checks local node is affiliated with container with passed ID. - affiliated(context.Context, CID) containerAffiliationResult - } - - // corePlacementUtil is an implementation of containerAffiliationChecker interface used in Object service production. - corePlacementUtil struct { - // Previous network map flag. - prevNetMap bool - - // Local node net address store. - localAddrStore storage.AddressStore - - // Container nodes membership maintainer. - placementBuilder Placer - - // Logging component. - log *zap.Logger - } -) - -// decTTLPreProcessor is an implementation of requestPreProcessor. 
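ttlPreProcessor combines static TTL conditions with conditions prepared per request and feeds them all into one processing function. A minimal sketch of that composition (processTTLConditions further below is the production counterpart), assuming nothing beyond the condition signature:

package main

import (
	"errors"
	"fmt"
)

// ttlCondition mirrors service.TTLCondition: a predicate over the request TTL.
type ttlCondition func(ttl uint32) error

// processConditions applies every static and dynamically prepared condition;
// the first failure aborts request processing.
func processConditions(ttl uint32, conds ...ttlCondition) error {
	for _, cond := range conds {
		if err := cond(ttl); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	nonZero := func(ttl uint32) error {
		if ttl == 0 {
			return errors.New("invalid TTL")
		}
		return nil
	}
	below := func(limit uint32) ttlCondition {
		return func(ttl uint32) error {
			if ttl > limit {
				return errors.New("TTL too large")
			}
			return nil
		}
	}

	fmt.Println(processConditions(2, nonZero, below(3))) // <nil>
	fmt.Println(processConditions(0, nonZero, below(3))) // invalid TTL
}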
-type decTTLPreProcessor struct { -} - -const ( - _ containerAffiliationResult = iota - affUnknown - affNotFound - affPresence - affAbsence -) - -const ( - lmSelfAddrRecvFail = "could not receive local network address" -) - -var ( - _ containerAffiliationChecker = (*corePlacementUtil)(nil) - _ ttlConditionPreparer = (*coreTTLCondPreparer)(nil) - _ requestPreProcessor = (*ttlPreProcessor)(nil) - - _ service.TTLCondition = validTTLCondition - - _ requestPreProcessor = (*decTTLPreProcessor)(nil) -) - -// requestPreProcessor method implementation. -// -// Panics with pmEmptyServiceRequest on empty request. -// -// Constructs set of TTL conditions via internal constructors. -// Returns result of internal TTL conditions processing function. -func (s *ttlPreProcessor) preProcess(ctx context.Context, req serviceRequest) error { - if req == nil { - panic(pmEmptyServiceRequest) - } - - dynamicCond := make([]service.TTLCondition, len(s.condPreps)) - - for i := range s.condPreps { - dynamicCond[i] = s.condPreps[i].prepareTTLCondition(ctx, req) - } - - return s.fProc(req, append(s.staticCond, dynamicCond...)...) -} - -// ttlConditionPreparer method implementation. -// -// Condition returns ErrNotLocalContainer if and only if request is non-forwarding and local node is not presented -// in placement vector corresponding to request. -func (s *coreTTLCondPreparer) prepareTTLCondition(ctx context.Context, req object.Request) service.TTLCondition { - if req == nil { - panic(pmEmptyServiceRequest) - } - - return func(ttl uint32) error { - // check forwarding assumption - if ttl >= service.SingleForwardingTTL { - // container affiliation doesn't matter - return nil - } - - // get group container ID from request body - cid := req.CID() - - // check local node affiliation to container - aff := s.curAffChecker.affiliated(ctx, cid) - - if aff == affAbsence && req.AllowPreviousNetMap() { - // request can be forwarded to container members from previous epoch - aff = s.prevAffChecker.affiliated(ctx, cid) - } - - switch aff { - case affUnknown: - return errContainerAffiliationProblem - case affNotFound: - return &detailedError{ - error: errContainerNotFound, - d: containerDetails(cid, descContainerNotFound), - } - case affAbsence: - return &detailedError{ - error: errNotLocalContainer, - d: containerDetails(cid, descNotLocalContainer), - } - } - - return nil - } -} - -// containerAffiliationChecker method implementation. -// -// If local network address store returns error, logger writes error and affUnknown returns. -// If placement builder returns error -// - caused by ErrNotFound, affNotFound returns; -// - status error with NotFound code, affNotFound returns; -// - any other, affUnknown returns, -// Otherwise, if placement builder returns -// - true, affPresence returns; -// - false, affAbsence returns. 
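prepareTTLCondition only enforces container membership for non-forwarding requests: anything arriving with a forwarding TTL passes straight through. A simplified sketch, with illustrative TTL constants in place of service.SingleForwardingTTL and service.NonForwardingTTL and a boolean standing in for the affiliation checkers:

package main

import (
	"errors"
	"fmt"
)

type ttlCondition func(ttl uint32) error

// Illustrative values only; the real constants live in the service package.
const (
	nonForwardingTTL    = 1
	singleForwardingTTL = 2
)

var errNotLocalContainer = errors.New("container is not local")

// containerCondition sketches what prepareTTLCondition builds: forwarded
// requests (ttl >= singleForwardingTTL) pass unconditionally, otherwise the
// local node must be a member of the container's placement.
func containerCondition(isLocalContainer bool) ttlCondition {
	return func(ttl uint32) error {
		if ttl >= singleForwardingTTL {
			return nil
		}
		if !isLocalContainer {
			return errNotLocalContainer
		}
		return nil
	}
}

func main() {
	cond := containerCondition(false)
	fmt.Println(cond(singleForwardingTTL)) // <nil>: request is still forwardable
	fmt.Println(cond(nonForwardingTTL))    // container is not local
}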
-func (s *corePlacementUtil) affiliated(ctx context.Context, cid CID) containerAffiliationResult { - selfAddr, err := s.localAddrStore.SelfAddr() - if err != nil { - s.log.Error(lmSelfAddrRecvFail, zap.Error(err)) - return affUnknown - } - - aff, err := s.placementBuilder.IsContainerNode(ctx, selfAddr, cid, s.prevNetMap) - if err != nil { - if err := errors.Cause(err); errors.Is(err, container.ErrNotFound) { - return affNotFound - } - - return affUnknown - } - - if !aff { - return affAbsence - } - - return affPresence -} - -func processTTLConditions(req service.TTLSource, cs ...service.TTLCondition) error { - ttl := req.GetTTL() - - for i := range cs { - if err := cs[i](ttl); err != nil { - return err - } - } - - return nil -} - -func validTTLCondition(ttl uint32) error { - if ttl < service.NonForwardingTTL { - return errInvalidTTL - } - - return nil -} - -func (s *decTTLPreProcessor) preProcess(_ context.Context, req serviceRequest) error { - req.SetTTL(req.GetTTL() - 1) - return nil -} diff --git a/pkg/network/transport/object/grpc/ttl_test.go b/pkg/network/transport/object/grpc/ttl_test.go deleted file mode 100644 index a9c040ed4..000000000 --- a/pkg/network/transport/object/grpc/ttl_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package object - -import ( - "context" - "math/rand" - "strconv" - "testing" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testTTLEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - Placer - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
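corePlacementUtil.affiliated collapses the placement lookup into a four-state result. A self-contained sketch of that classification, using a local errContainerNotFound value in place of container.ErrNotFound:

package main

import (
	"errors"
	"fmt"
)

type affiliationResult int

const (
	affUnknown affiliationResult = iota + 1
	affNotFound
	affPresence
	affAbsence
)

var errContainerNotFound = errors.New("container not found")

// classify sketches corePlacementUtil.affiliated: a lookup error is either a
// missing container or an unknown state, otherwise the boolean membership
// answer maps to presence/absence.
func classify(isMember bool, err error) affiliationResult {
	if err != nil {
		if errors.Is(err, errContainerNotFound) {
			return affNotFound
		}
		return affUnknown
	}
	if isMember {
		return affPresence
	}
	return affAbsence
}

func main() {
	fmt.Println(classify(true, nil))                    // 3 (affPresence)
	fmt.Println(classify(false, nil))                   // 4 (affAbsence)
	fmt.Println(classify(false, errContainerNotFound))  // 2 (affNotFound)
	fmt.Println(classify(false, errors.New("timeout"))) // 1 (affUnknown)
}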
- err error - } -) - -var ( - _ ttlConditionPreparer = (*testTTLEntity)(nil) - _ storage.AddressStore = (*testTTLEntity)(nil) - _ containerAffiliationChecker = (*testTTLEntity)(nil) - _ Placer = (*testTTLEntity)(nil) -) - -func (s *testTTLEntity) SelfAddr() (multiaddr.Multiaddr, error) { - if s.err != nil { - return nil, s.err - } - return s.res.(multiaddr.Multiaddr), nil -} - -func (s *testTTLEntity) IsContainerNode(_ context.Context, m multiaddr.Multiaddr, c CID, b bool) (bool, error) { - if s.f != nil { - s.f(m, c, b) - } - if s.err != nil { - return false, s.err - } - return s.res.(bool), nil -} - -func (s *testTTLEntity) CID() CID { return s.res.([]interface{})[0].(CID) } - -func (s *testTTLEntity) AllowPreviousNetMap() bool { return s.res.([]interface{})[1].(bool) } - -func (s *testTTLEntity) prepareTTLCondition(_ context.Context, req object.Request) service.TTLCondition { - if s.f != nil { - s.f(req) - } - return s.res.(service.TTLCondition) -} - -func (s *testTTLEntity) affiliated(ctx context.Context, cid CID) containerAffiliationResult { - if s.f != nil { - s.f(cid) - } - return s.res.(containerAffiliationResult) -} - -func Test_ttlPreProcessor_preProcess(t *testing.T) { - ctx := context.TODO() - - // create custom request with forwarding TTL - req := &testTTLEntity{res: uint32(service.SingleForwardingTTL)} - - t.Run("empty request", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - // ascertain that nil request causes panic - _ = new(ttlPreProcessor).preProcess(ctx, nil) - }) - }) - - t.Run("correct processing", func(t *testing.T) { - // create custom error - pErr := errors.New("test error for processing func") - - // create custom ttlConditionPreparer - condPreparer := &testTTLEntity{ - f: func(items ...interface{}) { - t.Run("correct condition preparer params", func(t *testing.T) { - // ascertain that request argument of ttlPreProcessor and ttlConditionPreparer are the same - require.Equal(t, req, items[0].(object.Request)) - }) - }, - res: service.TTLCondition(func(uint32) error { return nil }), - } - - s := &ttlPreProcessor{ - condPreps: []ttlConditionPreparer{condPreparer}, - fProc: func(service.TTLSource, ...service.TTLCondition) error { - return pErr // force processing function to return created error - }, - } - - // ascertain error returns as expected - require.EqualError(t, - s.preProcess(ctx, req), - pErr.Error(), - ) - }) -} - -func Test_coreTTLCondPreparer_prepareTTLCondition(t *testing.T) { - ctx := context.TODO() - - // create container ID - cid := testObjectAddress(t).CID - - // // create network address - // mAddr := testNode(t, 0) - // - // // create custom AddressStore - // as := &testTTLEntity{ - // res: mAddr, // force AddressStore to return created address - // } - - t.Run("empty request", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - // ascertain that nil request causes panic - _ = new(coreTTLCondPreparer).prepareTTLCondition(ctx, nil) - }) - }) - - t.Run("forwarding TTL", func(t *testing.T) { - s := &coreTTLCondPreparer{ - curAffChecker: new(testTTLEntity), - prevAffChecker: new(testTTLEntity), - } - - cond := s.prepareTTLCondition(ctx, new(testTTLEntity)) - - // ascertain that error returns as expected - require.NoError(t, cond(service.SingleForwardingTTL)) - }) - - t.Run("non-forwarding TTL", func(t *testing.T) { - t.Run("container non-affiliation", func(t *testing.T) { - t.Run("disallow previous epoch affiliation", func(t *testing.T) { - // create custom serviceRequest for test - 
req := &testTTLEntity{res: []interface{}{ - cid, // force serviceRequest to return cid - false, // force serviceRequest to disallow previous network map - }} - - s := &coreTTLCondPreparer{ - curAffChecker: &testTTLEntity{ - f: func(items ...interface{}) { - t.Run("correct current epoch affiliation checker params", func(t *testing.T) { - require.Equal(t, cid, items[0].(CID)) - }) - }, - res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence - }, - prevAffChecker: &testTTLEntity{ - f: func(items ...interface{}) { - t.Run("correct previous epoch affiliation checker params", func(t *testing.T) { - require.Equal(t, cid, items[0].(CID)) - }) - }, - res: affPresence, // force previous epoch containerAffiliationChecker to return affPresence - }, - } - - cond := s.prepareTTLCondition(ctx, req) - - // ascertain that error returns as expected - require.EqualError(t, - cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL - errNotLocalContainer.Error(), - ) - }) - - t.Run("allow previous epoch affiliation", func(t *testing.T) { - // create custom serviceRequest for test - req := &testTTLEntity{res: []interface{}{ - cid, // force serviceRequest to return cid - true, // force serviceRequest to allow previous network map - }} - - s := &coreTTLCondPreparer{ - curAffChecker: &testTTLEntity{ - res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence - }, - prevAffChecker: &testTTLEntity{ - res: affAbsence, // force previous epoch containerAffiliationChecker to return affAbsence - }, - } - - cond := s.prepareTTLCondition(ctx, req) - - // ascertain that error returns as expected - require.EqualError(t, - cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL - errNotLocalContainer.Error(), - ) - }) - }) - - t.Run("container affiliation", func(t *testing.T) { - t.Run("disallow previous epoch affiliation", func(t *testing.T) { - // create custom serviceRequest for test - req := &testTTLEntity{res: []interface{}{ - cid, // force serviceRequest to return cid - false, // force serviceRequest to disallow previous network map - }} - - s := &coreTTLCondPreparer{ - curAffChecker: &testTTLEntity{ - res: affPresence, // force current epoch containerAffiliationChecker to return affPresence - }, - prevAffChecker: &testTTLEntity{ - res: affAbsence, // force previous epoch containerAffiliationChecker to return affAbsence - }, - } - - cond := s.prepareTTLCondition(ctx, req) - - // ascertain that error returns as expected - require.NoError(t, - cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL - ) - }) - - t.Run("allow previous epoch affiliation", func(t *testing.T) { - // create custom serviceRequest for test - req := &testTTLEntity{res: []interface{}{ - cid, // force serviceRequest to return cid - true, // force serviceRequest to allow previous network map - }} - - s := &coreTTLCondPreparer{ - curAffChecker: &testTTLEntity{ - res: affAbsence, // force current epoch containerAffiliationChecker to return affAbsence - }, - prevAffChecker: &testTTLEntity{ - res: affPresence, // force previous epoch containerAffiliationChecker to return affPresence - }, - } - - cond := s.prepareTTLCondition(ctx, req) - - // ascertain that error returns as expected - require.NoError(t, - cond(service.SingleForwardingTTL-1), // pass any non-forwarding TTL - ) - }) - }) - }) -} - -func Test_coreCnrAffChecker_affiliated(t *testing.T) { - ctx := context.TODO() - - // create container ID - cid := testObjectAddress(t).CID - - log := zap.L() - 
- t.Run("local network address store error", func(t *testing.T) { - // create custom error for test - saErr := errors.New("test error for self addr store") - - s := &corePlacementUtil{ - localAddrStore: &testTTLEntity{ - err: saErr, // force address store to return saErr - }, - log: log, - } - - require.Equal(t, affUnknown, s.affiliated(ctx, cid)) - }) - - t.Run("placement build result", func(t *testing.T) { - // create network address - mAddr := testNode(t, 0) - - // create custom AddressStore - as := &testTTLEntity{ - res: mAddr, // force AddressStore to return created address - } - - t.Run("error", func(t *testing.T) { - pb := &testTTLEntity{ - f: func(items ...interface{}) { - t.Run("correct placement builder params", func(t *testing.T) { - require.Equal(t, mAddr, items[0].(multiaddr.Multiaddr)) - require.Equal(t, cid, items[1].(CID)) - require.Equal(t, true, items[2].(bool)) - }) - }, - } - - pb.err = errors.New("") // force Placer to return some non-nil error - - s := &corePlacementUtil{ - prevNetMap: true, - localAddrStore: as, - placementBuilder: pb, - log: log, - } - - require.Equal(t, affUnknown, s.affiliated(ctx, cid)) - - pb.err = container.ErrNotFound - - require.Equal(t, affNotFound, s.affiliated(ctx, cid)) - }) - - t.Run("no error", func(t *testing.T) { - t.Run("affiliation", func(t *testing.T) { - s := &corePlacementUtil{ - localAddrStore: as, - placementBuilder: &testTTLEntity{ - res: true, // force Placer to return true, nil - }, - log: log, - } - - require.Equal(t, affPresence, s.affiliated(ctx, cid)) - }) - - t.Run("non-affiliation", func(t *testing.T) { - s := &corePlacementUtil{ - localAddrStore: as, - placementBuilder: &testTTLEntity{ - res: false, // force Placer to return false, nil - }, - log: log, - } - - require.Equal(t, affAbsence, s.affiliated(ctx, cid)) - }) - }) - }) -} - -// testNode returns 0.0.0.0:(8000+num). -func testNode(t *testing.T, num int) multiaddr.Multiaddr { - mAddr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/" + strconv.Itoa(8000+num)) - require.NoError(t, err) - return mAddr -} - -// testObjectAddress returns new random object address. -func testObjectAddress(t *testing.T) Address { - oid, err := refs.NewObjectID() - require.NoError(t, err) - return Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid} -} - -// testData returns size bytes of random data. -func testData(t *testing.T, size int) []byte { - res := make([]byte, size) - _, err := rand.Read(res) - require.NoError(t, err) - return res -} diff --git a/pkg/network/transport/object/grpc/verb.go b/pkg/network/transport/object/grpc/verb.go deleted file mode 100644 index 8551b91f1..000000000 --- a/pkg/network/transport/object/grpc/verb.go +++ /dev/null @@ -1,79 +0,0 @@ -package object - -import ( - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" -) - -// Verb is a type alias of -// Token_Info_Verb from service package of neofs-api-go. 
-type Verb = service.Token_Info_Verb - -const ( - undefinedVerbDesc uint32 = 1 << iota - putVerbDesc - getVerbDesc - headVerbDesc - deleteVerbDesc - searchVerbDesc - rangeVerbDesc - rangeHashVerbDesc -) - -const ( - headSpawnMask = headVerbDesc | getVerbDesc | putVerbDesc | rangeVerbDesc | rangeHashVerbDesc - rangeHashSpawnMask = rangeHashVerbDesc - rangeSpawnMask = rangeVerbDesc | getVerbDesc - getSpawnMask = getVerbDesc - putSpawnMask = putVerbDesc | deleteVerbDesc - deleteSpawnMask = deleteVerbDesc - searchSpawnMask = searchVerbDesc | getVerbDesc | putVerbDesc | headVerbDesc | rangeVerbDesc | rangeHashVerbDesc | deleteVerbDesc -) - -func toVerbDesc(verb Verb) uint32 { - switch verb { - case service.Token_Info_Put: - return putVerbDesc - case service.Token_Info_Get: - return getVerbDesc - case service.Token_Info_Head: - return headVerbDesc - case service.Token_Info_Delete: - return deleteVerbDesc - case service.Token_Info_Search: - return searchVerbDesc - case service.Token_Info_Range: - return rangeVerbDesc - case service.Token_Info_RangeHash: - return rangeHashVerbDesc - default: - return undefinedVerbDesc - } -} - -func toSpawnMask(rt object.RequestType) uint32 { - switch rt { - case object.RequestPut: - return putSpawnMask - case object.RequestGet: - return getSpawnMask - case object.RequestHead: - return headSpawnMask - case object.RequestDelete: - return deleteSpawnMask - case object.RequestSearch: - return searchSpawnMask - case object.RequestRange: - return rangeSpawnMask - case object.RequestRangeHash: - return rangeHashSpawnMask - default: - return undefinedVerbDesc - } -} - -func allowedSpawn(from Verb, to object.RequestType) bool { - desc := toVerbDesc(from) - - return toSpawnMask(to)&desc == desc -} diff --git a/pkg/network/transport/object/grpc/verb_test.go b/pkg/network/transport/object/grpc/verb_test.go deleted file mode 100644 index 0c01e4bed..000000000 --- a/pkg/network/transport/object/grpc/verb_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package object - -import ( - "testing" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/stretchr/testify/require" -) - -func TestAllowedSpawn(t *testing.T) { - items := []struct { - to object.RequestType - ok []Verb - fail []Verb - }{ - { // Put - to: object.RequestPut, - ok: []Verb{ - service.Token_Info_Put, - service.Token_Info_Delete, - }, - fail: []Verb{ - service.Token_Info_Get, - service.Token_Info_Head, - service.Token_Info_Range, - service.Token_Info_RangeHash, - }, - }, - { // Get - to: object.RequestGet, - ok: []Verb{ - service.Token_Info_Get, - }, - fail: []Verb{ - service.Token_Info_Put, - service.Token_Info_Delete, - service.Token_Info_RangeHash, - service.Token_Info_Head, - service.Token_Info_Search, - service.Token_Info_Range, - }, - }, - { // Head - to: object.RequestHead, - ok: []Verb{ - service.Token_Info_Head, - service.Token_Info_Put, - service.Token_Info_Range, - service.Token_Info_Get, - service.Token_Info_RangeHash, - }, - fail: []Verb{ - service.Token_Info_Search, - service.Token_Info_Delete, - }, - }, - { // Delete - to: object.RequestDelete, - ok: []Verb{ - service.Token_Info_Delete, - }, - fail: []Verb{ - service.Token_Info_Get, - service.Token_Info_Head, - service.Token_Info_Range, - service.Token_Info_RangeHash, - service.Token_Info_Put, - service.Token_Info_Search, - }, - }, - { // Search - to: object.RequestSearch, - ok: []Verb{ - service.Token_Info_Put, - service.Token_Info_Get, - service.Token_Info_Head, - service.Token_Info_Delete, - 
service.Token_Info_Range, - service.Token_Info_RangeHash, - service.Token_Info_Search, - }, - fail: []Verb{}, - }, - { // Range - to: object.RequestRange, - ok: []Verb{ - service.Token_Info_Get, - service.Token_Info_Range, - }, - fail: []Verb{ - service.Token_Info_Put, - service.Token_Info_Delete, - service.Token_Info_RangeHash, - service.Token_Info_Head, - service.Token_Info_Search, - }, - }, - { // RangeHash - to: object.RequestRangeHash, - ok: []Verb{ - service.Token_Info_RangeHash, - }, - fail: []Verb{ - service.Token_Info_Put, - service.Token_Info_Get, - service.Token_Info_Delete, - service.Token_Info_Range, - service.Token_Info_Head, - service.Token_Info_Search, - }, - }, - } - - for _, item := range items { - for _, from := range item.ok { - require.True(t, allowedSpawn(from, item.to)) - } - - for _, from := range item.fail { - require.False(t, allowedSpawn(from, item.to)) - } - } -} diff --git a/pkg/network/transport/object/grpc/verification.go b/pkg/network/transport/object/grpc/verification.go deleted file mode 100644 index de51365c8..000000000 --- a/pkg/network/transport/object/grpc/verification.go +++ /dev/null @@ -1,36 +0,0 @@ -package object - -import ( - "context" - - "github.com/nspcc-dev/neofs-api-go/service" -) - -type ( - verifyRequestFunc func(token service.RequestVerifyData) error - - // verifyPreProcessor is an implementation of requestPreProcessor interface. - verifyPreProcessor struct { - // Verifying function. - fVerify verifyRequestFunc - } -) - -var _ requestPreProcessor = (*verifyPreProcessor)(nil) - -// requestPreProcessor method implementation. -// -// Panics with pmEmptyServiceRequest on empty request. -// -// Returns result of internal requestVerifyFunc instance. -func (s *verifyPreProcessor) preProcess(_ context.Context, req serviceRequest) (err error) { - if req == nil { - panic(pmEmptyServiceRequest) - } - - if err = s.fVerify(req); err != nil { - err = errUnauthenticated - } - - return -} diff --git a/pkg/network/transport/object/grpc/verification_test.go b/pkg/network/transport/object/grpc/verification_test.go deleted file mode 100644 index 82adf0c18..000000000 --- a/pkg/network/transport/object/grpc/verification_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package object - -import ( - "context" - "testing" - - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testVerificationEntity struct { - // Set of interfaces which testCommonEntity must implement, but some methods from those does not call. - serviceRequest - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. 
- err error - } -) - -func Test_verifyPreProcessor_preProcess(t *testing.T) { - ctx := context.TODO() - - t.Run("empty request", func(t *testing.T) { - require.PanicsWithValue(t, pmEmptyServiceRequest, func() { - _ = new(verifyPreProcessor).preProcess(ctx, nil) - }) - }) - - t.Run("correct result", func(t *testing.T) { - t.Run("failure", func(t *testing.T) { - // create custom error - vErr := errors.New("test error for verifying func") - - s := &verifyPreProcessor{ - fVerify: func(service.RequestVerifyData) error { return vErr }, // force requestVerifyFunc to return vErr - } - - // ascertain that error returns as expected - require.EqualError(t, - s.preProcess(ctx, new(testVerificationEntity)), - errUnauthenticated.Error(), - ) - }) - - t.Run("success", func(t *testing.T) { - s := &verifyPreProcessor{ - fVerify: func(service.RequestVerifyData) error { return nil }, // force requestVerifyFunc to return nil - } - - // ascertain that nil error returns as expected - require.NoError(t, s.preProcess(ctx, new(testVerificationEntity))) - }) - }) -} diff --git a/pkg/network/transport/session/grpc/.gitkeep b/pkg/network/transport/session/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/session/grpc/create.go b/pkg/network/transport/session/grpc/create.go deleted file mode 100644 index 85696fbd2..000000000 --- a/pkg/network/transport/session/grpc/create.go +++ /dev/null @@ -1,53 +0,0 @@ -package session - -import ( - "context" - "errors" - - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/session" -) - -var errExpiredSession = errors.New("expired session") - -func (s sessionService) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) { - // check lifetime - expired := req.ExpirationEpoch() - if s.epochReceiver.Epoch() > expired { - return nil, errExpiredSession - } - - // generate private token for session - pToken, err := session.NewPrivateToken(expired) - if err != nil { - return nil, err - } - - pkBytes, err := session.PublicSessionToken(pToken) - if err != nil { - return nil, err - } - - // generate token ID - tokenID, err := refs.NewUUID() - if err != nil { - return nil, err - } - - // create private token storage key - pTokenKey := session.PrivateTokenKey{} - pTokenKey.SetOwnerID(req.GetOwnerID()) - pTokenKey.SetTokenID(tokenID) - - // store private token - if err := s.ts.Store(pTokenKey, pToken); err != nil { - return nil, err - } - - // construct response - resp := new(session.CreateResponse) - resp.SetID(tokenID) - resp.SetSessionKey(pkBytes) - - return resp, nil -} diff --git a/pkg/network/transport/session/grpc/service.go b/pkg/network/transport/session/grpc/service.go deleted file mode 100644 index 5bf743b48..000000000 --- a/pkg/network/transport/session/grpc/service.go +++ /dev/null @@ -1,67 +0,0 @@ -package session - -import ( - "github.com/nspcc-dev/neofs-api-go/session" - libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - "go.uber.org/zap" - "google.golang.org/grpc" -) - -type ( - sessionService struct { - ts TokenStore - log *zap.Logger - - epochReceiver EpochReceiver - } - - // Service is an interface of the server of Session service. - Service interface { - libgrpc.Service - session.SessionServer - } - - // EpochReceiver is an interface of the container of epoch number with read access. - EpochReceiver interface { - Epoch() uint64 - } - - // Params groups the parameters of Session service server's constructor. 
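// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): wiring the Session service
// onto a gRPC server via New and Register. The tokenStore, epochs and log
// arguments are assumptions standing in for real dependencies implementing
// TokenStore, EpochReceiver and *zap.Logger.

func newSessionServerSketch(tokenStore TokenStore, epochs EpochReceiver, log *zap.Logger) *grpc.Server {
	srv := grpc.NewServer()

	svc := New(Params{
		TokenStore:    tokenStore,
		Logger:        log,
		EpochReceiver: epochs,
	})

	// attaches the Create handler shown in create.go above
	svc.Register(srv)

	return srv
}
// ---------------------------------------------------------------------------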
- Params struct { - TokenStore TokenStore - - Logger *zap.Logger - - EpochReceiver EpochReceiver - } - - // TokenStore is a type alias of - // TokenStore from session package of neofs-api-go. - TokenStore = session.PrivateTokenStore - - // CreateRequest is a type alias of - // CreateRequest from session package of neofs-api-go. - CreateRequest = session.CreateRequest - - // CreateResponse is a type alias of - // CreateResponse from session package of neofs-api-go. - CreateResponse = session.CreateResponse -) - -// New is an Session service server's constructor. -func New(p Params) Service { - return &sessionService{ - ts: p.TokenStore, - log: p.Logger, - - epochReceiver: p.EpochReceiver, - } -} - -func (sessionService) Name() string { - return "Session Server" -} - -func (s sessionService) Register(srv *grpc.Server) { - session.RegisterSessionServer(srv, s) -} diff --git a/pkg/network/transport/session/grpc/service_test.go b/pkg/network/transport/session/grpc/service_test.go deleted file mode 100644 index 82f85fac1..000000000 --- a/pkg/network/transport/session/grpc/service_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package session - -// TODO: write tests diff --git a/pkg/network/transport/state/grpc/.gitkeep b/pkg/network/transport/state/grpc/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/network/transport/state/grpc/service.go b/pkg/network/transport/state/grpc/service.go deleted file mode 100644 index 49d5af770..000000000 --- a/pkg/network/transport/state/grpc/service.go +++ /dev/null @@ -1,322 +0,0 @@ -package state - -import ( - "context" - "crypto/ecdsa" - "encoding/hex" - "strconv" - - "github.com/nspcc-dev/neofs-api-go/bootstrap" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/state" - crypto "github.com/nspcc-dev/neofs-crypto" - contract "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap/wrapper" - "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - libgrpc "github.com/nspcc-dev/neofs-node/pkg/network/transport/grpc" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/spf13/viper" - "go.uber.org/zap" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type ( - // Service is an interface of the server of State service. - Service interface { - state.StatusServer - grpc.Service - Healthy() error - } - - // HealthChecker is an interface of node healthiness checking tool. - HealthChecker interface { - Name() string - Healthy() bool - } - - // Stater is an interface of the node's network state storage with read access. - Stater interface { - NetworkState() *bootstrap.SpreadMap - } - - NetMapClient = contract.Wrapper - - // Params groups the parameters of State service server's constructor. - Params struct { - Stater Stater - - Logger *zap.Logger - - Viper *viper.Viper - - Checkers []HealthChecker - - PrivateKey *ecdsa.PrivateKey - - Client *NetMapClient - } - - stateService struct { - state Stater - config *viper.Viper - checkers []HealthChecker - private *ecdsa.PrivateKey - owners map[refs.OwnerID]struct{} - - netMapClient *NetMapClient - } - - // HealthRequest is a type alias of - // HealthRequest from state package of neofs-api-go. 
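// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): a HealthChecker, as
// consumed by healthy() further down, is any subsystem that reports its own
// liveness under a name. The probe-based type below is hypothetical.

type pingCheckerSketch struct {
	name string
	ping func() error // liveness probe supplied by the owning subsystem
}

func (c pingCheckerSketch) Name() string  { return c.name }
func (c pingCheckerSketch) Healthy() bool { return c.ping() == nil }
// ---------------------------------------------------------------------------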
- HealthRequest = state.HealthRequest -) - -var ( - errEmptyViper = errors.New("empty config") - errEmptyLogger = errors.New("empty logger") - errEmptyStater = errors.New("empty stater") - errUnknownChangeState = errors.New("received unknown state") -) - -const msgMissingRequestInitiator = "missing request initiator" - -var requestVerifyFunc = libgrpc.VerifyRequestWithSignatures - -// New is an State service server's constructor. -func New(p Params) (Service, error) { - switch { - case p.Logger == nil: - return nil, errEmptyLogger - case p.Viper == nil: - return nil, errEmptyViper - case p.Stater == nil: - return nil, errEmptyStater - case p.PrivateKey == nil: - return nil, crypto.ErrEmptyPrivateKey - } - - svc := &stateService{ - config: p.Viper, - state: p.Stater, - private: p.PrivateKey, - owners: fetchOwners(p.Logger, p.Viper), - checkers: make([]HealthChecker, 0, len(p.Checkers)), - - netMapClient: p.Client, - } - - for i, checker := range p.Checkers { - if checker == nil { - p.Logger.Debug("ignore empty checker", - zap.Int("index", i)) - continue - } - - p.Logger.Info("register health-checker", - zap.String("name", checker.Name())) - - svc.checkers = append(svc.checkers, checker) - } - - return svc, nil -} - -func fetchOwners(l *zap.Logger, v *viper.Viper) map[refs.OwnerID]struct{} { - // if config.yml used: - items := v.GetStringSlice("node.rpc.owners") - - for i := 0; ; i++ { - item := v.GetString("node.rpc.owners." + strconv.Itoa(i)) - - if item == "" { - l.Info("stat: skip empty owner", zap.Int("idx", i)) - break - } - - items = append(items, item) - } - - result := make(map[refs.OwnerID]struct{}, len(items)) - - for i := range items { - var owner refs.OwnerID - - if data, err := hex.DecodeString(items[i]); err != nil { - l.Warn("stat: skip wrong hex data", - zap.Int("idx", i), - zap.String("key", items[i]), - zap.Error(err)) - - continue - } else if key := crypto.UnmarshalPublicKey(data); key == nil { - l.Warn("stat: skip wrong key", - zap.Int("idx", i), - zap.String("key", items[i])) - continue - } else if owner, err = refs.NewOwnerID(key); err != nil { - l.Warn("stat: skip wrong key", - zap.Int("idx", i), - zap.String("key", items[i]), - zap.Error(err)) - continue - } - - result[owner] = struct{}{} - - l.Info("rpc owner added", zap.Stringer("owner", owner)) - } - - return result -} - -func nonForwarding(ttl uint32) error { - if ttl != service.NonForwardingTTL { - return status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()) - } - - return nil -} - -func requestInitiator(req service.SignKeyPairSource) *ecdsa.PublicKey { - if signKeys := req.GetSignKeyPairs(); len(signKeys) > 0 { - return signKeys[0].GetPublicKey() - } - - return nil -} - -// ChangeState allows to change current node state of node. -// To permit access, used server config options. -// The request should be signed. 
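// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): ChangeState, DumpConfig and
// DumpVars below repeat the same authorization chain: take the first signing
// key of the request, map it to an OwnerID and require that owner to be in the
// configured whitelist. A shared helper along these lines is an assumption,
// not the original code.

func (s *stateService) checkOwnerSketch(req service.SignKeyPairSource) error {
	key := requestInitiator(req)
	if key == nil {
		return status.Error(codes.InvalidArgument, msgMissingRequestInitiator)
	}

	owner, err := refs.NewOwnerID(key)
	if err != nil {
		return status.Error(codes.InvalidArgument, err.Error())
	}

	if _, ok := s.owners[owner]; !ok {
		return status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error())
	}

	return nil
}
// ---------------------------------------------------------------------------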
-func (s *stateService) ChangeState(ctx context.Context, in *state.ChangeStateRequest) (*state.ChangeStateResponse, error) { - // verify request structure - if err := requestVerifyFunc(in); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - // verify change state permission - if key := requestInitiator(in); key == nil { - return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator) - } else if owner, err := refs.NewOwnerID(key); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } else if _, ok := s.owners[owner]; !ok { - return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error()) - } - - // convert State field to NodeState - if in.GetState() != state.ChangeStateRequest_Offline { - return nil, status.Error(codes.InvalidArgument, errUnknownChangeState.Error()) - } - - // set update state parameters - if err := s.netMapClient.UpdatePeerState( - crypto.MarshalPublicKey(&s.private.PublicKey), - contract.StateOffline, - ); err != nil { - return nil, status.Error(codes.Aborted, err.Error()) - } - - return new(state.ChangeStateResponse), nil -} - -// DumpConfig request allows dumping settings for the current node. -// To permit access, used server config options. -// The request should be signed. -func (s *stateService) DumpConfig(_ context.Context, req *state.DumpRequest) (*state.DumpResponse, error) { - if err := service.ProcessRequestTTL(req, nonForwarding); err != nil { - return nil, err - } else if err = requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } else if key := requestInitiator(req); key == nil { - return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator) - } else if owner, err := refs.NewOwnerID(key); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } else if _, ok := s.owners[owner]; !ok { - return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error()) - } - - return state.EncodeConfig(s.config) -} - -// Netmap returns SpreadMap from Stater (IRState / Place-component). -func (s *stateService) Netmap(_ context.Context, req *state.NetmapRequest) (*bootstrap.SpreadMap, error) { - if err := service.ProcessRequestTTL(req); err != nil { - return nil, err - } else if err = requestVerifyFunc(req); err != nil { - return nil, err - } - - if s.state != nil { - return s.state.NetworkState(), nil - } - - return nil, status.New(codes.Unavailable, "service unavailable").Err() -} - -func (s *stateService) healthy() error { - for _, svc := range s.checkers { - if !svc.Healthy() { - return errors.Errorf("service(%s) unhealthy", svc.Name()) - } - } - - return nil -} - -// Healthy returns error as status of service, if nil service healthy. -func (s *stateService) Healthy() error { return s.healthy() } - -// Check that all checkers is healthy. 
-func (s *stateService) HealthCheck(_ context.Context, req *HealthRequest) (*state.HealthResponse, error) { - if err := service.ProcessRequestTTL(req); err != nil { - return nil, err - } else if err = requestVerifyFunc(req); err != nil { - return nil, err - } - - var ( - err = s.healthy() - resp = &state.HealthResponse{Healthy: true, Status: "OK"} - ) - - if err != nil { - resp.Healthy = false - resp.Status = err.Error() - } - - return resp, nil -} - -func (*stateService) Metrics(_ context.Context, req *state.MetricsRequest) (*state.MetricsResponse, error) { - if err := service.ProcessRequestTTL(req); err != nil { - return nil, err - } else if err = requestVerifyFunc(req); err != nil { - return nil, err - } - - return state.EncodeMetrics(prometheus.DefaultGatherer) -} - -func (s *stateService) DumpVars(_ context.Context, req *state.DumpVarsRequest) (*state.DumpVarsResponse, error) { - if err := service.ProcessRequestTTL(req, nonForwarding); err != nil { - return nil, err - } else if err = requestVerifyFunc(req); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } else if key := requestInitiator(req); key == nil { - return nil, status.Error(codes.InvalidArgument, msgMissingRequestInitiator) - } else if owner, err := refs.NewOwnerID(key); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } else if _, ok := s.owners[owner]; !ok { - return nil, status.Error(codes.PermissionDenied, service.ErrWrongOwner.Error()) - } - - return state.EncodeVariables(), nil -} - -// Name of the service. -func (*stateService) Name() string { return "StatusService" } - -// Register service on gRPC server. -func (s *stateService) Register(g *grpc.Server) { state.RegisterStatusServer(g, s) } diff --git a/pkg/network/transport/state/grpc/service_test.go b/pkg/network/transport/state/grpc/service_test.go deleted file mode 100644 index 315fc84a0..000000000 --- a/pkg/network/transport/state/grpc/service_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package state - -import ( - "context" - "crypto/ecdsa" - "encoding/hex" - "encoding/json" - "expvar" - "os" - "strings" - "testing" - - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/state" - crypto "github.com/nspcc-dev/neofs-crypto" - testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/spf13/viper" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -var requestSignFunc = service.SignRequestData - -func Test_nonForwarding(t *testing.T) { - cases := []struct { - err error - ttl uint32 - name string - }{ - { - name: "ZeroTTL", - ttl: service.ZeroTTL, - err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), - }, - { - name: "SingleForwardingTTL", - ttl: service.SingleForwardingTTL, - err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), - }, - { - name: "NonForwardingTTL", - ttl: service.NonForwardingTTL, - err: nil, - }, - } - - for i := range cases { - tt := cases[i] - t.Run(tt.name, func(t *testing.T) { - err := nonForwarding(tt.ttl) - switch tt.err { - case nil: - require.NoError(t, err, tt.name) - default: - require.EqualError(t, err, tt.err.Error()) - } - }) - } -} - -func Test_fetchOwners(t *testing.T) { - l := testlogger.NewLogger(false) - - t.Run("from config options", func(t *testing.T) { - key0 := test.DecodeKey(0) - require.NotEmpty(t, key0) - - data0 := 
crypto.MarshalPublicKey(&key0.PublicKey) - hKey0 := hex.EncodeToString(data0) - - owner0, err := refs.NewOwnerID(&key0.PublicKey) - require.NoError(t, err) - - v := viper.New() - v.SetDefault("node.rpc.owners", []string{hKey0}) - - owners := fetchOwners(l, v) - require.Len(t, owners, 1) - require.Contains(t, owners, owner0) - }) - - t.Run("from environment and config options", func(t *testing.T) { - key0 := test.DecodeKey(0) - require.NotEmpty(t, key0) - - data0 := crypto.MarshalPublicKey(&key0.PublicKey) - hKey0 := hex.EncodeToString(data0) - - owner0, err := refs.NewOwnerID(&key0.PublicKey) - require.NoError(t, err) - - key1 := test.DecodeKey(1) - require.NotEmpty(t, key1) - - owner1, err := refs.NewOwnerID(&key1.PublicKey) - require.NoError(t, err) - - data1 := crypto.MarshalPublicKey(&key1.PublicKey) - hKey1 := hex.EncodeToString(data1) - - require.NoError(t, os.Setenv("NEOFS_NODE_RPC_OWNERS_0", hKey1)) - - v := viper.New() - v.AutomaticEnv() - v.SetEnvPrefix("NeoFS") - v.SetConfigType("yaml") - v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - v.SetDefault("node.rpc.owners", []string{hKey0}) - - require.NoError(t, v.ReadConfig(strings.NewReader(""))) - - owners := fetchOwners(l, v) - - require.Len(t, owners, 2) - require.Contains(t, owners, owner0) - require.Contains(t, owners, owner1) - }) -} - -func TestStateService_DumpConfig(t *testing.T) { - cases := []struct { - err error - ttl uint32 - name string - key *ecdsa.PrivateKey - }{ - { - err: nil, - name: "allow", - key: test.DecodeKey(0), - ttl: service.NonForwardingTTL, - }, - { - name: "wrong ttl", - key: test.DecodeKey(0), - ttl: service.SingleForwardingTTL, - err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), - }, - } - key := test.DecodeKey(0) - require.NotEmpty(t, key) - - owner, err := refs.NewOwnerID(&key.PublicKey) - require.NoError(t, err) - - owners := map[refs.OwnerID]struct{}{ - owner: {}, - } - - viper.SetDefault("test", true) - - svc := stateService{ - owners: owners, - config: viper.GetViper(), - } - - for i := range cases { - tt := cases[i] - t.Run(tt.name, func(t *testing.T) { - req := new(state.DumpRequest) - - req.SetTTL(tt.ttl) - if tt.key != nil { - require.NoError(t, requestSignFunc(tt.key, req)) - } - - res, err := svc.DumpConfig(context.Background(), req) - switch tt.err { - case nil: - require.NoError(t, err, tt.name) - require.NotEmpty(t, res) - require.NotEmpty(t, res.Config) - default: - require.EqualError(t, err, tt.err.Error()) - require.Empty(t, res) - } - }) - } -} - -func TestStateService_DumpVars(t *testing.T) { - cases := []struct { - err error - ttl uint32 - name string - key *ecdsa.PrivateKey - }{ - { - err: nil, - name: "allow", - key: test.DecodeKey(0), - ttl: service.NonForwardingTTL, - }, - { - name: "wrong ttl", - key: test.DecodeKey(0), - ttl: service.SingleForwardingTTL, - err: status.Error(codes.InvalidArgument, service.ErrInvalidTTL.Error()), - }, - } - key := test.DecodeKey(0) - require.NotEmpty(t, key) - - owner, err := refs.NewOwnerID(&key.PublicKey) - require.NoError(t, err) - - owners := map[refs.OwnerID]struct{}{ - owner: {}, - } - - svc := stateService{owners: owners} - - expvar.NewString("test1").Set("test1") - expvar.NewString("test").Set("test") - - for i := range cases { - tt := cases[i] - t.Run(tt.name, func(t *testing.T) { - req := new(state.DumpVarsRequest) - - req.SetTTL(tt.ttl) - if tt.key != nil { - require.NoError(t, requestSignFunc(tt.key, req)) - } - - res, err := svc.DumpVars(nil, req) - switch tt.err { - case nil: - require.NoError(t, err, 
tt.name) - require.NotEmpty(t, res) - require.NotEmpty(t, res.Variables) - - dump := make(map[string]interface{}) - require.NoError(t, json.Unmarshal(res.Variables, &dump)) - - require.Contains(t, dump, "test1") - require.Equal(t, dump["test1"], "test1") - - require.Contains(t, dump, "test") - require.Equal(t, dump["test"], "test") - default: - require.EqualError(t, err, tt.err.Error()) - require.Empty(t, res) - } - }) - } -} diff --git a/pkg/services/id/.gitkeep b/pkg/services/id/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/services/id/owner_key.go b/pkg/services/id/owner_key.go deleted file mode 100644 index ddb0789a7..000000000 --- a/pkg/services/id/owner_key.go +++ /dev/null @@ -1,53 +0,0 @@ -package id - -import ( - "errors" - - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/container" -) - -type OwnerID = container.OwnerID - -// OwnerKeyContainer is an interface of the container of owner's ID and key pair with read access. -type OwnerKeyContainer interface { - GetOwnerID() OwnerID - GetOwnerKey() []byte -} - -// ErrNilOwnerKeyContainer is returned by functions that expect a non-nil -// OwnerKeyContainer, but received nil. -var ErrNilOwnerKeyContainer = errors.New("owner-key container is nil") - -// VerifyKey checks if the public key converts to owner ID. -// -// If passed OwnerKeyContainer is nil, ErrNilOwnerKeyContainer returns. -// If public key cannot be unmarshaled, service.ErrInvalidPublicKeyBytes returns. -// If public key is not converted to owner ID, service.ErrWrongOwner returns. -// With neo:morph adoption public key can be unrelated to owner ID. In this -// case VerifyKey should call NeoFS.ID smart-contract to check whether public -// key is bounded with owner ID. If there is no bound, then return -// service.ErrWrongOwner. -func VerifyKey(src OwnerKeyContainer) error { - if src == nil { - return ErrNilOwnerKeyContainer - } - - pubKey := crypto.UnmarshalPublicKey(src.GetOwnerKey()) - if pubKey == nil { - return service.ErrInvalidPublicKeyBytes - } - - ownerFromKey, err := refs.NewOwnerID(pubKey) - if err != nil { - return err - } - - if !ownerFromKey.Equal(src.GetOwnerID()) { - return service.ErrWrongOwner - } - - return nil -} diff --git a/pkg/services/object_manager/placement/.gitkeep b/pkg/services/object_manager/placement/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/services/object_manager/placement/graph.go b/pkg/services/object_manager/placement/graph.go deleted file mode 100644 index 9f328edeb..000000000 --- a/pkg/services/object_manager/placement/graph.go +++ /dev/null @@ -1,178 +0,0 @@ -package placement - -import ( - "github.com/gogo/protobuf/proto" - "github.com/multiformats/go-multiaddr" - netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/netmap" - "github.com/pkg/errors" -) - -// method returns copy of current Graph. 
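// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): using id.VerifyKey from
// pkg/services/id/owner_key.go above. Any value exposing an owner ID together
// with the matching public key bytes satisfies OwnerKeyContainer; the
// ownerKeyPair type and helper below are hypothetical.

type ownerKeyPair struct {
	owner id.OwnerID
	key   []byte
}

func (p ownerKeyPair) GetOwnerID() id.OwnerID { return p.owner }
func (p ownerKeyPair) GetOwnerKey() []byte    { return p.key }

func verifyRequestOwnerSketch(owner id.OwnerID, key []byte) error {
	// returns service.ErrWrongOwner when key does not resolve to owner
	return id.VerifyKey(ownerKeyPair{owner: owner, key: key})
}
// ---------------------------------------------------------------------------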
-func (g *graph) copy() *graph { - var ( - place *netmap.PlacementRule - roots = make([]*netmap.Bucket, 0, len(g.roots)) - items = make([]netmapcore.Info, len(g.items)) - ) - - copy(items, g.items) - - for _, root := range g.roots { - var r *netmap.Bucket - - if root != nil { - tmp := root.Copy() - r = &tmp - } - - roots = append(roots, r) - } - - place = proto.Clone(g.place).(*netmap.PlacementRule) - - return &graph{ - roots: roots, - items: items, - place: place, - } -} - -func (g *graph) Exclude(list []multiaddr.Multiaddr) Graph { - if len(list) == 0 { - return g - } - - var ( - sub = g.copy() - ignore = make([]uint32, 0, len(list)) - ) - - for i := range list { - for j := range sub.items { - if list[i].String() == sub.items[j].Address() { - ignore = append(ignore, uint32(j)) - } - } - } - - return sub.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - group.Exclude = ignore - return bucket.GetMaxSelection(group) - }) -} - -// Filter container by rules. -func (g *graph) Filter(rule FilterRule) Graph { - if rule == nil { - return g - } - - var ( - sub = g.copy() - roots = make([]*netmap.Bucket, len(g.roots)) - items = make([]netmapcore.Info, len(g.items)) - ) - - for i := range g.place.SFGroups { - if g.roots[i] == nil { - continue - } - - root := g.roots[i].Copy() - roots[i] = rule(g.place.SFGroups[i], &root) - } - - copy(items, g.items) - - return &graph{ - roots: roots, - items: items, - place: sub.place, - } -} - -// NodeList returns slice of MultiAddresses for current graph. -func (g *graph) NodeList() ([]multiaddr.Multiaddr, error) { - var ( - ln = uint32(len(g.items)) - result = make([]multiaddr.Multiaddr, 0, ln) - items = make([]netmapcore.Info, len(g.items)) - ) - - if ln == 0 { - return nil, ErrEmptyNodes - } - - copy(items, g.items) - - for _, root := range g.roots { - if root == nil { - continue - } - - list := root.Nodelist() - if len(list) == 0 { - continue - } - - for _, idx := range list { - if ln <= idx.N { - return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx) - } - - addr, err := multiaddr.NewMultiaddr(items[idx.N].Address()) - if err != nil { - return nil, errors.Wrapf(err, "could not convert multi address(%s)", g.items[idx.N].Address()) - } - - result = append(result, addr) - } - } - - if len(result) == 0 { - return nil, ErrEmptyNodes - } - - return result, nil -} - -// NodeInfo returns slice of NodeInfo for current graph. 
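// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): a FilterRule that seeds
// each selection group with an object ID, so one container graph yields an
// object-specific node list; the same filter shape is used by wrapper.go and
// the placement tests further below. The helper name is an assumption.

func objectNodesSketch(g Graph, oid ObjectID) ([]multiaddr.Multiaddr, error) {
	return g.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket {
		// per-object selection seeded by the object identifier
		return bucket.GetSelection(group.Selectors, oid[:])
	}).NodeList()
}
// ---------------------------------------------------------------------------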
-func (g *graph) NodeInfo() ([]netmapcore.Info, error) { - var ( - ln = uint32(len(g.items)) - result = make([]netmapcore.Info, 0, ln) - items = make([]netmapcore.Info, len(g.items)) - ) - - if ln == 0 { - return nil, ErrEmptyNodes - } - - copy(items, g.items) - - for _, root := range g.roots { - if root == nil { - continue - } - - list := root.Nodelist() - if len(list) == 0 { - continue - } - - for _, idx := range list { - if ln <= idx.N { - return nil, errors.Errorf("could not find index(%d) in list(size: %d)", ln, idx) - } - - result = append(result, items[idx.N]) - } - } - - if len(result) == 0 { - return nil, ErrEmptyNodes - } - - return result, nil -} diff --git a/pkg/services/object_manager/placement/interface.go b/pkg/services/object_manager/placement/interface.go deleted file mode 100644 index 2f4e45472..000000000 --- a/pkg/services/object_manager/placement/interface.go +++ /dev/null @@ -1,106 +0,0 @@ -package placement - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/bootstrap" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/nspcc-dev/netmap" - "go.uber.org/atomic" - "go.uber.org/zap" -) - -type ( - // Component is interface of placement service - Component interface { - // TODO leave for feature request - - NetworkState() *bootstrap.SpreadMap - Neighbours(seed, epoch uint64, full bool) []peers.ID - Update(epoch uint64, nm *netmapcore.NetMap) error - Query(ctx context.Context, opts ...QueryOption) (Graph, error) - } - - // QueryOptions for query request - QueryOptions struct { - CID refs.CID - Previous int - Excludes []multiaddr.Multiaddr - } - - // QueryOption settings closure - QueryOption func(*QueryOptions) - - // FilterRule bucket callback handler - FilterRule func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket - - // Graph is result of request to Placement-component - Graph interface { - Filter(rule FilterRule) Graph - Exclude(list []multiaddr.Multiaddr) Graph - NodeList() ([]multiaddr.Multiaddr, error) - NodeInfo() ([]netmapcore.Info, error) - } - - // Key to fetch node-list - Key []byte - - // Params to create Placement component - Params struct { - Log *zap.Logger - Netmap *NetMap - Peerstore peers.Store - Fetcher storage.Storage - ChronologyDuration uint64 // storing number of past epochs states - } - - networkState struct { - nm *NetMap - epoch uint64 - } - - // placement is implementation of placement.Component - placement struct { - log *zap.Logger - cnr storage.Storage - - chronologyDur uint64 - nmStore *netMapStore - - ps peers.Store - - healthy *atomic.Bool - } - - // graph is implementation of placement.Graph - graph struct { - roots []*netmap.Bucket - items []netmapcore.Info - place *netmap.PlacementRule - } -) - -// ExcludeNodes to ignore some nodes. -func ExcludeNodes(list []multiaddr.Multiaddr) QueryOption { - return func(opt *QueryOptions) { - opt.Excludes = list - } -} - -// ContainerID set by Key. -func ContainerID(cid refs.CID) QueryOption { - return func(opt *QueryOptions) { - opt.CID = cid - } -} - -// UsePreviousNetmap for query. 
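// ---------------------------------------------------------------------------
// Illustrative sketch (not from the removed file): the query options above,
// together with UsePreviousNetmap just below, compose into a single placement
// query. Function and variable names here are illustrative only.

func queryContainerNodesSketch(ctx context.Context, plc Component, cid refs.CID, busy []multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) {
	graph, err := plc.Query(ctx,
		ContainerID(cid),     // which container to place
		UsePreviousNetmap(1), // look one epoch back
		ExcludeNodes(busy),   // skip nodes that were already tried
	)
	if err != nil {
		return nil, err
	}

	return graph.NodeList()
}
// ---------------------------------------------------------------------------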
-func UsePreviousNetmap(diff int) QueryOption { - return func(opt *QueryOptions) { - opt.Previous = diff - } -} diff --git a/pkg/services/object_manager/placement/neighbours.go b/pkg/services/object_manager/placement/neighbours.go deleted file mode 100644 index d483213c0..000000000 --- a/pkg/services/object_manager/placement/neighbours.go +++ /dev/null @@ -1,69 +0,0 @@ -package placement - -import ( - "math" - - "github.com/nspcc-dev/hrw" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "go.uber.org/zap" -) - -func calculateCount(n int) int { - if n < 30 { - return n - } - - return int(1.4*math.Log(float64(n))+9) + 1 -} - -// Neighbours peers that which are distributed by hrw(seed) -// If full flag is set, all set of peers returns. -// Otherwise, result size depends on calculateCount function. -func (p *placement) Neighbours(seed, epoch uint64, full bool) []peers.ID { - nm := p.nmStore.get(epoch) - if nm == nil { - p.log.Error("could not receive network state", - zap.Uint64("epoch", epoch), - ) - - return nil - } - - rPeers := p.listPeers(nm.Nodes(), !full) - - hrw.SortSliceByValue(rPeers, seed) - - if full { - return rPeers - } - - var ( - ln = len(rPeers) - cut = calculateCount(ln) - ) - - if cut > ln { - cut = ln - } - - return rPeers[:cut] -} - -func (p *placement) listPeers(nodes []netmap.Info, exclSelf bool) []peers.ID { - var ( - id = p.ps.SelfID() - result = make([]peers.ID, 0, len(nodes)) - ) - - for i := range nodes { - key := peers.IDFromBinary(nodes[i].PublicKey()) - if exclSelf && id.Equal(key) { - continue - } - - result = append(result, key) - } - - return result -} diff --git a/pkg/services/object_manager/placement/neighbours_test.go b/pkg/services/object_manager/placement/neighbours_test.go deleted file mode 100644 index 799eecf27..000000000 --- a/pkg/services/object_manager/placement/neighbours_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package placement - -import ( - "crypto/ecdsa" - "strconv" - "testing" - - "bou.ke/monkey" - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/bootstrap" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/stretchr/testify/require" -) - -func testAddress(t *testing.T) multiaddr.Multiaddr { - addr, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/0") - require.NoError(t, err) - return addr -} - -// -- -- // - -func testPeerstore(t *testing.T) peers.Store { - p, err := peers.NewStore(peers.StoreParams{ - Key: test.DecodeKey(-1), - Logger: testlogger.NewLogger(false), - Addr: testAddress(t), - }) - require.NoError(t, err) - - return p -} - -const address = "/ip4/0.0.0.0/tcp/0/p2p/" - -func TestPlacement_Neighbours(t *testing.T) { - t.Run("Placement component NPE fix test", func(t *testing.T) { - nodes := []bootstrap.NodeInfo{ - {Address: address + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}}, - {Address: address + idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}}, - {Address: address + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}}, - } - - ps := testPeerstore(t) - nm := testNetmap(t, nodes) - - p := New(Params{ - Log: testlogger.NewLogger(false), - Peerstore: ps, - }) - - require.NotPanics(t, func() { - 
require.NoError(t, p.Update(1, nm)) - }) - }) - - t.Run("Placement Neighbours TestSuite", func(t *testing.T) { - keys := []*ecdsa.PrivateKey{ - test.DecodeKey(0), - test.DecodeKey(1), - test.DecodeKey(2), - } - nodes := []bootstrap.NodeInfo{ - { - Address: address + idFromString(t, "USA1"), - PubKey: crypto.MarshalPublicKey(&keys[0].PublicKey), - Options: []string{"/Location:Europe/Country:USA/City:NewYork"}, - }, - { - Address: address + idFromString(t, "ITL1"), - PubKey: crypto.MarshalPublicKey(&keys[1].PublicKey), - Options: []string{"/Location:Europe/Country:Italy/City:Rome"}, - }, - { - Address: address + idFromString(t, "RUS1"), - PubKey: crypto.MarshalPublicKey(&keys[2].PublicKey), - Options: []string{"/Location:Europe/Country:Russia/City:SPB"}, - }, - } - - ps := testPeerstore(t) - nm := testNetmap(t, nodes) - - p := New(Params{ - Log: testlogger.NewLogger(false), - Netmap: nm, - Peerstore: ps, - }) - - t.Run("check, that items have expected length (< 30)", func(t *testing.T) { - items := p.Neighbours(1, 0, false) - require.Len(t, items, len(nm.Nodes())) - }) - - t.Run("check, that items have expected length ( > 30)", func(t *testing.T) { - opts := []string{"/Location:Europe/Country:Russia/City:SPB"} - - key, err := ps.GetPublicKey(ps.SelfID()) - require.NoError(t, err) - - keyBytes := crypto.MarshalPublicKey(key) - - addr := address + idFromString(t, "NewRUS") - info := netmap.Info{} - info.SetAddress(addr) - info.SetPublicKey(keyBytes) - info.SetOptions(opts) - require.NoError(t, nm.AddNode(info)) - - for i := 0; i < 30; i++ { - addr := address + idFromString(t, "RUS"+strconv.Itoa(i+2)) - key := test.DecodeKey(i + len(nodes)) - pub := crypto.MarshalPublicKey(&key.PublicKey) - info := netmap.Info{} - info.SetAddress(addr) - info.SetPublicKey(pub) - info.SetOptions(opts) - require.NoError(t, nm.AddNode(info)) - } - - ln := calculateCount(len(nm.Nodes())) - items := p.Neighbours(1, 0, false) - require.Len(t, items, ln) - }) - - t.Run("check, that items is shuffled", func(t *testing.T) { - var cur, pre []peers.ID - for i := uint64(0); i < 10; i++ { - cur = p.Neighbours(i, 0, false) - require.NotEqual(t, pre, cur) - - pre = cur - } - }) - - t.Run("check, that we can request more items that we have", func(t *testing.T) { - require.NotPanics(t, func() { - monkey.Patch(calculateCount, func(i int) int { return i + 1 }) - defer monkey.Unpatch(calculateCount) - - p.Neighbours(1, 0, false) - }) - }) - }) - - t.Run("unknown epoch", func(t *testing.T) { - s := &placement{ - log: testlogger.NewLogger(false), - nmStore: newNetMapStore(), - ps: testPeerstore(t), - } - - require.Empty(t, s.Neighbours(1, 1, false)) - }) - - t.Run("neighbors w/ set full flag", func(t *testing.T) { - var ( - n = 3 - e uint64 = 5 - nm = netmap.New() - nms = newNetMapStore() - ) - - for i := 0; i < n; i++ { - info := netmap.Info{} - info.SetAddress("node" + strconv.Itoa(i)) - info.SetPublicKey([]byte{byte(i)}) - require.NoError(t, nm.AddNode(info)) - } - - nms.put(e, nm) - - s := &placement{ - log: testlogger.NewLogger(false), - nmStore: nms, - ps: testPeerstore(t), - } - - neighbors := s.Neighbours(1, e, true) - - require.Len(t, neighbors, n) - }) -} diff --git a/pkg/services/object_manager/placement/placement.go b/pkg/services/object_manager/placement/placement.go deleted file mode 100644 index dcdb4bd9a..000000000 --- a/pkg/services/object_manager/placement/placement.go +++ /dev/null @@ -1,260 +0,0 @@ -package placement - -import ( - "bytes" - "context" - "strings" - - "github.com/nspcc-dev/neofs-api-go/bootstrap" - 
"github.com/nspcc-dev/neofs-api-go/refs" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - libnetmap "github.com/nspcc-dev/netmap" - "github.com/pkg/errors" - "go.uber.org/atomic" - "go.uber.org/zap" -) - -const defaultChronologyDuration = 1 - -var ( - // ErrEmptyNodes when container doesn't contains any nodes - ErrEmptyNodes = errors.New("container doesn't contains nodes") - - // ErrNodesBucketOmitted when in PlacementRule, Selector has not NodesBucket - ErrNodesBucketOmitted = errors.New("nodes-bucket is omitted") - - // ErrEmptyContainer when GetMaxSelection or GetSelection returns empty result - ErrEmptyContainer = errors.New("could not get container, it's empty") -) - -var errNilNetMap = errors.New("network map is nil") - -// New is a placement component constructor. -func New(p Params) Component { - if p.Netmap == nil { - p.Netmap = netmap.New() - } - - if p.ChronologyDuration <= 0 { - p.ChronologyDuration = defaultChronologyDuration - } - - pl := &placement{ - log: p.Log, - cnr: p.Fetcher, - - chronologyDur: p.ChronologyDuration, - nmStore: newNetMapStore(), - - ps: p.Peerstore, - - healthy: atomic.NewBool(false), - } - - pl.nmStore.put(0, p.Netmap) - - return pl -} - -func (p *placement) Name() string { return "PresentInNetwork" } -func (p *placement) Healthy() bool { return p.healthy.Load() } - -type strNodes []netmap.Info - -func (n strNodes) String() string { - list := make([]string, 0, len(n)) - for i := range n { - list = append(list, n[i].Address()) - } - - return `[` + strings.Join(list, ",") + `]` -} - -func (p *placement) Update(epoch uint64, nm *NetMap) error { - cnm := p.nmStore.get(p.nmStore.epoch()) - if cnm == nil { - return errNilNetMap - } - - items := nm.Nodes() - - p.log.Debug("update to new netmap", - zap.Stringer("nodes", strNodes(items))) - - p.log.Debug("update peerstore") - - if err := p.ps.Update(nm); err != nil { - return err - } - - var ( - pubkeyBinary []byte - healthy bool - ) - - // storage nodes must be presented in network map to be healthy - pubkey, err := p.ps.GetPublicKey(p.ps.SelfID()) - if err != nil { - p.log.Error("can't get my own public key") - } - - pubkeyBinary = crypto.MarshalPublicKey(pubkey) - - for i := range items { - if bytes.Equal(pubkeyBinary, items[i].PublicKey()) { - healthy = true - } - - p.log.Debug("new peer for dht", - zap.Stringer("peer", peers.IDFromBinary(items[i].PublicKey())), - zap.String("addr", items[i].Address())) - } - - // make copy to previous - p.log.Debug("update previous netmap") - - if epoch > p.chronologyDur { - p.nmStore.trim(epoch - p.chronologyDur) - } - - p.log.Debug("update current netmap") - p.nmStore.put(epoch, nm) - - p.log.Debug("update current epoch") - - p.healthy.Store(healthy) - - return nil -} - -// NetworkState returns copy of current NetworkMap. 
-func (p *placement) NetworkState() *bootstrap.SpreadMap { - ns := p.networkState(p.nmStore.epoch()) - if ns == nil { - ns = &networkState{nm: netmap.New()} - } - - nodes := ns.nm.Nodes() - - res := &bootstrap.SpreadMap{ - Epoch: ns.epoch, - NetMap: make([]bootstrap.NodeInfo, 0, len(nodes)), - } - - for i := range nodes { - res.NetMap = append(res.NetMap, bootstrap.NodeInfo{ - Address: nodes[i].Address(), - PubKey: nodes[i].PublicKey(), - Options: nodes[i].Options(), - }) - } - - return res -} - -func (p *placement) networkState(epoch uint64) *networkState { - nm := p.nmStore.get(epoch) - if nm == nil { - return nil - } - - return &networkState{ - nm: nm, - epoch: epoch, - } -} - -// Query returns graph based on container. -func (p *placement) Query(ctx context.Context, opts ...QueryOption) (Graph, error) { - var ( - query QueryOptions - ignore []uint32 - ) - - for _, opt := range opts { - opt(&query) - } - - epoch := p.nmStore.epoch() - if query.Previous > 0 { - epoch -= uint64(query.Previous) - } - - state := p.networkState(epoch) - if state == nil { - return nil, errors.Errorf("could not get network state for epoch #%d", epoch) - } - - items := state.nm.Nodes() - - cnr, err := p.cnr.Get(query.CID) - if err != nil { - return nil, errors.Wrap(err, "could not fetch container") - } - - for i := range query.Excludes { - for j := range items { - if query.Excludes[i].String() == items[j].Address() { - ignore = append(ignore, uint32(j)) - } - } - } - - rule := cnr.PlacementRule() - - return ContainerGraph(state.nm, &rule, ignore, query.CID) -} - -// ContainerGraph applies the placement rules to network map and returns container graph. -func ContainerGraph(nm *NetMap, rule *libnetmap.PlacementRule, ignore []uint32, cid refs.CID) (Graph, error) { - root := nm.Root() - roots := make([]*netmap.Bucket, 0, len(rule.SFGroups)) - - for i := range rule.SFGroups { - rule.SFGroups[i].Exclude = ignore - if ln := len(rule.SFGroups[i].Selectors); ln <= 0 || - rule.SFGroups[i].Selectors[ln-1].Key != libnetmap.NodesBucket { - return nil, errors.Wrapf(ErrNodesBucketOmitted, "container (%s)", cid) - } - - bigSelectors := make([]libnetmap.Select, len(rule.SFGroups[i].Selectors)) - for j := range rule.SFGroups[i].Selectors { - bigSelectors[j] = libnetmap.Select{ - Key: rule.SFGroups[i].Selectors[j].Key, - Count: rule.SFGroups[i].Selectors[j].Count, - } - - if rule.ReplFactor > 1 && rule.SFGroups[i].Selectors[j].Key == libnetmap.NodesBucket { - bigSelectors[j].Count *= rule.ReplFactor - } - } - - sf := libnetmap.SFGroup{ - Selectors: bigSelectors, - Filters: rule.SFGroups[i].Filters, - Exclude: ignore, - } - - if tree := root.Copy().GetMaxSelection(sf); tree != nil { - // fetch graph for replication factor seeded by ContainerID - if tree = tree.GetSelection(bigSelectors, cid[:]); tree == nil { - return nil, errors.Wrapf(ErrEmptyContainer, "for container(%s) with repl-factor(%d)", - cid, rule.ReplFactor) - } - - roots = append(roots, tree) - - continue - } - - return nil, errors.Wrap(ErrEmptyContainer, "empty for bigSelector") - } - - return &graph{ - roots: roots, - items: nm.Nodes(), - place: rule, - }, nil -} diff --git a/pkg/services/object_manager/placement/placement_test.go b/pkg/services/object_manager/placement/placement_test.go deleted file mode 100644 index 9605f1958..000000000 --- a/pkg/services/object_manager/placement/placement_test.go +++ /dev/null @@ -1,411 +0,0 @@ -package placement - -import ( - "context" - "sort" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/mr-tron/base58" 
- "github.com/multiformats/go-multiaddr" - "github.com/multiformats/go-multihash" - "github.com/nspcc-dev/neofs-api-go/bootstrap" - "github.com/nspcc-dev/neofs-api-go/refs" - crypto "github.com/nspcc-dev/neofs-crypto" - libcnr "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/core/container/acl/basic" - "github.com/nspcc-dev/neofs-node/pkg/core/container/storage" - netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - testlogger "github.com/nspcc-dev/neofs-node/pkg/util/logger/test" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/nspcc-dev/netmap" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - fakeDHT struct { - } - - fakeContainerStorage struct { - storage.Storage - *sync.RWMutex - items map[refs.CID]*storage.Container - } -) - -var ( - testDHTCapacity = 100 -) - -// -- -- // - -func testContainerStorage() *fakeContainerStorage { - return &fakeContainerStorage{ - RWMutex: new(sync.RWMutex), - items: make(map[refs.CID]*storage.Container, testDHTCapacity), - } -} - -func (f *fakeContainerStorage) Get(cid storage.CID) (*storage.Container, error) { - f.RLock() - val, ok := f.items[cid] - f.RUnlock() - - if !ok { - return nil, errors.New("value for requested key not found in DHT") - } - - return val, nil -} - -func (f *fakeContainerStorage) Put(c *storage.Container) (*storage.CID, error) { - id, err := libcnr.CalculateID(c) - if err != nil { - return nil, err - } - f.Lock() - f.items[*id] = c - f.Unlock() - - return id, nil -} - -func (f *fakeDHT) UpdatePeers([]peers.ID) { - // do nothing -} - -func (f *fakeDHT) GetValue(ctx context.Context, key string) ([]byte, error) { - panic("implement me") -} - -func (f *fakeDHT) PutValue(ctx context.Context, key string, val []byte) error { - panic("implement me") -} - -func (f *fakeDHT) Get(ctx context.Context, key string) ([]byte, error) { - panic("implement me") -} - -func (f *fakeDHT) Put(ctx context.Context, key string, val []byte) error { - panic("implement me") -} - -// -- -- // - -func testNetmap(t *testing.T, nodes []bootstrap.NodeInfo) *NetMap { - nm := netmapcore.New() - - for i := range nodes { - info := netmapcore.Info{} - info.SetAddress(nodes[i].Address) - info.SetOptions(nodes[i].Options) - info.SetPublicKey(crypto.MarshalPublicKey(&test.DecodeKey(i).PublicKey)) - err := nm.AddNode(info) - require.NoError(t, err) - } - - return nm -} - -// -- -- // - -func idFromString(t *testing.T, id string) string { - buf, err := multihash.Encode([]byte(id), multihash.ID) - require.NoError(t, err) - - return (multihash.Multihash(buf)).B58String() -} - -func idFromAddress(t *testing.T, addr multiaddr.Multiaddr) string { - id, err := addr.ValueForProtocol(multiaddr.P_P2P) - require.NoError(t, err) - - buf, err := base58.Decode(id) - require.NoError(t, err) - - hs, err := multihash.Decode(buf) - require.NoError(t, err) - - return string(hs.Digest) -} - -// -- -- // - -func TestPlacement(t *testing.T) { - multiaddr.SwapToP2pMultiaddrs() - testAddress := "/ip4/0.0.0.0/tcp/0/p2p/" - key := test.DecodeKey(-1) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - ids := map[string]struct{}{ - "GRM1": {}, "GRM2": {}, "GRM3": {}, "GRM4": {}, - "SPN1": {}, "SPN2": {}, "SPN3": {}, "SPN4": {}, - } - - nodes := []bootstrap.NodeInfo{ - {Address: testAddress + idFromString(t, "USA1"), Options: []string{"/Location:Europe/Country:USA/City:NewYork"}}, - {Address: testAddress + 
idFromString(t, "ITL1"), Options: []string{"/Location:Europe/Country:Italy/City:Rome"}}, - {Address: testAddress + idFromString(t, "RUS1"), Options: []string{"/Location:Europe/Country:Russia/City:SPB"}}, - } - - for id := range ids { - var opts []string - switch { - case strings.Contains(id, "GRM"): - opts = append(opts, "/Location:Europe/Country:Germany/City:"+id) - case strings.Contains(id, "SPN"): - opts = append(opts, "/Location:Europe/Country:Spain/City:"+id) - } - - for i := 0; i < 4; i++ { - id := id + strconv.Itoa(i) - - nodes = append(nodes, bootstrap.NodeInfo{ - Address: testAddress + idFromString(t, id), - Options: opts, - }) - } - } - - sort.Slice(nodes, func(i, j int) bool { - return strings.Compare(nodes[i].Address, nodes[j].Address) == -1 - }) - - nm := testNetmap(t, nodes) - - cnrStorage := testContainerStorage() - - p := New(Params{ - Log: testlogger.NewLogger(false), - Peerstore: testPeerstore(t), - Fetcher: cnrStorage, - }) - - require.NoError(t, p.Update(1, nm)) - - oid, err := refs.NewObjectID() - require.NoError(t, err) - - // filter over oid - filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - return bucket.GetSelection(group.Selectors, oid[:]) - } - - owner, err := refs.NewOwnerID(&key.PublicKey) - require.NoError(t, err) - - cnr1 := new(storage.Container) - cnr1.SetOwnerID(owner) - cnr1.SetBasicACL(basic.FromUint32(0)) - cnr1.SetPlacementRule(netmap.PlacementRule{ - ReplFactor: 2, - SFGroups: []netmap.SFGroup{ - { - Selectors: []netmap.Select{ - {Key: "Country", Count: 1}, - {Key: "City", Count: 2}, - {Key: netmap.NodesBucket, Count: 1}, - }, - Filters: []netmap.Filter{ - {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, - }, - }, - }, - }) - - cid1, err := cnrStorage.Put(cnr1) - require.NoError(t, err) - - cnr2 := new(storage.Container) - cnr2.SetOwnerID(owner) - cnr2.SetBasicACL(basic.FromUint32(0)) - cnr2.SetPlacementRule(netmap.PlacementRule{ - ReplFactor: 2, - SFGroups: []netmap.SFGroup{ - { - Selectors: []netmap.Select{ - {Key: "Country", Count: 1}, - {Key: netmap.NodesBucket, Count: 10}, - }, - Filters: []netmap.Filter{ - {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, - }, - }, - }, - }) - - cid2, err := cnrStorage.Put(cnr2) - require.NoError(t, err) - - cnr3 := new(storage.Container) - cnr3.SetOwnerID(owner) - cnr3.SetBasicACL(basic.FromUint32(0)) - cnr3.SetPlacementRule(netmap.PlacementRule{ - ReplFactor: 2, - SFGroups: []netmap.SFGroup{ - { - Selectors: []netmap.Select{ - {Key: "Country", Count: 1}, - }, - Filters: []netmap.Filter{ - {Key: "Country", F: netmap.FilterIn("Germany", "Spain")}, - }, - }, - }, - }) - - cid3, err := cnrStorage.Put(cnr3) - require.NoError(t, err) - - t.Run("Should fail on empty container", func(t *testing.T) { - _, err = p.Query(ctx, ContainerID(*cid2)) - require.EqualError(t, errors.Cause(err), ErrEmptyContainer.Error()) - }) - - t.Run("Should fail on Nodes Bucket is omitted in container", func(t *testing.T) { - _, err = p.Query(ctx, ContainerID(*cid3)) - require.EqualError(t, errors.Cause(err), ErrNodesBucketOmitted.Error()) - }) - - t.Run("Should fail on unknown container (dht error)", func(t *testing.T) { - _, err = p.Query(ctx, ContainerID(refs.CID{5})) - require.Error(t, err) - }) - - g, err := p.Query(ctx, ContainerID(*cid1)) - require.NoError(t, err) - - t.Run("Should return error on empty items", func(t *testing.T) { - _, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { - return &netmap.Bucket{} - }).NodeList() - require.EqualError(t, err, 
ErrEmptyNodes.Error()) - }) - - t.Run("Should ignore some nodes", func(t *testing.T) { - g1, err := p.Query(ctx, ContainerID(*cid1)) - require.NoError(t, err) - - expect, err := g1. - Filter(filter). - NodeList() - require.NoError(t, err) - - g2, err := p.Query(ctx, ContainerID(*cid1)) - require.NoError(t, err) - - actual, err := g2. - Filter(filter). - NodeList() - require.NoError(t, err) - - require.Equal(t, expect, actual) - - g3, err := p.Query(ctx, ContainerID(*cid1)) - require.NoError(t, err) - - actual, err = g3. - Exclude(expect). - Filter(filter). - NodeList() - require.NoError(t, err) - - for _, item := range expect { - require.NotContains(t, actual, item) - } - - g4, err := p.Query(ctx, - ContainerID(*cid1), - ExcludeNodes(expect)) - require.NoError(t, err) - - actual, err = g4. - Filter(filter). - NodeList() - require.NoError(t, err) - - for _, item := range expect { - require.NotContains(t, actual, item) - } - }) - - t.Run("Should return error on nil Buckets", func(t *testing.T) { - _, err = g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { - return nil - }).NodeList() - require.EqualError(t, err, ErrEmptyNodes.Error()) - }) - - t.Run("Should return error on empty NodeInfo's", func(t *testing.T) { - cp := g.Filter(func(netmap.SFGroup, *netmap.Bucket) *netmap.Bucket { - return nil - }) - - cp.(*graph).items = nil - - _, err := cp.NodeList() - require.EqualError(t, err, ErrEmptyNodes.Error()) - }) - - t.Run("Should return error on unknown items", func(t *testing.T) { - cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket { - return b - }) - - cp.(*graph).items = cp.(*graph).items[:5] - - _, err := cp.NodeList() - require.Error(t, err) - }) - - t.Run("Should return error on bad items", func(t *testing.T) { - cp := g.Filter(func(_ netmap.SFGroup, b *netmap.Bucket) *netmap.Bucket { - return b - }) - - for i := range cp.(*graph).items { - cp.(*graph).items[i].SetAddress("BadAddress") - } - - _, err := cp.NodeList() - require.EqualError(t, errors.Cause(err), "failed to parse multiaddr \"BadAddress\": must begin with /") - }) - - list, err := g. - Filter(filter). - // must return same graph on empty filter - Filter(nil). - NodeList() - require.NoError(t, err) - - // 1 Country, 2 Cities, 1 Node = 2 Nodes - require.Len(t, list, 2) - for _, item := range list { - id := idFromAddress(t, item) - require.Contains(t, ids, id[:4]) // exclude our postfix (0-4) - } -} - -func TestContainerGraph(t *testing.T) { - t.Run("selectors index out-of-range", func(t *testing.T) { - rule := new(netmap.PlacementRule) - - rule.SFGroups = append(rule.SFGroups, netmap.SFGroup{}) - - require.NotPanics(t, func() { - _, _ = ContainerGraph( - netmapcore.New(), - rule, - nil, - refs.CID{}, - ) - }) - }) -} diff --git a/pkg/services/object_manager/placement/store.go b/pkg/services/object_manager/placement/store.go deleted file mode 100644 index f991aca7c..000000000 --- a/pkg/services/object_manager/placement/store.go +++ /dev/null @@ -1,66 +0,0 @@ -package placement - -import ( - "sync" - - "github.com/nspcc-dev/neofs-node/pkg/core/netmap" -) - -type ( - // NetMap is a type alias of - // NetMap from netmap package. 
- NetMap = netmap.NetMap - - netMapStore struct { - *sync.RWMutex - items map[uint64]*NetMap - - curEpoch uint64 - } -) - -func newNetMapStore() *netMapStore { - return &netMapStore{ - RWMutex: new(sync.RWMutex), - items: make(map[uint64]*NetMap), - } -} - -func (s *netMapStore) put(epoch uint64, nm *NetMap) { - s.Lock() - s.items[epoch] = nm - s.curEpoch = epoch - s.Unlock() -} - -func (s *netMapStore) get(epoch uint64) *NetMap { - s.RLock() - nm := s.items[epoch] - s.RUnlock() - - return nm -} - -// trim cleans all network states elder than epoch. -func (s *netMapStore) trim(epoch uint64) { - s.Lock() - m := make(map[uint64]struct{}, len(s.items)) - - for e := range s.items { - if e < epoch { - m[e] = struct{}{} - } - } - - for e := range m { - delete(s.items, e) - } - s.Unlock() -} - -func (s *netMapStore) epoch() uint64 { - s.RLock() - defer s.RUnlock() - - return s.curEpoch -} diff --git a/pkg/services/object_manager/placement/wrapper.go b/pkg/services/object_manager/placement/wrapper.go deleted file mode 100644 index 2ca967aef..000000000 --- a/pkg/services/object_manager/placement/wrapper.go +++ /dev/null @@ -1,127 +0,0 @@ -package placement - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - netmapcore "github.com/nspcc-dev/neofs-node/pkg/core/netmap" - "github.com/nspcc-dev/netmap" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -/* - File source code includes implementations of placement-related solutions. - Highly specialized interfaces give the opportunity to hide placement implementation in a black box for the reasons: - * placement is implementation-tied entity working with graphs, filters, etc.; - * NeoFS components are mostly needed in a small part of the solutions provided by placement; - * direct dependency from placement avoidance helps other components do not touch crucial changes in placement. -*/ - -type ( - // CID is a type alias of - // CID from refs package of neofs-api-go. - CID = refs.CID - - // SGID is a type alias of - // SGID from refs package of neofs-api-go. - SGID = refs.SGID - - // ObjectID is a type alias of - // ObjectID from refs package of neofs-api-go. - ObjectID = refs.ObjectID - - // Object is a type alias of - // Object from object package of neofs-api-go. - Object = object.Object - - // Address is a type alias of - // Address from refs package of neofs-api-go. - Address = refs.Address - - PlacementWrapper struct { - pl Component - } -) - -var errEmptyPlacement = errors.New("could not create storage lister: empty placement component") - -// NewObjectPlacer wraps Component and returns ObjectPlacer interface. 
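Before the wrapper constructor below, the epoch-keyed netMapStore above is worth a standalone illustration: it keeps one network map per epoch, remembers the latest epoch, and trim drops everything older than the epoch passed in. A minimal sketch of the same idea, assuming a simplified NetMap placeholder rather than the real netmap package type:

package main

import (
	"fmt"
	"sync"
)

// NetMap is a simplified placeholder for the network map type.
type NetMap struct{ Nodes []string }

// epochStore keeps one network map per epoch, like netMapStore above.
type epochStore struct {
	mu       sync.RWMutex
	items    map[uint64]*NetMap
	curEpoch uint64
}

func newEpochStore() *epochStore {
	return &epochStore{items: make(map[uint64]*NetMap)}
}

// put saves the map for the given epoch and remembers it as the current one.
func (s *epochStore) put(epoch uint64, nm *NetMap) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items[epoch] = nm
	s.curEpoch = epoch
}

// trim drops every map stored for an epoch older than the given one.
func (s *epochStore) trim(epoch uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for e := range s.items {
		if e < epoch {
			delete(s.items, e)
		}
	}
}

func main() {
	s := newEpochStore()
	s.put(1, &NetMap{Nodes: []string{"a"}})
	s.put(2, &NetMap{Nodes: []string{"a", "b"}})
	s.trim(2)
	fmt.Println(s.curEpoch, len(s.items)) // 2 1: only the epoch 2 map survives
}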
-func NewObjectPlacer(pl Component) (*PlacementWrapper, error) { - if pl == nil { - return nil, errEmptyPlacement - } - - return &PlacementWrapper{pl}, nil -} - -func (v PlacementWrapper) ContainerNodes(ctx context.Context, cid CID) ([]multiaddr.Multiaddr, error) { - graph, err := v.pl.Query(ctx, ContainerID(cid)) - if err != nil { - return nil, errors.Wrap(err, "objectPlacer.ContainerNodes failed on graph query") - } - - return graph.NodeList() -} - -func (v PlacementWrapper) ContainerNodesInfo(ctx context.Context, cid CID, prev int) ([]netmapcore.Info, error) { - graph, err := v.pl.Query(ctx, ContainerID(cid), UsePreviousNetmap(prev)) - if err != nil { - return nil, errors.Wrap(err, "objectPlacer.ContainerNodesInfo failed on graph query") - } - - return graph.NodeInfo() -} - -func (v PlacementWrapper) GetNodes(ctx context.Context, addr Address, usePreviousNetMap bool, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - queryOptions := make([]QueryOption, 1, 2) - queryOptions[0] = ContainerID(addr.CID) - - if usePreviousNetMap { - queryOptions = append(queryOptions, UsePreviousNetmap(1)) - } - - graph, err := v.pl.Query(ctx, queryOptions...) - if err != nil { - if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.NotFound { - return nil, container.ErrNotFound - } - - return nil, errors.Wrap(err, "placer.GetNodes failed on graph query") - } - - filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - return bucket - } - - if !addr.ObjectID.Empty() { - filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) - } - } - - return graph.Exclude(excl).Filter(filter).NodeList() -} - -func (v PlacementWrapper) IsContainerNode(ctx context.Context, addr multiaddr.Multiaddr, cid CID, previousNetMap bool) (bool, error) { - nodes, err := v.GetNodes(ctx, Address{ - CID: cid, - }, previousNetMap) - if err != nil { - return false, errors.Wrap(err, "placer.FromContainer failed on placer.GetNodes") - } - - for i := range nodes { - if nodes[i].Equal(addr) { - return true, nil - } - } - - return false, nil -} - -func (v PlacementWrapper) Epoch() uint64 { return v.pl.NetworkState().Epoch } diff --git a/pkg/services/object_manager/replication/.gitkeep b/pkg/services/object_manager/replication/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/services/object_manager/replication/common.go b/pkg/services/object_manager/replication/common.go deleted file mode 100644 index 7ca8c0a7a..000000000 --- a/pkg/services/object_manager/replication/common.go +++ /dev/null @@ -1,197 +0,0 @@ -package replication - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // CID is a type alias of - // CID from refs package of neofs-api-go. - CID = refs.CID - - // Object is a type alias of - // Object from object package of neofs-api-go. - Object = object.Object - - // OwnerID is a type alias of - // OwnerID from object package of neofs-api-go. - OwnerID = object.OwnerID - - // Address is a type alias of - // Address from refs package of neofs-api-go. - Address = refs.Address - - // ObjectVerificationParams groups the parameters of stored object verification. 
- ObjectVerificationParams struct { - Address - Node multiaddr.Multiaddr - Handler func(valid bool, obj *Object) - LocalInvalid bool - } - - // ObjectVerifier is an interface of stored object verifier. - ObjectVerifier interface { - Verify(ctx context.Context, params *ObjectVerificationParams) bool - } - - // ObjectSource is an interface of the object storage with read access. - ObjectSource interface { - Get(ctx context.Context, addr Address) (*Object, error) - } - - // ObjectStoreParams groups the parameters for object storing. - ObjectStoreParams struct { - *Object - Nodes []ObjectLocation - Handler func(ObjectLocation, bool) - } - - // ObjectReceptacle is an interface of object storage with write access. - ObjectReceptacle interface { - Put(ctx context.Context, params ObjectStoreParams) error - } - - // ObjectCleaner Entity for removing object by address from somewhere - ObjectCleaner interface { - Del(Address) error - } - - // ContainerActualityChecker is an interface of entity - // for checking local node presence in container - // Return true if no errors && local node is in container - ContainerActualityChecker interface { - Actual(ctx context.Context, cid CID) bool - } - - // ObjectPool is a queue of objects selected for data audit. - // It is updated once in epoch. - ObjectPool interface { - Update([]Address) - Pop() (Address, error) - Undone() int - } - - // Scheduler returns slice of addresses for data audit. - // These addresses put into ObjectPool. - Scheduler interface { - SelectForReplication(limit int) ([]Address, error) - } - - // ReservationRatioReceiver is an interface of entity - // for getting reservation ratio value of object by address. - ReservationRatioReceiver interface { - ReservationRatio(ctx context.Context, objAddr Address) (int, error) - } - - // RemoteStorageSelector is an interface of entity - // for getting remote nodes from placement for object by address - // Result doesn't contain nodes from exclude list - RemoteStorageSelector interface { - SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error) - } - - // MultiSolver is an interface that encapsulates other different utilities. - MultiSolver interface { - AddressStore - RemoteStorageSelector - ReservationRatioReceiver - ContainerActualityChecker - EpochReceiver - WeightComparator - } - - // ObjectLocator is an itnerface of entity - // for building list current object remote nodes by address - ObjectLocator interface { - LocateObject(ctx context.Context, objAddr Address) ([]multiaddr.Multiaddr, error) - } - - // WeightComparator is an itnerface of entity - // for comparing weight by address of local node with passed node - // returns -1 if local node is weightier or on error - // returns 0 if weights are equal - // returns 1 if passed node is weightier - WeightComparator interface { - CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int - } - - // EpochReceiver is an interface of entity for getting current epoch number. - EpochReceiver interface { - Epoch() uint64 - } - - // ObjectLocation groups the information about object current remote location. - ObjectLocation struct { - Node multiaddr.Multiaddr - WeightGreater bool // true if Node field value has less index in placement vector than localhost - } - - // ObjectLocationRecord groups the information about all current locations. 
- ObjectLocationRecord struct { - Address - ReservationRatio int - Locations []ObjectLocation - } - - // ReplicateTask groups the information about object replication task. - // Task solver should not process nodes from exclude list, - // Task solver should perform up to Shortage replications. - ReplicateTask struct { - Address - Shortage int - ExcludeNodes []multiaddr.Multiaddr - } - - // ReplicateResult groups the information about object replication task result. - ReplicateResult struct { - *ReplicateTask - NewStorages []multiaddr.Multiaddr - } - - // PresenceChecker is an interface of object storage with presence check access. - PresenceChecker interface { - Has(address Address) (bool, error) - } - - // AddressStore is an interface of local peer's network address storage. - AddressStore interface { - SelfAddr() (multiaddr.Multiaddr, error) - } -) - -const ( - writeResultTimeout = "write result timeout" - - taskChanClosed = " process finish finish: task channel closed" - ctxDoneMsg = " process finish: context done" - - objectPoolPart = "object pool" - loggerPart = "logger" - objectVerifierPart = "object verifier" - objectReceptaclePart = "object receptacle" - remoteStorageSelectorPart = "remote storage elector" - objectSourcePart = "object source" - reservationRatioReceiverPart = "reservation ratio receiver" - objectLocatorPart = "object locator" - epochReceiverPart = "epoch receiver" - presenceCheckerPart = "object presence checker" - weightComparatorPart = "weight comparator" - addrStorePart = "address store" -) - -func instanceError(entity, part string) error { - return errors.Errorf("could not instantiate %s: empty %s", entity, part) -} - -func addressFields(addr Address) []zap.Field { - return []zap.Field{ - zap.Stringer("oid", addr.ObjectID), - zap.Stringer("cid", addr.CID), - } -} diff --git a/pkg/services/object_manager/replication/garbage.go b/pkg/services/object_manager/replication/garbage.go deleted file mode 100644 index e2f7d44b4..000000000 --- a/pkg/services/object_manager/replication/garbage.go +++ /dev/null @@ -1,27 +0,0 @@ -package replication - -import ( - "sync" -) - -type ( - garbageStore struct { - *sync.RWMutex - items []Address - } -) - -func (s *garbageStore) put(addr Address) { - s.Lock() - defer s.Unlock() - - for i := range s.items { - if s.items[i].Equal(&addr) { - return - } - } - - s.items = append(s.items, addr) -} - -func newGarbageStore() *garbageStore { return &garbageStore{RWMutex: new(sync.RWMutex)} } diff --git a/pkg/services/object_manager/replication/implementations.go b/pkg/services/object_manager/replication/implementations.go deleted file mode 100644 index 2a3cf81a5..000000000 --- a/pkg/services/object_manager/replication/implementations.go +++ /dev/null @@ -1,294 +0,0 @@ -package replication - -import ( - "context" - "sync" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "github.com/nspcc-dev/neofs-node/pkg/util/rand" - "github.com/nspcc-dev/netmap" - "github.com/pkg/errors" -) - -type ( - replicationScheduler struct { - cac ContainerActualityChecker - ls localstore.Iterator - } - - // SchedulerParams groups the parameters of scheduler constructor. 
- SchedulerParams struct { - ContainerActualityChecker - localstore.Iterator - } - - objectPool struct { - mu *sync.Mutex - tasks []Address - } - - multiSolver struct { - as AddressStore - pl placement.Component - } - - // MultiSolverParams groups the parameters of multi solver constructor. - MultiSolverParams struct { - AddressStore - Placement placement.Component - } -) - -const ( - objectPoolInstanceFailMsg = "could not create object pool" - - multiSolverInstanceFailMsg = "could not create multi solver" - - replicationSchedulerEntity = "replication scheduler" -) - -var ( - errPoolExhausted = errors.New("object pool is exhausted") - errEmptyLister = errors.New("empty local objects lister") - errEmptyContainerActual = errors.New("empty container actuality checker") - errEmptyAddressStore = errors.New("empty address store") - errEmptyPlacement = errors.New("empty placement") -) - -// NewObjectPool is an object pool constructor. -func NewObjectPool() ObjectPool { - return &objectPool{mu: new(sync.Mutex)} -} - -// NewReplicationScheduler is a replication scheduler constructor. -func NewReplicationScheduler(p SchedulerParams) (Scheduler, error) { - switch { - case p.ContainerActualityChecker == nil: - return nil, errors.Wrap(errEmptyContainerActual, objectPoolInstanceFailMsg) - case p.Iterator == nil: - return nil, errors.Wrap(errEmptyLister, objectPoolInstanceFailMsg) - } - - return &replicationScheduler{ - cac: p.ContainerActualityChecker, - ls: p.Iterator, - }, nil -} - -// NewMultiSolver is a multi solver constructor. -func NewMultiSolver(p MultiSolverParams) (MultiSolver, error) { - switch { - case p.Placement == nil: - return nil, errors.Wrap(errEmptyPlacement, multiSolverInstanceFailMsg) - case p.AddressStore == nil: - return nil, errors.Wrap(errEmptyAddressStore, multiSolverInstanceFailMsg) - } - - return &multiSolver{ - as: p.AddressStore, - pl: p.Placement, - }, nil -} - -func (s *objectPool) Update(pool []Address) { - s.mu.Lock() - defer s.mu.Unlock() - - s.tasks = pool -} - -func (s *objectPool) Undone() int { - s.mu.Lock() - defer s.mu.Unlock() - - return len(s.tasks) -} - -func (s *objectPool) Pop() (Address, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if len(s.tasks) == 0 { - return Address{}, errPoolExhausted - } - - head := s.tasks[0] - s.tasks = s.tasks[1:] - - return head, nil -} - -func (s *replicationScheduler) SelectForReplication(limit int) ([]Address, error) { - // Attention! This routine might be inefficient with big number of objects - // and containers. Consider using fast traversal and filtering algorithms - // with sieve of bloom filters. - migration := make([]Address, 0, limit) - replication := make([]Address, 0) - ctx := context.Background() - - if err := s.ls.Iterate(nil, func(meta *localstore.ObjectMeta) bool { - if s.cac.Actual(ctx, meta.Object.SystemHeader.CID) { - replication = append(replication, *meta.Object.Address()) - } else { - migration = append(migration, *meta.Object.Address()) - } - return len(migration) >= limit - }); err != nil { - return nil, err - } - - lnM := len(migration) - lnR := len(replication) - edge := 0 - - // I considered using rand.Perm() and appending elements in `for` cycle. - // But it seems, that shuffling is efficient even when `limit-lnM` - // is 1000 times smaller than `lnR`. But it can be discussed and changed - // later anyway. 
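	// To make the step below concrete (hypothetical numbers, not from the
	// original code): with limit = 10, lnM = len(migration) = 7 and
	// lnR = len(replication) = 100, the replication slice is shuffled and
	// edge = min(limit-lnM, lnR) = min(3, 100) = 3, so the returned schedule
	// holds the 7 migration addresses plus 3 randomly chosen replication
	// addresses - 10 tasks in total. If lnM >= limit, edge stays 0 and only
	// migration addresses are returned.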
- if lnM < limit { - r := rand.New() - r.Shuffle(lnR, func(i, j int) { - replication[i], replication[j] = replication[j], replication[i] - }) - - edge = min(limit-lnM, lnR) - } - - return append(migration, replication[:edge]...), nil -} - -func (s *multiSolver) Epoch() uint64 { return s.pl.NetworkState().Epoch } - -func (s *multiSolver) SelfAddr() (multiaddr.Multiaddr, error) { return s.as.SelfAddr() } -func (s *multiSolver) ReservationRatio(ctx context.Context, addr Address) (int, error) { - graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID)) - if err != nil { - return 0, errors.Wrap(err, "reservation ratio computation failed on placement query") - } - - nodes, err := graph.Filter(func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) - }).NodeList() - if err != nil { - return 0, errors.Wrap(err, "reservation ratio computation failed on graph node list") - } - - return len(nodes), nil -} - -func (s *multiSolver) SelectRemoteStorages(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]ObjectLocation, error) { - selfAddr, err := s.as.SelfAddr() - if err != nil { - return nil, errors.Wrap(err, "select remote storage nodes failed on get self address") - } - - nodes, err := s.selectNodes(ctx, addr, excl...) - if err != nil { - return nil, errors.Wrap(err, "select remote storage nodes failed on get node list") - } - - var ( - metSelf bool - selfIndex = -1 - res = make([]ObjectLocation, 0, len(nodes)) - ) - - for i := range nodes { - if nodes[i].Equal(selfAddr) { - metSelf = true - selfIndex = i - } - - res = append(res, ObjectLocation{ - Node: nodes[i], - WeightGreater: !metSelf, - }) - } - - if selfIndex != -1 { - res = append(res[:selfIndex], res[selfIndex+1:]...) - } - - return res, nil -} - -func (s *multiSolver) selectNodes(ctx context.Context, addr Address, excl ...multiaddr.Multiaddr) ([]multiaddr.Multiaddr, error) { - graph, err := s.pl.Query(ctx, placement.ContainerID(addr.CID)) - if err != nil { - return nil, errors.Wrap(err, "select remote storage nodes failed on placement query") - } - - filter := func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { return bucket } - if !addr.ObjectID.Empty() { - filter = func(group netmap.SFGroup, bucket *netmap.Bucket) *netmap.Bucket { - return bucket.GetSelection(group.Selectors, addr.ObjectID.Bytes()) - } - } - - return graph.Exclude(excl).Filter(filter).NodeList() -} - -func (s *multiSolver) Actual(ctx context.Context, cid CID) bool { - graph, err := s.pl.Query(ctx, placement.ContainerID(cid)) - if err != nil { - return false - } - - nodes, err := graph.NodeList() - if err != nil { - return false - } - - selfAddr, err := s.as.SelfAddr() - if err != nil { - return false - } - - for i := range nodes { - if nodes[i].Equal(selfAddr) { - return true - } - } - - return false -} - -func (s *multiSolver) CompareWeight(ctx context.Context, addr Address, node multiaddr.Multiaddr) int { - selfAddr, err := s.as.SelfAddr() - if err != nil { - return -1 - } - - if selfAddr.Equal(node) { - return 0 - } - - excl := make([]multiaddr.Multiaddr, 0) - - for { - nodes, err := s.selectNodes(ctx, addr, excl...) 
- if err != nil { - return -1 - } - - for j := range nodes { - if nodes[j].Equal(selfAddr) { - return -1 - } else if nodes[j].Equal(node) { - return 1 - } - } - - excl = append(excl, nodes[0]) // TODO: when it will become relevant to append full nodes slice - } -} - -func min(a, b int) int { - if a < b { - return a - } - - return b -} diff --git a/pkg/services/object_manager/replication/location_detector.go b/pkg/services/object_manager/replication/location_detector.go deleted file mode 100644 index d010e48f5..000000000 --- a/pkg/services/object_manager/replication/location_detector.go +++ /dev/null @@ -1,154 +0,0 @@ -package replication - -import ( - "context" - "time" - - "go.uber.org/zap" -) - -type ( - // ObjectLocationDetector is an interface of entity - // that listens tasks to detect object current locations in network. - ObjectLocationDetector interface { - Process(ctx context.Context) chan<- Address - Subscribe(ch chan<- *ObjectLocationRecord) - } - - objectLocationDetector struct { - weightComparator WeightComparator - objectLocator ObjectLocator - reservationRatioReceiver ReservationRatioReceiver - presenceChecker PresenceChecker - log *zap.Logger - - taskChanCap int - resultTimeout time.Duration - resultChan chan<- *ObjectLocationRecord - } - - // LocationDetectorParams groups the parameters of location detector's constructor. - LocationDetectorParams struct { - WeightComparator - ObjectLocator - ReservationRatioReceiver - PresenceChecker - *zap.Logger - - TaskChanCap int - ResultTimeout time.Duration - } -) - -const ( - defaultLocationDetectorChanCap = 10 - defaultLocationDetectorResultTimeout = time.Second - locationDetectorEntity = "object location detector" -) - -func (s *objectLocationDetector) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch } - -func (s *objectLocationDetector) Process(ctx context.Context) chan<- Address { - ch := make(chan Address, s.taskChanCap) - go s.processRoutine(ctx, ch) - - return ch -} - -func (s *objectLocationDetector) writeResult(locationRecord *ObjectLocationRecord) { - if s.resultChan == nil { - return - } - select { - case s.resultChan <- locationRecord: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *objectLocationDetector) processRoutine(ctx context.Context, taskChan <-chan Address) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(locationDetectorEntity+ctxDoneMsg, zap.Error(ctx.Err())) - break loop - case addr, ok := <-taskChan: - if !ok { - s.log.Warn(locationDetectorEntity + taskChanClosed) - break loop - } else if has, err := s.presenceChecker.Has(addr); err != nil || !has { - continue loop - } - s.handleTask(ctx, addr) - } - } - close(s.resultChan) -} - -func (s *objectLocationDetector) handleTask(ctx context.Context, addr Address) { - var ( - err error - log = s.log.With(addressFields(addr)...) 
- locationRecord = &ObjectLocationRecord{addr, 0, nil} - ) - - if locationRecord.ReservationRatio, err = s.reservationRatioReceiver.ReservationRatio(ctx, addr); err != nil { - log.Error("reservation ratio computation failure", zap.Error(err)) - return - } - - nodes, err := s.objectLocator.LocateObject(ctx, addr) - if err != nil { - log.Error("locate object failure", zap.Error(err)) - return - } - - for i := range nodes { - locationRecord.Locations = append(locationRecord.Locations, ObjectLocation{ - Node: nodes[i], - WeightGreater: s.weightComparator.CompareWeight(ctx, addr, nodes[i]) == 1, - }) - } - - log.Debug("current location record created", - zap.Int("reservation ratio", locationRecord.ReservationRatio), - zap.Any("storage nodes exclude self", locationRecord.Locations)) - - s.writeResult(locationRecord) -} - -// NewLocationDetector is an object location detector's constructor. -func NewLocationDetector(p *LocationDetectorParams) (ObjectLocationDetector, error) { - switch { - case p.PresenceChecker == nil: - return nil, instanceError(locationDetectorEntity, presenceCheckerPart) - case p.ObjectLocator == nil: - return nil, instanceError(locationDetectorEntity, objectLocatorPart) - case p.ReservationRatioReceiver == nil: - return nil, instanceError(locationDetectorEntity, reservationRatioReceiverPart) - case p.Logger == nil: - return nil, instanceError(locationDetectorEntity, loggerPart) - case p.WeightComparator == nil: - return nil, instanceError(locationDetectorEntity, weightComparatorPart) - } - - if p.TaskChanCap <= 0 { - p.TaskChanCap = defaultLocationDetectorChanCap - } - - if p.ResultTimeout <= 0 { - p.ResultTimeout = defaultLocationDetectorResultTimeout - } - - return &objectLocationDetector{ - weightComparator: p.WeightComparator, - objectLocator: p.ObjectLocator, - reservationRatioReceiver: p.ReservationRatioReceiver, - presenceChecker: p.PresenceChecker, - log: p.Logger, - taskChanCap: p.TaskChanCap, - resultTimeout: p.ResultTimeout, - resultChan: nil, - }, nil -} diff --git a/pkg/services/object_manager/replication/manager.go b/pkg/services/object_manager/replication/manager.go deleted file mode 100644 index 57d7d17ae..000000000 --- a/pkg/services/object_manager/replication/manager.go +++ /dev/null @@ -1,347 +0,0 @@ -package replication - -import ( - "context" - "fmt" - "time" - - "go.uber.org/zap" -) - -type ( - // Manager is an interface of object manager, - Manager interface { - Process(ctx context.Context) - HandleEpoch(ctx context.Context, epoch uint64) - } - - manager struct { - objectPool ObjectPool - managerTimeout time.Duration - objectVerifier ObjectVerifier - log *zap.Logger - - locationDetector ObjectLocationDetector - storageValidator StorageValidator - replicator ObjectReplicator - restorer ObjectRestorer - placementHonorer PlacementHonorer - - // internal task channels - detectLocationTaskChan chan<- Address - restoreTaskChan chan<- Address - - pushTaskTimeout time.Duration - - // internal result channels - replicationResultChan <-chan *ReplicateResult - restoreResultChan <-chan Address - - garbageChanCap int - replicateResultChanCap int - restoreResultChanCap int - - garbageChan <-chan Address - garbageStore *garbageStore - - epochCh chan uint64 - scheduler Scheduler - - poolSize int - poolExpansionRate float64 - } - - // ManagerParams groups the parameters of object manager's constructor. 
- ManagerParams struct { - Interval time.Duration - PushTaskTimeout time.Duration - PlacementHonorerEnabled bool - ReplicateTaskChanCap int - RestoreTaskChanCap int - GarbageChanCap int - InitPoolSize int - ExpansionRate float64 - - ObjectPool - ObjectVerifier - - PlacementHonorer - ObjectLocationDetector - StorageValidator - ObjectReplicator - ObjectRestorer - - *zap.Logger - - Scheduler - } -) - -const ( - managerEntity = "replication manager" - - redundantCopiesBeagleName = "BEAGLE_REDUNDANT_COPIES" - - defaultInterval = 3 * time.Second - defaultPushTaskTimeout = time.Second - - defaultGarbageChanCap = 10 - defaultReplicateResultChanCap = 10 - defaultRestoreResultChanCap = 10 -) - -func (s *manager) Name() string { return redundantCopiesBeagleName } - -func (s *manager) HandleEpoch(ctx context.Context, epoch uint64) { - select { - case s.epochCh <- epoch: - case <-ctx.Done(): - return - case <-time.After(s.managerTimeout): - // this timeout must never happen - // if timeout happens in runtime, then something is definitely wrong! - s.log.Warn("replication scheduler is busy") - } -} - -func (s *manager) Process(ctx context.Context) { - // starting object restorer - // bind manager to push restore tasks to restorer - s.restoreTaskChan = s.restorer.Process(ctx) - - // bind manager to listen object restorer results - restoreResultChan := make(chan Address, s.restoreResultChanCap) - s.restoreResultChan = restoreResultChan - s.restorer.Subscribe(restoreResultChan) - - // starting location detector - // bind manager to push locate tasks to location detector - s.detectLocationTaskChan = s.locationDetector.Process(ctx) - - locationsHandlerStartFn := s.storageValidator.Process - if s.placementHonorer != nil { - locationsHandlerStartFn = s.placementHonorer.Process - - // starting storage validator - // bind placement honorer to push validate tasks to storage validator - s.placementHonorer.Subscribe(s.storageValidator.Process(ctx)) - } - - // starting location handler component - // bind location detector to push tasks to location handler component - s.locationDetector.Subscribe(locationsHandlerStartFn(ctx)) - - // bind manager to listen object replicator results - replicateResultChan := make(chan *ReplicateResult, s.replicateResultChanCap) - s.replicationResultChan = replicateResultChan - s.replicator.Subscribe(replicateResultChan) - - // starting replicator - // bind storage validator to push replicate tasks to replicator - s.storageValidator.SubscribeReplication(s.replicator.Process(ctx)) - garbageChan := make(chan Address, s.garbageChanCap) - s.garbageChan = garbageChan - s.storageValidator.SubscribeGarbage(garbageChan) - - go s.taskRoutine(ctx) - go s.resultRoutine(ctx) - s.processRoutine(ctx) -} - -func resultLog(s1, s2 string) string { - return fmt.Sprintf(managerEntity+" %s process finish: %s", s1, s2) -} - -func (s *manager) writeDetectLocationTask(addr Address) { - if s.detectLocationTaskChan == nil { - return - } - select { - case s.detectLocationTaskChan <- addr: - case <-time.After(s.pushTaskTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *manager) writeRestoreTask(addr Address) { - if s.restoreTaskChan == nil { - return - } - select { - case s.restoreTaskChan <- addr: - case <-time.After(s.pushTaskTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *manager) resultRoutine(ctx context.Context) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(resultLog("result", ctxDoneMsg), zap.Error(ctx.Err())) - break loop - case addr, ok := <-s.restoreResultChan: - 
if !ok { - s.log.Warn(resultLog("result", "restorer result channel closed")) - break loop - } - s.log.Info("object successfully restored", addressFields(addr)...) - case res, ok := <-s.replicationResultChan: - if !ok { - s.log.Warn(resultLog("result", "replicator result channel closed")) - break loop - } else if len(res.NewStorages) > 0 { - s.log.Info("object successfully replicated", - append(addressFields(res.Address), zap.Any("new storages", res.NewStorages))...) - } - case addr, ok := <-s.garbageChan: - if !ok { - s.log.Warn(resultLog("result", "garbage channel closed")) - break loop - } - s.garbageStore.put(addr) - } - } -} - -func (s *manager) taskRoutine(ctx context.Context) { -loop: - for { - if task, err := s.objectPool.Pop(); err == nil { - select { - case <-ctx.Done(): - s.log.Warn(resultLog("task", ctxDoneMsg), zap.Error(ctx.Err())) - break loop - default: - s.distributeTask(ctx, task) - } - } else { - // if object pool is empty, check it again after a while - time.Sleep(s.managerTimeout) - } - } - close(s.restoreTaskChan) - close(s.detectLocationTaskChan) -} - -func (s *manager) processRoutine(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case epoch := <-s.epochCh: - var delta int - - // undone - amount of objects we couldn't process in last epoch - undone := s.objectPool.Undone() - if undone > 0 { - // if there are unprocessed objects, then lower your estimation - delta = -undone - } else { - // otherwise try to expand - delta = int(float64(s.poolSize) * s.poolExpansionRate) - } - - tasks, err := s.scheduler.SelectForReplication(s.poolSize + delta) - if err != nil { - s.log.Warn("can't select objects for replication", zap.Error(err)) - } - - // if there are NOT enough objects to fill the pool, do not change it - // otherwise expand or shrink it with the delta value - if len(tasks) >= s.poolSize+delta { - s.poolSize += delta - } - - s.objectPool.Update(tasks) - - s.log.Info("replication schedule updated", - zap.Int("unprocessed_tasks", undone), - zap.Int("next_tasks", len(tasks)), - zap.Int("pool_size", s.poolSize), - zap.Uint64("new_epoch", epoch)) - } - } -} - -// Function takes object from storage by address (if verify -// If verify flag is set object stored incorrectly (Verify returned error) - restore task is planned -// otherwise validate task is planned. -func (s *manager) distributeTask(ctx context.Context, addr Address) { - if !s.objectVerifier.Verify(ctx, &ObjectVerificationParams{Address: addr}) { - s.writeRestoreTask(addr) - return - } - - s.writeDetectLocationTask(addr) -} - -// NewManager is an object manager's constructor. 
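Before the constructor below, the pool resizing in processRoutine above can be shown in isolation: the pool shrinks by the number of unprocessed tasks, otherwise grows by the expansion rate, and the new size is committed only when the scheduler produced enough tasks to fill it. A small sketch of that rule, assuming a hypothetical nextPoolSize helper that is not part of the original code:

package main

import "fmt"

// nextPoolSize applies the sizing rule from processRoutine: shrink by the
// number of unprocessed tasks, otherwise grow by the expansion rate, and
// commit the change only if enough tasks were selected to fill the resized pool.
func nextPoolSize(poolSize, undone, selected int, rate float64) int {
	delta := int(float64(poolSize) * rate)
	if undone > 0 {
		delta = -undone
	}
	if selected >= poolSize+delta {
		return poolSize + delta
	}
	return poolSize
}

func main() {
	fmt.Println(nextPoolSize(100, 0, 200, 0.1))  // 110: nothing left over, expand by 10%
	fmt.Println(nextPoolSize(100, 20, 200, 0.1)) // 80: 20 unprocessed tasks, shrink
	fmt.Println(nextPoolSize(100, 0, 50, 0.1))   // 100: too few tasks selected, keep size
}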
-func NewManager(p ManagerParams) (Manager, error) { - switch { - case p.ObjectPool == nil: - return nil, instanceError(managerEntity, objectPoolPart) - case p.ObjectVerifier == nil: - return nil, instanceError(managerEntity, objectVerifierPart) - case p.Logger == nil: - return nil, instanceError(managerEntity, loggerPart) - case p.ObjectLocationDetector == nil: - return nil, instanceError(managerEntity, locationDetectorEntity) - case p.StorageValidator == nil: - return nil, instanceError(managerEntity, storageValidatorEntity) - case p.ObjectReplicator == nil: - return nil, instanceError(managerEntity, objectReplicatorEntity) - case p.ObjectRestorer == nil: - return nil, instanceError(managerEntity, objectRestorerEntity) - case p.PlacementHonorer == nil && p.PlacementHonorerEnabled: - return nil, instanceError(managerEntity, placementHonorerEntity) - case p.Scheduler == nil: - return nil, instanceError(managerEntity, replicationSchedulerEntity) - } - - if p.Interval <= 0 { - p.Interval = defaultInterval - } - - if p.PushTaskTimeout <= 0 { - p.PushTaskTimeout = defaultPushTaskTimeout - } - - if p.GarbageChanCap <= 0 { - p.GarbageChanCap = defaultGarbageChanCap - } - - if p.ReplicateTaskChanCap <= 0 { - p.ReplicateTaskChanCap = defaultReplicateResultChanCap - } - - if p.RestoreTaskChanCap <= 0 { - p.RestoreTaskChanCap = defaultRestoreResultChanCap - } - - if !p.PlacementHonorerEnabled { - p.PlacementHonorer = nil - } - - return &manager{ - objectPool: p.ObjectPool, - managerTimeout: p.Interval, - objectVerifier: p.ObjectVerifier, - log: p.Logger, - locationDetector: p.ObjectLocationDetector, - storageValidator: p.StorageValidator, - replicator: p.ObjectReplicator, - restorer: p.ObjectRestorer, - placementHonorer: p.PlacementHonorer, - pushTaskTimeout: p.PushTaskTimeout, - garbageChanCap: p.GarbageChanCap, - replicateResultChanCap: p.ReplicateTaskChanCap, - restoreResultChanCap: p.RestoreTaskChanCap, - garbageStore: newGarbageStore(), - epochCh: make(chan uint64), - scheduler: p.Scheduler, - poolSize: p.InitPoolSize, - poolExpansionRate: p.ExpansionRate, - }, nil -} diff --git a/pkg/services/object_manager/replication/object_replicator.go b/pkg/services/object_manager/replication/object_replicator.go deleted file mode 100644 index 37167286a..000000000 --- a/pkg/services/object_manager/replication/object_replicator.go +++ /dev/null @@ -1,188 +0,0 @@ -package replication - -import ( - "context" - "time" - - "github.com/multiformats/go-multiaddr" - "go.uber.org/zap" -) - -type ( - // ObjectReplicator is an interface of entity - // that listens object replication tasks. - // Result includes new object storage list. - ObjectReplicator interface { - Process(ctx context.Context) chan<- *ReplicateTask - Subscribe(ch chan<- *ReplicateResult) - } - - objectReplicator struct { - objectReceptacle ObjectReceptacle - remoteStorageSelector RemoteStorageSelector - objectSource ObjectSource - presenceChecker PresenceChecker - log *zap.Logger - - taskChanCap int - resultTimeout time.Duration - resultChan chan<- *ReplicateResult - } - - // ObjectReplicatorParams groups the parameters of replicator's constructor. 
- ObjectReplicatorParams struct { - RemoteStorageSelector - ObjectSource - ObjectReceptacle - PresenceChecker - *zap.Logger - - TaskChanCap int - ResultTimeout time.Duration - } -) - -const ( - defaultReplicatorChanCap = 10 - defaultReplicatorResultTimeout = time.Second - objectReplicatorEntity = "object replicator" -) - -func (s *objectReplicator) Subscribe(ch chan<- *ReplicateResult) { s.resultChan = ch } - -func (s *objectReplicator) Process(ctx context.Context) chan<- *ReplicateTask { - ch := make(chan *ReplicateTask, s.taskChanCap) - go s.processRoutine(ctx, ch) - - return ch -} - -func (s *objectReplicator) writeResult(replicateResult *ReplicateResult) { - if s.resultChan == nil { - return - } - select { - case s.resultChan <- replicateResult: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *objectReplicator) processRoutine(ctx context.Context, taskChan <-chan *ReplicateTask) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(objectReplicatorEntity+" process finish: context completed", - zap.Error(ctx.Err())) - break loop - case replicateTask, ok := <-taskChan: - if !ok { - s.log.Warn(objectReplicatorEntity + " process finish: task channel closed") - break loop - } else if has, err := s.presenceChecker.Has(replicateTask.Address); err != nil || !has { - continue loop - } - s.handleTask(ctx, replicateTask) - } - } - close(s.resultChan) -} - -func (s *objectReplicator) handleTask(ctx context.Context, task *ReplicateTask) { - obj, err := s.objectSource.Get(ctx, task.Address) - if err != nil { - s.log.Warn("get object from storage failure", zap.Error(err)) - return - } - - res := &ReplicateResult{ - ReplicateTask: task, - NewStorages: make([]multiaddr.Multiaddr, 0, task.Shortage), - } - - for len(res.NewStorages) < task.Shortage { - nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, task.Address, task.ExcludeNodes...) - if err != nil { - break - } - - for i := 0; i < len(nodesInfo); i++ { - if contains(res.NewStorages, nodesInfo[i].Node) { - nodesInfo = append(nodesInfo[:i], nodesInfo[i+1:]...) - i-- - - continue - } - } - - if len(nodesInfo) > task.Shortage { - nodesInfo = nodesInfo[:task.Shortage] - } - - if len(nodesInfo) == 0 { - break - } - - if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{ - Object: obj, - Nodes: nodesInfo, - Handler: func(location ObjectLocation, success bool) { - if success { - res.NewStorages = append(res.NewStorages, location.Node) - } else { - task.ExcludeNodes = append(task.ExcludeNodes, location.Node) - } - }, - }); err != nil { - s.log.Warn("replicate object failure", zap.Error(err)) - break - } - } - - s.writeResult(res) -} - -func contains(list []multiaddr.Multiaddr, item multiaddr.Multiaddr) bool { - for i := range list { - if list[i].Equal(item) { - return true - } - } - - return false -} - -// NewReplicator is an object replicator's constructor. 
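Ahead of the constructor below, the shortage-filling loop in handleTask above can be illustrated on its own. The sketch simplifies in two ways: plain strings stand in for multiaddrs, and a single exclusion set covers both nodes that already stored the object and nodes where the put failed (the original keeps these apart as NewStorages and ExcludeNodes). The names selectCandidates and fillShortage are illustrative only:

package main

import "fmt"

// selectCandidates stands in for RemoteStorageSelector.SelectRemoteStorages:
// it returns the known nodes that are not yet excluded.
func selectCandidates(exclude map[string]bool) []string {
	all := []string{"node-a", "node-b", "node-c", "node-d"}
	var out []string
	for _, n := range all {
		if !exclude[n] {
			out = append(out, n)
		}
	}
	return out
}

// fillShortage mirrors the replication loop: keep selecting candidates,
// cap each round to the remaining shortage, and record only successful puts.
func fillShortage(shortage int, put func(string) bool) []string {
	excluded := map[string]bool{}
	var stored []string
	for len(stored) < shortage {
		candidates := selectCandidates(excluded)
		if len(candidates) == 0 {
			break // nowhere left to replicate to
		}
		if rest := shortage - len(stored); len(candidates) > rest {
			candidates = candidates[:rest]
		}
		for _, n := range candidates {
			if put(n) {
				stored = append(stored, n)
			}
			excluded[n] = true // never offer the same node twice
		}
	}
	return stored
}

func main() {
	// node-a refuses the object; the shortage of 2 is covered by node-b and node-c.
	stored := fillShortage(2, func(n string) bool { return n != "node-a" })
	fmt.Println(stored) // [node-b node-c]
}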
-func NewReplicator(p ObjectReplicatorParams) (ObjectReplicator, error) { - switch { - case p.ObjectReceptacle == nil: - return nil, instanceError(objectReplicatorEntity, objectReceptaclePart) - case p.ObjectSource == nil: - return nil, instanceError(objectReplicatorEntity, objectSourcePart) - case p.RemoteStorageSelector == nil: - return nil, instanceError(objectReplicatorEntity, remoteStorageSelectorPart) - case p.PresenceChecker == nil: - return nil, instanceError(objectReplicatorEntity, presenceCheckerPart) - case p.Logger == nil: - return nil, instanceError(objectReplicatorEntity, loggerPart) - } - - if p.TaskChanCap <= 0 { - p.TaskChanCap = defaultReplicatorChanCap - } - - if p.ResultTimeout <= 0 { - p.ResultTimeout = defaultReplicatorResultTimeout - } - - return &objectReplicator{ - objectReceptacle: p.ObjectReceptacle, - remoteStorageSelector: p.RemoteStorageSelector, - objectSource: p.ObjectSource, - presenceChecker: p.PresenceChecker, - log: p.Logger, - taskChanCap: p.TaskChanCap, - resultTimeout: p.ResultTimeout, - }, nil -} diff --git a/pkg/services/object_manager/replication/object_restorer.go b/pkg/services/object_manager/replication/object_restorer.go deleted file mode 100644 index d64dbdb2f..000000000 --- a/pkg/services/object_manager/replication/object_restorer.go +++ /dev/null @@ -1,173 +0,0 @@ -package replication - -import ( - "context" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "go.uber.org/zap" -) - -type ( - // ObjectRestorer is an interface of entity - // that listen tasks to restore object by address. - // Restorer doesn't recheck if object is actually corrupted. - // Restorer writes result to subscriber only if restoration was successful. - ObjectRestorer interface { - Process(ctx context.Context) chan<- Address - Subscribe(ch chan<- Address) - } - - objectRestorer struct { - objectVerifier ObjectVerifier - remoteStorageSelector RemoteStorageSelector - objectReceptacle ObjectReceptacle - epochReceiver EpochReceiver - presenceChecker PresenceChecker - log *zap.Logger - - taskChanCap int - resultTimeout time.Duration - resultChan chan<- Address - } - - // ObjectRestorerParams groups the parameters of object restorer's constructor. 
- ObjectRestorerParams struct { - ObjectVerifier - ObjectReceptacle - EpochReceiver - RemoteStorageSelector - PresenceChecker - *zap.Logger - - TaskChanCap int - ResultTimeout time.Duration - } -) - -const ( - defaultRestorerChanCap = 10 - defaultRestorerResultTimeout = time.Second - objectRestorerEntity = "object restorer" -) - -func (s *objectRestorer) Subscribe(ch chan<- Address) { s.resultChan = ch } - -func (s *objectRestorer) Process(ctx context.Context) chan<- Address { - ch := make(chan Address, s.taskChanCap) - go s.processRoutine(ctx, ch) - - return ch -} - -func (s *objectRestorer) writeResult(refInfo Address) { - if s.resultChan == nil { - return - } - select { - case s.resultChan <- refInfo: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *objectRestorer) processRoutine(ctx context.Context, taskChan <-chan Address) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(objectRestorerEntity+ctxDoneMsg, zap.Error(ctx.Err())) - break loop - case addr, ok := <-taskChan: - if !ok { - s.log.Warn(objectRestorerEntity + taskChanClosed) - break loop - } else if has, err := s.presenceChecker.Has(addr); err != nil || !has { - continue loop - } - s.handleTask(ctx, addr) - } - } - close(s.resultChan) -} - -func (s *objectRestorer) handleTask(ctx context.Context, addr Address) { - var ( - receivedObj *Object - exclNodes = make([]multiaddr.Multiaddr, 0) - ) - -loop: - for { - nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, addr, exclNodes...) - if err != nil { - break - } - - for i := range nodesInfo { - info := nodesInfo[i] - if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{ - Address: addr, - Node: nodesInfo[i].Node, - Handler: func(valid bool, obj *Object) { - if valid { - receivedObj = obj - } else { - exclNodes = append(exclNodes, info.Node) - } - }, - LocalInvalid: true, - }) { - break loop - } - } - } - - if err := s.objectReceptacle.Put( - context.WithValue(ctx, localstore.StoreEpochValue, s.epochReceiver.Epoch()), - ObjectStoreParams{Object: receivedObj}, - ); err != nil { - s.log.Warn("put object to local storage failure", append(addressFields(addr), zap.Error(err))...) - return - } - - s.writeResult(addr) -} - -// NewObjectRestorer is an object restorer's constructor. 
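The restorer follows the same pipeline shape as the location detector and replicator above: Subscribe registers a result channel, Process returns a buffered task channel, and a routine drains tasks until the context is cancelled or the channel closes, dropping results the subscriber is too slow to take. A generic sketch of that shape with a plain string task type instead of Address (stage is an illustrative name, not a type from the original code):

package main

import (
	"context"
	"fmt"
	"time"
)

type stage struct {
	results chan<- string
	timeout time.Duration
}

// Subscribe registers the channel the stage writes its results to.
func (s *stage) Subscribe(ch chan<- string) { s.results = ch }

// Process returns the task channel and starts the worker routine.
func (s *stage) Process(ctx context.Context) chan<- string {
	tasks := make(chan string, 10)
	go func() {
		defer close(s.results)
		for {
			select {
			case <-ctx.Done():
				return
			case task, ok := <-tasks:
				if !ok {
					return
				}
				// "handleTask": here it simply forwards the task as a result,
				// dropping it if the subscriber does not read in time.
				select {
				case s.results <- task:
				case <-time.After(s.timeout):
				}
			}
		}
	}()
	return tasks
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	s := &stage{timeout: time.Second}
	out := make(chan string, 1)
	s.Subscribe(out)

	in := s.Process(ctx)
	in <- "restore 42"
	fmt.Println(<-out) // restore 42
}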
-func NewObjectRestorer(p *ObjectRestorerParams) (ObjectRestorer, error) { - switch { - case p.Logger == nil: - return nil, instanceError(objectRestorerEntity, loggerPart) - case p.ObjectVerifier == nil: - return nil, instanceError(objectRestorerEntity, objectVerifierPart) - case p.ObjectReceptacle == nil: - return nil, instanceError(objectRestorerEntity, objectReceptaclePart) - case p.RemoteStorageSelector == nil: - return nil, instanceError(objectRestorerEntity, remoteStorageSelectorPart) - case p.EpochReceiver == nil: - return nil, instanceError(objectRestorerEntity, epochReceiverPart) - case p.PresenceChecker == nil: - return nil, instanceError(objectRestorerEntity, presenceCheckerPart) - } - - if p.TaskChanCap <= 0 { - p.TaskChanCap = defaultRestorerChanCap - } - - if p.ResultTimeout <= 0 { - p.ResultTimeout = defaultRestorerResultTimeout - } - - return &objectRestorer{ - objectVerifier: p.ObjectVerifier, - remoteStorageSelector: p.RemoteStorageSelector, - objectReceptacle: p.ObjectReceptacle, - epochReceiver: p.EpochReceiver, - presenceChecker: p.PresenceChecker, - log: p.Logger, - taskChanCap: p.TaskChanCap, - resultTimeout: p.ResultTimeout, - }, nil -} diff --git a/pkg/services/object_manager/replication/placement_honorer.go b/pkg/services/object_manager/replication/placement_honorer.go deleted file mode 100644 index 9a5ac3ccd..000000000 --- a/pkg/services/object_manager/replication/placement_honorer.go +++ /dev/null @@ -1,198 +0,0 @@ -package replication - -import ( - "context" - "time" - - "github.com/multiformats/go-multiaddr" - "go.uber.org/zap" -) - -type ( - // PlacementHonorer is an interface of entity - // that listens tasks to piece out placement rule of container for particular object. - PlacementHonorer interface { - Process(ctx context.Context) chan<- *ObjectLocationRecord - Subscribe(ch chan<- *ObjectLocationRecord) - } - - placementHonorer struct { - objectSource ObjectSource - objectReceptacle ObjectReceptacle - remoteStorageSelector RemoteStorageSelector - presenceChecker PresenceChecker - log *zap.Logger - - taskChanCap int - resultTimeout time.Duration - resultChan chan<- *ObjectLocationRecord - } - - // PlacementHonorerParams groups the parameters of placement honorer's constructor. 
- PlacementHonorerParams struct { - ObjectSource - ObjectReceptacle - RemoteStorageSelector - PresenceChecker - *zap.Logger - - TaskChanCap int - ResultTimeout time.Duration - } -) - -const ( - defaultPlacementHonorerChanCap = 10 - defaultPlacementHonorerResultTimeout = time.Second - placementHonorerEntity = "placement honorer" -) - -func (s *placementHonorer) Subscribe(ch chan<- *ObjectLocationRecord) { s.resultChan = ch } - -func (s *placementHonorer) Process(ctx context.Context) chan<- *ObjectLocationRecord { - ch := make(chan *ObjectLocationRecord, s.taskChanCap) - go s.processRoutine(ctx, ch) - - return ch -} - -func (s *placementHonorer) writeResult(locationRecord *ObjectLocationRecord) { - if s.resultChan == nil { - return - } - select { - case s.resultChan <- locationRecord: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *placementHonorer) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(placementHonorerEntity+ctxDoneMsg, zap.Error(ctx.Err())) - break loop - case locationRecord, ok := <-taskChan: - if !ok { - s.log.Warn(placementHonorerEntity + taskChanClosed) - break loop - } else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has { - continue loop - } - s.handleTask(ctx, locationRecord) - } - } - close(s.resultChan) -} - -func (s *placementHonorer) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) { - defer s.writeResult(locationRecord) - - var ( - err error - log = s.log.With(addressFields(locationRecord.Address)...) - copiesShortage = locationRecord.ReservationRatio - 1 - exclNodes = make([]multiaddr.Multiaddr, 0) - procLocations []ObjectLocation - ) - - obj, err := s.objectSource.Get(ctx, locationRecord.Address) - if err != nil { - log.Warn("get object failure", zap.Error(err)) - return - } - - tombstone := obj.IsTombstone() - - for copiesShortage > 0 { - nodesInfo, err := s.remoteStorageSelector.SelectRemoteStorages(ctx, locationRecord.Address, exclNodes...) - if err != nil { - log.Warn("select remote storage nodes failure", - zap.Stringer("object", locationRecord.Address), - zap.Any("exclude nodes", exclNodes), - zap.String("error", err.Error()), - ) - - return - } - - if !tombstone { - procLocations = make([]ObjectLocation, 0, len(nodesInfo)) - loop: - for i := range nodesInfo { - for j := range locationRecord.Locations { - if locationRecord.Locations[j].Node.Equal(nodesInfo[i].Node) { - copiesShortage-- - continue loop - } - } - procLocations = append(procLocations, nodesInfo[i]) - } - - if len(procLocations) == 0 { - return - } - } else { - procLocations = nodesInfo - } - - if err := s.objectReceptacle.Put(ctx, ObjectStoreParams{ - Object: obj, - Nodes: procLocations, - Handler: func(loc ObjectLocation, success bool) { - if success { - copiesShortage-- - if tombstone { - for i := range locationRecord.Locations { - if locationRecord.Locations[i].Node.Equal(loc.Node) { - return - } - } - } - locationRecord.Locations = append(locationRecord.Locations, loc) - } else { - exclNodes = append(exclNodes, loc.Node) - } - }, - }); err != nil { - s.log.Warn("put object to new nodes failure", zap.Error(err)) - return - } - } -} - -// NewPlacementHonorer is a placement honorer's constructor. 
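The constructor below repeats the pattern used throughout this package: reject missing dependencies with an instanceError-style message, then substitute defaults for non-positive channel capacities and timeouts. A compact sketch of that pattern, using hypothetical Worker/WorkerParams stand-ins rather than any type from the original code:

package main

import (
	"fmt"
	"time"
)

type WorkerParams struct {
	Log         func(string) // stand-in for *zap.Logger
	TaskChanCap int
	Timeout     time.Duration
}

type Worker struct {
	log         func(string)
	taskChanCap int
	timeout     time.Duration
}

// NewWorker validates required dependencies and applies defaults,
// mirroring the constructors in this package.
func NewWorker(p WorkerParams) (*Worker, error) {
	if p.Log == nil {
		return nil, fmt.Errorf("could not instantiate worker: empty logger")
	}
	if p.TaskChanCap <= 0 {
		p.TaskChanCap = 10
	}
	if p.Timeout <= 0 {
		p.Timeout = time.Second
	}
	return &Worker{log: p.Log, taskChanCap: p.TaskChanCap, timeout: p.Timeout}, nil
}

func main() {
	w, err := NewWorker(WorkerParams{Log: func(m string) { fmt.Println(m) }})
	fmt.Println(err == nil, w.taskChanCap, w.timeout) // true 10 1s
}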
-func NewPlacementHonorer(p PlacementHonorerParams) (PlacementHonorer, error) { - switch { - case p.RemoteStorageSelector == nil: - return nil, instanceError(placementHonorerEntity, remoteStorageSelectorPart) - case p.ObjectSource == nil: - return nil, instanceError(placementHonorerEntity, objectSourcePart) - case p.ObjectReceptacle == nil: - return nil, instanceError(placementHonorerEntity, objectReceptaclePart) - case p.Logger == nil: - return nil, instanceError(placementHonorerEntity, loggerPart) - case p.PresenceChecker == nil: - return nil, instanceError(placementHonorerEntity, presenceCheckerPart) - } - - if p.TaskChanCap <= 0 { - p.TaskChanCap = defaultPlacementHonorerChanCap - } - - if p.ResultTimeout <= 0 { - p.ResultTimeout = defaultPlacementHonorerResultTimeout - } - - return &placementHonorer{ - objectSource: p.ObjectSource, - objectReceptacle: p.ObjectReceptacle, - remoteStorageSelector: p.RemoteStorageSelector, - presenceChecker: p.PresenceChecker, - log: p.Logger, - taskChanCap: p.TaskChanCap, - resultTimeout: p.ResultTimeout, - }, nil -} diff --git a/pkg/services/object_manager/replication/storage/locator.go b/pkg/services/object_manager/replication/storage/locator.go deleted file mode 100644 index 7d8fd08d6..000000000 --- a/pkg/services/object_manager/replication/storage/locator.go +++ /dev/null @@ -1,80 +0,0 @@ -package storage - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - locator struct { - executor transport.SelectiveContainerExecutor - log *zap.Logger - } - - // LocatorParams groups the parameters of ObjectLocator constructor. - LocatorParams struct { - SelectiveContainerExecutor transport.SelectiveContainerExecutor - Logger *zap.Logger - } -) - -const locatorInstanceFailMsg = "could not create object locator" - -var errEmptyObjectsContainerHandler = errors.New("empty container objects container handler") - -func (s *locator) LocateObject(ctx context.Context, addr refs.Address) (res []multiaddr.Multiaddr, err error) { - queryBytes, err := (&query.Query{ - Filters: []query.Filter{ - { - Type: query.Filter_Exact, - Name: transport.KeyID, - Value: addr.ObjectID.String(), - }, - }, - }).Marshal() - if err != nil { - return nil, errors.Wrap(err, "locate object failed on query marshal") - } - - err = s.executor.Search(ctx, &transport.SearchParams{ - SelectiveParams: transport.SelectiveParams{ - CID: addr.CID, - TTL: service.NonForwardingTTL, - IDList: make([]object.ID, 1), - }, - SearchCID: addr.CID, - SearchQuery: queryBytes, - Handler: func(node multiaddr.Multiaddr, addrList []refs.Address) { - if len(addrList) > 0 { - res = append(res, node) - } - }, - }) - - return -} - -// NewObjectLocator constructs replication.ObjectLocator from SelectiveContainerExecutor. 
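LocateObject above issues a search for the object ID across the container and records every node whose response is non-empty. A simplified standalone sketch of that collection step, where searchNode is a hypothetical stand-in for the per-node search performed by the transport executor:

package main

import "fmt"

// searchNode stands in for the per-node search RPC: it returns the
// addresses matching the query that a given node reported.
func searchNode(node, objectID string, index map[string][]string) []string {
	var found []string
	for _, id := range index[node] {
		if id == objectID {
			found = append(found, id)
		}
	}
	return found
}

// locateObject collects every node that reported at least one match,
// mirroring the handler passed to the search call above.
func locateObject(nodes []string, objectID string, index map[string][]string) []string {
	var res []string
	for _, n := range nodes {
		if len(searchNode(n, objectID, index)) > 0 {
			res = append(res, n)
		}
	}
	return res
}

func main() {
	index := map[string][]string{
		"node-a": {"obj-1", "obj-2"},
		"node-b": {"obj-3"},
		"node-c": {"obj-1"},
	}
	fmt.Println(locateObject([]string{"node-a", "node-b", "node-c"}, "obj-1", index))
	// [node-a node-c]
}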
-func NewObjectLocator(p LocatorParams) (replication.ObjectLocator, error) { - switch { - case p.SelectiveContainerExecutor == nil: - return nil, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg) - case p.Logger == nil: - return nil, errors.Wrap(logger.ErrNilLogger, locatorInstanceFailMsg) - } - - return &locator{ - executor: p.SelectiveContainerExecutor, - log: p.Logger, - }, nil -} diff --git a/pkg/services/object_manager/replication/storage/locator_test.go b/pkg/services/object_manager/replication/storage/locator_test.go deleted file mode 100644 index 9b8a5fa5b..000000000 --- a/pkg/services/object_manager/replication/storage/locator_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package storage - -import ( - "testing" - - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type testExecutor struct { - transport.SelectiveContainerExecutor -} - -func TestNewObjectLocator(t *testing.T) { - validParams := LocatorParams{ - SelectiveContainerExecutor: new(testExecutor), - Logger: zap.L(), - } - - t.Run("valid params", func(t *testing.T) { - s, err := NewObjectLocator(validParams) - require.NoError(t, err) - require.NotNil(t, s) - }) - t.Run("empty logger", func(t *testing.T) { - p := validParams - p.Logger = nil - _, err := NewObjectLocator(p) - require.EqualError(t, err, errors.Wrap(logger.ErrNilLogger, locatorInstanceFailMsg).Error()) - }) - t.Run("empty container handler", func(t *testing.T) { - p := validParams - p.SelectiveContainerExecutor = nil - _, err := NewObjectLocator(p) - require.EqualError(t, err, errors.Wrap(errEmptyObjectsContainerHandler, locatorInstanceFailMsg).Error()) - }) -} diff --git a/pkg/services/object_manager/replication/storage/object.go b/pkg/services/object_manager/replication/storage/object.go deleted file mode 100644 index 122f76d7c..000000000 --- a/pkg/services/object_manager/replication/storage/object.go +++ /dev/null @@ -1,133 +0,0 @@ -package storage - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // ObjectStorage is an interface of encapsulated ObjectReceptacle and ObjectSource pair. - ObjectStorage interface { - replication.ObjectReceptacle - replication.ObjectSource - } - - objectStorage struct { - ls localstore.Localstore - executor transport.SelectiveContainerExecutor - log *zap.Logger - } - - // ObjectStorageParams groups the parameters of ObjectStorage constructor. 
- ObjectStorageParams struct { - Localstore localstore.Localstore - SelectiveContainerExecutor transport.SelectiveContainerExecutor - Logger *zap.Logger - } -) - -const objectSourceInstanceFailMsg = "could not create object source" - -var errNilObject = errors.New("object is nil") - -var errCouldNotGetObject = errors.New("could not get object from any node") - -func (s *objectStorage) Put(ctx context.Context, params replication.ObjectStoreParams) error { - if params.Object == nil { - return errNilObject - } else if len(params.Nodes) == 0 { - if s.ls == nil { - return errEmptyLocalstore - } - return s.ls.Put(ctx, params.Object) - } - - nodes := make([]multiaddr.Multiaddr, len(params.Nodes)) - for i := range params.Nodes { - nodes[i] = params.Nodes[i].Node - } - - return s.executor.Put(ctx, &transport.PutParams{ - SelectiveParams: transport.SelectiveParams{ - CID: params.Object.SystemHeader.CID, - Nodes: nodes, - TTL: service.NonForwardingTTL, - IDList: make([]object.ID, 1), - }, - Object: params.Object, - Handler: func(node multiaddr.Multiaddr, valid bool) { - if params.Handler == nil { - return - } - for i := range params.Nodes { - if params.Nodes[i].Node.Equal(node) { - params.Handler(params.Nodes[i], valid) - return - } - } - }, - }) -} - -func (s *objectStorage) Get(ctx context.Context, addr object.Address) (res *object.Object, err error) { - if s.ls != nil { - if has, err := s.ls.Has(addr); err == nil && has { - if res, err = s.ls.Get(addr); err == nil { - return res, err - } - } - } - - if err = s.executor.Get(ctx, &transport.GetParams{ - SelectiveParams: transport.SelectiveParams{ - CID: addr.CID, - TTL: service.NonForwardingTTL, - IDList: []object.ID{addr.ObjectID}, - Breaker: func(refs.Address) (cFlag transport.ProgressControlFlag) { - if res != nil { - cFlag = transport.BreakProgress - } - return - }, - }, - Handler: func(node multiaddr.Multiaddr, obj *object.Object) { res = obj }, - }); err != nil { - return - } else if res == nil { - return nil, errCouldNotGetObject - } - - return -} - -// NewObjectStorage encapsulates Localstore and SelectiveContainerExecutor -// and returns ObjectStorage interface. -func NewObjectStorage(p ObjectStorageParams) (ObjectStorage, error) { - if p.Logger == nil { - return nil, errors.Wrap(logger.ErrNilLogger, objectSourceInstanceFailMsg) - } - - if p.Localstore == nil { - p.Logger.Warn("local storage not provided") - } - - if p.SelectiveContainerExecutor == nil { - p.Logger.Warn("object container handler not provided") - } - - return &objectStorage{ - ls: p.Localstore, - executor: p.SelectiveContainerExecutor, - log: p.Logger, - }, nil -} diff --git a/pkg/services/object_manager/replication/storage/peerstore.go b/pkg/services/object_manager/replication/storage/peerstore.go deleted file mode 100644 index 6890ed8bd..000000000 --- a/pkg/services/object_manager/replication/storage/peerstore.go +++ /dev/null @@ -1,75 +0,0 @@ -package storage - -import ( - "crypto/ecdsa" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-node/pkg/network/peers" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // AddressStoreComponent is an interface of encapsulated AddressStore and NodePublicKeyReceiver pair. - AddressStoreComponent interface { - AddressStore - NodePublicKeyReceiver - } - - // AddressStore is an interface of the container of local Multiaddr. 
- AddressStore interface { - SelfAddr() (multiaddr.Multiaddr, error) - } - - // NodePublicKeyReceiver is an interface of Multiaddr to PublicKey converter. - NodePublicKeyReceiver interface { - PublicKey(multiaddr.Multiaddr) *ecdsa.PublicKey - } - - addressStore struct { - ps peers.Store - - log *zap.Logger - } -) - -const addressStoreInstanceFailMsg = "could not create address store" - -var ( - errEmptyPeerStore = errors.New("empty peer store") - - errEmptyAddressStore = errors.New("empty address store") -) - -func (s addressStore) SelfAddr() (multiaddr.Multiaddr, error) { return s.ps.GetAddr(s.ps.SelfID()) } - -func (s addressStore) PublicKey(mAddr multiaddr.Multiaddr) (res *ecdsa.PublicKey) { - if peerID, err := s.ps.AddressID(mAddr); err != nil { - s.log.Error("could not peer ID", - zap.Stringer("node", mAddr), - zap.Error(err), - ) - } else if res, err = s.ps.GetPublicKey(peerID); err != nil { - s.log.Error("could not receive public key", - zap.Stringer("peer", peerID), - zap.Error(err), - ) - } - - return res -} - -// NewAddressStore wraps peer store and returns AddressStoreComponent. -func NewAddressStore(ps peers.Store, log *zap.Logger) (AddressStoreComponent, error) { - if ps == nil { - return nil, errors.Wrap(errEmptyPeerStore, addressStoreInstanceFailMsg) - } else if log == nil { - return nil, errors.Wrap(logger.ErrNilLogger, addressStoreInstanceFailMsg) - } - - return &addressStore{ - ps: ps, - log: log, - }, nil -} diff --git a/pkg/services/object_manager/replication/storage/validation.go b/pkg/services/object_manager/replication/storage/validation.go deleted file mode 100644 index 0bd3a4a4f..000000000 --- a/pkg/services/object_manager/replication/storage/validation.go +++ /dev/null @@ -1,396 +0,0 @@ -package storage - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/sha256" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - "github.com/nspcc-dev/neofs-node/pkg/services/id" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/nspcc-dev/neofs-node/pkg/util/rand" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - objectValidator struct { - as AddressStore - ls localstore.Localstore - executor transport.SelectiveContainerExecutor - log *zap.Logger - - saltSize int - maxRngSize uint64 - rangeCount int - sltr Salitor - verifier verifier.Verifier - } - - // Salitor is a salting data function. 
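// For illustration, a minimal salitor matching this signature could simply XOR
// the payload with a repeating salt (a sketch assuming a non-empty salt, not
// necessarily the salting algorithm the node actually configures):
//
//	func xorSalitor(data, salt []byte) []byte {
//		res := make([]byte, len(data))
//		for i := range data {
//			res[i] = data[i] ^ salt[i%len(salt)]
//		}
//		return res
//	}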
- Salitor func(data, salt []byte) []byte - - // ObjectValidatorParams groups th - ObjectValidatorParams struct { - AddressStore AddressStore - Localstore localstore.Localstore - SelectiveContainerExecutor transport.SelectiveContainerExecutor - Logger *zap.Logger - - Salitor Salitor - SaltSize int - MaxPayloadRangeSize uint64 - PayloadRangeCount int - - Verifier verifier.Verifier - } - - localHeadIntegrityVerifier struct { - } - - payloadVerifier struct { - } - - localIntegrityVerifier struct { - headVerifier verifier.Verifier - payloadVerifier verifier.Verifier - } -) - -const ( - objectValidatorInstanceFailMsg = "could not create object validator" - - defaultSaltSize = 64 // bytes - defaultPayloadRangeCount = 3 - defaultMaxPayloadRangeSize = 64 -) - -var ( - errEmptyLocalstore = errors.New("empty local storage") - errEmptyObjectVerifier = errors.New("empty object verifier") -) - -var ( - errBrokenHeaderStructure = errors.New("broken header structure") - - errMissingPayloadChecksumHeader = errors.New("missing payload checksum header") - errWrongPayloadChecksum = errors.New("wrong payload checksum") -) - -func (s *objectValidator) Verify(ctx context.Context, params *replication.ObjectVerificationParams) bool { - selfAddr, err := s.as.SelfAddr() - if err != nil { - s.log.Debug("receive self address failure", zap.Error(err)) - return false - } - - if params.Node == nil || params.Node.Equal(selfAddr) { - return s.verifyLocal(ctx, params.Address) - } - - return s.verifyRemote(ctx, params) -} - -func (s *objectValidator) verifyLocal(ctx context.Context, addr refs.Address) bool { - var ( - err error - obj *object.Object - ) - - if obj, err = s.ls.Get(addr); err != nil { - s.log.Debug("get local meta information failure", zap.Error(err)) - return false - } else if err = s.verifier.Verify(ctx, obj); err != nil { - s.log.Debug("integrity check failure", zap.Error(err)) - } - - return err == nil -} - -func (s *objectValidator) verifyRemote(ctx context.Context, params *replication.ObjectVerificationParams) bool { - var ( - receivedObj *object.Object - valid bool - ) - - defer func() { - if params.Handler != nil && receivedObj != nil { - params.Handler(valid, receivedObj) - } - }() - - p := &transport.HeadParams{ - GetParams: transport.GetParams{ - SelectiveParams: transport.SelectiveParams{ - CID: params.CID, - Nodes: []multiaddr.Multiaddr{params.Node}, - TTL: service.NonForwardingTTL, - IDList: []object.ID{params.ObjectID}, - Raw: true, - }, - Handler: func(_ multiaddr.Multiaddr, obj *object.Object) { - receivedObj = obj - valid = s.verifier.Verify(ctx, obj) == nil - }, - }, - FullHeaders: true, - } - - if err := s.executor.Head(ctx, p); err != nil || !valid { - return false - } else if receivedObj.SystemHeader.PayloadLength <= 0 || receivedObj.IsLinking() { - return true - } - - if !params.LocalInvalid { - has, err := s.ls.Has(params.Address) - if err == nil && has { - obj, err := s.ls.Get(params.Address) - if err == nil { - return s.verifyThroughHashes(ctx, obj, params.Node) - } - } - } - - valid = false - _ = s.executor.Get(ctx, &p.GetParams) - - return valid -} - -func (s *objectValidator) verifyThroughHashes(ctx context.Context, obj *object.Object, node multiaddr.Multiaddr) (valid bool) { - var ( - salt = generateSalt(s.saltSize) - rngs = generateRanges(obj.SystemHeader.PayloadLength, s.maxRngSize, s.rangeCount) - ) - - _ = s.executor.RangeHash(ctx, &transport.RangeHashParams{ - SelectiveParams: transport.SelectiveParams{ - CID: obj.SystemHeader.CID, - Nodes: []multiaddr.Multiaddr{node}, - TTL: 
service.NonForwardingTTL, - IDList: []object.ID{obj.SystemHeader.ID}, - }, - Ranges: rngs, - Salt: salt, - Handler: func(node multiaddr.Multiaddr, hashes []hash.Hash) { - valid = compareHashes(s.sltr, obj.Payload, salt, rngs, hashes) - }, - }) - - return -} - -func compareHashes(sltr Salitor, payload, salt []byte, rngs []object.Range, hashes []hash.Hash) bool { - if len(rngs) != len(hashes) { - return false - } - - for i := range rngs { - saltPayloadPart := sltr(payload[rngs[i].Offset:rngs[i].Offset+rngs[i].Length], salt) - if !hashes[i].Equal(hash.Sum(saltPayloadPart)) { - return false - } - } - - return true -} - -func generateRanges(payloadSize, maxRangeSize uint64, count int) []object.Range { - res := make([]object.Range, count) - - l := min(payloadSize, maxRangeSize) - - for i := 0; i < count; i++ { - res[i].Length = l - res[i].Offset = rand.Uint64(rand.New(), int64(payloadSize-l)) - } - - return res -} - -func min(a, b uint64) uint64 { - if a < b { - return a - } - - return b -} - -func generateSalt(saltSize int) []byte { - salt := make([]byte, saltSize) - if _, err := rand.Read(salt); err != nil { - return nil - } - - return salt -} - -// NewObjectValidator constructs universal replication.ObjectVerifier. -func NewObjectValidator(p *ObjectValidatorParams) (replication.ObjectVerifier, error) { - switch { - case p.Logger == nil: - return nil, errors.Wrap(logger.ErrNilLogger, objectValidatorInstanceFailMsg) - case p.AddressStore == nil: - return nil, errors.Wrap(errEmptyAddressStore, objectValidatorInstanceFailMsg) - case p.Localstore == nil: - return nil, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg) - case p.Verifier == nil: - return nil, errors.Wrap(errEmptyObjectVerifier, objectValidatorInstanceFailMsg) - } - - if p.SaltSize <= 0 { - p.SaltSize = defaultSaltSize - } - - if p.PayloadRangeCount <= 0 { - p.PayloadRangeCount = defaultPayloadRangeCount - } - - if p.MaxPayloadRangeSize <= 0 { - p.MaxPayloadRangeSize = defaultMaxPayloadRangeSize - } - - return &objectValidator{ - as: p.AddressStore, - ls: p.Localstore, - executor: p.SelectiveContainerExecutor, - log: p.Logger, - saltSize: p.SaltSize, - maxRngSize: p.MaxPayloadRangeSize, - rangeCount: p.PayloadRangeCount, - sltr: p.Salitor, - verifier: p.Verifier, - }, nil -} - -// NewLocalHeadIntegrityVerifier constructs local object head verifier and returns objutil.Verifier interface. -func NewLocalHeadIntegrityVerifier() (verifier.Verifier, error) { - return new(localHeadIntegrityVerifier), nil -} - -// NewLocalIntegrityVerifier constructs local object verifier and returns objutil.Verifier interface. -func NewLocalIntegrityVerifier() (verifier.Verifier, error) { - return &localIntegrityVerifier{ - headVerifier: new(localHeadIntegrityVerifier), - payloadVerifier: new(payloadVerifier), - }, nil -} - -// NewPayloadVerifier constructs object payload verifier and returns objutil.Verifier. 
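// An illustrative use of the resulting verifier, based on the checks in
// payloadVerifier.Verify below: it returns errMissingPayloadChecksumHeader when
// the object carries no payload checksum header, and errWrongPayloadChecksum
// when the stored SHA-256 differs from the actual payload (ctx and obj are
// assumed to be in scope):
//
//	v := NewPayloadVerifier()
//	if err := v.Verify(ctx, obj); err != nil {
//		// payload integrity cannot be confirmed
//	}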
-func NewPayloadVerifier() verifier.Verifier { - return new(payloadVerifier) -} - -type hdrOwnerKeyContainer struct { - owner refs.OwnerID - key []byte -} - -func (s hdrOwnerKeyContainer) GetOwnerID() refs.OwnerID { - return s.owner -} - -func (s hdrOwnerKeyContainer) GetOwnerKey() []byte { - return s.key -} - -func (s *localHeadIntegrityVerifier) Verify(ctx context.Context, obj *object.Object) error { - var ( - checkKey *ecdsa.PublicKey - ownerKeyCnr id.OwnerKeyContainer - ) - - if _, h := obj.LastHeader(object.HeaderType(object.TokenHdr)); h != nil { - token := h.GetValue().(*object.Header_Token).Token - - if err := service.VerifySignatureWithKey( - crypto.UnmarshalPublicKey(token.GetOwnerKey()), - service.NewVerifiedSessionToken(token), - ); err != nil { - return err - } - - ownerKeyCnr = token - - checkKey = crypto.UnmarshalPublicKey(token.GetSessionKey()) - } else if _, h := obj.LastHeader(object.HeaderType(object.PublicKeyHdr)); h != nil { - pkHdr := h.GetValue().(*object.Header_PublicKey) - if pkHdr != nil && pkHdr.PublicKey != nil { - val := pkHdr.PublicKey.GetValue() - - ownerKeyCnr = &hdrOwnerKeyContainer{ - owner: obj.GetSystemHeader().OwnerID, - key: val, - } - - checkKey = crypto.UnmarshalPublicKey(val) - } - } - - if ownerKeyCnr == nil { - return id.ErrNilOwnerKeyContainer - } else if err := id.VerifyKey(ownerKeyCnr); err != nil { - return err - } - - return verifyObjectIntegrity(obj, checkKey) -} - -// verifyObjectIntegrity verifies integrity of object header. -// Returns error if object -// - does not contains integrity header; -// - integrity header is not a last header in object; -// - integrity header signature is broken. -func verifyObjectIntegrity(obj *object.Object, key *ecdsa.PublicKey) error { - n, h := obj.LastHeader(object.HeaderType(object.IntegrityHdr)) - - if l := len(obj.Headers); l <= 0 || n != l-1 { - return errBrokenHeaderStructure - } - - integrityHdr := h.Value.(*object.Header_Integrity).Integrity - if integrityHdr == nil { - return errBrokenHeaderStructure - } - - data, err := verifier.MarshalHeaders(obj, n) - if err != nil { - return err - } - - hdrChecksum := sha256.Sum256(data) - - return crypto.Verify(key, hdrChecksum[:], integrityHdr.ChecksumSignature) -} - -func (s *payloadVerifier) Verify(_ context.Context, obj *object.Object) error { - if _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); h == nil { - return errMissingPayloadChecksumHeader - } else if checksum := sha256.Sum256(obj.Payload); !bytes.Equal( - checksum[:], - h.Value.(*object.Header_PayloadChecksum).PayloadChecksum, - ) { - return errWrongPayloadChecksum - } - - return nil -} - -func (s *localIntegrityVerifier) Verify(ctx context.Context, obj *object.Object) error { - if err := s.headVerifier.Verify(ctx, obj); err != nil { - return err - } - - return s.payloadVerifier.Verify(ctx, obj) -} diff --git a/pkg/services/object_manager/replication/storage/validation_test.go b/pkg/services/object_manager/replication/storage/validation_test.go deleted file mode 100644 index 4e15df187..000000000 --- a/pkg/services/object_manager/replication/storage/validation_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package storage - -import ( - "context" - "crypto/ecdsa" - "crypto/sha256" - "math/rand" - "testing" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - crypto "github.com/nspcc-dev/neofs-crypto" - 
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/localstore" - verifier2 "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -type testEntity struct { - err error -} - -func (s *testEntity) Verify(context.Context, *object.Object) error { return s.err } - -func (s *testEntity) SelfAddr() (multiaddr.Multiaddr, error) { panic("implement me") } -func (s *testEntity) Put(context.Context, *localstore.Object) error { panic("implement me") } -func (s *testEntity) Get(localstore.Address) (*localstore.Object, error) { panic("implement me") } -func (s *testEntity) Del(localstore.Address) error { panic("implement me") } -func (s *testEntity) Meta(localstore.Address) (*localstore.ObjectMeta, error) { panic("implement me") } -func (s *testEntity) Has(localstore.Address) (bool, error) { panic("implement me") } -func (s *testEntity) ObjectsCount() (uint64, error) { panic("implement me") } -func (s *testEntity) Size() int64 { panic("implement me") } -func (s *testEntity) Iterate(localstore.FilterPipeline, localstore.MetaHandler) error { - panic("implement me") -} - -func (s *testEntity) PRead(ctx context.Context, addr refs.Address, rng object.Range) ([]byte, error) { - panic("implement me") -} - -func TestNewObjectValidator(t *testing.T) { - validParams := ObjectValidatorParams{ - Logger: zap.L(), - AddressStore: new(testEntity), - Localstore: new(testEntity), - Verifier: new(testEntity), - } - - t.Run("valid params", func(t *testing.T) { - s, err := NewObjectValidator(&validParams) - require.NoError(t, err) - require.NotNil(t, s) - }) - t.Run("fail on empty local storage", func(t *testing.T) { - p := validParams - p.Localstore = nil - _, err := NewObjectValidator(&p) - require.EqualError(t, err, errors.Wrap(errEmptyLocalstore, objectValidatorInstanceFailMsg).Error()) - }) - t.Run("fail on empty logger", func(t *testing.T) { - p := validParams - p.Logger = nil - _, err := NewObjectValidator(&p) - require.EqualError(t, err, errors.Wrap(logger.ErrNilLogger, objectValidatorInstanceFailMsg).Error()) - }) -} - -func TestNewLocalIntegrityVerifier(t *testing.T) { - var ( - err error - verifier verifier2.Verifier - ) - - verifier, err = NewLocalHeadIntegrityVerifier() - require.NoError(t, err) - require.NotNil(t, verifier) - - verifier, err = NewLocalIntegrityVerifier() - require.NoError(t, err) - require.NotNil(t, verifier) -} - -func TestLocalHeadIntegrityVerifier_Verify(t *testing.T) { - var ( - ctx = context.TODO() - ownerPrivateKey = test.DecodeKey(0) - ownerPublicKey = &ownerPrivateKey.PublicKey - sessionPrivateKey = test.DecodeKey(1) - sessionPublicKey = &sessionPrivateKey.PublicKey - ) - - ownerID, err := refs.NewOwnerID(ownerPublicKey) - require.NoError(t, err) - - s, err := NewLocalIntegrityVerifier() - require.NoError(t, err) - - okItems := []func() *object.Object{ - // correct object w/ session token - func() *object.Object { - token := new(service.Token) - token.SetOwnerID(ownerID) - token.SetSessionKey(crypto.MarshalPublicKey(sessionPublicKey)) - - require.NoError(t, - service.AddSignatureWithKey( - ownerPrivateKey, - service.NewSignedSessionToken(token), - ), - ) - - obj := new(object.Object) - obj.AddHeader(&object.Header{ - Value: &object.Header_Token{ - Token: token, - }, - }) - - obj.SetPayload([]byte{1, 2, 3}) - addPayloadChecksum(obj) - - addHeadersChecksum(t, obj, 
sessionPrivateKey) - - return obj - }, - // correct object w/o session token - func() *object.Object { - obj := new(object.Object) - obj.SystemHeader.OwnerID = ownerID - obj.SetPayload([]byte{1, 2, 3}) - - addPayloadChecksum(obj) - - obj.AddHeader(&object.Header{ - Value: &object.Header_PublicKey{ - PublicKey: &object.PublicKey{ - Value: crypto.MarshalPublicKey(ownerPublicKey), - }, - }, - }) - - addHeadersChecksum(t, obj, ownerPrivateKey) - - return obj - }, - } - - failItems := []func() *object.Object{} - - for _, item := range okItems { - require.NoError(t, s.Verify(ctx, item())) - } - - for _, item := range failItems { - require.Error(t, s.Verify(ctx, item())) - } -} - -func addPayloadChecksum(obj *object.Object) { - payloadChecksum := sha256.Sum256(obj.GetPayload()) - - obj.AddHeader(&object.Header{ - Value: &object.Header_PayloadChecksum{ - PayloadChecksum: payloadChecksum[:], - }, - }) -} - -func addHeadersChecksum(t *testing.T, obj *object.Object, key *ecdsa.PrivateKey) { - headersData, err := verifier2.MarshalHeaders(obj, len(obj.Headers)) - require.NoError(t, err) - - headersChecksum := sha256.Sum256(headersData) - - integrityHdr := new(object.IntegrityHeader) - integrityHdr.SetHeadersChecksum(headersChecksum[:]) - - require.NoError(t, service.AddSignatureWithKey(key, integrityHdr)) - - obj.AddHeader(&object.Header{ - Value: &object.Header_Integrity{ - Integrity: integrityHdr, - }, - }) -} - -func TestPayloadVerifier_Verify(t *testing.T) { - ctx := context.TODO() - verifier := new(payloadVerifier) - - t.Run("missing header", func(t *testing.T) { - obj := new(object.Object) - require.EqualError(t, verifier.Verify(ctx, obj), errMissingPayloadChecksumHeader.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - payload := testData(t, 10) - - cs := sha256.Sum256(payload) - hdr := &object.Header_PayloadChecksum{PayloadChecksum: cs[:]} - - obj := &object.Object{ - Headers: []object.Header{{Value: hdr}}, - Payload: payload, - } - - require.NoError(t, verifier.Verify(ctx, obj)) - - hdr.PayloadChecksum[0]++ - require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error()) - - hdr.PayloadChecksum[0]-- - obj.Payload[0]++ - require.EqualError(t, verifier.Verify(ctx, obj), errWrongPayloadChecksum.Error()) - }) -} - -func TestLocalIntegrityVerifier_Verify(t *testing.T) { - ctx := context.TODO() - obj := new(object.Object) - - t.Run("head verification failure", func(t *testing.T) { - hErr := errors.New("test error for head verifier") - - s := &localIntegrityVerifier{ - headVerifier: &testEntity{ - err: hErr, // force head verifier to return hErr - }, - } - - require.EqualError(t, s.Verify(ctx, obj), hErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - pErr := errors.New("test error for payload verifier") - - s := &localIntegrityVerifier{ - headVerifier: new(testEntity), - payloadVerifier: &testEntity{ - err: pErr, // force payload verifier to return hErr - }, - } - - require.EqualError(t, s.Verify(ctx, obj), pErr.Error()) - }) -} - -// testData returns size bytes of random data. 
-func testData(t *testing.T, size int) []byte { - res := make([]byte, size) - _, err := rand.Read(res) - require.NoError(t, err) - return res -} - -// TODO: write functionality tests diff --git a/pkg/services/object_manager/replication/storage_validator.go b/pkg/services/object_manager/replication/storage_validator.go deleted file mode 100644 index 4dd058c88..000000000 --- a/pkg/services/object_manager/replication/storage_validator.go +++ /dev/null @@ -1,194 +0,0 @@ -package replication - -import ( - "context" - "time" - - "github.com/multiformats/go-multiaddr" - "go.uber.org/zap" -) - -type ( - // StorageValidator is an interface of entity - // that listens and performs task of storage validation on remote nodes. - // Validation can result to the need to replicate or clean object. - StorageValidator interface { - Process(ctx context.Context) chan<- *ObjectLocationRecord - SubscribeReplication(ch chan<- *ReplicateTask) - SubscribeGarbage(ch chan<- Address) - } - - storageValidator struct { - objectVerifier ObjectVerifier - log *zap.Logger - presenceChecker PresenceChecker - addrstore AddressStore - - taskChanCap int - resultTimeout time.Duration - replicateResultChan chan<- *ReplicateTask - garbageChan chan<- Address - } - - // StorageValidatorParams groups the parameters of storage validator's constructor. - StorageValidatorParams struct { - ObjectVerifier - PresenceChecker - *zap.Logger - - TaskChanCap int - ResultTimeout time.Duration - AddrStore AddressStore - } -) - -const ( - defaultStorageValidatorChanCap = 10 - defaultStorageValidatorResultTimeout = time.Second - - storageValidatorEntity = "storage validator" -) - -func (s *storageValidator) SubscribeReplication(ch chan<- *ReplicateTask) { - s.replicateResultChan = ch -} - -func (s *storageValidator) SubscribeGarbage(ch chan<- Address) { s.garbageChan = ch } - -func (s *storageValidator) Process(ctx context.Context) chan<- *ObjectLocationRecord { - ch := make(chan *ObjectLocationRecord, s.taskChanCap) - go s.processRoutine(ctx, ch) - - return ch -} - -func (s *storageValidator) writeReplicateResult(replicateTask *ReplicateTask) { - if s.replicateResultChan == nil { - return - } - select { - case s.replicateResultChan <- replicateTask: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *storageValidator) writeGarbage(addr Address) { - if s.garbageChan == nil { - return - } - select { - case s.garbageChan <- addr: - case <-time.After(s.resultTimeout): - s.log.Warn(writeResultTimeout) - } -} - -func (s *storageValidator) processRoutine(ctx context.Context, taskChan <-chan *ObjectLocationRecord) { -loop: - for { - select { - case <-ctx.Done(): - s.log.Warn(storageValidatorEntity+ctxDoneMsg, zap.Error(ctx.Err())) - break loop - case locationRecord, ok := <-taskChan: - if !ok { - s.log.Warn(storageValidatorEntity + taskChanClosed) - break loop - } else if has, err := s.presenceChecker.Has(locationRecord.Address); err != nil || !has { - continue loop - } - s.handleTask(ctx, locationRecord) - } - } - close(s.replicateResultChan) - close(s.garbageChan) -} - -func (s *storageValidator) handleTask(ctx context.Context, locationRecord *ObjectLocationRecord) { - selfAddr, err := s.addrstore.SelfAddr() - if err != nil { - s.log.Error("storage validator can't obtain self address") - return - } - - var ( - weightierCounter int - replicateTask = &ReplicateTask{ - Address: locationRecord.Address, - Shortage: locationRecord.ReservationRatio - 1, // taking account of object correctly stored in local store - 
ExcludeNodes: nodesFromLocations(locationRecord.Locations, selfAddr), - } - ) - - for i := range locationRecord.Locations { - loc := locationRecord.Locations[i] - - if s.objectVerifier.Verify(ctx, &ObjectVerificationParams{ - Address: locationRecord.Address, - Node: locationRecord.Locations[i].Node, - Handler: func(valid bool, _ *Object) { - if valid { - replicateTask.Shortage-- - if loc.WeightGreater { - weightierCounter++ - } - } - }, - }); weightierCounter >= locationRecord.ReservationRatio { - s.writeGarbage(locationRecord.Address) - return - } - } - - if replicateTask.Shortage > 0 { - s.writeReplicateResult(replicateTask) - } -} - -// nodesFromLocations must ignore self address, because it is used in -// storage validator during replication. We must ignore our own stored -// objects during replication and work with remote hosts and check their -// verification info. -func nodesFromLocations(locations []ObjectLocation, selfaddr multiaddr.Multiaddr) []multiaddr.Multiaddr { - res := make([]multiaddr.Multiaddr, 0, len(locations)) - - for i := range locations { - if !locations[i].Node.Equal(selfaddr) { - res = append(res, locations[i].Node) - } - } - - return res -} - -// NewStorageValidator is a storage validator's constructor. -func NewStorageValidator(p StorageValidatorParams) (StorageValidator, error) { - switch { - case p.Logger == nil: - return nil, instanceError(storageValidatorEntity, loggerPart) - case p.ObjectVerifier == nil: - return nil, instanceError(storageValidatorEntity, objectVerifierPart) - case p.PresenceChecker == nil: - return nil, instanceError(storageValidatorEntity, presenceCheckerPart) - case p.AddrStore == nil: - return nil, instanceError(storageValidatorEntity, addrStorePart) - } - - if p.TaskChanCap <= 0 { - p.TaskChanCap = defaultStorageValidatorChanCap - } - - if p.ResultTimeout <= 0 { - p.ResultTimeout = defaultStorageValidatorResultTimeout - } - - return &storageValidator{ - objectVerifier: p.ObjectVerifier, - log: p.Logger, - presenceChecker: p.PresenceChecker, - taskChanCap: p.TaskChanCap, - resultTimeout: p.ResultTimeout, - addrstore: p.AddrStore, - }, nil -} diff --git a/pkg/services/object_manager/transformer/.gitkeep b/pkg/services/object_manager/transformer/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/services/object_manager/transformer/alias.go b/pkg/services/object_manager/transformer/alias.go deleted file mode 100644 index a18098bf5..000000000 --- a/pkg/services/object_manager/transformer/alias.go +++ /dev/null @@ -1,25 +0,0 @@ -package transformer - -import ( - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/storagegroup" -) - -type ( - // Object is a type alias of - // Object from object package of neofs-api-go. - Object = object.Object - - // ObjectID is a type alias of - // ObjectID from refs package of neofs-api-go. - ObjectID = refs.ObjectID - - // CID is a type alias of - // CID from refs package of neofs-api-go. - CID = refs.CID - - // StorageGroup is a type alias of - // StorageGroup from storagegroup package of neofs-api-go. 
- StorageGroup = storagegroup.StorageGroup -) diff --git a/pkg/services/object_manager/transformer/put_test.go b/pkg/services/object_manager/transformer/put_test.go deleted file mode 100644 index 73f0470f7..000000000 --- a/pkg/services/object_manager/transformer/put_test.go +++ /dev/null @@ -1,762 +0,0 @@ -package transformer - -import ( - "bytes" - "context" - "crypto/sha256" - "io" - "math/rand" - "sort" - "testing" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - crypto "github.com/nspcc-dev/neofs-crypto" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/replication/storage" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/nspcc-dev/neofs-node/pkg/util/test" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" -) - -type ( - // Entity for mocking interfaces. - // Implementation of any interface intercepts arguments via f (if not nil). - // If err is not nil, it returns as it is. Otherwise, casted to needed type res returns w/o error. - testPutEntity struct { - // Set of interfaces which entity must implement, but some methods from those does not call. - - // Argument interceptor. Used for ascertain of correct parameter passage between components. - f func(...interface{}) - // Mocked result of any interface. - res interface{} - // Mocked error of any interface. - err error - } -) - -var ( - _ io.Writer = (*testPutEntity)(nil) - _ EpochReceiver = (*testPutEntity)(nil) - _ Transformer = (*testPutEntity)(nil) - _ storagegroup.InfoReceiver = (*testPutEntity)(nil) - _ verifier.Verifier = (*testPutEntity)(nil) -) - -func (s *testPutEntity) Verify(_ context.Context, obj *Object) error { - if s.f != nil { - s.f(obj) - } - return s.err -} - -func (s *testPutEntity) Write(p []byte) (int, error) { - if s.f != nil { - s.f(p) - } - return 0, s.err -} - -func (s *testPutEntity) Transform(_ context.Context, u ProcUnit, h ...ProcUnitHandler) error { - if s.f != nil { - s.f(u, h) - } - return s.err -} - -func (s *testPutEntity) GetSGInfo(_ context.Context, cid CID, group []ObjectID) (*StorageGroup, error) { - if s.f != nil { - s.f(cid, group) - } - if s.err != nil { - return nil, s.err - } - return s.res.(*StorageGroup), nil -} - -func (s *testPutEntity) Epoch() uint64 { return s.res.(uint64) } - -func TestNewTransformer(t *testing.T) { - validParams := Params{ - SGInfoReceiver: new(testPutEntity), - EpochReceiver: new(testPutEntity), - SizeLimit: 1, - Verifier: new(testPutEntity), - } - - t.Run("valid params", func(t *testing.T) { - res, err := NewTransformer(validParams) - require.NoError(t, err) - require.NotNil(t, res) - }) - t.Run("non-positive size", func(t *testing.T) { - p := validParams - p.SizeLimit = 0 - _, err := NewTransformer(p) - require.EqualError(t, err, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg).Error()) - }) - t.Run("empty SG info receiver", func(t *testing.T) { - p := validParams - p.SGInfoReceiver = nil - _, err := NewTransformer(p) - require.EqualError(t, err, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg).Error()) - }) - t.Run("empty epoch receiver", func(t *testing.T) { - p := validParams - p.EpochReceiver = nil - _, err := NewTransformer(p) - require.EqualError(t, err, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg).Error()) - }) - t.Run("empty 
object verifier", func(t *testing.T) { - p := validParams - p.Verifier = nil - _, err := NewTransformer(p) - require.EqualError(t, err, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg).Error()) - }) -} - -func Test_transformer(t *testing.T) { - ctx := context.TODO() - - u := ProcUnit{ - Head: &Object{ - Payload: testData(t, 10), - }, - Payload: new(emptyReader), - } - - handlers := []ProcUnitHandler{func(context.Context, ProcUnit) error { return nil }} - - t.Run("preliminary transformation failure", func(t *testing.T) { - // create custom error for test - pErr := errors.New("test error for prelim transformer") - - s := &transformer{ - tPrelim: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct prelim transformer params", func(t *testing.T) { - require.Equal(t, u, items[0]) - require.Empty(t, items[1]) - }) - }, - err: pErr, // force Transformer to return pErr - }, - } - - // ascertain that error returns as expected - require.EqualError(t, s.Transform(ctx, u, handlers...), pErr.Error()) - }) - - t.Run("size limiter error/correct sign processing", func(t *testing.T) { - // create custom error for test - sErr := errors.New("test error for signer") - lErr := errors.New("test error for size limiter") - - s := &transformer{ - tPrelim: new(testPutEntity), - tSizeLim: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct size limiter params", func(t *testing.T) { - require.Equal(t, u, items[0]) - hs := items[1].([]ProcUnitHandler) - require.Len(t, hs, 1) - require.EqualError(t, hs[0](ctx, u), sErr.Error()) - }) - }, - err: lErr, // force Transformer to return lErr - }, - tSign: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct signer params", func(t *testing.T) { - require.Equal(t, u, items[0]) - require.Equal(t, handlers, items[1]) - }) - }, - err: sErr, // force Transformer to return sErr - }, - } - - // ascertain that error returns as expected - require.EqualError(t, s.Transform(ctx, u, handlers...), lErr.Error()) - }) -} - -func Test_preliminaryTransformer(t *testing.T) { - ctx := context.TODO() - - u := ProcUnit{ - Head: &Object{ - Payload: testData(t, 10), - }, - Payload: new(emptyReader), - } - - t.Run("field moulder failure", func(t *testing.T) { - // create custom error for test - mErr := errors.New("test error for field moulder") - - s := &preliminaryTransformer{ - fMoulder: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct field moulder params", func(t *testing.T) { - require.Equal(t, u, items[0]) - require.Empty(t, items[1]) - }) - }, - err: mErr, // force Transformer to return mErr - }, - } - - // ascertain that error returns as expected - require.EqualError(t, s.Transform(ctx, u), mErr.Error()) - }) - - t.Run("correct result", func(t *testing.T) { - // create custom error for test - sgErr := errors.New("test error for SG moulder") - - s := &preliminaryTransformer{ - fMoulder: new(testPutEntity), - sgMoulder: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct field moulder params", func(t *testing.T) { - require.Equal(t, u, items[0]) - require.Empty(t, items[1]) - }) - }, - err: sgErr, // force Transformer to return sgErr - }, - } - - // ascertain that error returns as expected - require.EqualError(t, s.Transform(ctx, u), sgErr.Error()) - }) -} - -func Test_readChunk(t *testing.T) { - t.Run("empty slice", func(t *testing.T) { - t.Run("missing checksum header", func(t *testing.T) { - obj := new(Object) - - _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) - require.Nil(t, h) - - 
require.NoError(t, readChunk(ProcUnit{ - Head: obj, - Payload: bytes.NewBuffer(testData(t, 10)), - }, nil, nil, nil)) - - _, h = obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) - - require.NotNil(t, h) - require.Equal(t, sha256.New().Sum(nil), h.Value.(*object.Header_PayloadChecksum).PayloadChecksum) - }) - - t.Run("existing checksum header", func(t *testing.T) { - h := &object.Header_PayloadChecksum{PayloadChecksum: testData(t, 10)} - - obj := &Object{Headers: []object.Header{{Value: h}}} - - require.NoError(t, readChunk(ProcUnit{ - Head: obj, - Payload: bytes.NewBuffer(testData(t, 10)), - }, nil, nil, nil)) - - require.NotNil(t, h) - require.Equal(t, sha256.New().Sum(nil), h.PayloadChecksum) - }) - }) - - t.Run("non-empty slice", func(t *testing.T) { - t.Run("non-full data", func(t *testing.T) { - var ( - size = 10 - buf = testData(t, size) - r = bytes.NewBuffer(buf[:size-1]) - ) - - require.EqualError(t, - readChunk(ProcUnit{Head: new(Object), Payload: r}, buf, nil, nil), - ErrPayloadEOF.Error(), - ) - }) - - t.Run("hash accumulator write", func(t *testing.T) { - var ( - d = testData(t, 10) - srcHash = sha256.Sum256(d) - hAcc = sha256.New() - buf = bytes.NewBuffer(d) - b = make([]byte, len(d)) - obj = new(Object) - - srcHomoHash = hash.Sum(d) - homoHashHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} - ) - - t.Run("failure", func(t *testing.T) { - hErr := errors.New("test error for hash writer") - b := testData(t, len(d)) - - require.EqualError(t, readChunk(EmptyPayloadUnit(new(Object)), b, &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct accumulator params", func(t *testing.T) { - require.Equal(t, b, items[0]) - }) - }, - err: hErr, - }, nil), hErr.Error()) - }) - - require.NoError(t, readChunk(ProcUnit{Head: obj, Payload: buf}, b, hAcc, homoHashHdr)) - - _, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr)) - require.NotNil(t, h) - require.Equal(t, srcHash[:], h.Value.(*object.Header_PayloadChecksum).PayloadChecksum) - - require.Equal(t, srcHash[:], hAcc.Sum(nil)) - require.Equal(t, srcHomoHash, homoHashHdr.HomoHash) - }) - }) -} - -func Test_headSigner(t *testing.T) { - ctx := context.TODO() - - t.Run("invalid input", func(t *testing.T) { - t.Run("missing token", func(t *testing.T) { - u := ProcUnit{Head: new(Object)} - require.Error(t, u.Head.Verify()) - s := &headSigner{verifier: &testPutEntity{err: errors.New("")}} - require.EqualError(t, s.Transform(ctx, u), errNoToken.Error()) - }) - - t.Run("with token", func(t *testing.T) { - u := ProcUnit{Head: new(Object)} - - v, err := storage.NewLocalHeadIntegrityVerifier() - require.NoError(t, err) - - require.Error(t, u.Head.Verify()) - - privateToken, err := session.NewPrivateToken(0) - require.NoError(t, err) - ctx := context.WithValue(ctx, PrivateSessionToken, privateToken) - - s := &headSigner{ - verifier: &testPutEntity{ - err: errors.New(""), - }, - } - - key := &privateToken.PrivateKey().PublicKey - - u.Head.SystemHeader.OwnerID, err = refs.NewOwnerID(key) - require.NoError(t, err) - u.Head.AddHeader(&object.Header{ - Value: &object.Header_PublicKey{ - PublicKey: &object.PublicKey{ - Value: crypto.MarshalPublicKey(key), - }, - }, - }) - - require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { - require.NoError(t, v.Verify(ctx, unit.Head)) - _, h := unit.Head.LastHeader(object.HeaderType(object.IntegrityHdr)) - require.NotNil(t, h) - d, err := verifier.MarshalHeaders(unit.Head, len(unit.Head.Headers)-1) - require.NoError(t, err) - cs := 
sha256.Sum256(d) - require.Equal(t, cs[:], h.Value.(*object.Header_Integrity).Integrity.GetHeadersChecksum()) - return nil - })) - - t.Run("valid input", func(t *testing.T) { - s := &headSigner{verifier: new(testPutEntity)} - require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { - require.Equal(t, u, unit) - return nil - })) - }) - }) - }) -} - -func Test_fieldMoulder(t *testing.T) { - ctx := context.TODO() - epoch := uint64(100) - - fMoulder := &fieldMoulder{epochRecv: &testPutEntity{res: epoch}} - - t.Run("no token", func(t *testing.T) { - require.EqualError(t, new(fieldMoulder).Transform(ctx, ProcUnit{}), errNoToken.Error()) - }) - - t.Run("with token", func(t *testing.T) { - token := new(service.Token) - token.SetID(service.TokenID{1, 2, 3}) - - ctx := context.WithValue(ctx, PublicSessionToken, token) - - u := ProcUnit{Head: new(Object)} - - _, h := u.Head.LastHeader(object.HeaderType(object.TokenHdr)) - require.Nil(t, h) - - require.NoError(t, fMoulder.Transform(ctx, u)) - - _, h = u.Head.LastHeader(object.HeaderType(object.TokenHdr)) - require.Equal(t, token, h.Value.(*object.Header_Token).Token) - - require.False(t, u.Head.SystemHeader.ID.Empty()) - require.NotZero(t, u.Head.SystemHeader.CreatedAt.UnixTime) - require.Equal(t, epoch, u.Head.SystemHeader.CreatedAt.Epoch) - require.Equal(t, uint64(1), u.Head.SystemHeader.Version) - }) -} - -func Test_sgMoulder(t *testing.T) { - ctx := context.TODO() - - t.Run("invalid SG linking", func(t *testing.T) { - t.Run("w/ header and w/o links", func(t *testing.T) { - obj := new(Object) - obj.SetStorageGroup(new(storagegroup.StorageGroup)) - require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error()) - }) - - t.Run("w/o header and w/ links", func(t *testing.T) { - obj := new(Object) - addLink(obj, object.Link_StorageGroup, ObjectID{}) - require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error()) - }) - }) - - t.Run("non-SG", func(t *testing.T) { - obj := new(Object) - require.NoError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj})) - }) - - t.Run("receive SG info", func(t *testing.T) { - cid := testObjectAddress(t).CID - group := make([]ObjectID, 5) - for i := range group { - group[i] = testObjectAddress(t).ObjectID - } - - t.Run("failure", func(t *testing.T) { - obj := &Object{SystemHeader: object.SystemHeader{CID: cid}} - - obj.SetStorageGroup(new(storagegroup.StorageGroup)) - for i := range group { - addLink(obj, object.Link_StorageGroup, group[i]) - } - - sgErr := errors.New("test error for SG info receiver") - - mSG := &sgMoulder{ - sgInfoRecv: &testPutEntity{ - f: func(items ...interface{}) { - t.Run("correct SG info receiver params", func(t *testing.T) { - cp := make([]ObjectID, len(group)) - copy(cp, group) - sort.Sort(storagegroup.IDList(cp)) - require.Equal(t, cid, items[0]) - require.Equal(t, cp, items[1]) - }) - }, - err: sgErr, - }, - } - - require.EqualError(t, mSG.Transform(ctx, ProcUnit{Head: obj}), sgErr.Error()) - }) - }) - - t.Run("correct result", func(t *testing.T) { - obj := new(Object) - obj.SetStorageGroup(new(storagegroup.StorageGroup)) - addLink(obj, object.Link_StorageGroup, ObjectID{}) - - sgInfo := &storagegroup.StorageGroup{ - ValidationDataSize: 19, - ValidationHash: hash.Sum(testData(t, 10)), - } - - mSG := &sgMoulder{ - sgInfoRecv: &testPutEntity{ - res: sgInfo, - }, - } - - require.NoError(t, mSG.Transform(ctx, ProcUnit{Head: obj})) - - _, h := 
obj.LastHeader(object.HeaderType(object.StorageGroupHdr)) - require.NotNil(t, h) - require.Equal(t, sgInfo, h.Value.(*object.Header_StorageGroup).StorageGroup) - }) -} - -func Test_sizeLimiter(t *testing.T) { - ctx := context.TODO() - - t.Run("limit entry", func(t *testing.T) { - payload := testData(t, 10) - payloadSize := uint64(len(payload) - 1) - - u := ProcUnit{ - Head: &Object{SystemHeader: object.SystemHeader{ - PayloadLength: payloadSize, - }}, - Payload: bytes.NewBuffer(payload[:payloadSize]), - } - - sl := &sizeLimiter{limit: payloadSize} - - t.Run("cut payload", func(t *testing.T) { - require.Error(t, sl.Transform(ctx, ProcUnit{ - Head: &Object{SystemHeader: object.SystemHeader{PayloadLength: payloadSize}}, - Payload: bytes.NewBuffer(payload[:payloadSize-1]), - })) - }) - - require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { - _, err := unit.Payload.Read(make([]byte, 1)) - require.EqualError(t, err, io.EOF.Error()) - require.Equal(t, payload[:payloadSize], unit.Head.Payload) - _, h := unit.Head.LastHeader(object.HeaderType(object.HomoHashHdr)) - require.NotNil(t, h) - require.Equal(t, hash.Sum(payload[:payloadSize]), h.Value.(*object.Header_HomoHash).HomoHash) - return nil - })) - }) - - t.Run("limit exceed", func(t *testing.T) { - payload := testData(t, 100) - sizeLimit := uint64(len(payload)) / 13 - - pToken, err := session.NewPrivateToken(0) - require.NoError(t, err) - - srcObj := &object.Object{ - SystemHeader: object.SystemHeader{ - Version: 12, - PayloadLength: uint64(len(payload)), - ID: testObjectAddress(t).ObjectID, - OwnerID: object.OwnerID{1, 2, 3}, - CID: testObjectAddress(t).CID, - }, - Headers: []object.Header{ - {Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}}, - }, - } - - u := ProcUnit{ - Head: srcObj, - Payload: bytes.NewBuffer(payload), - } - - epoch := uint64(77) - - sl := &sizeLimiter{ - limit: sizeLimit, - epochRecv: &testPutEntity{res: epoch}, - } - - t.Run("no token", func(t *testing.T) { - require.EqualError(t, sl.Transform(ctx, ProcUnit{ - Head: &Object{ - SystemHeader: object.SystemHeader{ - PayloadLength: uint64(len(payload)), - }, - }, - Payload: bytes.NewBuffer(payload), - }), errNoToken.Error()) - }) - - ctx := context.WithValue(ctx, PrivateSessionToken, pToken) - - t.Run("cut payload", func(t *testing.T) { - require.Error(t, sl.Transform(ctx, ProcUnit{ - Head: &Object{ - SystemHeader: object.SystemHeader{ - PayloadLength: uint64(len(payload)) + 1, - }, - }, - Payload: bytes.NewBuffer(payload), - })) - }) - - objs := make([]Object, 0) - - t.Run("handler error", func(t *testing.T) { - hErr := errors.New("test error for handler") - - require.EqualError(t, sl.Transform(ctx, ProcUnit{ - Head: &Object{ - SystemHeader: object.SystemHeader{PayloadLength: uint64(len(payload))}, - Headers: make([]object.Header, 0), - }, - Payload: bytes.NewBuffer(payload), - }, func(context.Context, ProcUnit) error { return hErr }), hErr.Error()) - }) - - require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { - _, err := unit.Payload.Read(make([]byte, 1)) - require.EqualError(t, err, io.EOF.Error()) - objs = append(objs, *unit.Head.Copy()) - return nil - })) - - ln := len(objs) - - res := make([]byte, 0, len(payload)) - - zObj := objs[ln-1] - require.Zero(t, zObj.SystemHeader.PayloadLength) - require.Empty(t, zObj.Payload) - require.Empty(t, zObj.Links(object.Link_Next)) - require.Empty(t, zObj.Links(object.Link_Previous)) - require.Empty(t, 
zObj.Links(object.Link_Parent)) - children := zObj.Links(object.Link_Child) - require.Len(t, children, ln-1) - for i := range objs[:ln-1] { - require.Equal(t, objs[i].SystemHeader.ID, children[i]) - } - - for i := range objs[:ln-1] { - res = append(res, objs[i].Payload...) - if i == 0 { - require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID) - require.True(t, objs[i].Links(object.Link_Previous)[0].Empty()) - } else if i < ln-2 { - require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID) - require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID) - } else { - _, h := objs[i].LastHeader(object.HeaderType(object.HomoHashHdr)) - require.NotNil(t, h) - require.Equal(t, hash.Sum(payload), h.Value.(*object.Header_HomoHash).HomoHash) - require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID) - require.True(t, objs[i].Links(object.Link_Next)[0].Empty()) - } - } - - require.Equal(t, payload, res) - }) -} - -// testData returns size bytes of random data. -func testData(t *testing.T, size int) []byte { - res := make([]byte, size) - _, err := rand.Read(res) - require.NoError(t, err) - return res -} - -// testObjectAddress returns new random object address. -func testObjectAddress(t *testing.T) refs.Address { - oid, err := refs.NewObjectID() - require.NoError(t, err) - return refs.Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid} -} - -func TestIntegration(t *testing.T) { - ownerKey := test.DecodeKey(1) - - ownerID, err := refs.NewOwnerID(&ownerKey.PublicKey) - require.NoError(t, err) - - privToken, err := session.NewPrivateToken(0) - require.NoError(t, err) - - pkBytes, err := session.PublicSessionToken(privToken) - require.NoError(t, err) - - ctx := context.WithValue(context.TODO(), PrivateSessionToken, privToken) - - pubToken := new(service.Token) - pubToken.SetID(service.TokenID{1, 2, 3}) - pubToken.SetSessionKey(pkBytes) - pubToken.SetOwnerID(ownerID) - pubToken.SetOwnerKey(crypto.MarshalPublicKey(&ownerKey.PublicKey)) - require.NoError(t, service.AddSignatureWithKey(ownerKey, service.NewSignedSessionToken(pubToken))) - - ctx = context.WithValue(ctx, PublicSessionToken, pubToken) - - t.Run("non-SG object", func(t *testing.T) { - t.Run("with split", func(t *testing.T) { - tr, err := NewTransformer(Params{ - SGInfoReceiver: new(testPutEntity), - EpochReceiver: &testPutEntity{res: uint64(1)}, - SizeLimit: 13, - Verifier: &testPutEntity{ - err: errors.New(""), // force verifier to return non-nil error - }, - }) - require.NoError(t, err) - - payload := make([]byte, 20) - _, err = rand.Read(payload) - require.NoError(t, err) - - obj := &Object{ - SystemHeader: object.SystemHeader{ - PayloadLength: uint64(len(payload)), - CID: CID{3}, - }, - Headers: []object.Header{ - {Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}}, - }, - } - - obj.SystemHeader.OwnerID = ownerID - - obj.SetHeader(&object.Header{ - Value: &object.Header_Token{ - Token: pubToken, - }, - }) - - testTransformer(t, ctx, ProcUnit{ - Head: obj, - Payload: bytes.NewBuffer(payload), - }, tr, payload) - }) - }) -} - -func testTransformer(t *testing.T, ctx context.Context, u ProcUnit, tr Transformer, src []byte) { - objList := make([]Object, 0) - verifier, err := storage.NewLocalHeadIntegrityVerifier() - require.NoError(t, err) - - require.NoError(t, tr.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error { - require.NoError(t, verifier.Verify(ctx, unit.Head)) - 
objList = append(objList, *unit.Head.Copy()) - return nil - })) - - reverse := NewRestorePipeline(SplitRestorer()) - - res, err := reverse.Restore(ctx, objList...) - require.NoError(t, err) - - integrityVerifier, err := storage.NewLocalIntegrityVerifier() - require.NoError(t, err) - require.NoError(t, integrityVerifier.Verify(ctx, &res[0])) - - require.Equal(t, src, res[0].Payload) - _, h := res[0].LastHeader(object.HeaderType(object.HomoHashHdr)) - require.True(t, hash.Sum(src).Equal(h.Value.(*object.Header_HomoHash).HomoHash)) -} - -func addLink(o *Object, t object.Link_Type, id ObjectID) { - o.AddHeader(&object.Header{Value: &object.Header_Link{ - Link: &object.Link{Type: t, ID: id}, - }}) -} diff --git a/pkg/services/object_manager/transformer/restore.go b/pkg/services/object_manager/transformer/restore.go deleted file mode 100644 index 6242bb761..000000000 --- a/pkg/services/object_manager/transformer/restore.go +++ /dev/null @@ -1,126 +0,0 @@ -package transformer - -import ( - "context" - "sync" - - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/pkg/errors" -) - -type ( - // ObjectRestorer is an interface of object restorer. - ObjectRestorer interface { - Type() object.Transform_Type - Restore(context.Context, ...Object) ([]Object, error) - } - - restorePipeline struct { - ObjectRestorer - *sync.RWMutex - items map[object.Transform_Type]ObjectRestorer - } - - splitRestorer struct{} -) - -var errEmptyObjList = errors.New("object list is empty") - -var errMissingParentLink = errors.New("missing parent link") - -func (s *restorePipeline) Restore(ctx context.Context, srcObjs ...Object) ([]Object, error) { - if len(srcObjs) == 0 { - return nil, errEmptyInput - } - - s.RLock() - defer s.RUnlock() - - var ( - objs = srcObjs - err error - ) - - for { - _, th := objs[0].LastHeader(object.HeaderType(object.TransformHdr)) - if th == nil { - break - } - - transform := th.Value.(*object.Header_Transform).Transform - - tr, ok := s.items[transform.Type] - if !ok { - return nil, errors.Errorf("missing restorer (%s)", transform.Type) - } - - if objs, err = tr.Restore(ctx, objs...); err != nil { - return nil, errors.Wrapf(err, "restoration failed (%s)", transform.Type) - } - } - - return objs, nil -} - -// NewRestorePipeline is a constructor of the pipeline of object restorers. -func NewRestorePipeline(t ...ObjectRestorer) ObjectRestorer { - m := make(map[object.Transform_Type]ObjectRestorer, len(t)) - - for i := range t { - m[t[i].Type()] = t[i] - } - - return &restorePipeline{ - RWMutex: new(sync.RWMutex), - items: m, - } -} - -func (*splitRestorer) Type() object.Transform_Type { - return object.Transform_Split -} - -func (*splitRestorer) Restore(ctx context.Context, objs ...Object) ([]Object, error) { - if len(objs) == 0 { - return nil, errEmptyObjList - } - - chain, err := GetChain(objs...) - if err != nil { - return nil, errors.Wrap(err, "could not get chain of objects") - } - - obj := chain[len(chain)-1] - - var ( - size uint64 - p = make([]byte, 0, len(chain[0].Payload)*len(chain)) - ) - - for j := 0; j < len(chain); j++ { - p = append(p, chain[j].Payload...) 
- size += chain[j].SystemHeader.PayloadLength - } - - obj.SystemHeader.PayloadLength = size - obj.Payload = p - - parent, err := lastLink(&obj, object.Link_Parent) - if err != nil { - return nil, errMissingParentLink - } - - obj.SystemHeader.ID = parent - - err = deleteTransformer(&obj, object.Transform_Split) - if err != nil { - return nil, err - } - - return []Object{obj}, nil -} - -// SplitRestorer is a splitted object restorer's constructor. -func SplitRestorer() ObjectRestorer { - return new(splitRestorer) -} diff --git a/pkg/services/object_manager/transformer/transformer.go b/pkg/services/object_manager/transformer/transformer.go deleted file mode 100644 index 81f4de49d..000000000 --- a/pkg/services/object_manager/transformer/transformer.go +++ /dev/null @@ -1,525 +0,0 @@ -package transformer - -import ( - "context" - "crypto/sha256" - "io" - "sort" - "time" - - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/session" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/verifier" - "github.com/pkg/errors" -) - -type ( - // Type is a type alias of - // Type from object package of neofs-api-go. - Type = object.Transform_Type - - // ProcUnit groups the information about transforming unit. - ProcUnit struct { - Head *Object - Payload io.Reader - } - - // ProcUnitHandler is a handling ProcUnit function. - ProcUnitHandler func(context.Context, ProcUnit) error - - // Transformer is an interface of object transformer. - Transformer interface { - Transform(context.Context, ProcUnit, ...ProcUnitHandler) error - } - - // EpochReceiver is an interface of epoch number container with read access. - EpochReceiver interface { - Epoch() uint64 - } - - transformer struct { - tPrelim Transformer - tSizeLim Transformer - tSign Transformer - } - - preliminaryTransformer struct { - fMoulder Transformer - sgMoulder Transformer - } - - fieldMoulder struct { - epochRecv EpochReceiver - } - - sgMoulder struct { - sgInfoRecv storagegroup.InfoReceiver - } - - sizeLimiter struct { - limit uint64 - epochRecv EpochReceiver - } - - headSigner struct { - verifier verifier.Verifier - } - - emptyReader struct{} - - // Params groups the parameters of object transformer's constructor. - Params struct { - SGInfoReceiver storagegroup.InfoReceiver - EpochReceiver EpochReceiver - SizeLimit uint64 - Verifier verifier.Verifier - } -) - -// ErrPayloadEOF is returned by Transformer that -// received unexpected end of object payload. -var ErrPayloadEOF = errors.New("payload EOF") - -const ( - verifyHeadersCount = 2 // payload checksum, integrity - splitHeadersCount = 4 // flag, parent, left, right - - transformerInstanceFailMsg = "could not create transformer instance" - - // PrivateSessionToken is a context key for session.PrivateToken. - PrivateSessionToken = "private token" - - // PublicSessionToken is a context key for service.SessionToken. - PublicSessionToken = "public token" -) - -// ErrInvalidSGLinking is returned by Transformer that received -// an object with broken storage group links. 
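// In this context "broken linking" means the two indications disagree: the
// object carries a storage group header but no Link_StorageGroup links, or
// carries such links without the header; both cases are rejected by
// sgMoulder.Transform below.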
-var ErrInvalidSGLinking = errors.New("invalid storage group linking") - -var ( - errNoToken = errors.New("no token provided") - errEmptyInput = errors.New("empty input") - errChainNotFound = errors.New("chain not found") - errCutChain = errors.New("GetChain failed: chain is not full") - errMissingTransformHdr = errors.New("cannot find transformer header") - errEmptySGInfoRecv = errors.New("empty storage group info receivers") - errInvalidSizeLimit = errors.New("non-positive object size limit") - errEmptyEpochReceiver = errors.New("empty epoch receiver") - errEmptyVerifier = errors.New("empty object verifier") -) - -// NewTransformer is an object transformer's constructor. -func NewTransformer(p Params) (Transformer, error) { - switch { - case p.SizeLimit <= 0: - return nil, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg) - case p.EpochReceiver == nil: - return nil, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg) - case p.SGInfoReceiver == nil: - return nil, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg) - case p.Verifier == nil: - return nil, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg) - } - - return &transformer{ - tPrelim: &preliminaryTransformer{ - fMoulder: &fieldMoulder{ - epochRecv: p.EpochReceiver, - }, - sgMoulder: &sgMoulder{ - sgInfoRecv: p.SGInfoReceiver, - }, - }, - tSizeLim: &sizeLimiter{ - limit: p.SizeLimit, - epochRecv: p.EpochReceiver, - }, - tSign: &headSigner{ - verifier: p.Verifier, - }, - }, nil -} - -func (s *transformer) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { - if err := s.tPrelim.Transform(ctx, unit); err != nil { - return err - } - - return s.tSizeLim.Transform(ctx, unit, func(ctx context.Context, unit ProcUnit) error { - return s.tSign.Transform(ctx, unit, handlers...) - }) -} - -func (s *preliminaryTransformer) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error { - if err := s.fMoulder.Transform(ctx, unit); err != nil { - return err - } - - return s.sgMoulder.Transform(ctx, unit) -} - -// TODO: simplify huge function. -func (s *sizeLimiter) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { - if unit.Head.SystemHeader.PayloadLength <= s.limit { - homoHashHdr := &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} - - unit.Head.AddHeader(&object.Header{Value: homoHashHdr}) - - buf := make([]byte, unit.Head.SystemHeader.PayloadLength) - - if err := readChunk(unit, buf, nil, homoHashHdr); err != nil { - return err - } - - unit.Head.Payload = buf - - return procHandlers(ctx, EmptyPayloadUnit(unit.Head), handlers...) 
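// When the payload fits into the limit, the unit is handled above as a single
// object. Otherwise the code below splits it: the payload is cut into child
// objects of at most s.limit bytes, neighbouring children are connected with
// Link_Previous/Link_Next headers and emitted one by one, and a final
// zero-payload parent object carrying Link_Child headers for every part is
// produced last (see the "limit exceed" case of Test_sizeLimiter).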
- } - - var ( - err error - buf = make([]byte, s.limit) - hAcc = sha256.New() - srcHdrLen = len(unit.Head.Headers) - pObj = unit.Head - resObj = ProcUnit{ - Head: &Object{ - SystemHeader: object.SystemHeader{ - Version: pObj.SystemHeader.Version, - OwnerID: pObj.SystemHeader.OwnerID, - CID: pObj.SystemHeader.CID, - CreatedAt: object.CreationPoint{ - UnixTime: time.Now().Unix(), - Epoch: s.epochRecv.Epoch(), - }, - }, - }, - Payload: unit.Payload, - } - left, right = &object.Link{Type: object.Link_Previous}, &object.Link{Type: object.Link_Next} - hashAccHdr, hashHdr = new(object.Header_PayloadChecksum), new(object.Header_PayloadChecksum) - homoHashAccHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))} - childCount = pObj.SystemHeader.PayloadLength/s.limit + 1 - ) - - if right.ID, err = refs.NewObjectID(); err != nil { - return err - } - - splitHeaders := make([]object.Header, 0, 3*verifyHeadersCount+splitHeadersCount+childCount) - - splitHeaders = append(splitHeaders, pObj.Headers...) - splitHeaders = append(splitHeaders, []object.Header{ - {Value: &object.Header_Transform{Transform: &object.Transform{Type: object.Transform_Split}}}, - {Value: &object.Header_Link{Link: &object.Link{ - Type: object.Link_Parent, - ID: unit.Head.SystemHeader.ID, - }}}, - {Value: &object.Header_Link{Link: left}}, - {Value: &object.Header_Link{Link: right}}, - {Value: hashHdr}, - {Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}}, - {Value: homoHashAccHdr}, - {Value: hashAccHdr}, - {Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}}, - }...) - - children := splitHeaders[srcHdrLen+2*verifyHeadersCount+splitHeadersCount+1:] - pObj.Headers = splitHeaders[:srcHdrLen+2*verifyHeadersCount+splitHeadersCount] - - for tail := pObj.SystemHeader.PayloadLength; tail > 0; tail -= min(tail, s.limit) { - size := min(tail, s.limit) - - resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)-verifyHeadersCount-1] - if err = readChunk(resObj, buf[:size], hAcc, homoHashAccHdr); err != nil { - return err - } - - resObj.Head.SystemHeader.PayloadLength = size - resObj.Head.Payload = buf[:size] - left.ID, resObj.Head.SystemHeader.ID = resObj.Head.SystemHeader.ID, right.ID - - if tail <= s.limit { - right.ID = ObjectID{} - - temp := make([]object.Header, verifyHeadersCount+1) // +1 for homomorphic hash - - copy(temp, pObj.Headers[srcHdrLen:]) - - hashAccHdr.PayloadChecksum = hAcc.Sum(nil) - - copy(pObj.Headers[srcHdrLen:srcHdrLen+verifyHeadersCount+1], - pObj.Headers[len(pObj.Headers)-verifyHeadersCount:]) - - resObj.Head.Headers = pObj.Headers[:srcHdrLen+verifyHeadersCount] - - if err = signWithToken(ctx, &Object{ - SystemHeader: pObj.SystemHeader, - Headers: resObj.Head.Headers, - }); err != nil { - return err - } - - copy(pObj.Headers[srcHdrLen+2*(verifyHeadersCount+1):], - pObj.Headers[srcHdrLen+verifyHeadersCount+1:srcHdrLen+verifyHeadersCount+splitHeadersCount]) - - copy(pObj.Headers[srcHdrLen+verifyHeadersCount+1:], temp) - - resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)] - } else if right.ID, err = refs.NewObjectID(); err != nil { - return err - } - - if err := procHandlers(ctx, EmptyPayloadUnit(resObj.Head), handlers...); err != nil { - return err - } - - children = append(children, object.Header{Value: &object.Header_Link{Link: &object.Link{ - Type: object.Link_Child, - ID: resObj.Head.SystemHeader.ID, - }}}) - } - - pObj.SystemHeader.PayloadLength = 0 - pObj.Headers = append(pObj.Headers[:srcHdrLen], children...) 
- - if err := readChunk(unit, nil, nil, nil); err != nil { - return err - } - - return procHandlers(ctx, EmptyPayloadUnit(pObj), handlers...) -} - -func readChunk(unit ProcUnit, buf []byte, hAcc io.Writer, homoHashAcc *object.Header_HomoHash) (err error) { - var csHdr *object.Header_PayloadChecksum - - if _, v := unit.Head.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); v == nil { - csHdr = new(object.Header_PayloadChecksum) - - unit.Head.Headers = append(unit.Head.Headers, object.Header{Value: csHdr}) - } else { - csHdr = v.Value.(*object.Header_PayloadChecksum) - } - - if _, err = io.ReadFull(unit.Payload, buf); err != nil && err != io.EOF { - if errors.Is(err, io.ErrUnexpectedEOF) { - err = ErrPayloadEOF - } - - return - } else if hAcc != nil { - if _, err = hAcc.Write(buf); err != nil { - return - } - } - - if homoHashAcc != nil { - if homoHashAcc.HomoHash, err = hash.Concat([]hash.Hash{homoHashAcc.HomoHash, hash.Sum(buf)}); err != nil { - return - } - } - - h := sha256.Sum256(buf) - csHdr.PayloadChecksum = h[:] - - return nil -} - -func (s *headSigner) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { - if s.verifier.Verify(ctx, unit.Head) != nil { - if err := signWithToken(ctx, unit.Head); err != nil { - return err - } - } - - return procHandlers(ctx, unit, handlers...) -} - -func signWithToken(ctx context.Context, obj *Object) error { - integrityHdr := new(object.IntegrityHeader) - - if pToken, ok := ctx.Value(PrivateSessionToken).(session.PrivateToken); !ok { - return errNoToken - } else if hdrData, err := verifier.MarshalHeaders(obj, len(obj.Headers)); err != nil { - return err - } else { - cs := sha256.Sum256(hdrData) - integrityHdr.SetHeadersChecksum(cs[:]) - if err = service.AddSignatureWithKey(pToken.PrivateKey(), integrityHdr); err != nil { - return err - } - } - - obj.AddHeader(&object.Header{Value: &object.Header_Integrity{Integrity: integrityHdr}}) - - return nil -} - -func (s *fieldMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) (err error) { - token, ok := ctx.Value(PublicSessionToken).(*service.Token) - if !ok { - return errNoToken - } - - unit.Head.AddHeader(&object.Header{ - Value: &object.Header_Token{ - Token: token, - }, - }) - - if unit.Head.SystemHeader.ID.Empty() { - if unit.Head.SystemHeader.ID, err = refs.NewObjectID(); err != nil { - return - } - } - - if unit.Head.SystemHeader.CreatedAt.UnixTime == 0 { - unit.Head.SystemHeader.CreatedAt.UnixTime = time.Now().Unix() - } - - if unit.Head.SystemHeader.CreatedAt.Epoch == 0 { - unit.Head.SystemHeader.CreatedAt.Epoch = s.epochRecv.Epoch() - } - - if unit.Head.SystemHeader.Version == 0 { - unit.Head.SystemHeader.Version = 1 - } - - return nil -} - -func (s *sgMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error { - sgLinks := unit.Head.Links(object.Link_StorageGroup) - - group, err := unit.Head.StorageGroup() - - if nonEmptyList := len(sgLinks) > 0; (err == nil) != nonEmptyList { - return ErrInvalidSGLinking - } else if err != nil || !group.Empty() { - return nil - } - - sort.Sort(storagegroup.IDList(sgLinks)) - - sgInfo, err := s.sgInfoRecv.GetSGInfo(ctx, unit.Head.SystemHeader.CID, sgLinks) - if err != nil { - return err - } - - unit.Head.SetStorageGroup(sgInfo) - - return nil -} - -func procHandlers(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error { - for i := range handlers { - if err := handlers[i](ctx, unit); err != nil { - return err - } - } - - return nil -} - -func (*emptyReader) 
Read([]byte) (n int, err error) { return 0, io.EOF } - -// EmptyPayloadUnit returns ProcUnit with Object from argument and empty payload reader -// that always returns (0, io.EOF). -func EmptyPayloadUnit(head *Object) ProcUnit { return ProcUnit{Head: head, Payload: new(emptyReader)} } - -func min(a, b uint64) uint64 { - if a < b { - return a - } - - return b -} - -// GetChain builds a list of objects in the hereditary chain. -// In case of impossibility to do this, an error is returned. -func GetChain(srcObjs ...Object) ([]Object, error) { - var ( - err error - first, id ObjectID - res = make([]Object, 0, len(srcObjs)) - m = make(map[ObjectID]*Object, len(srcObjs)) - ) - - // Fill map with all objects - for i := range srcObjs { - m[srcObjs[i].SystemHeader.ID] = &srcObjs[i] - - prev, err := lastLink(&srcObjs[i], object.Link_Previous) - if err == nil && prev.Empty() { // then it is first - id, err = lastLink(&srcObjs[i], object.Link_Next) - if err != nil { - return nil, errors.Wrap(err, "GetChain failed: missing first object next links") - } - - first = srcObjs[i].SystemHeader.ID - } - } - - // Check first presence - if first.Empty() { - return nil, errChainNotFound - } - - res = append(res, *m[first]) - - // Iterate chain - for count := 0; !id.Empty() && count < len(srcObjs); count++ { - nextObj, ok := m[id] - if !ok { - return nil, errors.Errorf("GetChain failed: missing next object %s", id) - } - - id, err = lastLink(nextObj, object.Link_Next) - if err != nil { - return nil, errors.Wrap(err, "GetChain failed: missing object next links") - } - - res = append(res, *nextObj) - } - - // Check last chain element has empty next (prevent cut chain) - id, err = lastLink(&res[len(res)-1], object.Link_Next) - if err != nil { - return nil, errors.Wrap(err, "GetChain failed: missing object next links") - } else if !id.Empty() { - return nil, errCutChain - } - - return res, nil -} - -func deleteTransformer(o *Object, t object.Transform_Type) error { - n, th := o.LastHeader(object.HeaderType(object.TransformHdr)) - if th == nil || th.Value.(*object.Header_Transform).Transform.Type != t { - return errMissingTransformHdr - } - - o.Headers = o.Headers[:n] - - return nil -} - -func lastLink(o *Object, t object.Link_Type) (res ObjectID, err error) { - for i := len(o.Headers) - 1; i >= 0; i-- { - if v, ok := o.Headers[i].Value.(*object.Header_Link); ok { - if v.Link.GetType() == t { - res = v.Link.ID - return - } - } - } - - err = errors.Errorf("object.lastLink: links of type %s not found", t) - - return -} diff --git a/pkg/services/object_manager/transport/.gitkeep b/pkg/services/object_manager/transport/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/services/object_manager/transport/object.go b/pkg/services/object_manager/transport/object.go deleted file mode 100644 index 0965265e1..000000000 --- a/pkg/services/object_manager/transport/object.go +++ /dev/null @@ -1,107 +0,0 @@ -package transport - -import ( - "context" - "io" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" -) - -type ( - // ObjectTransport is an interface of the executor of object remote operations. - ObjectTransport interface { - Transport(context.Context, ObjectTransportParams) - } - - // ObjectTransportParams groups the parameters of remote object operation. 
- ObjectTransportParams struct { - TransportInfo MetaInfo - TargetNode multiaddr.Multiaddr - ResultHandler ResultHandler - } - - // ResultHandler is an interface of remote object operation's result handler. - ResultHandler interface { - HandleResult(context.Context, multiaddr.Multiaddr, interface{}, error) - } - - // MetaInfo is an interface of the container of cross-operation values. - MetaInfo interface { - GetTTL() uint32 - GetTimeout() time.Duration - service.SessionTokenSource - GetRaw() bool - Type() object.RequestType - service.BearerTokenSource - service.ExtendedHeadersSource - } - - // SearchInfo is an interface of the container of object Search operation parameters. - SearchInfo interface { - MetaInfo - GetCID() refs.CID - GetQuery() []byte - } - - // PutInfo is an interface of the container of object Put operation parameters. - PutInfo interface { - MetaInfo - GetHead() *object.Object - Payload() io.Reader - CopiesNumber() uint32 - } - - // AddressInfo is an interface of the container of object request by Address. - AddressInfo interface { - MetaInfo - GetAddress() refs.Address - } - - // GetInfo is an interface of the container of object Get operation parameters. - GetInfo interface { - AddressInfo - } - - // HeadInfo is an interface of the container of object Head operation parameters. - HeadInfo interface { - GetInfo - GetFullHeaders() bool - } - - // RangeInfo is an interface of the container of object GetRange operation parameters. - RangeInfo interface { - AddressInfo - GetRange() object.Range - } - - // RangeHashInfo is an interface of the container of object GetRangeHash operation parameters. - RangeHashInfo interface { - AddressInfo - GetRanges() []object.Range - GetSalt() []byte - } -) - -const ( - // KeyID is a filter key to object ID field. - KeyID = "ID" - - // KeyTombstone is a filter key to tombstone header. - KeyTombstone = "TOMBSTONE" - - // KeyStorageGroup is a filter key to storage group link. - KeyStorageGroup = "STORAGE_GROUP" - - // KeyNoChildren is a filter key to objects w/o child links. - KeyNoChildren = "LEAF" - - // KeyParent is a filter key to parent link. - KeyParent = "PARENT" - - // KeyHasParent is a filter key to objects with parent link. - KeyHasParent = "HAS_PAR" -) diff --git a/pkg/services/object_manager/transport/storagegroup/sg.go b/pkg/services/object_manager/transport/storagegroup/sg.go deleted file mode 100644 index 5d3949060..000000000 --- a/pkg/services/object_manager/transport/storagegroup/sg.go +++ /dev/null @@ -1,138 +0,0 @@ -package storagegroup - -import ( - "context" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transport" - "github.com/nspcc-dev/neofs-node/pkg/util/logger" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -type ( - // StorageGroupInfoReceiverParams groups the parameters of - // storage group information receiver. 
- StorageGroupInfoReceiverParams struct { - SelectiveContainerExecutor transport.SelectiveContainerExecutor - Logger *zap.Logger - } - - sgInfoRecv struct { - executor transport.SelectiveContainerExecutor - log *zap.Logger - } -) - -const locationFinderInstanceFailMsg = "could not create object location finder" - -// ErrIncompleteSGInfo is returned by storage group information receiver -// that could not receive full information. -var ErrIncompleteSGInfo = errors.New("could not receive full storage group info") - -// PublicSessionToken is a context key for SessionToken. -// FIXME: temp solution for cycle import fix. -// Unify with same const from transformer pkg. -const PublicSessionToken = "public token" - -// BearerToken is a context key for BearerToken. -const BearerToken = "bearer token" - -// ExtendedHeaders is a context key for X-headers. -const ExtendedHeaders = "extended headers" - -func (s *sgInfoRecv) GetSGInfo(ctx context.Context, cid container.ID, group []object.ID) (*storagegroup.StorageGroup, error) { - var ( - err error - res = new(storagegroup.StorageGroup) - hashList = make([]hash.Hash, 0, len(group)) - ) - - m := make(map[string]struct{}, len(group)) - for i := range group { - m[group[i].String()] = struct{}{} - } - - // FIXME: hardcoded for simplicity. - // Function is called in next cases: - // - SG transformation on trusted node side (only in this case session token is needed); - // - SG info check on container nodes (token is not needed since system group has extra access); - // - data audit on inner ring nodes (same as previous). - var token service.SessionToken - if v, ok := ctx.Value(PublicSessionToken).(service.SessionToken); ok { - token = v - } - - var bearer service.BearerToken - if v, ok := ctx.Value(BearerToken).(service.BearerToken); ok { - bearer = v - } - - var extHdrs []service.ExtendedHeader - if v, ok := ctx.Value(ExtendedHeaders).([]service.ExtendedHeader); ok { - extHdrs = v - } - - if err = s.executor.Head(ctx, &transport.HeadParams{ - GetParams: transport.GetParams{ - SelectiveParams: transport.SelectiveParams{ - CID: cid, - TTL: service.SingleForwardingTTL, - IDList: group, - Breaker: func(addr refs.Address) (cFlag transport.ProgressControlFlag) { - if len(m) == 0 { - cFlag = transport.BreakProgress - } else if _, ok := m[addr.ObjectID.String()]; !ok { - cFlag = transport.NextAddress - } - return - }, - Token: token, - - Bearer: bearer, - - ExtendedHeaders: extHdrs, - }, - Handler: func(_ multiaddr.Multiaddr, obj *object.Object) { - _, hashHeader := obj.LastHeader(object.HeaderType(object.HomoHashHdr)) - if hashHeader == nil { - return - } - - hashList = append(hashList, hashHeader.Value.(*object.Header_HomoHash).HomoHash) - res.ValidationDataSize += obj.SystemHeader.PayloadLength - delete(m, obj.SystemHeader.ID.String()) - }, - }, - FullHeaders: true, - }); err != nil { - return nil, err - } else if len(m) > 0 { - return nil, ErrIncompleteSGInfo - } - - res.ValidationHash, err = hash.Concat(hashList) - - return res, err -} - -// NewStorageGroupInfoReceiver constructs storagegroup.InfoReceiver from SelectiveContainerExecutor. 
-func NewStorageGroupInfoReceiver(p StorageGroupInfoReceiverParams) (storagegroup.InfoReceiver, error) { - switch { - case p.Logger == nil: - return nil, errors.Wrap(logger.ErrNilLogger, locationFinderInstanceFailMsg) - case p.SelectiveContainerExecutor == nil: - return nil, errors.Wrap(errors.New("empty container handler"), locationFinderInstanceFailMsg) - } - - return &sgInfoRecv{ - executor: p.SelectiveContainerExecutor, - log: p.Logger, - }, nil -} diff --git a/pkg/services/object_manager/transport/transport.go b/pkg/services/object_manager/transport/transport.go deleted file mode 100644 index 63ccce1f7..000000000 --- a/pkg/services/object_manager/transport/transport.go +++ /dev/null @@ -1,658 +0,0 @@ -package transport - -import ( - "context" - "io" - "sync" - "time" - - "github.com/multiformats/go-multiaddr" - "github.com/nspcc-dev/neofs-api-go/hash" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-node/pkg/core/container" - "github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement" - "github.com/pkg/errors" - "go.uber.org/zap" -) - -/* - File source code includes implementation of unified objects container handler. - Implementation provides the opportunity to perform any logic over object container distributed in network. - Implementation holds placement and object transport implementations in a black box. - Any special logic could be tuned through passing handle parameters. - NOTE: Although the implementation of the other interfaces via OCH is the same, they are still separated in order to avoid mess. -*/ - -type ( - // SelectiveContainerExecutor is an interface the tool that performs - // object operations in container with preconditions. - SelectiveContainerExecutor interface { - Put(context.Context, *PutParams) error - Get(context.Context, *GetParams) error - Head(context.Context, *HeadParams) error - Search(context.Context, *SearchParams) error - RangeHash(context.Context, *RangeHashParams) error - } - - // PutParams groups the parameters - // of selective object Put. - PutParams struct { - SelectiveParams - Object *object.Object - Handler func(multiaddr.Multiaddr, bool) - - CopiesNumber uint32 - } - - // GetParams groups the parameters - // of selective object Get. - GetParams struct { - SelectiveParams - Handler func(multiaddr.Multiaddr, *object.Object) - } - - // HeadParams groups the parameters - // of selective object Head. - HeadParams struct { - GetParams - FullHeaders bool - } - - // SearchParams groups the parameters - // of selective object Search. - SearchParams struct { - SelectiveParams - SearchCID refs.CID - SearchQuery []byte - Handler func(multiaddr.Multiaddr, []refs.Address) - } - - // RangeHashParams groups the parameters - // of selective object GetRangeHash. - RangeHashParams struct { - SelectiveParams - Ranges []object.Range - Salt []byte - Handler func(multiaddr.Multiaddr, []hash.Hash) - } - - // SelectiveParams groups the parameters of - // the execution of selective container operation. - SelectiveParams struct { - /* Should be set to true only if service under object transport implementations is served on localhost. */ - ServeLocal bool - - /* Raw option of the request */ - Raw bool - - /* TTL for object transport. All transport operations inherit same value. */ - TTL uint32 - - /* Required ID of processing container. If empty or not set, an error is returned. */ - CID container.ID - - /* List of nodes selected for processing. 
If not specified => nodes will be selected during. */ - Nodes []multiaddr.Multiaddr - - /* - Next two parameters provide the opportunity to process selective objects in container. - At least on of non-empty IDList or Query is required, an error is returned otherwise. - */ - - /* List of objects to process (overlaps query). */ - IDList []refs.ObjectID - /* If no objects is indicated, query is used for selection. */ - Query []byte - - /* - If function provided, it is called after every successful operation. - True result breaks operation performing. - */ - Breaker func(refs.Address) ProgressControlFlag - - /* Public session token */ - Token service.SessionToken - - /* Bearer token */ - Bearer service.BearerToken - - /* Extended headers */ - ExtendedHeaders []service.ExtendedHeader - } - - // ProgressControlFlag is an enumeration of progress control flags. - ProgressControlFlag int - - // ObjectContainerHandlerParams grops the parameters of SelectiveContainerExecutor constructor. - ObjectContainerHandlerParams struct { - NodeLister *placement.PlacementWrapper - Executor ContainerTraverseExecutor - *zap.Logger - } - - simpleTraverser struct { - *sync.Once - list []multiaddr.Multiaddr - } - - selectiveCnrExec struct { - cnl *placement.PlacementWrapper - Executor ContainerTraverseExecutor - log *zap.Logger - } - - metaInfo struct { - ttl uint32 - raw bool - rt object.RequestType - - token service.SessionToken - - bearer service.BearerToken - - extHdrs []service.ExtendedHeader - } - - putInfo struct { - metaInfo - obj *object.Object - cn uint32 - } - - getInfo struct { - metaInfo - addr object.Address - raw bool - } - - headInfo struct { - getInfo - fullHdr bool - } - - searchInfo struct { - metaInfo - cid container.ID - query []byte - } - - rangeHashInfo struct { - metaInfo - addr object.Address - ranges []object.Range - salt []byte - } - - execItems struct { - params SelectiveParams - metaConstructor func(addr object.Address) MetaInfo - handler ResultHandler - } - - searchTarget struct { - list []refs.Address - } - - // ContainerTraverseExecutor is an interface of - // object operation executor with container traversing. - ContainerTraverseExecutor interface { - Execute(context.Context, TraverseParams) - } - - // TraverseParams groups the parameters of container traversing. - TraverseParams struct { - TransportInfo MetaInfo - Handler ResultHandler - Traverser Traverser - WorkerPool WorkerPool - ExecutionInterceptor func(context.Context, multiaddr.Multiaddr) bool - } - - // WorkerPool is an interface of go-routine pool - WorkerPool interface { - Submit(func()) error - } - - // Traverser is an interface of container traverser. - Traverser interface { - Next(context.Context) []multiaddr.Multiaddr - } - - cnrTraverseExec struct { - transport ObjectTransport - } - - singleRoutinePool struct{} - - emptyReader struct{} -) - -const ( - _ ProgressControlFlag = iota - - // NextAddress is a ProgressControlFlag of to go to the next address of the object. - NextAddress - - // NextNode is a ProgressControlFlag of to go to the next node. - NextNode - - // BreakProgress is a ProgressControlFlag to interrupt the execution. 
- BreakProgress -) - -const ( - instanceFailMsg = "could not create container objects collector" -) - -var ( - errNilObjectTransport = errors.New("object transport is nil") - errEmptyLogger = errors.New("empty logger") - errEmptyNodeLister = errors.New("empty container node lister") - errEmptyTraverseExecutor = errors.New("empty container traverse executor") - errSelectiveParams = errors.New("neither ID list nor query provided") -) - -func (s *selectiveCnrExec) Put(ctx context.Context, p *PutParams) error { - meta := &putInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestPut, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - obj: p.Object, - cn: p.CopiesNumber, - } - - return s.exec(ctx, &execItems{ - params: p.SelectiveParams, - metaConstructor: func(object.Address) MetaInfo { return meta }, - handler: p, - }) -} - -func (s *selectiveCnrExec) Get(ctx context.Context, p *GetParams) error { - return s.exec(ctx, &execItems{ - params: p.SelectiveParams, - metaConstructor: func(addr object.Address) MetaInfo { - return &getInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestGet, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - addr: addr, - raw: p.Raw, - } - }, - handler: p, - }) -} - -func (s *selectiveCnrExec) Head(ctx context.Context, p *HeadParams) error { - return s.exec(ctx, &execItems{ - params: p.SelectiveParams, - metaConstructor: func(addr object.Address) MetaInfo { - return &headInfo{ - getInfo: getInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestHead, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - addr: addr, - raw: p.Raw, - }, - fullHdr: p.FullHeaders, - } - }, - handler: p, - }) -} - -func (s *selectiveCnrExec) Search(ctx context.Context, p *SearchParams) error { - return s.exec(ctx, &execItems{ - params: p.SelectiveParams, - metaConstructor: func(object.Address) MetaInfo { - return &searchInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestSearch, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - cid: p.SearchCID, - query: p.SearchQuery, - } - }, - handler: p, - }) -} - -func (s *selectiveCnrExec) RangeHash(ctx context.Context, p *RangeHashParams) error { - return s.exec(ctx, &execItems{ - params: p.SelectiveParams, - metaConstructor: func(addr object.Address) MetaInfo { - return &rangeHashInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestRangeHash, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - addr: addr, - ranges: p.Ranges, - salt: p.Salt, - } - }, - handler: p, - }) -} - -func (s *selectiveCnrExec) exec(ctx context.Context, p *execItems) error { - if err := p.params.validate(); err != nil { - return err - } - - nodes, err := s.prepareNodes(ctx, &p.params) - if err != nil { - return err - } - -loop: - for i := range nodes { - addrList := s.prepareAddrList(ctx, &p.params, nodes[i]) - if len(addrList) == 0 { - continue - } - - for j := range addrList { - if p.params.Breaker != nil { - switch cFlag := p.params.Breaker(addrList[j]); cFlag { - case NextAddress: - continue - case NextNode: - continue loop - case BreakProgress: - break loop - } - } - - s.Executor.Execute(ctx, TraverseParams{ - TransportInfo: p.metaConstructor(addrList[j]), - Handler: p.handler, - Traverser: newSimpleTraverser(nodes[i]), - }) - } - } - - return nil -} - -func (s *SelectiveParams) validate() error { - switch { - case 
len(s.IDList) == 0 && len(s.Query) == 0: - return errSelectiveParams - default: - return nil - } -} - -func (s *selectiveCnrExec) prepareNodes(ctx context.Context, p *SelectiveParams) ([]multiaddr.Multiaddr, error) { - if len(p.Nodes) > 0 { - return p.Nodes, nil - } - - // If node serves Object transport service on localhost => pass single empty node - if p.ServeLocal { - // all transport implementations will use localhost by default - return []multiaddr.Multiaddr{nil}, nil - } - - // Otherwise use container nodes - return s.cnl.ContainerNodes(ctx, p.CID) -} - -func (s *selectiveCnrExec) prepareAddrList(ctx context.Context, p *SelectiveParams, node multiaddr.Multiaddr) []refs.Address { - var ( - addrList []object.Address - l = len(p.IDList) - ) - - if l > 0 { - addrList = make([]object.Address, 0, l) - for i := range p.IDList { - addrList = append(addrList, object.Address{CID: p.CID, ObjectID: p.IDList[i]}) - } - - return addrList - } - - handler := new(searchTarget) - - s.Executor.Execute(ctx, TraverseParams{ - TransportInfo: &searchInfo{ - metaInfo: metaInfo{ - ttl: p.TTL, - rt: object.RequestSearch, - raw: p.Raw, - - token: p.Token, - - bearer: p.Bearer, - - extHdrs: p.ExtendedHeaders, - }, - cid: p.CID, - query: p.Query, - }, - Handler: handler, - Traverser: newSimpleTraverser(node), - }) - - return handler.list -} - -func newSimpleTraverser(list ...multiaddr.Multiaddr) Traverser { - return &simpleTraverser{ - Once: new(sync.Once), - list: list, - } -} - -func (s *simpleTraverser) Next(context.Context) (res []multiaddr.Multiaddr) { - s.Do(func() { - res = s.list - }) - - return -} - -func (s metaInfo) GetTTL() uint32 { return s.ttl } - -func (s metaInfo) GetTimeout() time.Duration { return 0 } - -func (s metaInfo) GetRaw() bool { return s.raw } - -func (s metaInfo) Type() object.RequestType { return s.rt } - -func (s metaInfo) GetSessionToken() service.SessionToken { return s.token } - -func (s metaInfo) GetBearerToken() service.BearerToken { return s.bearer } - -func (s metaInfo) ExtendedHeaders() []service.ExtendedHeader { return s.extHdrs } - -func (s *putInfo) GetHead() *object.Object { return s.obj } - -func (s *putInfo) Payload() io.Reader { return new(emptyReader) } - -func (*emptyReader) Read(p []byte) (int, error) { return 0, io.EOF } - -func (s *putInfo) CopiesNumber() uint32 { - return s.cn -} - -func (s *getInfo) GetAddress() refs.Address { return s.addr } - -func (s *getInfo) Raw() bool { return s.raw } - -func (s *headInfo) GetFullHeaders() bool { return s.fullHdr } - -func (s *searchInfo) GetCID() refs.CID { return s.cid } - -func (s *searchInfo) GetQuery() []byte { return s.query } - -func (s *rangeHashInfo) GetAddress() refs.Address { return s.addr } - -func (s *rangeHashInfo) GetRanges() []object.Range { return s.ranges } - -func (s *rangeHashInfo) GetSalt() []byte { return s.salt } - -func (s *searchTarget) HandleResult(_ context.Context, _ multiaddr.Multiaddr, r interface{}, e error) { - if e == nil { - s.list = append(s.list, r.([]refs.Address)...) - } -} - -// HandleResult calls Handler with: -// - Multiaddr with argument value; -// - error equality to nil. -func (s *PutParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, _ interface{}, e error) { - s.Handler(node, e == nil) -} - -// HandleResult calls Handler if error argument is nil with: -// - Multiaddr with argument value; -// - result casted to an Object pointer. 
-func (s *GetParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { - if e == nil { - s.Handler(node, r.(*object.Object)) - } -} - -// HandleResult calls Handler if error argument is nil with: -// - Multiaddr with argument value; -// - result casted to Address slice. -func (s *SearchParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { - if e == nil { - s.Handler(node, r.([]refs.Address)) - } -} - -// HandleResult calls Handler if error argument is nil with: -// - Multiaddr with argument value; -// - result casted to Hash slice. -func (s *RangeHashParams) HandleResult(_ context.Context, node multiaddr.Multiaddr, r interface{}, e error) { - if e == nil { - s.Handler(node, r.([]hash.Hash)) - } -} - -func (s *cnrTraverseExec) Execute(ctx context.Context, p TraverseParams) { - if p.WorkerPool == nil { - p.WorkerPool = new(singleRoutinePool) - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - wg := new(sync.WaitGroup) - - for { - select { - case <-ctx.Done(): - return - default: - } - - nodes := p.Traverser.Next(ctx) - if len(nodes) == 0 { - break - } - - for i := range nodes { - node := nodes[i] - - wg.Add(1) - - if err := p.WorkerPool.Submit(func() { - defer wg.Done() - - if p.ExecutionInterceptor != nil && p.ExecutionInterceptor(ctx, node) { - return - } - - s.transport.Transport(ctx, ObjectTransportParams{ - TransportInfo: p.TransportInfo, - TargetNode: node, - ResultHandler: p.Handler, - }) - }); err != nil { - wg.Done() - } - } - - wg.Wait() - } -} - -func (*singleRoutinePool) Submit(fn func()) error { - fn() - return nil -} - -// NewObjectContainerHandler is a SelectiveContainerExecutor constructor. -func NewObjectContainerHandler(p ObjectContainerHandlerParams) (SelectiveContainerExecutor, error) { - switch { - case p.Executor == nil: - return nil, errors.Wrap(errEmptyTraverseExecutor, instanceFailMsg) - case p.Logger == nil: - return nil, errors.Wrap(errEmptyLogger, instanceFailMsg) - case p.NodeLister == nil: - return nil, errors.Wrap(errEmptyNodeLister, instanceFailMsg) - } - - return &selectiveCnrExec{ - cnl: p.NodeLister, - Executor: p.Executor, - log: p.Logger, - }, nil -} - -// NewContainerTraverseExecutor is a ContainerTraverseExecutor executor. -func NewContainerTraverseExecutor(t ObjectTransport) (ContainerTraverseExecutor, error) { - if t == nil { - return nil, errNilObjectTransport - } - - return &cnrTraverseExec{transport: t}, nil -} diff --git a/pkg/util/grace/grace.go b/pkg/util/grace/grace.go index 8f48d3f07..100e124d0 100644 --- a/pkg/util/grace/grace.go +++ b/pkg/util/grace/grace.go @@ -2,6 +2,7 @@ package grace import ( "context" + "fmt" "os" "os/signal" "syscall" @@ -18,8 +19,12 @@ func NewGracefulContext(l *zap.Logger) context.Context { ch := make(chan os.Signal, 1) signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP) sig := <-ch - l.Info("received signal", - zap.String("signal", sig.String())) + if l != nil { + l.Info("received signal", + zap.String("signal", sig.String())) + } else { + fmt.Printf("received signal %s\n", sig) + } cancel() }()
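Note on the grace.go hunk above: NewGracefulContext now tolerates a nil *zap.Logger, falling back to fmt.Printf when the signal arrives instead of dereferencing the nil logger. A minimal usage sketch of the patched behaviour follows; the import path mirrors the repository layout shown in the diff headers, and zap.NewExample is used purely for illustration, not taken from this patch.

package main

import (
	"github.com/nspcc-dev/neofs-node/pkg/util/grace"
	"go.uber.org/zap"
)

func main() {
	// With a logger, signal arrival is reported through zap before the context is cancelled.
	ctx := grace.NewGracefulContext(zap.NewExample())

	// With a nil logger the goroutine now prints via fmt.Printf instead of panicking:
	// ctx := grace.NewGracefulContext(nil)

	// Block until SIGINT/SIGTERM/SIGHUP is delivered and cancel() fires.
	<-ctx.Done()
}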