[#1335] policer: Prevent potential object loss

In the previous implementation, `Policer` considered the local object copy
redundant while processing a single placement vector.

Make `Policer` call the redundant copy callback only after the full placement
has been processed. Also fix 404 error parsing.

Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>
This commit is contained in:
Leonard Lyubich 2022-05-20 14:31:55 +03:00 committed by LeL
parent 9f62d25b50
commit f8ac4632f8
2 changed files with 40 additions and 31 deletions

View file

@ -14,6 +14,7 @@ Changelog for NeoFS Node
- Basic income transfer's incorrect log message (#1374) - Basic income transfer's incorrect log message (#1374)
- Listen to subnet removal events in notary-enabled env (#1224) - Listen to subnet removal events in notary-enabled env (#1224)
- Update/remove nodes whose subnet has been removed (#1162) - Update/remove nodes whose subnet has been removed (#1162)
- Potential removal of local object when policy isn't complied (#1335)
## [0.28.1] - 2022-05-05 ## [0.28.1] - 2022-05-05

View file

@ -2,12 +2,12 @@ package policer
import ( import (
"context" "context"
"strings"
"github.com/nspcc-dev/neofs-node/pkg/core/container" "github.com/nspcc-dev/neofs-node/pkg/core/container"
"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/engine"
headsvc "github.com/nspcc-dev/neofs-node/pkg/services/object/head" headsvc "github.com/nspcc-dev/neofs-node/pkg/services/object/head"
"github.com/nspcc-dev/neofs-node/pkg/services/replicator" "github.com/nspcc-dev/neofs-node/pkg/services/replicator"
"github.com/nspcc-dev/neofs-sdk-go/client"
"github.com/nspcc-dev/neofs-sdk-go/netmap" "github.com/nspcc-dev/neofs-sdk-go/netmap"
addressSDK "github.com/nspcc-dev/neofs-sdk-go/object/address" addressSDK "github.com/nspcc-dev/neofs-sdk-go/object/address"
"go.uber.org/zap" "go.uber.org/zap"
@ -50,6 +50,9 @@ func (p *Policer) processObject(ctx context.Context, addr *addressSDK.Address) {
} }
replicas := policy.Replicas() replicas := policy.Replicas()
c := &processPlacementContext{
Context: ctx,
}
for i := range nn { for i := range nn {
select { select {
@ -58,19 +61,32 @@ func (p *Policer) processObject(ctx context.Context, addr *addressSDK.Address) {
default: default:
} }
p.processNodes(ctx, addr, nn[i], replicas[i].Count()) p.processNodes(c, addr, nn[i], replicas[i].Count())
}
if !c.needLocalCopy {
p.log.Info("redundant local object copy detected",
zap.Stringer("object", addr),
)
p.cbRedundantCopy(addr)
} }
} }
func (p *Policer) processNodes(ctx context.Context, addr *addressSDK.Address, nodes netmap.Nodes, shortage uint32) { type processPlacementContext struct {
context.Context
needLocalCopy bool
}
func (p *Policer) processNodes(ctx *processPlacementContext, addr *addressSDK.Address, nodes netmap.Nodes, shortage uint32) {
log := p.log.With( log := p.log.With(
zap.Stringer("object", addr), zap.Stringer("object", addr),
) )
prm := new(headsvc.RemoteHeadPrm).WithObjectAddress(addr) prm := new(headsvc.RemoteHeadPrm).WithObjectAddress(addr)
redundantLocalCopy := false
for i := 0; i < len(nodes); i++ { for i := 0; shortage > 0 && i < len(nodes); i++ {
select { select {
case <-ctx.Done(): case <-ctx.Done():
return return
@ -78,35 +94,29 @@ func (p *Policer) processNodes(ctx context.Context, addr *addressSDK.Address, no
} }
if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) { if p.netmapKeys.IsLocalKey(nodes[i].PublicKey()) {
if shortage == 0 { ctx.needLocalCopy = true
// we can call the redundant copy callback
// here to slightly improve the performance
// instead of readability.
redundantLocalCopy = true
break
} else {
shortage--
}
} else if shortage > 0 {
callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
_, err := p.remoteHeader.Head(callCtx, prm.WithNodeInfo(nodes[i].NodeInfo)) shortage--
cancel() continue
}
callCtx, cancel := context.WithTimeout(ctx, p.headTimeout)
_, err := p.remoteHeader.Head(callCtx, prm.WithNodeInfo(nodes[i].NodeInfo))
cancel()
if !client.IsErrObjectNotFound(err) {
if err != nil { if err != nil {
if strings.Contains(err.Error(), headsvc.ErrNotFound.Error()) { log.Error("receive object header to check policy compliance",
continue zap.String("error", err.Error()),
} else { )
log.Debug("could not receive object header",
zap.String("error", err.Error()),
)
continue
}
} else { } else {
shortage-- shortage--
} }
continue
} }
nodes = append(nodes[:i], nodes[i+1:]...) nodes = append(nodes[:i], nodes[i+1:]...)
@ -118,14 +128,12 @@ func (p *Policer) processNodes(ctx context.Context, addr *addressSDK.Address, no
zap.Uint32("shortage", shortage), zap.Uint32("shortage", shortage),
) )
// TODO(@cthulhu-rider): replace to processObject in order to prevent repetitions
// in nodes for single object, see #1410
p.replicator.HandleTask(ctx, new(replicator.Task). p.replicator.HandleTask(ctx, new(replicator.Task).
WithObjectAddress(addr). WithObjectAddress(addr).
WithNodes(nodes). WithNodes(nodes).
WithCopiesNumber(shortage), WithCopiesNumber(shortage),
) )
} else if redundantLocalCopy {
log.Info("redundant local object copy detected")
p.cbRedundantCopy(addr)
} }
} }