[#210] policer: Refactor nodes processing

Resolve the funlen linter warning for the processNodes method.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov 2023-04-04 15:29:12 +03:00
parent 080be5cfcd
commit d6486d172e
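
For context: funlen is a golangci-lint check that flags functions exceeding a configured length budget. This change removes the `// nolint: funlen` suppression by genuinely shrinking processNodes: the handleMaintenance closure and the trailing result handling are extracted into methods. The mechanical detail worth noting is that the old closure mutated shortage and uncheckedCopies, which it captured from the enclosing scope; an extracted method cannot capture those locals, so it takes them as parameters and returns the updated values. A minimal standalone sketch of that pattern (illustrative names, not the frostfs-node code):

package main

import "fmt"

// Closure version: shortage and unchecked are captured from the
// enclosing scope, so the closure can mutate them in place.
func closureStyle() (uint32, int) {
	shortage, unchecked := uint32(3), 0
	handle := func() {
		shortage--  // count the node as a replica holder
		unchecked++ // but remember the copy was not verified
	}
	handle()
	return shortage, unchecked
}

// Extracted version: a standalone function cannot capture the caller's
// locals, so the state travels through parameters and return values,
// exactly as in the new p.handleMaintenance signature in the diff below.
func handle(shortage uint32, unchecked int) (uint32, int) {
	shortage--
	unchecked++
	return shortage, unchecked
}

func main() {
	s, u := closureStyle()
	fmt.Println(s, u) // 2 1

	s, u = handle(uint32(3), 0)
	fmt.Println(s, u) // 2 1
}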


@@ -13,6 +13,7 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
@@ -142,7 +143,6 @@ type placementRequirements struct {
 	removeLocalCopy bool
 }
 
-// nolint: funlen
 func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
 	nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
 	addr := addrWithType.Address
@@ -152,21 +152,6 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 	// Number of copies that are stored on maintenance nodes.
 	var uncheckedCopies int
 
-	handleMaintenance := func(node netmap.NodeInfo) {
-		// consider remote nodes under maintenance as problem OK. Such
-		// nodes MAY not respond with object, however, this is how we
-		// prevent spam with new replicas.
-		// However, additional copies should not be removed in this case,
-		// because we can remove the only copy this way.
-		checkedNodes.submitReplicaHolder(node)
-		shortage--
-		uncheckedCopies++
-
-		p.log.Debug("consider node under maintenance as OK",
-			zap.String("node", netmap.StringifyPublicKey(node)),
-		)
-	}
-
 	if typ == object.TypeLock {
 		// all nodes of a container must store the `LOCK` objects
 		// for correct object removal protection:
@@ -187,7 +172,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 
 			shortage--
 		} else if nodes[i].IsMaintenance() {
-			handleMaintenance(nodes[i])
+			shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 		} else {
 			if status := checkedNodes.processStatus(nodes[i]); status >= 0 {
 				if status == 0 {
@@ -212,7 +197,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 			}
 
 			if isClientErrMaintenance(err) {
-				handleMaintenance(nodes[i])
+				shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 			} else if err != nil {
 				p.log.Error("receive object header to check policy compliance",
 					zap.Stringer("object", addr),
@@ -228,6 +213,29 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 		i--
 	}
 
+	p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+}
+
+// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values.
+//
+// consider remote nodes under maintenance as problem OK. Such
+// nodes MAY not respond with object, however, this is how we
+// prevent spam with new replicas.
+// However, additional copies should not be removed in this case,
+// because we can remove the only copy this way.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+	checkedNodes.submitReplicaHolder(node)
+	shortage--
+	uncheckedCopies++
+
+	p.log.Debug("consider node under maintenance as OK",
+		zap.String("node", netmap.StringifyPublicKey(node)),
+	)
+	return shortage, uncheckedCopies
+}
+
+func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
+	nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
 	if shortage > 0 {
 		p.log.Debug("shortage of object copies detected",
 			zap.Stringer("object", addr),
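
The diff view is truncated here, but the call site above fixes the shape of handleProcessNodesResult: it receives the final shortage and uncheckedCopies and decides how to act on the local copy. Going only by the comments preserved in this diff (copies counted on maintenance nodes are unverified, so they must not justify deleting what could be the last reachable replica), the decision roughly looks like the sketch below; the branch results are hypothetical stand-ins, not the actual frostfs-node implementation:

// Hedged sketch of the post-processing decision; the returned action
// names are placeholders, not real frostfs-node APIs.
func decide(shortage uint32, uncheckedCopies int, removeLocalCopy bool) string {
	switch {
	case shortage > 0:
		// Too few confirmed replicas: more must be created
		// ("shortage of object copies detected" is logged on this path).
		return "schedule replication"
	case uncheckedCopies > 0:
		// Placement is satisfied only if maintenance nodes are counted;
		// removing a copy now could drop the last verified one.
		return "keep local copy"
	case removeLocalCopy:
		// Enough verified replicas elsewhere: the local copy is redundant.
		return "remove redundant local copy"
	default:
		return "nothing to do"
	}
}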