[#210] policer: Refactor nodes processing
Resolve funlen linter for processNodes method.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent 080be5cfcd
commit d6486d172e

1 changed file with 26 additions and 18 deletions
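funlen is a golangci-lint check that flags overly long functions. The previous code silenced it for processNodes with a `// nolint: funlen` directive; this change drops the suppression by extracting the maintenance handling and the end-of-loop result handling into separate methods. The core move is converting a closure that mutated captured variables (shortage, uncheckedCopies) into a method that takes those counters as parameters and returns the updated values. Below is a minimal standalone sketch of that pattern; Node and Policer here are simplified stand-ins, not the real frostfs-node Policer, nodeCache, or netmap.NodeInfo, and the replica-holder bookkeeping is omitted.

package main

import "fmt"

// Node is a stand-in for netmap.NodeInfo from the real code.
type Node struct{ key string }

// Policer is a stand-in for the real Policer type.
type Policer struct{}

// handleMaintenance mirrors the extracted helper's shape: instead of a
// closure mutating captured variables, the counters travel through the
// parameter list and come back as return values.
func (p *Policer) handleMaintenance(node Node, shortage uint32, uncheckedCopies int) (uint32, int) {
	shortage--
	uncheckedCopies++
	fmt.Printf("consider node under maintenance as OK: %s\n", node.key)
	return shortage, uncheckedCopies
}

func main() {
	p := &Policer{}
	shortage, unchecked := uint32(3), 0
	// The call site matches the new shape introduced by the diff:
	// shortage, uncheckedCopies = p.handleMaintenance(...)
	shortage, unchecked = p.handleMaintenance(Node{key: "node-1"}, shortage, unchecked)
	fmt.Println(shortage, unchecked) // prints: 2 1
}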
@@ -13,6 +13,7 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
 
@@ -142,7 +143,6 @@ type placementRequirements struct {
 	removeLocalCopy bool
 }
 
-// nolint: funlen
 func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
 	nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
 	addr := addrWithType.Address
@@ -152,21 +152,6 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 	// Number of copies that are stored on maintenance nodes.
 	var uncheckedCopies int
 
-	handleMaintenance := func(node netmap.NodeInfo) {
-		// consider remote nodes under maintenance as problem OK. Such
-		// nodes MAY not respond with object, however, this is how we
-		// prevent spam with new replicas.
-		// However, additional copies should not be removed in this case,
-		// because we can remove the only copy this way.
-		checkedNodes.submitReplicaHolder(node)
-		shortage--
-		uncheckedCopies++
-
-		p.log.Debug("consider node under maintenance as OK",
-			zap.String("node", netmap.StringifyPublicKey(node)),
-		)
-	}
-
 	if typ == object.TypeLock {
 		// all nodes of a container must store the `LOCK` objects
 		// for correct object removal protection:
@@ -187,7 +172,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 
 			shortage--
 		} else if nodes[i].IsMaintenance() {
-			handleMaintenance(nodes[i])
+			shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 		} else {
 			if status := checkedNodes.processStatus(nodes[i]); status >= 0 {
 				if status == 0 {
@@ -212,7 +197,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 			}
 
 			if isClientErrMaintenance(err) {
-				handleMaintenance(nodes[i])
+				shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 			} else if err != nil {
 				p.log.Error("receive object header to check policy compliance",
 					zap.Stringer("object", addr),
@@ -228,6 +213,29 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 		i--
 	}
 
+	p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+}
+
+// handleMaintenance handles node in maintenance mode and returns new shortage and uncheckedCopies values
+//
+// consider remote nodes under maintenance as problem OK. Such
+// nodes MAY not respond with object, however, this is how we
+// prevent spam with new replicas.
+// However, additional copies should not be removed in this case,
+// because we can remove the only copy this way.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+	checkedNodes.submitReplicaHolder(node)
+	shortage--
+	uncheckedCopies++
+
+	p.log.Debug("consider node under maintenance as OK",
+		zap.String("node", netmap.StringifyPublicKey(node)),
+	)
+	return shortage, uncheckedCopies
+}
+
+func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
+	nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
 	if shortage > 0 {
 		p.log.Debug("shortage of object copies detected",
 			zap.Stringer("object", addr),
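Returning the updated counters rather than mutating captured state is what lets the helper move out of processNodes without changing behavior: the two call sites simply reassign the results. A small test against the stand-in sketch above (same simplified types, not the real frostfs-node code) pins down that contract:

package main

import "testing"

// TestHandleMaintenance verifies the value-return contract the new call
// sites rely on: shortage drops by one, uncheckedCopies grows by one.
func TestHandleMaintenance(t *testing.T) {
	p := &Policer{}
	shortage, unchecked := p.handleMaintenance(Node{key: "n1"}, 2, 0)
	if shortage != 1 || unchecked != 1 {
		t.Fatalf("got shortage=%d, unchecked=%d; want 1 and 1", shortage, unchecked)
	}
}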