diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 6a2d9d327..9cdc4d813 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -13,6 +13,7 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
 
@@ -142,7 +143,6 @@ type placementRequirements struct {
 	removeLocalCopy bool
 }
 
-// nolint: funlen
 func (p *Policer) processNodes(ctx context.Context, requirements *placementRequirements, addrWithType objectcore.AddressWithType,
 	nodes []netmap.NodeInfo, shortage uint32, checkedNodes *nodeCache) {
 	addr := addrWithType.Address
@@ -152,21 +152,6 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 	// Number of copies that are stored on maintenance nodes.
 	var uncheckedCopies int
 
-	handleMaintenance := func(node netmap.NodeInfo) {
-		// consider remote nodes under maintenance as problem OK. Such
-		// nodes MAY not respond with object, however, this is how we
-		// prevent spam with new replicas.
-		// However, additional copies should not be removed in this case,
-		// because we can remove the only copy this way.
-		checkedNodes.submitReplicaHolder(node)
-		shortage--
-		uncheckedCopies++
-
-		p.log.Debug("consider node under maintenance as OK",
-			zap.String("node", netmap.StringifyPublicKey(node)),
-		)
-	}
-
 	if typ == object.TypeLock {
 		// all nodes of a container must store the `LOCK` objects
 		// for correct object removal protection:
@@ -187,7 +172,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 
 			shortage--
 		} else if nodes[i].IsMaintenance() {
-			handleMaintenance(nodes[i])
+			shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 		} else {
 			if status := checkedNodes.processStatus(nodes[i]); status >= 0 {
 				if status == 0 {
@@ -212,7 +197,7 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 			}
 
 			if isClientErrMaintenance(err) {
-				handleMaintenance(nodes[i])
+				shortage, uncheckedCopies = p.handleMaintenance(nodes[i], checkedNodes, shortage, uncheckedCopies)
 			} else if err != nil {
 				p.log.Error("receive object header to check policy compliance",
 					zap.Stringer("object", addr),
@@ -228,6 +213,31 @@ func (p *Policer) processNodes(ctx context.Context, requirements *placementRequi
 		i--
 	}
 
+	p.handleProcessNodesResult(ctx, addr, requirements, nodes, checkedNodes, shortage, uncheckedCopies)
+}
+
+// handleMaintenance handles a node in maintenance mode and returns the
+// updated shortage and uncheckedCopies values.
+//
+// A remote node under maintenance is considered OK rather than a problem:
+// it MAY not respond with the object, but counting it as a replica holder
+// prevents spamming the network with new replicas. Additional copies must
+// not be removed in this case, however, since its copy may be the only one.
+func (p *Policer) handleMaintenance(node netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) (uint32, int) {
+	checkedNodes.submitReplicaHolder(node)
+	shortage--
+	uncheckedCopies++
+
+	p.log.Debug("consider node under maintenance as OK",
+		zap.String("node", netmap.StringifyPublicKey(node)),
+	)
+	return shortage, uncheckedCopies
+}
+
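+// handleProcessNodesResult completes processNodes: it reacts to any remaining
+// copy shortage and finalizes the placement requirements for the object.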
+func (p *Policer) handleProcessNodesResult(ctx context.Context, addr oid.Address, requirements *placementRequirements,
+	nodes []netmap.NodeInfo, checkedNodes *nodeCache, shortage uint32, uncheckedCopies int) {
 	if shortage > 0 {
 		p.log.Debug("shortage of object copies detected",
 			zap.Stringer("object", addr),