forked from TrueCloudLab/frostfs-node
Consider a `REP 1 REP 1` placement (selects/filters are omitted). The placement is `[1, 2], [1, 0]`, and we are node 0. In the first replication group node 1 is under maintenance, so we do not replicate the object to node 2. In the second replication group node 1 is also under maintenance, but the current caching logic considers it a "replica holder" and removes the local copy. Voilà — we have data loss (DL) if the object is missing from node 1. Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
41 lines
1 KiB
Go
41 lines
1 KiB
Go
package policer
|
|
|
|
import "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
|
|
// nodeProcessStatus is the per-node outcome recorded by the Policer while
// checking an object's placement. The zero value means the node has not
// been looked at yet.
type nodeProcessStatus int8

const (
	// nodeNotProcessed is the zero value: the node has not been checked yet.
	nodeNotProcessed nodeProcessStatus = iota

	// nodeDoesNotHoldObject: the node was checked and does not store the object.
	nodeDoesNotHoldObject

	// nodeHoldsObject: the node was confirmed to store a replica of the object.
	nodeHoldsObject

	// nodeStatusUnknown: the node was processed but its storage state could
	// not be determined (presumably a failed check — confirm against callers).
	nodeStatusUnknown

	// nodeIsUnderMaintenance: the node reported maintenance mode. Kept
	// distinct from nodeHoldsObject so a maintenance node is not mistaken
	// for a replica holder (see the data-loss scenario in the commit message).
	nodeIsUnderMaintenance
)
|
|
|
|
func (st nodeProcessStatus) Processed() bool {
|
|
return st != nodeNotProcessed
|
|
}
|
|
|
|
// nodeCache tracks Policer's check progress.
//
// Keys are storage-node identifiers obtained via netmap.NodeInfo.Hash();
// values record how the check of that node ended.
type nodeCache map[uint64]nodeProcessStatus
|
|
|
|
func newNodeCache() nodeCache {
|
|
return make(map[uint64]nodeProcessStatus)
|
|
}
|
|
|
|
func (n nodeCache) set(node netmap.NodeInfo, val nodeProcessStatus) {
|
|
n[node.Hash()] = val
|
|
}
|
|
|
|
// processStatus returns current processing status of the storage node.
|
|
func (n nodeCache) processStatus(node netmap.NodeInfo) nodeProcessStatus {
|
|
return n[node.Hash()]
|
|
}
|
|
|
|
// SubmitSuccessfulReplication marks given storage node as a current object
|
|
// replica holder.
|
|
//
|
|
// SubmitSuccessfulReplication implements replicator.TaskResult.
|
|
func (n nodeCache) SubmitSuccessfulReplication(node netmap.NodeInfo) {
|
|
n.set(node, nodeHoldsObject)
|
|
}
|