forked from TrueCloudLab/frostfs-node
[#1516] traverser: Check for placement vector out of range
Placement vector may contain fewer nodes than required by the policy due to an outage of one of the nodes.

Signed-off-by: Anton Nikiforov <an.nikiforov@yadro.com>
This commit is contained in:
parent 6ae8667fb4
commit 9e11e3dc85
2 changed files with 55 additions and 5 deletions
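The commit message above describes the failure mode: when one of the selected nodes is down, the placement vector built for a replica descriptor can hold fewer nodes than the number of copies the policy requires, so slicing it by that count would go out of range. Below is a minimal standalone sketch of that situation and of a guarded split in the spirit of the fix; the node names and copy counts are illustrative assumptions, not repository code.

```go
package main

import "fmt"

func main() {
	// Hypothetical placement vector: the policy asks for 3 copies, but one
	// node is down, so only 2 nodes were selected.
	nodes := []string{"node-0", "node-1"}
	required := 3

	// Unguarded slicing, nodes[:required], would panic here:
	// "slice bounds out of range [:3] with capacity 2".

	// Guarded split, mirroring the idea of the fix: fall back to the whole
	// vector when it is shorter than the required copies count.
	var primary, rest []string
	if len(nodes) < required {
		primary = append(primary, nodes...)
	} else {
		primary = append(primary, nodes[:required]...)
		rest = append(rest, nodes[required:]...)
	}
	fmt.Println(primary, rest) // [node-0 node-1] []
}
```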
```diff
@@ -111,14 +111,18 @@ func NewTraverser(opts ...Option) (*Traverser, error) {
 	var rem []int
 	if len(cfg.metrics) > 0 && cfg.nodeState != nil {
 		rem = defaultCopiesVector(cfg.policy)
-		var unsortedVector []netmap.NodeInfo
-		var regularVector []netmap.NodeInfo
+		var unsortedVector, regularVector []netmap.NodeInfo
 		for i := range rem {
-			unsortedVector = append(unsortedVector, ns[i][:rem[i]]...)
-			regularVector = append(regularVector, ns[i][rem[i]:]...)
+			if len(ns[i]) < rem[i] {
+				unsortedVector = append(unsortedVector, ns[i]...)
+			} else {
+				unsortedVector = append(unsortedVector, ns[i][:rem[i]]...)
+				if len(ns[i]) > rem[i] {
+					regularVector = append(regularVector, ns[i][rem[i]:]...)
+				}
+			}
 		}
 		rem = []int{-1, -1}

 		sortedVector, err := sortVector(cfg, unsortedVector)
 		if err != nil {
 			return nil, err
```
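For context on the hunk above: when priority metrics and node state are configured, each placement vector is split into a prefix of at most rem[i] nodes (later sorted by the metrics) and the remaining tail kept in regular order; the change only adds bounds checks around those slice expressions. An equivalent way to get the same bounds safety, shown here purely as an outside-the-repository sketch, is to clamp the split index once:

```go
package main

import "fmt"

// splitVector is an illustrative helper (not repository code): clamping the
// pivot to the vector length makes both slice expressions safe even when the
// vector holds fewer nodes than the policy requires.
func splitVector(nodes []string, required int) (prefix, rest []string) {
	pivot := required
	if pivot > len(nodes) {
		pivot = len(nodes) // never slice past the end of the vector
	}
	return nodes[:pivot], nodes[pivot:]
}

func main() {
	prefix, rest := splitVector([]string{"node-0", "node-1"}, 3)
	fmt.Println(prefix, rest) // [node-0 node-1] []
}
```

Branching (as the commit does) and clamping are behaviorally equivalent here; the branch simply makes the "fewer nodes than required" case explicit.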
```diff
@@ -356,6 +356,52 @@ func TestTraverserPriorityMetrics(t *testing.T) {
 		require.Nil(t, next)
 	})
 
+	t.Run("one rep one metric fewer nodes", func(t *testing.T) {
+		selectors := []int{2}
+		replicas := []int{3}
+
+		nodes, cnr := testPlacement(selectors, replicas)
+
+		// Node_0, PK - ip4/0.0.0.0/tcp/0
+		nodes[0][0].SetAttribute("ClusterName", "A")
+		// Node_1, PK - ip4/0.0.0.0/tcp/1
+		nodes[0][1].SetAttribute("ClusterName", "B")
+
+		sdkNode := testNode(5)
+		sdkNode.SetAttribute("ClusterName", "B")
+
+		nodesCopy := copyVectors(nodes)
+
+		m := []Metric{NewAttributeMetric("ClusterName")}
+
+		tr, err := NewTraverser(
+			ForContainer(cnr),
+			UseBuilder(&testBuilder{
+				vectors: nodesCopy,
+			}),
+			WithoutSuccessTracking(),
+			WithPriorityMetrics(m),
+			WithNodeState(&nodeState{
+				node: &sdkNode,
+			}),
+		)
+		require.NoError(t, err)
+
+		// Without priority metric `ClusterName` the order will be:
+		// [ {Node_0 A}, {Node_1 B} ]
+		// With priority metric `ClusterName` and current node in cluster B
+		// the order should be:
+		// [ {Node_1 B}, {Node_0 A} ]
+		next := tr.Next()
+		require.NotNil(t, next)
+		require.Equal(t, 2, len(next))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/1", string(next[0].PublicKey()))
+		require.Equal(t, "/ip4/0.0.0.0/tcp/0", string(next[1].PublicKey()))
+
+		next = tr.Next()
+		require.Nil(t, next)
+	})
+
 	t.Run("two reps two metrics", func(t *testing.T) {
 		selectors := []int{3, 3}
 		replicas := []int{2, 2}
```
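The comments in the new subtest assert that, with the ClusterName priority metric and the local node in cluster B, {Node_1 B} is returned ahead of {Node_0 A}. A self-contained sketch of that preference rule, using hypothetical types rather than the SDK's netmap.NodeInfo:

```go
package main

import (
	"fmt"
	"sort"
)

// Standalone sketch (hypothetical types, not the SDK's): nodes that share the
// local node's attribute value are preferred, which is the ordering the new
// subtest asserts.
type node struct {
	addr    string
	cluster string
}

func main() {
	local := "B"
	nodes := []node{{"/ip4/0.0.0.0/tcp/0", "A"}, {"/ip4/0.0.0.0/tcp/1", "B"}}

	sort.SliceStable(nodes, func(i, j int) bool {
		// A node matching the local attribute sorts ahead of one that does not.
		return nodes[i].cluster == local && nodes[j].cluster != local
	})
	fmt.Println(nodes) // [{/ip4/0.0.0.0/tcp/1 B} {/ip4/0.0.0.0/tcp/0 A}]
}
```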