Use typed HRW methods #21
6 changed files with 13 additions and 7 deletions
2
go.mod
2
go.mod
|
@ -5,7 +5,7 @@ go 1.18
|
||||||
require (
|
require (
|
||||||
github.com/TrueCloudLab/frostfs-api-go/v2 v2.0.0-20221212144048-1351b6656d68
|
github.com/TrueCloudLab/frostfs-api-go/v2 v2.0.0-20221212144048-1351b6656d68
|
||||||
github.com/TrueCloudLab/frostfs-contract v0.0.0-20221213081248-6c805c1b4e42
|
github.com/TrueCloudLab/frostfs-contract v0.0.0-20221213081248-6c805c1b4e42
|
||||||
github.com/TrueCloudLab/hrw v1.1.0
|
github.com/TrueCloudLab/hrw v1.1.1-0.20230227111858-79b208bebf52
|
||||||
github.com/TrueCloudLab/tzhash v1.7.0
|
github.com/TrueCloudLab/tzhash v1.7.0
|
||||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12
|
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12
|
||||||
github.com/google/uuid v1.3.0
|
github.com/google/uuid v1.3.0
|
||||||
|
|
BIN
go.sum
BIN
go.sum
Binary file not shown.
|
@ -152,7 +152,7 @@ func (m NetMap) PlacementVectors(vectors [][]NodeInfo, pivot []byte) ([][]NodeIn
|
||||||
for i := range vectors {
|
for i := range vectors {
|
||||||
result[i] = make([]NodeInfo, len(vectors[i]))
|
result[i] = make([]NodeInfo, len(vectors[i]))
|
||||||
copy(result[i], vectors[i])
|
copy(result[i], vectors[i])
|
||||||
hrw.SortSliceByWeightValue(result[i], nodes(result[i]).weights(wf), h)
|
hrw.SortHasherSliceByWeightValue(result[i], nodes(result[i]).weights(wf), h)
|
||||||
}
|
}
|
||||||
done done
Could you mention this in the commit message? Actually, a separate commit for this specific optimization would be nice.
done done
|
|||||||
|
|
||||||
return result, nil
|
return result, nil
|
||||||
|
|
|
@ -25,7 +25,8 @@ import (
|
||||||
//
|
//
|
||||||
// Instances can be created using built-in var declaration.
|
// Instances can be created using built-in var declaration.
|
||||||
type NodeInfo struct {
|
type NodeInfo struct {
|
||||||
m netmap.NodeInfo
|
m netmap.NodeInfo
|
||||||
|
hash uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// reads NodeInfo from netmap.NodeInfo message. If checkFieldPresence is set,
|
// reads NodeInfo from netmap.NodeInfo message. If checkFieldPresence is set,
|
||||||
|
@ -86,6 +87,7 @@ func (x *NodeInfo) readFromV2(m netmap.NodeInfo, checkFieldPresence bool) error
|
||||||
}
|
}
|
||||||
|
|
||||||
x.m = m
|
x.m = m
|
||||||
|
x.hash = hrw.Hash(binPublicKey)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -167,6 +169,7 @@ func (x *NodeInfo) UnmarshalJSON(data []byte) error {
|
||||||
// See also PublicKey.
|
// See also PublicKey.
|
||||||
func (x *NodeInfo) SetPublicKey(key []byte) {
|
func (x *NodeInfo) SetPublicKey(key []byte) {
|
||||||
x.m.SetPublicKey(key)
|
x.m.SetPublicKey(key)
|
||||||
|
x.hash = hrw.Hash(x.m.GetPublicKey())
|
||||||
}
|
}
|
||||||
|
|
||||||
// PublicKey returns value set using SetPublicKey.
|
// PublicKey returns value set using SetPublicKey.
|
||||||
|
@ -233,6 +236,9 @@ var _ hrw.Hasher = NodeInfo{}
|
||||||
// Hash is needed to support weighted HRW therefore sort function sorts nodes
|
// Hash is needed to support weighted HRW therefore sort function sorts nodes
|
||||||
// based on their public key. Hash isn't expected to be used directly.
|
// based on their public key. Hash isn't expected to be used directly.
|
||||||
func (x NodeInfo) Hash() uint64 {
|
func (x NodeInfo) Hash() uint64 {
|
||||||
|
if x.hash != 0 {
|
||||||
|
return x.hash
|
||||||
|
}
|
||||||
return hrw.Hash(x.m.GetPublicKey())
|
return hrw.Hash(x.m.GetPublicKey())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -108,7 +108,7 @@ func (c *context) getSelection(p PlacementPolicy, s netmap.Selector) ([]nodes, e
|
||||||
weights[i] = calcBucketWeight(res[i], newMeanIQRAgg(), c.weightFunc)
|
weights[i] = calcBucketWeight(res[i], newMeanIQRAgg(), c.weightFunc)
|
||||||
}
|
}
|
||||||
|
|
||||||
hrw.SortSliceByWeightValue(res, weights, c.hrwSeedHash)
|
hrw.SortHasherSliceByWeightValue(res, weights, c.hrwSeedHash)
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.GetAttribute() == "" {
|
if s.GetAttribute() == "" {
|
||||||
|
@ -164,7 +164,7 @@ func (c *context) getSelectionBase(subnetID subnetid.ID, s netmap.Selector) []no
|
||||||
|
|
||||||
if len(c.hrwSeed) != 0 {
|
if len(c.hrwSeed) != 0 {
|
||||||
for i := range result {
|
for i := range result {
|
||||||
hrw.SortSliceByWeightValue(result[i].nodes, result[i].nodes.weights(c.weightFunc), c.hrwSeedHash)
|
hrw.SortHasherSliceByWeightValue(result[i].nodes, result[i].nodes.weights(c.weightFunc), c.hrwSeedHash)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -50,7 +50,7 @@ func BenchmarkHRWSort(b *testing.B) {
|
||||||
copy(realNodes, vectors)
|
copy(realNodes, vectors)
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
|
|
||||||
hrw.SortSliceByValue(realNodes, pivot)
|
hrw.SortHasherSliceByValue(realNodes, pivot)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
b.Run("only sort by index", func(b *testing.B) {
|
b.Run("only sort by index", func(b *testing.B) {
|
||||||
|
@ -72,7 +72,7 @@ func BenchmarkHRWSort(b *testing.B) {
|
||||||
copy(realNodes, vectors)
|
copy(realNodes, vectors)
|
||||||
b.StartTimer()
|
b.StartTimer()
|
||||||
|
|
||||||
hrw.SortSliceByWeightValue(realNodes, weights, pivot)
|
hrw.SortHasherSliceByWeightValue(realNodes, weights, pivot)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
b.Run("sort by ID, then by index (deterministic)", func(b *testing.B) {
|
b.Run("sort by ID, then by index (deterministic)", func(b *testing.B) {
|
||||||
|
|
Loading…
Reference in a new issue
Can we do an optimization with hash precalculation, similar to what you've done in the
frostfs-node
? It should be done in the
readFromV2
and SetPublicKey
functions. We should also fall back to a normal calculation if the hash is 0. Am I missing something here? @carpawell