frostfs-sdk-go/netmap/aggregator.go
Evgenii Stratonikov d282cd094f
[#355] netmap: Cache price and capacity attributes
They are used by HRW sorting, so it makes sense to store them separately
instead of iterating over all attributes each time we need them.
It also simplifies the code: we already parse them in NodeInfo.readFromV2(),
so we just save the result.

```
goos: linux
goarch: amd64
pkg: git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap
cpu: 11th Gen Intel(R) Core(TM) i5-1135G7 @ 2.40GHz
                                                              │     old     │                 new                 │
                                                              │   sec/op    │   sec/op     vs base                │
Netmap_ContainerNodes/REP_2-8                                   5.923µ ± 0%   5.209µ ± 1%  -12.05% (p=0.000 n=10)
Netmap_ContainerNodes/REP_2_IN_X_CBF_2_SELECT_2_FROM_*_AS_X-8   5.931µ ± 7%   5.088µ ± 1%  -14.22% (p=0.000 n=10)
geomean                                                         5.927µ        5.148µ       -13.14%

                                                              │     old      │                 new                 │
                                                              │     B/op     │     B/op      vs base               │
Netmap_ContainerNodes/REP_2-8                                   7.609Ki ± 0%   8.172Ki ± 0%  +7.39% (p=0.000 n=10)
Netmap_ContainerNodes/REP_2_IN_X_CBF_2_SELECT_2_FROM_*_AS_X-8   7.031Ki ± 0%   7.469Ki ± 0%  +6.22% (p=0.000 n=10)
geomean                                                         7.315Ki        7.812Ki       +6.81%

                                                              │    old     │                 new                 │
                                                              │ allocs/op  │ allocs/op   vs base                 │
Netmap_ContainerNodes/REP_2-8                                   77.00 ± 0%   77.00 ± 0%       ~ (p=1.000 n=10) ¹
Netmap_ContainerNodes/REP_2_IN_X_CBF_2_SELECT_2_FROM_*_AS_X-8   77.00 ± 0%   77.00 ± 0%       ~ (p=1.000 n=10) ¹
geomean                                                         77.00        77.00       +0.00%
¹ all samples are equal
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
2025-04-07 17:47:08 +03:00
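
The gist of the change, as a hedged sketch (the type and helper below are illustrative assumptions standing in for NodeInfo; the real parsing lives in NodeInfo.readFromV2()):

```go
package netmap

import "strconv"

// nodeInfoSketch is a hypothetical stand-in for NodeInfo: numeric
// attributes are parsed once during decoding and cached as fields,
// so HRW sorting reads plain fields instead of re-scanning attributes.
type nodeInfoSketch struct {
	capacity uint64 // cached "Capacity" attribute
	price    uint64 // cached "Price" attribute
}

// cacheAttribute mirrors the idea: parse once on decode, save the result.
func (n *nodeInfoSketch) cacheAttribute(key, value string) error {
	var err error
	switch key {
	case "Capacity":
		n.capacity, err = strconv.ParseUint(value, 10, 64)
	case "Price":
		n.price, err = strconv.ParseUint(value, 10, 64)
	}
	return err
}
```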


package netmap

import "slices"

type (
	// aggregator can calculate some value (such as the median,
	// minimum or maximum) across the whole netmap.
	aggregator interface {
		Add(float64)
		Compute() float64
	}

	// normalizer normalizes weight.
	normalizer interface {
		Normalize(w float64) float64
	}

	meanAgg struct {
		mean  float64
		count int
	}

	minAgg struct {
		min      float64
		minFound bool
	}

	meanIQRAgg struct {
		arr []float64
	}

	reverseMinNorm struct {
		min float64
	}

	sigmoidNorm struct {
		scale float64
	}

	// weightFunc calculates the weight of a node.
	weightFunc = func(NodeInfo) float64
)
var (
	_ aggregator = (*meanAgg)(nil)
	_ aggregator = (*minAgg)(nil)
	_ aggregator = (*meanIQRAgg)(nil)

	_ normalizer = (*reverseMinNorm)(nil)
	_ normalizer = (*sigmoidNorm)(nil)
)

// newWeightFunc returns a weightFunc which multiplies the normalized
// capacity and price of a node.
func newWeightFunc(capNorm, priceNorm normalizer) weightFunc {
	return func(n NodeInfo) float64 {
		return capNorm.Normalize(float64(n.capacity)) * priceNorm.Normalize(float64(n.price))
	}
}
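
// exampleWeight is an illustrative sketch, not part of the original file:
// it combines the two normalizers exactly as newWeightFunc does, for a
// node with capacity 100 and price 10, assuming a sigmoid scale of 100
// and an observed minimum price of 10.
func exampleWeight() float64 {
	capNorm := newSigmoidNorm(100)     // (100/100) / (1 + 100/100) = 0.5
	priceNorm := newReverseMinNorm(10) // (10+1) / (10+1) = 1.0

	return capNorm.Normalize(100) * priceNorm.Normalize(10) // 0.5
}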
// newMeanAgg returns an aggregator which computes the mean value
// incrementally, recalculating it on every addition.
func newMeanAgg() aggregator {
	return new(meanAgg)
}

// newMinAgg returns an aggregator which computes the minimum value.
func newMinAgg() aggregator {
	return new(minAgg)
}

// newReverseMinNorm returns a normalizer which normalizes values
// into the range of 0.0 to 1.0 relative to the minimum value.
func newReverseMinNorm(minV float64) normalizer {
	return &reverseMinNorm{min: minV}
}

// newSigmoidNorm returns a normalizer which normalizes values
// into the range of 0.0 to 1.0 using a scaled sigmoid.
func newSigmoidNorm(scale float64) normalizer {
	return &sigmoidNorm{scale: scale}
}
// Add updates the running mean without storing all samples:
// mean_{k+1} = mean_k * k/(k+1) + n/(k+1).
func (a *meanAgg) Add(n float64) {
	c := a.count + 1
	a.mean = a.mean*(float64(a.count)/float64(c)) + n/float64(c)
	a.count++
}

func (a *meanAgg) Compute() float64 {
	return a.mean
}

func (a *minAgg) Add(n float64) {
	if !a.minFound {
		a.min = n
		a.minFound = true
		return
	}

	if n < a.min {
		a.min = n
	}
}

func (a *minAgg) Compute() float64 {
	return a.min
}
func (a *meanIQRAgg) clear() {
	a.arr = a.arr[:0]
}

func (a *meanIQRAgg) Add(n float64) {
	a.arr = append(a.arr, n)
}

// Compute returns the mean of the samples that fall between the
// approximate first and third quartiles, discarding outliers.
func (a *meanIQRAgg) Compute() float64 {
	l := len(a.arr)
	if l == 0 {
		return 0
	}

	slices.Sort(a.arr)

	var minV, maxV float64

	const minLn = 4
	if l < minLn {
		// Too few samples for quartiles: use the full range.
		minV, maxV = a.arr[0], a.arr[l-1]
	} else {
		start, end := l/minLn, l*3/minLn - 1
		minV, maxV = a.arr[start], a.arr[end]
	}

	count := 0
	sum := float64(0)

	for _, e := range a.arr {
		if e >= minV && e <= maxV {
			sum += e
			count++
		}
	}

	return sum / float64(count)
}
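
// exampleIQRMean is an illustrative sketch, not part of the original file:
// with eight samples, Compute keeps arr[2]..arr[5] = [3, 6], so the
// outlier 100 is discarded and the result is the mean of {3, 4, 5, 6}.
func exampleIQRMean() float64 {
	var a meanIQRAgg
	for _, v := range []float64{1, 2, 3, 4, 5, 6, 7, 100} {
		a.Add(v)
	}

	return a.Compute() // 4.5
}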
// Normalize maps w to (min+1)/(w+1): the minimum value maps to 1.0
// and larger values approach 0.0.
func (r *reverseMinNorm) Normalize(w float64) float64 {
	return (r.min + 1) / (w + 1)
}

// Normalize maps w to x/(1+x) with x = w/scale, yielding values
// in the range of 0.0 to 1.0.
func (r *sigmoidNorm) Normalize(w float64) float64 {
	if r.scale == 0 {
		return 0
	}

	x := w / r.scale

	return x / (1 + x)
}
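
For context, a hedged sketch of how these pieces might be combined: aggregate capacity and price over the whole netmap, then feed the aggregates into the normalizers that newWeightFunc expects. The helper name and the mean/min aggregate choices below are illustrative assumptions, not taken from this file.

```go
// sketchWeightFunc is a hypothetical in-package helper: it derives
// normalizers from netmap-wide aggregates and builds the weight
// function used for HRW sorting.
func sketchWeightFunc(nodes []NodeInfo) weightFunc {
	capAgg := newMeanAgg()  // mean capacity sets the sigmoid scale
	priceAgg := newMinAgg() // the cheapest node anchors the reverse-min norm

	for i := range nodes {
		capAgg.Add(float64(nodes[i].capacity))
		priceAgg.Add(float64(nodes[i].price))
	}

	return newWeightFunc(
		newSigmoidNorm(capAgg.Compute()),
		newReverseMinNorm(priceAgg.Compute()),
	)
}
```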