All checks were successful
Tests and linters / Run gofumpt (pull_request) Successful in 1m56s
DCO action / DCO (pull_request) Successful in 2m15s
Vulncheck / Vulncheck (pull_request) Successful in 2m24s
Tests and linters / Tests (1.22) (pull_request) Successful in 2m54s
Build / Build Components (1.22) (pull_request) Successful in 2m53s
Build / Build Components (1.23) (pull_request) Successful in 2m52s
Tests and linters / Lint (pull_request) Successful in 3m24s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m3s
Tests and linters / Tests (1.23) (pull_request) Successful in 3m22s
Tests and linters / Staticcheck (pull_request) Successful in 3m25s
Tests and linters / gopls check (pull_request) Successful in 4m0s
Tests and linters / Tests with -race (pull_request) Successful in 4m12s
exportloopref is deprecated. gopatch:

```
@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
  ...
-  value := value
  ...
}

@@
var index, value identifier
var slice expression
@@
for index, value := range slice {
  ...
-  index := index
  ...
}

@@
var value identifier
var channel expression
@@
for value := range channel {
  ...
-  value := value
  ...
}
```

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
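For context, the patch mechanically deletes the per-iteration copies that the exportloopref linter used to demand. Since Go 1.22 every `range` iteration gets its own loop variable, so a closure launched inside the loop already captures that iteration's value and the manual copy is dead code. A minimal standalone sketch of the pattern (illustrative names, not code from this change):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	nodes := []string{"a", "b", "c"}

	var wg sync.WaitGroup
	for _, node := range nodes {
		// node := node // the copy the gopatch removes: redundant since
		// Go 1.22, where the loop variable is scoped per iteration.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(node) // captures this iteration's own node
		}()
	}
	wg.Wait()
}
```

The CI above builds with Go 1.22 and 1.23, so the copies are safe to drop.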
118 lines
3 KiB
Go
```go
package putsvc

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	svcutil "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/util"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object_manager/placement"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"go.uber.org/zap"
)
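
// nodeIterator walks the placement of an object and runs a callback on
// every container node selected for it, tracking which nodes have
// already been processed.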
type nodeIterator struct {
	traversal
	cfg *cfg
}
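
// newNodeIterator creates an iterator over the placement described by
// opts. mExclude records visited nodes by public key: a missing entry
// means the node has not been processed yet, *false that processing
// failed, and *true that it succeeded.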
func (c *cfg) newNodeIterator(opts []placement.Option) *nodeIterator {
	return &nodeIterator{
		traversal: traversal{
			opts:     opts,
			mExclude: make(map[string]*bool),
		},
		cfg: c,
	}
}
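
// forEachNode calls f for every node of the object placement until the
// placement requirements are satisfied. It returns errIncompletePut when
// too few nodes succeed. After a successful primary placement it may
// re-run itself to broadcast the object to the remaining container
// nodes; a broadcast failure is logged but never returned.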
func (n *nodeIterator) forEachNode(ctx context.Context, f func(context.Context, nodeDesc) error) error {
	traverser, err := placement.NewTraverser(n.traversal.opts...)
	if err != nil {
		return fmt.Errorf("could not create object placement traverser: %w", err)
	}

	resErr := &atomic.Value{}

	// Must iterate over all replicas, regardless of whether there are identical nodes there.
	// At the same time need to exclude identical nodes from processing.
	for {
		addrs := traverser.Next()
		if len(addrs) == 0 {
			break
		}

		if n.forEachAddress(ctx, traverser, addrs, f, resErr) {
			break
		}
	}

	if !traverser.Success() {
		var err errIncompletePut
		err.singleErr, _ = resErr.Load().(error)
		return err
	}

	// perform additional container broadcast if needed
	if n.traversal.submitPrimaryPlacementFinish() {
		err := n.forEachNode(ctx, f)
		if err != nil {
			n.cfg.log.Error(logs.PutAdditionalContainerBroadcastFailure, zap.Error(err))
			// we don't fail primary operation because of broadcast failure
		}
	}

	return nil
}
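
// forEachAddress submits f for every address in addrs to the matching
// worker pool, skipping nodes that have already been processed. It
// reports whether iteration must stop, which happens only when a worker
// pool rejects a task.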
func (n *nodeIterator) forEachAddress(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, f func(context.Context, nodeDesc) error, resErr *atomic.Value) bool {
	var wg sync.WaitGroup

	for _, addr := range addrs {
		if ok := n.mExclude[string(addr.PublicKey())]; ok != nil {
			if *ok {
				traverser.SubmitSuccess()
			}
			// This can happen only during additional container broadcast.
			continue
		}

		workerPool, isLocal := n.cfg.getWorkerPool(addr.PublicKey())

		item := new(bool)
		wg.Add(1)
		if err := workerPool.Submit(func() {
			defer wg.Done()

			err := f(ctx, nodeDesc{local: isLocal, info: addr})
			if err != nil {
				resErr.Store(err)
				svcutil.LogServiceError(n.cfg.log, "PUT", addr.Addresses(), err)
				return
			}

			traverser.SubmitSuccess()
			*item = true
		}); err != nil {
			wg.Done()
			svcutil.LogWorkerPoolError(n.cfg.log, "PUT", err)
			return true
		}

		// Mark the container node as processed in order to exclude it
		// in subsequent container broadcast. Note that we don't
		// process this node during broadcast if primary placement
		// on it failed.
		n.traversal.submitProcessed(addr, item)
	}

	wg.Wait()

	return false
}
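
// needAdditionalBroadcast reports whether the object must be broadcast
// to all container nodes: objects with children (linking objects) always
// are, tombstones and locks only when the request is not local-only.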
func needAdditionalBroadcast(obj *objectSDK.Object, localOnly bool) bool {
	return len(obj.Children()) > 0 || (!localOnly && (obj.Type() == objectSDK.TypeTombstone || obj.Type() == objectSDK.TypeLock))
}
```