package putsvc

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/nspcc-dev/neofs-node/pkg/core/object"
	svcutil "github.com/nspcc-dev/neofs-node/pkg/services/object/util"
	"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/placement"
	"github.com/nspcc-dev/neofs-node/pkg/services/object_manager/transformer"
	"github.com/nspcc-dev/neofs-node/pkg/util"
	"github.com/nspcc-dev/neofs-node/pkg/util/logger"
	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
)

// distributedTarget accumulates an object and, on Close, validates its payload
// and saves it on the nodes selected by the container placement policy.
type distributedTarget struct {
	traverseOpts []placement.Option

	remotePool, localPool util.WorkerPool

	obj *objectSDK.Object

	chunks [][]byte

	nodeTargetInitializer func(nodeDesc) transformer.ObjectTarget

	isLocalKey func([]byte) bool

	relay func(nodeDesc) error

	fmt *object.FormatValidator

	log *logger.Logger
}

// nodeDesc groups placement information about a single storage node.
type nodeDesc struct {
	local bool

	info placement.Node
}

// errIncompletePut is returned if processing on a container fails.
type errIncompletePut struct {
	singleErr error // error from the last responding node
}

func (x errIncompletePut) Error() string {
	const commonMsg = "incomplete object PUT by placement"

	if x.singleErr != nil {
		return fmt.Sprintf("%s: %v", commonMsg, x.singleErr)
	}

	return commonMsg
}

// WriteHeader stores the object header to be written on Close.
func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
	t.obj = obj

	return nil
}

// Write buffers the next payload chunk.
func (t *distributedTarget) Write(p []byte) (n int, err error) {
	t.chunks = append(t.chunks, p)

	return len(p), nil
}

// Close assembles the buffered payload, validates the object content and
// distributes the object over the placement nodes.
func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
	sz := 0

	for i := range t.chunks {
		sz += len(t.chunks[i])
	}

	payload := make([]byte, 0, sz)

	for i := range t.chunks {
		payload = append(payload, t.chunks[i]...)
	}

	t.obj.SetPayload(payload)

	if err := t.fmt.ValidateContent(t.obj); err != nil {
		return nil, fmt.Errorf("(%T) could not validate payload content: %w", t, err)
	}

	return t.iteratePlacement(t.sendObject)
}

// sendObject saves the object on the given node: remote nodes may be handled
// by the relay function, otherwise a per-node target is initialized and used.
func (t *distributedTarget) sendObject(node nodeDesc) error {
	if !node.local && t.relay != nil {
		return t.relay(node)
	}

	target := t.nodeTargetInitializer(node)

	if err := target.WriteHeader(t.obj); err != nil {
		return fmt.Errorf("could not write header: %w", err)
	} else if _, err := target.Close(); err != nil {
		return fmt.Errorf("could not close object stream: %w", err)
	}

	return nil
}

// iteratePlacement walks the object placement traverser and submits f for each
// selected node to the local or remote worker pool.
func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
	traverser, err := placement.NewTraverser(
		append(t.traverseOpts, placement.ForObject(t.obj.ID()))...,
	)
	if err != nil {
		return nil, fmt.Errorf("(%T) could not create object placement traverser: %w", t, err)
	}

	var resErr atomic.Value

loop:
	for {
		addrs := traverser.Next()
		if len(addrs) == 0 {
			break
		}

		wg := new(sync.WaitGroup)

		for i := range addrs {
			wg.Add(1)

			addr := addrs[i]

			isLocal := t.isLocalKey(addr.PublicKey())

			var workerPool util.WorkerPool

			if isLocal {
				workerPool = t.localPool
			} else {
				workerPool = t.remotePool
			}

			if err := workerPool.Submit(func() {
				defer wg.Done()

				if err := f(nodeDesc{local: isLocal, info: addr}); err != nil {
					resErr.Store(err)
					svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
					return
				}

				traverser.SubmitSuccess()
			}); err != nil {
				wg.Done()

				svcutil.LogWorkerPoolError(t.log, "PUT", err)

				break loop
			}
		}

		wg.Wait()
	}

	if !traverser.Success() {
		var err errIncompletePut

		err.singleErr, _ = resErr.Load().(error)

		return nil, err
	}

	return new(transformer.AccessIdentifiers).
		WithSelfID(t.obj.ID()), nil
}