[#199] putsvc: Refactor put object

Resolve the containedctx linter warning for the streamer and remote target

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
Dmitrii Stepanov, 2023-04-03 14:23:53 +03:00 (committed by Gitea)
parent cecea8053a
commit 27bdddc48f
24 changed files with 171 additions and 125 deletions
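
The diff below applies the usual containedctx remedy: instead of keeping a context.Context in a struct field, Write and Close (and the internal sendObject/iteratePlacement/iterateAddresses helpers) now accept the context as their first parameter. A minimal sketch of the pattern follows; the names exampleTarget and payload are illustrative only and are not the actual putsvc types.

package main

import (
	"context"
	"fmt"
)

// Before (flagged by containedctx): the context lives in a struct
// field and can silently outlive the request that created it.
//
//	type exampleTarget struct {
//		ctx     context.Context
//		payload []byte
//	}

// After: the context is passed explicitly to every method that may
// block or fan out work, mirroring Write(ctx, p) and Close(ctx) in
// the diff below.
type exampleTarget struct {
	payload []byte
}

// Write buffers the payload chunk; the context is accepted for
// interface symmetry even though buffering never blocks.
func (t *exampleTarget) Write(_ context.Context, p []byte) (int, error) {
	t.payload = append(t.payload, p...)
	return len(p), nil
}

// Close is where a real target would flush data to remote nodes,
// so it honours cancellation via the caller's context.
func (t *exampleTarget) Close(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	ctx := context.Background()
	t := &exampleTarget{}
	if _, err := t.Write(ctx, []byte("chunk")); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	fmt.Println("close:", t.Close(ctx))
}

Threading the context this way also lets iteratePlacement drop its callback parameter: sendObject is now invoked directly with the caller's context, as the diff shows.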

@@ -1,6 +1,7 @@
 package putsvc

 import (
+	"context"
 	"fmt"
 	"sync"
 	"sync/atomic"
@@ -17,7 +18,7 @@ import (

 type preparedObjectTarget interface {
 	WriteObject(*objectSDK.Object, object.ContentMeta) error
-	Close() (*transformer.AccessIdentifiers, error)
+	Close(ctx context.Context) (*transformer.AccessIdentifiers, error)
 }

 type distributedTarget struct {
@@ -121,13 +122,13 @@ func (t *distributedTarget) WriteHeader(obj *objectSDK.Object) error {
 	return nil
 }

-func (t *distributedTarget) Write(p []byte) (n int, err error) {
+func (t *distributedTarget) Write(_ context.Context, p []byte) (n int, err error) {
 	t.payload.Data = append(t.payload.Data, p...)

 	return len(p), nil
 }

-func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
 	defer func() {
 		putPayload(t.payload)
 		t.payload = nil
@@ -146,10 +147,10 @@ func (t *distributedTarget) Close() (*transformer.AccessIdentifiers, error) {
 		t.traversal.extraBroadcastEnabled = true
 	}

-	return t.iteratePlacement(t.sendObject)
+	return t.iteratePlacement(ctx)
 }

-func (t *distributedTarget) sendObject(node nodeDesc) error {
+func (t *distributedTarget) sendObject(ctx context.Context, node nodeDesc) error {
 	if !node.local && t.relay != nil {
 		return t.relay(node)
 	}
@@ -158,13 +159,13 @@ func (t *distributedTarget) sendObject(node nodeDesc) error {
 	if err := target.WriteObject(t.obj, t.objMeta); err != nil {
 		return fmt.Errorf("could not write header: %w", err)
-	} else if _, err := target.Close(); err != nil {
+	} else if _, err := target.Close(ctx); err != nil {
 		return fmt.Errorf("could not close object stream: %w", err)
 	}

 	return nil
 }

-func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transformer.AccessIdentifiers, error) {
+func (t *distributedTarget) iteratePlacement(ctx context.Context) (*transformer.AccessIdentifiers, error) {
 	id, _ := t.obj.ID()

 	traverser, err := placement.NewTraverser(
@@ -182,7 +183,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
 			break
 		}

-		if t.iterateAddresses(traverser, addrs, f, resErr) {
+		if t.iterateAddresses(ctx, traverser, addrs, resErr) {
 			break
 		}
 	}
@@ -195,7 +196,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
 	// perform additional container broadcast if needed
 	if t.traversal.submitPrimaryPlacementFinish() {
-		_, err = t.iteratePlacement(f)
+		_, err = t.iteratePlacement(ctx)
 		if err != nil {
 			t.log.Error("additional container broadcast failure", zap.Error(err))

 			// we don't fail primary operation because of broadcast failure
@@ -208,7 +209,7 @@ func (t *distributedTarget) iteratePlacement(f func(nodeDesc) error) (*transform
 		WithSelfID(id), nil
 }

-func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, addrs []placement.Node, f func(nodeDesc) error, resErr *atomic.Value) bool {
+func (t *distributedTarget) iterateAddresses(ctx context.Context, traverser *placement.Traverser, addrs []placement.Node, resErr *atomic.Value) bool {
 	wg := &sync.WaitGroup{}

 	for i := range addrs {
@@ -230,7 +231,7 @@ func (t *distributedTarget) iterateAddresses(traverser *placement.Traverser, add
 		if err := workerPool.Submit(func() {
 			defer wg.Done()

-			err := f(nodeDesc{local: isLocal, info: addr})
+			err := t.sendObject(ctx, nodeDesc{local: isLocal, info: addr})

 			// mark the container node as processed in order to exclude it
 			// in subsequent container broadcast. Note that we don't