forked from TrueCloudLab/frostfs-node
[#210] policier: Resolve contextcheck linter
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
This commit is contained in: parent 9098d0eec0, commit 23575e1ac0
15 changed files with 58 additions and 54 deletions
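
The change is one mechanical pattern applied across all fifteen files: the contextcheck linter flags functions that reach context-aware code without accepting a context.Context themselves, or that mint a fresh context.Background() where a caller's context is already available. The fix threads ctx context.Context through Delete/Inhume and their callbacks down to the shard calls, while tests supply context.Background() at the top of the chain. A minimal, self-contained Go sketch of the pattern — the store, inhume, and redundantCopyCallback names are illustrative stand-ins, not frostfs-node APIs:

// Sketch of the contextcheck fix pattern (illustrative names only; this is
// not frostfs-node code).
package main

import (
	"context"
	"fmt"
	"time"
)

type inhumePrm struct{ addr string }

type store struct{}

// Before the fix the method had no context parameter:
//
//	func (s *store) inhume(prm inhumePrm) error { ... }
//
// so cancellation could not propagate and contextcheck flagged the call
// sites. After the fix the context is accepted and threaded down.
func (s *store) inhume(ctx context.Context, prm inhumePrm) error {
	select {
	case <-time.After(10 * time.Millisecond): // stand-in for shard I/O
		return nil
	case <-ctx.Done():
		return fmt.Errorf("inhume %s: %w", prm.addr, ctx.Err())
	}
}

// Callbacks gain the context too, mirroring RedundantCopyCallback below.
type redundantCopyCallback func(ctx context.Context, addr string)

func main() {
	s := &store{}
	cb := redundantCopyCallback(func(ctx context.Context, addr string) {
		if err := s.inhume(ctx, inhumePrm{addr: addr}); err != nil {
			fmt.Println("could not inhume:", err)
		}
	})
	// Context-free entry points (tests, main) pass context.Background(),
	// exactly as the updated *_test.go call sites do.
	cb(context.Background(), "addr-1")
}

The diff below applies exactly this transformation, hunk by hunk.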
@@ -253,11 +253,11 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *reputati
 			policerconfig.HeadTimeout(c.appCfg),
 		),
 		policer.WithReplicator(c.replicator),
-		policer.WithRedundantCopyCallback(func(addr oid.Address) {
+		policer.WithRedundantCopyCallback(func(ctx context.Context, addr oid.Address) {
 			var inhumePrm engine.InhumePrm
 			inhumePrm.MarkAsGarbage(addr)
 
-			_, err := ls.Inhume(inhumePrm)
+			_, err := ls.Inhume(ctx, inhumePrm)
 			if err != nil {
 				c.log.Warn("could not inhume mark redundant copy as garbage",
 					zap.String("error", err.Error()),
@@ -620,8 +620,8 @@ func (e engineWithNotifications) IsLocked(address oid.Address) (bool, error) {
 	return e.base.IsLocked(address)
 }
 
-func (e engineWithNotifications) Delete(tombstone oid.Address, toDelete []oid.ID) error {
-	return e.base.Delete(tombstone, toDelete)
+func (e engineWithNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
+	return e.base.Delete(ctx, tombstone, toDelete)
 }
 
 func (e engineWithNotifications) Lock(locker oid.Address, toLock []oid.ID) error {
@@ -657,7 +657,7 @@ func (e engineWithoutNotifications) IsLocked(address oid.Address) (bool, error)
 	return e.engine.IsLocked(address)
 }
 
-func (e engineWithoutNotifications) Delete(tombstone oid.Address, toDelete []oid.ID) error {
+func (e engineWithoutNotifications) Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error {
 	var prm engine.InhumePrm
 
 	addrs := make([]oid.Address, len(toDelete))
@@ -668,7 +668,7 @@ func (e engineWithoutNotifications) Delete(tombstone oid.Address, toDelete []oid
 	prm.WithTarget(tombstone, addrs...)
 
-	_, err := e.engine.Inhume(prm)
+	_, err := e.engine.Inhume(ctx, prm)
 	return err
 }
 
@ -1,6 +1,7 @@
|
|||
package engine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
|
||||
|
@ -44,16 +45,16 @@ func (p *DeletePrm) WithForceRemoval() {
|
|||
// NOTE: Marks any object to be deleted (despite any prohibitions
|
||||
// on operations with that object) if WithForceRemoval option has
|
||||
// been provided.
|
||||
func (e *StorageEngine) Delete(prm DeletePrm) (res DeleteRes, err error) {
|
||||
func (e *StorageEngine) Delete(ctx context.Context, prm DeletePrm) (res DeleteRes, err error) {
|
||||
err = e.execIfNotBlocked(func() error {
|
||||
res, err = e.delete(prm)
|
||||
res, err = e.delete(ctx, prm)
|
||||
return err
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
|
||||
func (e *StorageEngine) delete(ctx context.Context, prm DeletePrm) (DeleteRes, error) {
|
||||
if e.metrics != nil {
|
||||
defer elapsed(e.metrics.AddDeleteDuration)()
|
||||
}
|
||||
|
@ -95,7 +96,7 @@ func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
|
|||
shPrm.ForceRemoval()
|
||||
}
|
||||
|
||||
_, err = sh.Inhume(shPrm)
|
||||
_, err = sh.Inhume(ctx, shPrm)
|
||||
if err != nil {
|
||||
e.reportShardError(sh, "could not inhume object in shard", err)
|
||||
|
||||
|
@ -113,13 +114,13 @@ func (e *StorageEngine) delete(prm DeletePrm) (DeleteRes, error) {
|
|||
}
|
||||
|
||||
if splitInfo != nil {
|
||||
e.deleteChildren(prm.addr, prm.forceRemoval, splitInfo.SplitID())
|
||||
e.deleteChildren(ctx, prm.addr, prm.forceRemoval, splitInfo.SplitID())
|
||||
}
|
||||
|
||||
return DeleteRes{}, nil
|
||||
}
|
||||
|
||||
func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID *objectSDK.SplitID) {
|
||||
func (e *StorageEngine) deleteChildren(ctx context.Context, addr oid.Address, force bool, splitID *objectSDK.SplitID) {
|
||||
var fs objectSDK.SearchFilters
|
||||
fs.AddSplitIDFilter(objectSDK.MatchStringEqual, splitID)
|
||||
|
||||
|
@ -144,7 +145,7 @@ func (e *StorageEngine) deleteChildren(addr oid.Address, force bool, splitID *ob
|
|||
for _, addr := range res.AddressList() {
|
||||
inhumePrm.MarkAsGarbage(addr)
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(ctx, inhumePrm)
|
||||
if err != nil {
|
||||
e.log.Debug("could not inhume object in shard",
|
||||
zap.Stringer("addr", addr),
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package engine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
|
@ -78,7 +79,7 @@ func TestDeleteBigObject(t *testing.T) {
|
|||
deletePrm.WithForceRemoval()
|
||||
deletePrm.WithAddress(addrParent)
|
||||
|
||||
_, err := e.Delete(deletePrm)
|
||||
_, err := e.Delete(context.Background(), deletePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
checkGetError(t, e, addrParent, &apistatus.ObjectNotFound{})
|
||||
|
|
|
@ -60,16 +60,16 @@ var errInhumeFailure = errors.New("inhume operation failed")
|
|||
// with that object) if WithForceRemoval option has been provided.
|
||||
//
|
||||
// Returns an error if executions are blocked (see BlockExecution).
|
||||
func (e *StorageEngine) Inhume(prm InhumePrm) (res InhumeRes, err error) {
|
||||
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
|
||||
err = e.execIfNotBlocked(func() error {
|
||||
res, err = e.inhume(prm)
|
||||
res, err = e.inhume(ctx, prm)
|
||||
return err
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
|
||||
func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
|
||||
if e.metrics != nil {
|
||||
defer elapsed(e.metrics.AddInhumeDuration)()
|
||||
}
|
||||
|
@ -98,12 +98,12 @@ func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
|
|||
shPrm.MarkAsGarbage(prm.addrs[i])
|
||||
}
|
||||
|
||||
ok, err := e.inhumeAddr(prm.addrs[i], shPrm, true)
|
||||
ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, true)
|
||||
if err != nil {
|
||||
return InhumeRes{}, err
|
||||
}
|
||||
if !ok {
|
||||
ok, err := e.inhumeAddr(prm.addrs[i], shPrm, false)
|
||||
ok, err := e.inhumeAddr(ctx, prm.addrs[i], shPrm, false)
|
||||
if err != nil {
|
||||
return InhumeRes{}, err
|
||||
} else if !ok {
|
||||
|
@ -116,7 +116,7 @@ func (e *StorageEngine) inhume(prm InhumePrm) (InhumeRes, error) {
|
|||
}
|
||||
|
||||
// Returns ok if object was inhumed during this invocation or before.
|
||||
func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
|
||||
func (e *StorageEngine) inhumeAddr(ctx context.Context, addr oid.Address, prm shard.InhumePrm, checkExists bool) (bool, error) {
|
||||
root := false
|
||||
var errLocked apistatus.ObjectLocked
|
||||
var existPrm shard.ExistsPrm
|
||||
|
@ -154,7 +154,7 @@ func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm, checkE
|
|||
}
|
||||
}
|
||||
|
||||
_, err := sh.Inhume(prm)
|
||||
_, err := sh.Inhume(ctx, prm)
|
||||
if err != nil {
|
||||
switch {
|
||||
case errors.As(err, &errLocked):
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package engine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
|
@ -47,7 +48,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
addrs, err := Select(e, cnr, fs)
|
||||
|
@ -75,7 +76,7 @@ func TestStorageEngine_Inhume(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.WithTarget(tombstoneID, object.AddressOf(parent))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
addrs, err := Select(e, cnr, fs)
|
||||
|
|
|
@ -111,7 +111,7 @@ func TestLockUserScenario(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.WithTarget(tombAddr, objAddr)
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
|
||||
// 4.
|
||||
|
@ -124,7 +124,7 @@ func TestLockUserScenario(t *testing.T) {
|
|||
|
||||
inhumePrm.WithTarget(tombForLockAddr, lockerAddr)
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorIs(t, err, meta.ErrLockObjectRemoval)
|
||||
|
||||
// 5.
|
||||
|
@ -135,7 +135,7 @@ func TestLockUserScenario(t *testing.T) {
|
|||
|
||||
inhumePrm.WithTarget(tombAddr, objAddr)
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -192,7 +192,7 @@ func TestLockExpiration(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
|
||||
// 3.
|
||||
|
@ -205,7 +205,7 @@ func TestLockExpiration(t *testing.T) {
|
|||
// 4.
|
||||
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -259,12 +259,12 @@ func TestLockForceRemoval(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
|
||||
inhumePrm.WithTarget(oidtest.Address(), objectcore.AddressOf(obj))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
|
||||
// 4.
|
||||
|
@ -272,12 +272,12 @@ func TestLockForceRemoval(t *testing.T) {
|
|||
deletePrm.WithAddress(objectcore.AddressOf(lock))
|
||||
deletePrm.WithForceRemoval()
|
||||
|
||||
_, err = e.Delete(deletePrm)
|
||||
_, err = e.Delete(context.Background(), deletePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5.
|
||||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
|
||||
|
||||
_, err = e.Inhume(inhumePrm)
|
||||
_, err = e.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
|
|
@ -269,7 +269,7 @@ func TestRefillMetabase(t *testing.T) {
|
|||
var inhumePrm InhumePrm
|
||||
inhumePrm.SetTarget(object.AddressOf(tombObj), tombMembers...)
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
var headPrm HeadPrm
|
||||
|
@ -322,7 +322,7 @@ func TestRefillMetabase(t *testing.T) {
|
|||
var prm InhumePrm
|
||||
prm.MarkAsGarbage(addr)
|
||||
|
||||
_, err := sh.Inhume(prm)
|
||||
_, err := sh.Inhume(context.Background(), prm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked),
|
||||
"object %s should be locked", locked[i])
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ var ErrLockObjectRemoval = meta.ErrLockObjectRemoval
|
|||
// if at least one object is locked.
|
||||
//
|
||||
// Returns ErrReadOnlyMode error if shard is in "read-only" mode.
|
||||
func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
|
||||
func (s *Shard) Inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
|
||||
s.m.RLock()
|
||||
|
||||
if s.info.Mode.ReadOnly() {
|
||||
|
@ -119,7 +119,7 @@ func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) {
|
|||
}
|
||||
|
||||
if deletedLockObjs := res.DeletedLockObjects(); len(deletedLockObjs) != 0 {
|
||||
s.deletedLockCallBack(context.Background(), deletedLockObjs)
|
||||
s.deletedLockCallBack(ctx, deletedLockObjs)
|
||||
}
|
||||
|
||||
return InhumeRes{}, nil
|
||||
|
|
|
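
This shard hunk is the one place the commit changes more than a signature: the deleted-lock callback previously ran under a detached context.Background(); now it inherits the caller's ctx, so cancelling the originating request can stop the follow-up work. A small illustrative sketch of the difference (generic names, not shard code):

package main

import (
	"context"
	"fmt"
)

// callback stands in for deletedLockCallBack: follow-up work that should
// stop when the request that triggered it is cancelled.
func callback(ctx context.Context, ids []int) {
	for _, id := range ids {
		if ctx.Err() != nil {
			fmt.Println("stopping early:", ctx.Err())
			return
		}
		fmt.Println("processing", id)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate the caller going away mid-request

	callback(ctx, []int{1, 2, 3}) // observes cancellation and stops
	// context.Background() is never cancelled, so the old code always
	// ran the callback to completion regardless of the caller's state.
	callback(context.Background(), []int{1, 2, 3})
}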
@ -1,6 +1,7 @@
|
|||
package shard_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/object"
|
||||
|
@ -47,7 +48,7 @@ func testShardInhume(t *testing.T, hasWriteCache bool) {
|
|||
_, err = testGet(t, sh, getPrm, hasWriteCache)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sh.Inhume(inhPrm)
|
||||
_, err = sh.Inhume(context.Background(), inhPrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sh.Get(getPrm)
|
||||
|
|
|
@ -93,11 +93,11 @@ func TestShard_Lock(t *testing.T) {
|
|||
var inhumePrm shard.InhumePrm
|
||||
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(obj))
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
|
||||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.ErrorAs(t, err, new(apistatus.ObjectLocked))
|
||||
})
|
||||
|
||||
|
@ -107,11 +107,11 @@ func TestShard_Lock(t *testing.T) {
|
|||
var inhumePrm shard.InhumePrm
|
||||
inhumePrm.SetTarget(objectcore.AddressOf(ts), objectcore.AddressOf(lock))
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.Error(t, err)
|
||||
|
||||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
|
@ -120,7 +120,7 @@ func TestShard_Lock(t *testing.T) {
|
|||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock))
|
||||
inhumePrm.ForceRemoval()
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
// it should be possible to remove
|
||||
|
@ -129,7 +129,7 @@ func TestShard_Lock(t *testing.T) {
|
|||
inhumePrm = shard.InhumePrm{}
|
||||
inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj))
|
||||
|
||||
_, err = sh.Inhume(inhumePrm)
|
||||
_, err = sh.Inhume(context.Background(), inhumePrm)
|
||||
require.NoError(t, err)
|
||||
|
||||
// check that object has been removed
|
||||
|
|
|
@ -127,7 +127,7 @@ func TestCounters(t *testing.T) {
|
|||
for i := 0; i < inhumedNumber; i++ {
|
||||
prm.MarkAsGarbage(objectcore.AddressOf(oo[i]))
|
||||
|
||||
_, err := sh.Inhume(prm)
|
||||
_, err := sh.Inhume(context.Background(), prm)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -149,7 +149,7 @@ func TestCounters(t *testing.T) {
|
|||
inhumedNumber := int(phy / 4)
|
||||
prm.SetTarget(ts, addrFromObjs(oo[:inhumedNumber])...)
|
||||
|
||||
_, err := sh.Inhume(prm)
|
||||
_, err := sh.Inhume(context.Background(), prm)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, phy, mm.objCounters[physical])
|
||||
|
|
|
@ -17,7 +17,7 @@ import (
|
|||
//
|
||||
// If some address is not a valid object address in a binary format, an error returns.
|
||||
// If request is unsigned or signed by disallowed key, permission error returns.
|
||||
func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
|
||||
func (s *Server) DropObjects(ctx context.Context, req *control.DropObjectsRequest) (*control.DropObjectsResponse, error) {
|
||||
// verify request
|
||||
if err := s.isValidRequest(req); err != nil {
|
||||
return nil, status.Error(codes.PermissionDenied, err.Error())
|
||||
|
@ -42,7 +42,7 @@ func (s *Server) DropObjects(_ context.Context, req *control.DropObjectsRequest)
|
|||
prm.WithAddress(addrList[i])
|
||||
|
||||
//nolint: contextcheck
|
||||
_, err := s.s.Delete(prm)
|
||||
_, err := s.s.Delete(ctx, prm)
|
||||
if err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ type ObjectStorage interface {
|
|||
Put(*object.Object) error
|
||||
// Delete must delete passed objects
|
||||
// and return any appeared error.
|
||||
Delete(tombstone oid.Address, toDelete []oid.ID) error
|
||||
Delete(ctx context.Context, tombstone oid.Address, toDelete []oid.ID) error
|
||||
// Lock must lock passed objects
|
||||
// and return any appeared error.
|
||||
Lock(locker oid.Address, toLock []oid.ID) error
|
||||
|
@ -39,10 +39,10 @@ func (t *localTarget) WriteObject(obj *object.Object, meta objectCore.ContentMet
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *localTarget) Close(_ context.Context) (*transformer.AccessIdentifiers, error) {
|
||||
func (t *localTarget) Close(ctx context.Context) (*transformer.AccessIdentifiers, error) {
|
||||
switch t.meta.Type() {
|
||||
case object.TypeTombstone:
|
||||
err := t.storage.Delete(objectCore.AddressOf(t.obj), t.meta.Objects())
|
||||
err := t.storage.Delete(ctx, objectCore.AddressOf(t.obj), t.meta.Objects())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not delete objects from tombstone locally: %w", err)
|
||||
}
|
||||
|
|
|
@ -81,8 +81,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
|
|||
prm.MarkAsGarbage(addrWithType.Address)
|
||||
prm.WithForceRemoval()
|
||||
|
||||
//nolint: contextcheck
|
||||
_, err := p.jobQueue.localStorage.Inhume(prm)
|
||||
_, err := p.jobQueue.localStorage.Inhume(ctx, prm)
|
||||
if err != nil {
|
||||
p.log.Error("could not inhume object with missing container",
|
||||
zap.Stringer("cid", idCnr),
|
||||
|
@ -134,7 +133,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
|
|||
zap.Stringer("object", addr),
|
||||
)
|
||||
|
||||
p.cbRedundantCopy(addr)
|
||||
p.cbRedundantCopy(ctx, addr)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
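
Note the hunk size dropping from 8 to 7 lines here: with ctx handed to Inhume directly, the //nolint: contextcheck suppression above the call is deleted rather than carried forward.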
@ -1,6 +1,7 @@
|
|||
package policer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -63,7 +64,7 @@ type Option func(*cfg)
|
|||
|
||||
// RedundantCopyCallback is a callback to pass
|
||||
// the redundant local copy of the object.
|
||||
type RedundantCopyCallback func(oid.Address)
|
||||
type RedundantCopyCallback func(context.Context, oid.Address)
|
||||
|
||||
type cfg struct {
|
||||
headTimeout time.Duration
|
||||
|
|