Refactor replicator #517
11 changed files with 88 additions and 56 deletions
@@ -996,13 +996,6 @@ func (c *cfg) needBootstrap() bool {
	return c.cfgNetmap.needBootstrap
}

// ObjectServiceLoad implements system loader interface for policer component.
// It is calculated as size/capacity ratio of "remote object put" worker.
// Returns float value between 0.0 and 1.0.
func (c *cfg) ObjectServiceLoad() float64 {
	return float64(c.cfgObject.pool.putRemote.Running()) / float64(c.cfgObject.pool.putRemoteCapacity)
}

type dCmp struct {
	name       string
	reloadFunc func() error
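For context on what this hunk removes: the loader reported the busyness of the "remote object put" worker pool as a plain running/capacity ratio. A minimal illustration with hypothetical numbers (75 of 100 workers busy):

	running, capacity := 75, 100
	load := float64(running) / float64(capacity) // 0.75, i.e. the pool is 75% busy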
@@ -259,7 +259,6 @@ func addPolicer(c *cfg, keyStorage *util.KeyStorage, clientConstructor *cache.Cl
		}),
		policer.WithMaxCapacity(c.cfgObject.pool.replicatorPoolSize),
		policer.WithPool(c.cfgObject.pool.replication),
		policer.WithNodeLoader(c),
	)

	c.workers = append(c.workers, worker{
@@ -52,7 +52,6 @@ const (
	PolicerRoutineStopped                       = "routine stopped"                          // Info in ../node/pkg/services/policer/process.go
	PolicerFailureAtObjectSelectForReplication  = "failure at object select for replication" // Warn in ../node/pkg/services/policer/process.go
	PolicerPoolSubmission                       = "pool submission"                          // Warn in ../node/pkg/services/policer/process.go
	PolicerTuneReplicationCapacity              = "tune replication capacity"                // Debug in ../node/pkg/services/policer/process.go
	ReplicatorFinishWork                        = "finish work"                              // Debug in ../node/pkg/services/replicator/process.go
	ReplicatorCouldNotGetObjectFromLocalStorage = "could not get object from local storage"  // Error in ../node/pkg/services/replicator/process.go
	ReplicatorCouldNotReplicateObject           = "could not replicate object"               // Error in ../node/pkg/services/replicator/process.go
@@ -13,6 +13,7 @@ import (
type Client interface {
	ContainerAnnounceUsedSpace(context.Context, client.PrmAnnounceSpace) (*client.ResAnnounceSpace, error)
	ObjectPutInit(context.Context, client.PrmObjectPutInit) (client.ObjectWriter, error)
	ObjectPutSingle(context.Context, client.PrmObjectPutSingle) (*client.ResObjectPutSingle, error)
	ObjectDelete(context.Context, client.PrmObjectDelete) (*client.ResObjectDelete, error)
	ObjectGetInit(context.Context, client.PrmObjectGet) (*client.ObjectReader, error)
	ObjectHead(context.Context, client.PrmObjectHead) (*client.ResObjectHead, error)
pkg/network/cache/multi.go (vendored, 9 changes)
@@ -228,6 +228,15 @@ func (x *multiClient) ObjectPutInit(ctx context.Context, p client.PrmObjectPutIn
	return
}

func (x *multiClient) ObjectPutSingle(ctx context.Context, p client.PrmObjectPutSingle) (res *client.ResObjectPutSingle, err error) {
	err = x.iterateClients(ctx, func(c clientcore.Client) error {
		res, err = c.ObjectPutSingle(ctx, p)
		return err
	})

	return
}

func (x *multiClient) ContainerAnnounceUsedSpace(ctx context.Context, prm client.PrmAnnounceSpace) (res *client.ResAnnounceSpace, err error) {
	err = x.iterateClients(ctx, func(c clientcore.Client) error {
		res, err = c.ContainerAnnounceUsedSpace(ctx, prm)
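iterateClients itself is outside this hunk; the sketch below shows the pattern the new wrapper relies on, assuming it tries each endpoint's client until one call succeeds. Names flagged as hypothetical are not from the actual multi.go:

	// Hypothetical sketch of the iterate-until-success pattern used above.
	func (x *multiClient) iterateClientsSketch(ctx context.Context, f func(clientcore.Client) error) error {
		var lastErr error
		for _, c := range x.endpointClients { // hypothetical field: one client per endpoint
			if err := ctx.Err(); err != nil {
				return err // stop early if the context is done
			}
			if err := f(c); err != nil {
				lastErr = err
				continue // try the next endpoint
			}
			return nil // first success wins
		}
		return lastErr
	}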
@@ -449,6 +449,54 @@ func PutObject(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
	}, nil
}

// PutObjectSingle saves the object in local storage of the remote node with PutSingle RPC.
//
// Client and key must be set.
//
// Returns any error which prevented the operation from completing correctly in error return.
func PutObjectSingle(ctx context.Context, prm PutObjectPrm) (*PutObjectRes, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "client.PutObjectSingle")
	defer span.End()

	objID, isSet := prm.obj.ID()
	if !isSet {
		return nil, errors.New("missing object id")
	}

	var prmCli client.PrmObjectPutSingle

	prmCli.ExecuteLocal()

	if prm.key != nil {
		prmCli.UseKey(prm.key)
	}

	if prm.tokenSession != nil {
		prmCli.WithinSession(*prm.tokenSession)
	}

	if prm.tokenBearer != nil {
		prmCli.WithBearerToken(*prm.tokenBearer)
	}

	prmCli.WithXHeaders(prm.xHeaders...)
	prmCli.SetObject(prm.obj.ToV2())

	res, err := prm.cli.ObjectPutSingle(ctx, prmCli)
	if err != nil {
		ReportError(prm.cli, err)
		return nil, fmt.Errorf("put single object on client: %w", err)
	}

	if err = apistatus.ErrFromStatus(res.Status()); err != nil {
		return nil, fmt.Errorf("put single object via client: %w", err)

[review, resolved] ale64bit: discussed offline
[review, resolved] dstepanov-yadro: fixed

	}

	return &PutObjectRes{
		id: objID,
	}, nil
}

// SearchObjectsPrm groups parameters of SearchObjects operation.
type SearchObjectsPrm struct {
	readPrmCommon
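The intended calling pattern for PutObjectSingle shows up in remoteTarget.Close below: try the single-shot RPC first and fall back to the streaming path when the remote side does not implement it. Condensed to its core (a sketch, not a drop-in):

	res, err := internalclient.PutObjectSingle(ctx, prm)
	if status.Code(err) == codes.Unimplemented {
		// The remote node predates PutSingle: fall back to streaming PutObject.
		res, err = internalclient.PutObject(ctx, prm)
	}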
@@ -13,6 +13,8 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/transformer"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type remoteTarget struct {

@@ -63,6 +65,15 @@ func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifier
	prm.SetXHeaders(t.commonPrm.XHeaders())
	prm.SetObject(t.obj)

	res, err := t.putSingle(ctx, prm)
	if status.Code(err) != codes.Unimplemented {

[review, resolved] fyrchik: `remoteTarget` is used by node only, so I would expect only prepared objects here. Can this condition be false?
[review, resolved] dstepanov-yadro: fixed

		return res, err
	}

	return t.putStream(ctx, prm)
}

func (t *remoteTarget) putStream(ctx context.Context, prm internalclient.PutObjectPrm) (*transformer.AccessIdentifiers, error) {
	res, err := internalclient.PutObject(ctx, prm)
	if err != nil {
		return nil, fmt.Errorf("(%T) could not put object to %s: %w", t, t.nodeInfo.AddressGroup(), err)

@@ -71,6 +82,15 @@ func (t *remoteTarget) Close(ctx context.Context) (*transformer.AccessIdentifier
	return &transformer.AccessIdentifiers{SelfID: res.ID()}, nil
}

func (t *remoteTarget) putSingle(ctx context.Context, prm internalclient.PutObjectPrm) (*transformer.AccessIdentifiers, error) {
	res, err := internalclient.PutObjectSingle(ctx, prm)
	if err != nil {
		return nil, fmt.Errorf("(%T) could not put single object to %s: %w", t, t.nodeInfo.AddressGroup(), err)
	}

	return &transformer.AccessIdentifiers{SelfID: res.ID()}, nil
}

// NewRemoteSender creates, initializes and returns new RemoteSender instance.
func NewRemoteSender(keyStorage *util.KeyStorage, cons ClientConstructor) *RemoteSender {
	return &RemoteSender{
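One subtlety in the condition above: status.Code(nil) is codes.OK and any non-gRPC error maps to codes.Unknown, so both successes and ordinary failures return immediately; only an explicit Unimplemented from the peer falls through to putStream. A self-contained check of that behavior:

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	func main() {
		fmt.Println(status.Code(nil))                                               // OK
		fmt.Println(status.Code(errors.New("plain error")))                         // Unknown
		fmt.Println(status.Code(status.Error(codes.Unimplemented, "no PutSingle"))) // Unimplemented
	}

Note that PutObjectSingle wraps transport errors with fmt.Errorf("...: %w", err); whether status.Code sees through such wrapping depends on the grpc-go version in use, so this is worth verifying against the vendored dependency.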
@@ -41,12 +41,6 @@ type Replicator interface {
// RemoteObjectHeaderFunc is the function to obtain HEAD info from a specific remote node.
type RemoteObjectHeaderFunc func(context.Context, netmapSDK.NodeInfo, oid.Address) (*objectSDK.Object, error)

// NodeLoader provides application load statistics.
type nodeLoader interface {
	// ObjectServiceLoad returns object service load value in [0:1] range.
	ObjectServiceLoad() float64
}

type cfg struct {
	headTimeout time.Duration

@@ -70,8 +64,6 @@ type cfg struct {

	taskPool *ants.Pool

	loader nodeLoader

	maxCapacity int

	batchSize, cacheSize uint32

@@ -178,10 +170,3 @@ func WithPool(p *ants.Pool) Option {
		c.taskPool = p
	}
}

// WithNodeLoader returns option to set FrostFS node load source.
func WithNodeLoader(l nodeLoader) Option {
	return func(c *cfg) {
		c.loader = l
	}
}
@@ -52,7 +52,6 @@ func TestBuryObjectWithoutContainer(t *testing.T) {
		WithContainerSource(containerSrcFunc(containerSrc)),
		WithBuryFunc(buryFn),
		WithPool(pool),
		WithNodeLoader(constNodeLoader(0)),
	)

	ctx, cancel := context.WithCancel(context.Background())

@@ -279,7 +278,6 @@ func TestIteratorContract(t *testing.T) {
		WithContainerSource(containerSrcFunc(containerSrc)),
		WithBuryFunc(buryFn),
		WithPool(pool),
		WithNodeLoader(constNodeLoader(0)),
		func(c *cfg) {
			c.sleepDuration = time.Millisecond
		},

@@ -377,11 +375,6 @@ type announcedKeysFunc func([]byte) bool

func (f announcedKeysFunc) IsLocalKey(k []byte) bool { return f(k) }

// constNodeLoader is a nodeLoader that always returns a fixed value.
type constNodeLoader float64

func (f constNodeLoader) ObjectServiceLoad() float64 { return float64(f) }

// replicatorFunc is a Replicator backed by a function.
type replicatorFunc func(context.Context, replicator.Task, replicator.TaskResult)
@@ -11,7 +11,6 @@ import (
)

func (p *Policer) Run(ctx context.Context) {
	go p.poolCapacityWorker(ctx)
	p.shardPolicyWorker(ctx)
	p.log.Info(logs.PolicerRoutineStopped)
}

@@ -65,27 +64,3 @@ func (p *Policer) shardPolicyWorker(ctx context.Context) {
		}
	}
}

func (p *Policer) poolCapacityWorker(ctx context.Context) {
	ticker := time.NewTicker(p.rebalanceFreq)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			frostfsSysLoad := p.loader.ObjectServiceLoad()
			newCapacity := int((1.0 - frostfsSysLoad) * float64(p.maxCapacity))
			if newCapacity == 0 {
				newCapacity++
			}

			if p.taskPool.Cap() != newCapacity {
				p.taskPool.Tune(newCapacity)
				p.log.Debug(logs.PolicerTuneReplicationCapacity,
					zap.Float64("system_load", frostfsSysLoad),
					zap.Int("new_capacity", newCapacity))
			}
		}
	}
}
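For reference, the removed worker rescaled the replication pool to (1 - load) * maxCapacity, clamped to at least one worker. Worked through with hypothetical numbers:

	load, maxCapacity := 0.75, 8
	newCapacity := int((1.0 - load) * float64(maxCapacity)) // (1.0-0.75)*8 = 2
	if newCapacity == 0 {
		newCapacity = 1 // never tune the pool down to zero workers
	}
	// p.taskPool.Tune(newCapacity) would then resize the ants pool.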
@@ -6,7 +6,10 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/engine"
	putsvc "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/services/object/put"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

@@ -28,6 +31,13 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
		)
	}()

	ctx, span := tracing.StartSpanFromContext(ctx, "Replicator.HandleTask",
		trace.WithAttributes(
			attribute.Stringer("address", task.Addr),
			attribute.Int64("number_of_copies", int64(task.NumCopies)),
		))
	defer span.End()

	if task.Obj == nil {
		var err error
		task.Obj, err = engine.Get(ctx, p.localStorage, task.Addr)
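The nil check on task.Obj means callers may enqueue an address-only task and let the replicator fetch the object from the local engine. A hedged sketch of such a task; the field names come from the hunk above, everything else is illustrative:

	var addr oid.Address // filled elsewhere, e.g. by the policer's object listing

	var task replicator.Task
	task.Addr = addr   // address of the object to replicate
	task.NumCopies = 2 // number of additional copies to place
	// task.Obj stays nil: HandleTask loads it via engine.Get before replicating.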