[#114] pool: Support client cut with memory limiter #120

Merged
2 changed files with 18 additions and 17 deletions


@@ -58,9 +58,8 @@ func (p *PartsBufferPool) FreeBuffer(buff *PartBuffer) error {
 	p.mu.Lock()
 	defer p.mu.Unlock()
-	used := p.limit - p.available
-	if buff.len > used {
-		return fmt.Errorf("buffer size %d to free is greater than used: %d", buff.len, used)
+	if buff.len+p.available > p.limit {
+		return fmt.Errorf("buffer size %d to free is too large, available: %d, limit: %d", buff.len, p.available, p.limit)

Review comment: It's clearer this way, I think: `buff.len + p.available > p.limit`
 	}
 	p.available += buff.len

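For context on the suggested condition, here is a minimal, self-contained sketch of the limiter semantics (the `partSize` field and the `GetBuffer` body are assumptions for illustration, not the PR's actual code): the pool lends out part buffers against a byte limit, and `FreeBuffer` rejects a return that would push the accounted capacity above that limit, which is exactly what `buff.len + p.available > p.limit` checks.

```go
package pool

import (
	"fmt"
	"sync"
)

// PartBuffer is a single reusable part buffer lent out by the pool.
type PartBuffer struct {
	Buffer []byte
	len    uint64
}

// PartsBufferPool caps the total memory lent out for client-cut uploads.
type PartsBufferPool struct {
	mu        sync.Mutex
	partSize  uint64 // size of one part buffer (assumed field name)
	limit     uint64 // total bytes the pool may lend out
	available uint64 // bytes of the limit not currently lent out
}

// GetBuffer lends one part buffer, failing if the limit is exhausted.
func (p *PartsBufferPool) GetBuffer() (*PartBuffer, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.partSize > p.available {
		return nil, fmt.Errorf("requested %d bytes, only %d of %d available", p.partSize, p.available, p.limit)
	}
	p.available -= p.partSize
	return &PartBuffer{Buffer: make([]byte, p.partSize), len: p.partSize}, nil
}

// FreeBuffer returns a buffer to the pool. Accepting it must not push the
// accounted capacity above the limit, hence the suggested condition.
func (p *PartsBufferPool) FreeBuffer(buff *PartBuffer) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if buff.len+p.available > p.limit {
		return fmt.Errorf("buffer size %d to free is too large, available: %d, limit: %d", buff.len, p.available, p.limit)
	}
	p.available += buff.len
	return nil
}
```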

@@ -2250,17 +2250,12 @@ func (p *Pool) PutObject(ctx context.Context, prm PrmObjectPut) (oid.ID, error)
 		}
 	}
 	if prm.clientCut {
 		buff, err := p.partsBufferPool.GetBuffer()
 		if err != nil {
 			return oid.ID{}, fmt.Errorf("cannot get buffer for put operations: %w", err)
 		}
-		defer func() {
-			if errFree := p.partsBufferPool.FreeBuffer(buff); errFree != nil {
-				p.log(zap.WarnLevel, "failed to free part buffer", zap.Error(err))
-			}
-		}()
 		prm.setPartBuffer(buff.Buffer)
 		var ni netmap.NetworkInfo
@@ -2269,6 +2264,13 @@ func (p *Pool) PutObject(ctx context.Context, prm PrmObjectPut) (oid.ID, error)
 		prm.setNetworkInfo(ni)
+		defer func() {
+			if errFree := p.partsBufferPool.FreeBuffer(buff); errFree != nil {
+				p.log(zap.WarnLevel, "failed to free part buffer", zap.Error(err))
+			}
+		}()
 	}
 	id, err := ctxCall.client.objectPut(ctx, prm)
 	if err != nil {
 		// removes session token from cache in case of token error
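As a usage note on the pattern the pool.go hunks move around: the part buffer is acquired once per client-cut put and released in a deferred call whose error is only logged, so it cannot override the put's own result. A minimal sketch building on the pool type sketched above (the helper name and the `work` callback are hypothetical; it logs `errFree`, the error returned by `FreeBuffer`, via the standard library log package):

```go
// uploadWithPartBuffer is a hypothetical helper showing the acquire /
// deferred-free pattern around a client-cut upload step.
// (Extends the sketch above; add "log" to its imports.)
func uploadWithPartBuffer(p *PartsBufferPool, work func(buf []byte) error) error {
	buff, err := p.GetBuffer()
	if err != nil {
		return fmt.Errorf("cannot get buffer for put operations: %w", err)
	}
	defer func() {
		// Log and continue: a freeing failure must not mask the upload result.
		if errFree := p.FreeBuffer(buff); errFree != nil {
			log.Printf("failed to free part buffer: %v", errFree)
		}
	}()
	return work(buff.Buffer)
}
```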