[#2007] services/object: Allocate memory on-demand in GET_RANGE

For big objects we want to get an OutOfRange error before all the memory is
allocated.

Signed-off-by: Evgenii Stratonikov <evgeniy@morphbits.ru>
This commit is contained in:
Evgenii Stratonikov 2022-11-02 13:42:29 +03:00 committed by fyrchik
parent ff5526038d
commit a455ec18c3
2 changed files with 21 additions and 3 deletions

View file

@ -15,6 +15,7 @@ Changelog for NeoFS Node
- `neofs-cli container nodes`'s output (#1991)
- Increase error counter for write-cache flush errors (#1818)
- Correctly select the shard for applying tree service operations (#1996)
- Do not panic with bad inputs for `GET_RANGE` (#2007)
### Removed
### Updated

View file

@ -1,6 +1,7 @@
package internal
import (
"bytes"
"context"
"crypto/ecdsa"
"errors"
@ -316,6 +317,11 @@ func (x PayloadRangeRes) PayloadRange() []byte {
return x.data
}
// maxInitialBufferSize is the maximum initial buffer size for PayloadRange result.
// We don't want to allocate a lot of space in advance because a query can
// fail with apistatus.ObjectOutOfRange status.
const maxInitialBufferSize = 1024 * 1024 // 1 MiB
// PayloadRange reads object payload range by address.
//
// Client, context and key must be set.
@ -349,15 +355,26 @@ func PayloadRange(prm PayloadRangePrm) (*PayloadRangeRes, error) {
return nil, fmt.Errorf("init payload reading: %w", err)
}
data := make([]byte, prm.ln)
if int64(prm.ln) < 0 {
// `CopyN` expects `int64`, this check ensures that the result is positive.
// In practice this means that we can return incorrect results for objects
// with size > 8_388 Petabytes, this will be fixed later with support for streaming.
return nil, apistatus.ObjectOutOfRange{}
}
_, err = io.ReadFull(rdr, data)
ln := prm.ln
if ln > maxInitialBufferSize {
ln = maxInitialBufferSize
}
w := bytes.NewBuffer(make([]byte, ln))
_, err = io.CopyN(w, rdr, int64(prm.ln))
if err != nil {
return nil, fmt.Errorf("read payload: %w", err)
}
return &PayloadRangeRes{
data: data,
data: w.Bytes(),
}, nil
}