package transformer

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"io"

	"github.com/nspcc-dev/neofs-node/pkg/core/object"
	"github.com/nspcc-dev/neofs-sdk-go/checksum"
	objectSDK "github.com/nspcc-dev/neofs-sdk-go/object"
	oidSDK "github.com/nspcc-dev/neofs-sdk-go/object/id"
	"github.com/nspcc-dev/tzhash/tz"
)

type payloadSizeLimiter struct {
	maxSize, written uint64

	targetInit func() ObjectTarget

	target ObjectTarget

	current, parent *object.RawObject

	currentHashers, parentHashers []*payloadChecksumHasher

	previous []*oidSDK.ID

	chunkWriter io.Writer

	splitID *objectSDK.SplitID

	parAttrs []*objectSDK.Attribute
}

type payloadChecksumHasher struct {
	hasher hash.Hash

	checksumWriter func([]byte)
}

const tzChecksumSize = 64

// NewPayloadSizeLimiter returns an ObjectTarget instance that restricts
// the payload length of the object being written and writes the generated
// objects to the targets produced by the initializer.
//
// Objects with a payload size less than or equal to the maximum size
// remain untouched.
//
// TODO: describe the behavior in detail.
func NewPayloadSizeLimiter(maxSize uint64, targetInit TargetInitializer) ObjectTarget {
	return &payloadSizeLimiter{
		maxSize:    maxSize,
		targetInit: targetInit,
		splitID:    objectSDK.NewSplitID(),
	}
}
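
// A minimal usage sketch (newChainTarget, hdr and payloadReader are
// hypothetical, not part of this package):
//
//	limiter := NewPayloadSizeLimiter(64<<20, newChainTarget)
//	if err := limiter.WriteHeader(hdr); err != nil {
//		// handle error
//	}
//	if _, err := io.Copy(limiter, payloadReader); err != nil {
//		// handle error
//	}
//	ids, err := limiter.Close() // identifiers of the last released object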

func (s *payloadSizeLimiter) WriteHeader(hdr *object.RawObject) error {
	s.current = fromObject(hdr)

	s.initialize()

	return nil
}

func (s *payloadSizeLimiter) Write(p []byte) (int, error) {
	if err := s.writeChunk(p); err != nil {
		return 0, err
	}

	return len(p), nil
}

func (s *payloadSizeLimiter) Close() (*AccessIdentifiers, error) {
	return s.release(true)
}

func (s *payloadSizeLimiter) initialize() {
	// if it is an object after the 1st
	if ln := len(s.previous); ln > 0 {
		// initialize parent object once (after the 1st object)
		if ln == 1 {
			s.detachParent()
		}

		// set the previous-object reference to the last released identifier
		s.current.SetPreviousID(s.previous[ln-1])
	}

	s.initializeCurrent()
}

func fromObject(obj *object.RawObject) *object.RawObject {
	res := object.NewRaw()
	res.SetContainerID(obj.ContainerID())
	res.SetOwnerID(obj.OwnerID())
	res.SetAttributes(obj.Attributes()...)
	res.SetType(obj.Type())

	// SetSplitID creates a split header, which small objects do not need,
	// so set it only after a nil check.
	if obj.SplitID() != nil {
		res.SetSplitID(obj.SplitID())
	}

	return res
}

func (s *payloadSizeLimiter) initializeCurrent() {
	// initialize the current object target
	s.target = s.targetInit()

	// create payload hashers
	s.currentHashers = payloadHashersForObject(s.current)

	// compose a multi-writer from the target and all payload hashers
	ws := make([]io.Writer, 0, 1+len(s.currentHashers)+len(s.parentHashers))

	ws = append(ws, s.target)

	for i := range s.currentHashers {
		ws = append(ws, s.currentHashers[i].hasher)
	}

	for i := range s.parentHashers {
		ws = append(ws, s.parentHashers[i].hasher)
	}

	s.chunkWriter = io.MultiWriter(ws...)
}
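
// Note that io.MultiWriter writes every chunk to each composed writer in
// sequence, so a single Write call feeds the next target, the current
// object's hashers and, for non-first children, the parent's hashers in
// one pass over the payload bytes.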

func payloadHashersForObject(obj *object.RawObject) []*payloadChecksumHasher {
	return []*payloadChecksumHasher{
		{
			hasher: sha256.New(),
			checksumWriter: func(cs []byte) {
				if ln := len(cs); ln != sha256.Size {
					panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", sha256.Size, ln))
				}

				csSHA := [sha256.Size]byte{}
				copy(csSHA[:], cs)

				checksum := checksum.New()
				checksum.SetSHA256(csSHA)

				obj.SetPayloadChecksum(checksum)
			},
		},
		{
			hasher: tz.New(),
			checksumWriter: func(cs []byte) {
				if ln := len(cs); ln != tzChecksumSize {
					panic(fmt.Sprintf("wrong checksum length: expected %d, has %d", tzChecksumSize, ln))
				}

				csTZ := [tzChecksumSize]byte{}
				copy(csTZ[:], cs)

				checksum := checksum.New()
				checksum.SetTillichZemor(csTZ)

				obj.SetPayloadHomomorphicHash(checksum)
			},
		},
	}
}
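
// For reference, a standalone sketch (not used by this package; payload is a
// hypothetical []byte) of what the two hashers above compute over one payload:
//
//	sha, tzh := sha256.New(), tz.New()
//	w := io.MultiWriter(sha, tzh)
//	_, _ = w.Write(payload)
//	shaSum := sha.Sum(nil) // 32 bytes, recorded via SetPayloadChecksum
//	tzSum := tzh.Sum(nil)  // 64 bytes, recorded via SetPayloadHomomorphicHash
//
// Tillich-Zémor hashing is homomorphic: the hash of concatenated chunks can
// be combined from the chunks' hashes, which is why NeoFS keeps it alongside
// plain SHA-256.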

func (s *payloadSizeLimiter) release(close bool) (*AccessIdentifiers, error) {
	// Arg close is true only when called from the Close method.
	// The parent is finalized and the linking object is generated only
	// when the split-chain contains more than one object.
	withParent := close && len(s.previous) > 0

	if withParent {
		writeHashes(s.parentHashers)
		s.parent.SetPayloadSize(s.written)
		s.current.SetParent(s.parent.SDK().Object())
	}

	// release the current object
	writeHashes(s.currentHashers)

	// release current, get its id
	if err := s.target.WriteHeader(s.current); err != nil {
		return nil, fmt.Errorf("could not write header: %w", err)
	}

	ids, err := s.target.Close()
	if err != nil {
		return nil, fmt.Errorf("could not close target: %w", err)
	}

	// save the identifier of the released object
	s.previous = append(s.previous, ids.SelfID())

	if withParent {
		// generate and release the linking object
		s.initializeLinking(ids.Parent())
		s.initializeCurrent()

		if _, err := s.release(false); err != nil {
			return nil, fmt.Errorf("could not release linking object: %w", err)
		}
	}

	return ids, nil
}
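
// When maxSize splits a payload into N > 1 parts, release produces a chain
// shaped like this (a sketch of the logic above):
//
//	child_1 ... child_N  - payload chunks, linked through SetPreviousID
//	linking object       - empty payload, lists all children via SetChildren
//
// The parent header travels inside the last child and the linking object,
// and every object in the chain shares one SplitID.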

func writeHashes(hashers []*payloadChecksumHasher) {
	for i := range hashers {
		hashers[i].checksumWriter(hashers[i].hasher.Sum(nil))
	}
}

func (s *payloadSizeLimiter) initializeLinking(parHdr *objectSDK.Object) {
	s.current = fromObject(s.current)
	s.current.SetParent(parHdr)
	s.current.SetChildren(s.previous...)
	s.current.SetSplitID(s.splitID)
}

func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
	// the condition holds if the previous write reached the boundary exactly
	if s.written > 0 && s.written%s.maxSize == 0 {
		if s.written == s.maxSize {
			s.prepareFirstChild()
		}

		// release the current object
		if _, err := s.release(false); err != nil {
			return fmt.Errorf("could not release object: %w", err)
		}

		// initialize another object
		s.initialize()
	}

	var (
		ln         = uint64(len(chunk))
		cut        = ln
		leftToEdge = s.maxSize - s.written%s.maxSize
	)

	// write bytes no further than the boundary of the current object
	if ln > leftToEdge {
		cut = leftToEdge
	}

	if _, err := s.chunkWriter.Write(chunk[:cut]); err != nil {
		return fmt.Errorf("could not write chunk to target: %w", err)
	}

	// increase the written-bytes counter
	s.written += cut

	// if there are more bytes in the buffer, call the method again to start
	// filling another object
	if ln > leftToEdge {
		return s.writeChunk(chunk[cut:])
	}

	return nil
}
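
// A worked example of the boundary arithmetic above: with maxSize = 10 and
// written = 27, leftToEdge = 10 - 27%10 = 3, so a 5-byte chunk is cut at 3
// bytes; written becomes 30 and writeChunk recurses with the remaining 2
// bytes, releasing the filled object at the top of the recursive call.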

func (s *payloadSizeLimiter) prepareFirstChild() {
	// initialize the split header with the split ID on the first object in the chain
	s.current.InitRelations()
	s.current.SetSplitID(s.splitID)

	// cut source attributes
	s.parAttrs = s.current.Attributes()
	s.current.SetAttributes()

	// attributes will be added to the parent in detachParent
}

func (s *payloadSizeLimiter) detachParent() {
	s.parent = s.current
	s.current = fromObject(s.parent)
	s.parent.ResetRelations()
	s.parent.SetSignature(nil)
	s.parentHashers = s.currentHashers

	// restore source attributes
	s.parent.SetAttributes(s.parAttrs...)
}