package transformer

import (
	"crypto/ecdsa"
	"crypto/sha256"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/version"
	"git.frostfs.info/TrueCloudLab/tzhash/tz"
)
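
// payloadSizeLimiter is an ObjectTarget that accumulates the payload of a
// single object and, once it grows past MaxSize, splits it into a chain of
// child objects linked to a common parent.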
type payloadSizeLimiter struct {
	Params

	written, writtenCurrent uint64

	current, parent *object.Object

	currentHashers, parentHashers []payloadChecksumHasher

	previous []oid.ID

	splitID *object.SplitID

	parAttrs []object.Attribute
}
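
// Params groups the parameters of the payload size limiter.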
type Params struct {
	Key                    *ecdsa.PrivateKey
	NextTarget             ObjectTarget
	SessionToken           *session.Object
	NetworkState           EpochSource
	MaxSize                uint64
	WithoutHomomorphicHash bool
}

// NewPayloadSizeLimiter returns an ObjectTarget that restricts the payload
// length of the object being written and writes the generated objects to the
// next target from the provided Params.
//
// Calculates and adds a homomorphic hash to the resulting objects only if
// WithoutHomomorphicHash is false.
//
// Objects with a payload size less than or equal to MaxSize remain untouched.
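//
// A minimal usage sketch (illustrative only; key, next, tok, epochSrc, hdr,
// payload and maxObjectSize are assumed to be prepared by the caller):
//
//	tgt := NewPayloadSizeLimiter(Params{
//		Key:          key,      // signer's *ecdsa.PrivateKey
//		NextTarget:   next,     // downstream ObjectTarget
//		SessionToken: tok,
//		NetworkState: epochSrc, // EpochSource implementation
//		MaxSize:      maxObjectSize,
//	})
//	_ = tgt.WriteHeader(hdr)  // prepared *object.Object header
//	_, _ = tgt.Write(payload) // may be called multiple times
//	ids, err := tgt.Close()   // finalizes the chain, returns *AccessIdentifiers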
func NewPayloadSizeLimiter(p Params) ObjectTarget {
	return &payloadSizeLimiter{
		Params:  p,
		splitID: object.NewSplitID(),
	}
}
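
// WriteHeader sets the header of the object being written and prepares the
// limiter state (payload hashers and byte counters) for subsequent Write calls.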
func (s *payloadSizeLimiter) WriteHeader(hdr *object.Object) error {
	s.current = fromObject(hdr)

	s.initialize()

	return nil
}
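
// Write writes the next chunk of the payload, releasing the current object
// and starting a new child every time the MaxSize boundary is crossed.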
func (s *payloadSizeLimiter) Write(p []byte) (int, error) {
	if err := s.writeChunk(p); err != nil {
		return 0, err
	}

	return len(p), nil
}
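
// Close finalizes the written object: it releases the last (or the only)
// object and, if the payload was split, the parent and linking objects too.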
func (s *payloadSizeLimiter) Close() (*AccessIdentifiers, error) {
	return s.release(true)
}
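
// initialize prepares the next object of the split chain: it resets the
// current object, links it to the previously released one and, right after
// the first split, captures the parent object together with its hashers.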
func (s *payloadSizeLimiter) initialize() {
	s.current = fromObject(s.current)

	// if this is not the first object in the chain
	if ln := len(s.previous); ln > 0 {
		// initialize the parent object once (right after the 1st object)
		if ln == 1 {
			s.parent = fromObject(s.current)
			s.parentHashers = append(s.parentHashers[:0], s.currentHashers...)

			// restore the source attributes that were cut from the first child
			s.parent.SetAttributes(s.parAttrs...)
		}

		// link the current object to the last released identifier
		s.current.SetPreviousID(s.previous[ln-1])
	}

	s.initializeCurrent()
}
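
// fromObject returns a fresh object that inherits the container, owner,
// attributes, type and (if set) split ID of obj, without its payload or
// identification fields.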
func fromObject(obj *object.Object) *object.Object {
	cnr, _ := obj.ContainerID()

	res := object.New()
	res.SetContainerID(cnr)
	res.SetOwnerID(obj.OwnerID())
	res.SetAttributes(obj.Attributes()...)
	res.SetType(obj.Type())

	// SetSplitID creates a split header, which small objects do not need,
	// so copy the split ID only when it is actually set.
	if obj.SplitID() != nil {
		res.SetSplitID(obj.SplitID())
	}

	return res
}
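
// initializeCurrent resets the per-object state: the byte counter and the
// payload checksum hashers of the current object.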
func (s *payloadSizeLimiter) initializeCurrent() {
	// create payload hashers
	s.writtenCurrent = 0
	s.initPayloadHashers()
}
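
// initPayloadHashers creates the payload hashers of the current object:
// SHA-256 always, plus the homomorphic Tillich-Zemor hash unless it is
// disabled via WithoutHomomorphicHash.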
func (s *payloadSizeLimiter) initPayloadHashers() {
	s.currentHashers = append(s.currentHashers[:0], payloadChecksumHasher{
		hasher: sha256.New(),
		typ:    checksum.SHA256,
	})

	if !s.WithoutHomomorphicHash {
		s.currentHashers = append(s.currentHashers, payloadChecksumHasher{
			hasher: tz.New(),
			typ:    checksum.TZ,
		})
	}
}
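
// release finalizes the current object and writes it to the next target.
// When finalize is true and the payload has been split, it also finalizes
// the parent object and emits the linking object that lists all children.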
func (s *payloadSizeLimiter) release(finalize bool) (*AccessIdentifiers, error) {
	// Arg finalize is true only when called from the Close method.
	// We finalize the parent and generate the linking object only if there is
	// more than one object in the split chain.
	withParent := finalize && len(s.previous) > 0

	if withParent {
		for i := range s.parentHashers {
			s.parentHashers[i].writeChecksum(s.parent)
		}
		s.parent.SetPayloadSize(s.written)
		s.current.SetParent(s.parent)
	}

	// release current object
	for i := range s.currentHashers {
		s.currentHashers[i].writeChecksum(s.current)
	}

	curEpoch := s.NetworkState.CurrentEpoch()
	ver := version.Current()

	s.current.SetVersion(&ver)
	s.current.SetPayloadSize(s.writtenCurrent)
	s.current.SetSessionToken(s.SessionToken)
	s.current.SetCreationEpoch(curEpoch)

	var (
		parID  *oid.ID
		parHdr *object.Object
	)

	if par := s.current.Parent(); par != nil && par.Signature() == nil {
		rawPar := object.NewFromV2(par.ToV2())

		rawPar.SetSessionToken(s.SessionToken)
		rawPar.SetCreationEpoch(curEpoch)

		if err := object.SetIDWithSignature(*s.Key, rawPar); err != nil {
			return nil, fmt.Errorf("could not finalize parent object: %w", err)
		}

		id, _ := rawPar.ID()
		parID = &id
		parHdr = rawPar

		s.current.SetParent(parHdr)
	}

	if err := object.SetIDWithSignature(*s.Key, s.current); err != nil {
		return nil, fmt.Errorf("could not finalize object: %w", err)
	}

	if err := s.NextTarget.WriteHeader(s.current); err != nil {
		return nil, fmt.Errorf("could not write header to next target: %w", err)
	}

	if _, err := s.NextTarget.Close(); err != nil {
		return nil, fmt.Errorf("could not close next target: %w", err)
	}

	id, _ := s.current.ID()

	ids := &AccessIdentifiers{
		ParentID:     parID,
		SelfID:       id,
		ParentHeader: parHdr,
	}

	// save identifier of the released object
	s.previous = append(s.previous, ids.SelfID)

	if withParent {
		// generate and release linking object
		s.initializeLinking(ids.ParentHeader)
		s.initializeCurrent()

		if _, err := s.release(false); err != nil {
			return nil, fmt.Errorf("could not release linking object: %w", err)
		}
	}

	return ids, nil
}
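
// initializeLinking turns the current (empty) object into the linking object
// of the split chain: it references the finalized parent header and all
// previously released children.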
func (s *payloadSizeLimiter) initializeLinking(parHdr *object.Object) {
	s.current = fromObject(s.current)
	s.current.SetParent(parHdr)
	s.current.SetChildren(s.previous...)
	s.current.SetSplitID(s.splitID)
}
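
// writeChunk feeds the chunk to the current object, releasing it and starting
// a new one each time the MaxSize boundary is reached. For example, with
// MaxSize = 10 a payload of 25 bytes is written as child objects of 10, 10
// and 5 bytes.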
func (s *payloadSizeLimiter) writeChunk(chunk []byte) error {
	for {
		// the condition is true if the previous write filled the current object
		// exactly to the boundary
		if s.written > 0 && s.written%s.MaxSize == 0 {
			if s.written == s.MaxSize {
				s.prepareFirstChild()
			}

			// we need to release the current object
			if _, err := s.release(false); err != nil {
				return fmt.Errorf("could not release object: %w", err)
			}

			// initialize another object
			s.initialize()
		}

		var (
			ln         = uint64(len(chunk))
			cut        = ln
			leftToEdge = s.MaxSize - s.written%s.MaxSize
		)

		// write bytes no further than the boundary of the current object
		if ln > leftToEdge {
			cut = leftToEdge
		}

		if err := s.writeHashes(chunk[:cut]); err != nil {
			return fmt.Errorf("could not write chunk to target: %w", err)
		}

		// increase the written bytes counters
		s.writtenCurrent += cut
		s.written += cut

		if cut == ln {
			return nil
		}
		// if there are more bytes in the buffer, repeat the loop to start
		// filling the next object
		chunk = chunk[cut:]
	}
}
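
// writeHashes writes the chunk to the next target and feeds it to the
// checksum hashers of the current and (if present) parent objects.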
func (s *payloadSizeLimiter) writeHashes(chunk []byte) error {
	_, err := s.NextTarget.Write(chunk)
	if err != nil {
		return err
	}

	// The `Write` method of `hash.Hash` never returns an error.
	for i := range s.currentHashers {
		_, _ = s.currentHashers[i].hasher.Write(chunk)
	}

	for i := range s.parentHashers {
		_, _ = s.parentHashers[i].hasher.Write(chunk)
	}

	return nil
}
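
// prepareFirstChild turns the current object into the first child of a split
// chain: it initializes the split relations and temporarily detaches the user
// attributes, which are later restored on the parent in initialize.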
func (s *payloadSizeLimiter) prepareFirstChild() {
	// initialize split header with split ID on the first object in the chain
	s.current.InitRelations()
	s.current.SetSplitID(s.splitID)

	// cut the source attributes
	s.parAttrs = s.current.Attributes()
	s.current.SetAttributes()

	// attributes will be restored on the parent object in initialize
}