forked from TrueCloudLab/frostfs-node
Initial commit
Initial public review release v0.10.0
This commit is contained in:
commit
dadfd90dcd
276 changed files with 46331 additions and 0 deletions
25
lib/transformer/alias.go
Normal file
25
lib/transformer/alias.go
Normal file
|
@ -0,0 +1,25 @@
|
|||
package transformer
|
||||
|
||||
import (
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/storagegroup"
|
||||
)
|
||||
|
||||
type (
	// Object is a type alias of Object from the object package of neofs-api-go.
	Object = object.Object

	// ObjectID is a type alias of ObjectID from the refs package of neofs-api-go.
	ObjectID = refs.ObjectID

	// CID is a type alias of CID from the refs package of neofs-api-go.
	CID = refs.CID

	// StorageGroup is a type alias of StorageGroup from the storagegroup package of neofs-api-go.
	StorageGroup = storagegroup.StorageGroup
)
|
764
lib/transformer/put_test.go
Normal file
764
lib/transformer/put_test.go
Normal file
|
@ -0,0 +1,764 @@
|
|||
package transformer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-api-go/session"
|
||||
"github.com/nspcc-dev/neofs-api-go/storagegroup"
|
||||
crypto "github.com/nspcc-dev/neofs-crypto"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/core"
|
||||
"github.com/nspcc-dev/neofs-node/lib/implementations"
|
||||
"github.com/nspcc-dev/neofs-node/lib/objutil"
|
||||
"github.com/nspcc-dev/neofs-node/lib/test"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type (
	// testPutEntity is an entity for mocking interfaces.
	// Implementation of any interface intercepts arguments via f (if not nil).
	// If err is not nil, it is returned as is; otherwise res, cast to the needed type, is returned without error.
	testPutEntity struct {
		// Set of interfaces which the entity must implement; some of their methods are never called.

		// Argument interceptor. Used to ascertain correct parameter passage between components.
		f func(...interface{})
		// Mocked result of any interface.
		res interface{}
		// Mocked error of any interface.
		err error
	}
)
|
||||
|
||||
// Compile-time assertions that testPutEntity satisfies every mocked interface.
var (
	_ io.Writer                 = (*testPutEntity)(nil)
	_ EpochReceiver             = (*testPutEntity)(nil)
	_ Transformer               = (*testPutEntity)(nil)
	_ storagegroup.InfoReceiver = (*testPutEntity)(nil)
	_ objutil.Verifier          = (*testPutEntity)(nil)
)
|
||||
|
||||
func (s *testPutEntity) Verify(_ context.Context, obj *Object) error {
|
||||
if s.f != nil {
|
||||
s.f(obj)
|
||||
}
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *testPutEntity) Write(p []byte) (int, error) {
|
||||
if s.f != nil {
|
||||
s.f(p)
|
||||
}
|
||||
return 0, s.err
|
||||
}
|
||||
|
||||
func (s *testPutEntity) Transform(_ context.Context, u ProcUnit, h ...ProcUnitHandler) error {
|
||||
if s.f != nil {
|
||||
s.f(u, h)
|
||||
}
|
||||
return s.err
|
||||
}
|
||||
|
||||
func (s *testPutEntity) GetSGInfo(_ context.Context, cid CID, group []ObjectID) (*StorageGroup, error) {
|
||||
if s.f != nil {
|
||||
s.f(cid, group)
|
||||
}
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
}
|
||||
return s.res.(*StorageGroup), nil
|
||||
}
|
||||
|
||||
// Epoch returns the mocked epoch number stored in res.
func (s *testPutEntity) Epoch() uint64 {
	return s.res.(uint64)
}
|
||||
|
||||
// TestNewTransformer checks parameter validation in the Transformer constructor:
// a fully populated Params succeeds, and each missing/invalid field yields the
// corresponding wrapped construction error.
func TestNewTransformer(t *testing.T) {
	validParams := Params{
		SGInfoReceiver: new(testPutEntity),
		EpochReceiver:  new(testPutEntity),
		SizeLimit:      1,
		Verifier:       new(testPutEntity),
	}

	t.Run("valid params", func(t *testing.T) {
		res, err := NewTransformer(validParams)
		require.NoError(t, err)
		require.NotNil(t, res)
	})
	t.Run("non-positive size", func(t *testing.T) {
		p := validParams
		p.SizeLimit = 0
		_, err := NewTransformer(p)
		require.EqualError(t, err, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg).Error())
	})
	t.Run("empty SG info receiver", func(t *testing.T) {
		p := validParams
		p.SGInfoReceiver = nil
		_, err := NewTransformer(p)
		require.EqualError(t, err, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg).Error())
	})
	t.Run("empty epoch receiver", func(t *testing.T) {
		p := validParams
		p.EpochReceiver = nil
		_, err := NewTransformer(p)
		require.EqualError(t, err, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg).Error())
	})
	t.Run("empty object verifier", func(t *testing.T) {
		p := validParams
		p.Verifier = nil
		_, err := NewTransformer(p)
		require.EqualError(t, err, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg).Error())
	})
}
|
||||
|
||||
// Test_transformer checks error propagation through the composite transformer:
// a preliminary-stage failure is returned directly, and a size-limiter failure
// takes precedence while the signer is still wired in as the handler.
func Test_transformer(t *testing.T) {
	ctx := context.TODO()

	u := ProcUnit{
		Head: &Object{
			Payload: testData(t, 10),
		},
		Payload: new(emptyReader),
	}

	handlers := []ProcUnitHandler{func(context.Context, ProcUnit) error { return nil }}

	t.Run("preliminary transformation failure", func(t *testing.T) {
		// create custom error for test
		pErr := internal.Error("test error for prelim transformer")

		s := &transformer{
			tPrelim: &testPutEntity{
				f: func(items ...interface{}) {
					t.Run("correct prelim transformer params", func(t *testing.T) {
						require.Equal(t, u, items[0])
						require.Empty(t, items[1])
					})
				},
				err: pErr, // force Transformer to return pErr
			},
		}

		// ascertain that error returns as expected
		require.EqualError(t, s.Transform(ctx, u, handlers...), pErr.Error())
	})

	t.Run("size limiter error/correct sign processing", func(t *testing.T) {
		// create custom error for test
		sErr := internal.Error("test error for signer")
		lErr := internal.Error("test error for size limiter")

		s := &transformer{
			tPrelim: new(testPutEntity),
			tSizeLim: &testPutEntity{
				f: func(items ...interface{}) {
					t.Run("correct size limiter params", func(t *testing.T) {
						require.Equal(t, u, items[0])
						// the single handler passed to the limiter must invoke
						// the signer, which is forced to fail with sErr
						hs := items[1].([]ProcUnitHandler)
						require.Len(t, hs, 1)
						require.EqualError(t, hs[0](ctx, u), sErr.Error())
					})
				},
				err: lErr, // force Transformer to return lErr
			},
			tSign: &testPutEntity{
				f: func(items ...interface{}) {
					t.Run("correct signer params", func(t *testing.T) {
						require.Equal(t, u, items[0])
						require.Equal(t, handlers, items[1])
					})
				},
				err: sErr, // force Transformer to return sErr
			},
		}

		// ascertain that error returns as expected
		require.EqualError(t, s.Transform(ctx, u, handlers...), lErr.Error())
	})
}
|
||||
|
||||
// Test_preliminaryTransformer checks that the preliminary transformer
// propagates errors from its field moulder and storage group moulder stages.
func Test_preliminaryTransformer(t *testing.T) {
	ctx := context.TODO()

	u := ProcUnit{
		Head: &Object{
			Payload: testData(t, 10),
		},
		Payload: new(emptyReader),
	}

	t.Run("field moulder failure", func(t *testing.T) {
		// create custom error for test
		mErr := internal.Error("test error for field moulder")

		s := &preliminaryTransformer{
			fMoulder: &testPutEntity{
				f: func(items ...interface{}) {
					t.Run("correct field moulder params", func(t *testing.T) {
						require.Equal(t, u, items[0])
						require.Empty(t, items[1])
					})
				},
				err: mErr, // force Transformer to return mErr
			},
		}

		// ascertain that error returns as expected
		require.EqualError(t, s.Transform(ctx, u), mErr.Error())
	})

	t.Run("correct result", func(t *testing.T) {
		// create custom error for test
		sgErr := internal.Error("test error for SG moulder")

		s := &preliminaryTransformer{
			fMoulder: new(testPutEntity),
			sgMoulder: &testPutEntity{
				f: func(items ...interface{}) {
					t.Run("correct field moulder params", func(t *testing.T) {
						require.Equal(t, u, items[0])
						require.Empty(t, items[1])
					})
				},
				err: sgErr, // force Transformer to return sgErr
			},
		}

		// ascertain that error returns as expected
		require.EqualError(t, s.Transform(ctx, u), sgErr.Error())
	})
}
|
||||
|
||||
// Test_readChunk checks chunk reading from the payload stream: checksum
// header creation/update, EOF detection on short reads, and write-through
// to the hash accumulator and homomorphic hash header.
func Test_readChunk(t *testing.T) {
	t.Run("empty slice", func(t *testing.T) {
		t.Run("missing checksum header", func(t *testing.T) {
			obj := new(Object)

			_, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))
			require.Nil(t, h)

			require.NoError(t, readChunk(ProcUnit{
				Head:    obj,
				Payload: bytes.NewBuffer(testData(t, 10)),
			}, nil, nil, nil))

			_, h = obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))

			// an empty chunk slice yields the checksum of no data
			require.NotNil(t, h)
			require.Equal(t, sha256.New().Sum(nil), h.Value.(*object.Header_PayloadChecksum).PayloadChecksum)
		})

		t.Run("existing checksum header", func(t *testing.T) {
			h := &object.Header_PayloadChecksum{PayloadChecksum: testData(t, 10)}

			obj := &Object{Headers: []object.Header{{Value: h}}}

			require.NoError(t, readChunk(ProcUnit{
				Head:    obj,
				Payload: bytes.NewBuffer(testData(t, 10)),
			}, nil, nil, nil))

			// the pre-existing header is overwritten with the empty checksum
			require.NotNil(t, h)
			require.Equal(t, sha256.New().Sum(nil), h.PayloadChecksum)
		})
	})

	t.Run("non-empty slice", func(t *testing.T) {
		t.Run("non-full data", func(t *testing.T) {
			var (
				size = 10
				buf  = testData(t, size)
				r    = bytes.NewBuffer(buf[:size-1])
			)

			// reader holds one byte fewer than requested -> payload EOF
			require.EqualError(t,
				readChunk(ProcUnit{Head: new(Object), Payload: r}, buf, nil, nil),
				ErrPayloadEOF.Error(),
			)
		})

		t.Run("hash accumulator write", func(t *testing.T) {
			var (
				d       = testData(t, 10)
				srcHash = sha256.Sum256(d)
				hAcc    = sha256.New()
				buf     = bytes.NewBuffer(d)
				b       = make([]byte, len(d))
				obj     = new(Object)

				srcHomoHash = hash.Sum(d)
				homoHashHdr = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}
			)

			t.Run("failure", func(t *testing.T) {
				hErr := internal.Error("test error for hash writer")
				b := testData(t, len(d))

				require.EqualError(t, readChunk(EmptyPayloadUnit(new(Object)), b, &testPutEntity{
					f: func(items ...interface{}) {
						t.Run("correct accumulator params", func(t *testing.T) {
							require.Equal(t, b, items[0])
						})
					},
					err: hErr,
				}, nil), hErr.Error())
			})

			require.NoError(t, readChunk(ProcUnit{Head: obj, Payload: buf}, b, hAcc, homoHashHdr))

			_, h := obj.LastHeader(object.HeaderType(object.PayloadChecksumHdr))
			require.NotNil(t, h)
			require.Equal(t, srcHash[:], h.Value.(*object.Header_PayloadChecksum).PayloadChecksum)

			// accumulator and homomorphic hash header were updated with the chunk
			require.Equal(t, srcHash[:], hAcc.Sum(nil))
			require.Equal(t, srcHomoHash, homoHashHdr.HomoHash)
		})
	})
}
|
||||
|
||||
// Test_headSigner checks header signing: a missing session token fails with
// errNoToken, and with a token the produced integrity header passes local
// head integrity verification and carries the headers' checksum.
func Test_headSigner(t *testing.T) {
	ctx := context.TODO()

	t.Run("invalid input", func(t *testing.T) {
		t.Run("missing token", func(t *testing.T) {
			u := ProcUnit{Head: new(Object)}
			require.Error(t, u.Head.Verify())
			s := &headSigner{verifier: &testPutEntity{err: internal.Error("")}}
			require.EqualError(t, s.Transform(ctx, u), errNoToken.Error())
		})

		t.Run("with token", func(t *testing.T) {
			u := ProcUnit{Head: new(Object)}

			verifier, err := implementations.NewLocalHeadIntegrityVerifier(core.NewNeoKeyVerifier())
			require.NoError(t, err)

			require.Error(t, u.Head.Verify())

			privateToken, err := session.NewPrivateToken(0)
			require.NoError(t, err)
			// deliberately shadow ctx with a token-carrying child context
			ctx := context.WithValue(ctx, PrivateSessionToken, privateToken)

			s := &headSigner{
				verifier: &testPutEntity{
					err: internal.Error(""),
				},
			}

			key := &privateToken.PrivateKey().PublicKey

			u.Head.SystemHeader.OwnerID, err = refs.NewOwnerID(key)
			require.NoError(t, err)
			u.Head.AddHeader(&object.Header{
				Value: &object.Header_PublicKey{
					PublicKey: &object.PublicKey{
						Value: crypto.MarshalPublicKey(key),
					},
				},
			})

			require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
				require.NoError(t, verifier.Verify(ctx, unit.Head))
				_, h := unit.Head.LastHeader(object.HeaderType(object.IntegrityHdr))
				require.NotNil(t, h)
				// the integrity header must contain the checksum of all
				// headers preceding it
				d, err := objutil.MarshalHeaders(unit.Head, len(unit.Head.Headers)-1)
				require.NoError(t, err)
				cs := sha256.Sum256(d)
				require.Equal(t, cs[:], h.Value.(*object.Header_Integrity).Integrity.GetHeadersChecksum())
				return nil
			}))

			t.Run("valid input", func(t *testing.T) {
				// verifier succeeds -> unit passes through unchanged
				s := &headSigner{verifier: new(testPutEntity)}
				require.NoError(t, s.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
					require.Equal(t, u, unit)
					return nil
				}))
			})
		})
	})
}
|
||||
|
||||
// Test_fieldMoulder checks that the field moulder requires a public session
// token and, when present, attaches it as a header and fills the system
// header (ID, creation time, epoch, version).
func Test_fieldMoulder(t *testing.T) {
	ctx := context.TODO()
	epoch := uint64(100)

	fMoulder := &fieldMoulder{epochRecv: &testPutEntity{res: epoch}}

	t.Run("no token", func(t *testing.T) {
		require.EqualError(t, new(fieldMoulder).Transform(ctx, ProcUnit{}), errNoToken.Error())
	})

	t.Run("with token", func(t *testing.T) {
		token := new(service.Token)
		token.SetID(service.TokenID{1, 2, 3})

		ctx := context.WithValue(ctx, PublicSessionToken, token)

		u := ProcUnit{Head: new(Object)}

		_, h := u.Head.LastHeader(object.HeaderType(object.TokenHdr))
		require.Nil(t, h)

		require.NoError(t, fMoulder.Transform(ctx, u))

		_, h = u.Head.LastHeader(object.HeaderType(object.TokenHdr))
		require.Equal(t, token, h.Value.(*object.Header_Token).Token)

		// system header fields are populated by the moulder
		require.False(t, u.Head.SystemHeader.ID.Empty())
		require.NotZero(t, u.Head.SystemHeader.CreatedAt.UnixTime)
		require.Equal(t, epoch, u.Head.SystemHeader.CreatedAt.Epoch)
		require.Equal(t, uint64(1), u.Head.SystemHeader.Version)
	})
}
|
||||
|
||||
// Test_sgMoulder checks storage group moulding: inconsistent SG header/link
// combinations are rejected, non-SG objects pass through, and SG info
// received for the (sorted) object group is attached as a header.
func Test_sgMoulder(t *testing.T) {
	ctx := context.TODO()

	t.Run("invalid SG linking", func(t *testing.T) {
		t.Run("w/ header and w/o links", func(t *testing.T) {
			obj := new(Object)
			obj.SetStorageGroup(new(storagegroup.StorageGroup))
			require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error())
		})

		t.Run("w/o header and w/ links", func(t *testing.T) {
			obj := new(Object)
			addLink(obj, object.Link_StorageGroup, ObjectID{})
			require.EqualError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}), ErrInvalidSGLinking.Error())
		})
	})

	t.Run("non-SG", func(t *testing.T) {
		obj := new(Object)
		require.NoError(t, new(sgMoulder).Transform(ctx, ProcUnit{Head: obj}))
	})

	t.Run("receive SG info", func(t *testing.T) {
		cid := testObjectAddress(t).CID
		group := make([]ObjectID, 5)
		for i := range group {
			group[i] = testObjectAddress(t).ObjectID
		}

		t.Run("failure", func(t *testing.T) {
			obj := &Object{SystemHeader: object.SystemHeader{CID: cid}}

			obj.SetStorageGroup(new(storagegroup.StorageGroup))
			for i := range group {
				addLink(obj, object.Link_StorageGroup, group[i])
			}

			sgErr := internal.Error("test error for SG info receiver")

			mSG := &sgMoulder{
				sgInfoRecv: &testPutEntity{
					f: func(items ...interface{}) {
						t.Run("correct SG info receiver params", func(t *testing.T) {
							// the receiver must be called with the sorted group
							cp := make([]ObjectID, len(group))
							copy(cp, group)
							sort.Sort(storagegroup.IDList(cp))
							require.Equal(t, cid, items[0])
							require.Equal(t, cp, items[1])
						})
					},
					err: sgErr,
				},
			}

			require.EqualError(t, mSG.Transform(ctx, ProcUnit{Head: obj}), sgErr.Error())
		})
	})

	t.Run("correct result", func(t *testing.T) {
		obj := new(Object)
		obj.SetStorageGroup(new(storagegroup.StorageGroup))
		addLink(obj, object.Link_StorageGroup, ObjectID{})

		sgInfo := &storagegroup.StorageGroup{
			ValidationDataSize: 19,
			ValidationHash:     hash.Sum(testData(t, 10)),
		}

		mSG := &sgMoulder{
			sgInfoRecv: &testPutEntity{
				res: sgInfo,
			},
		}

		require.NoError(t, mSG.Transform(ctx, ProcUnit{Head: obj}))

		// the received SG info is attached as the last SG header
		_, h := obj.LastHeader(object.HeaderType(object.StorageGroupHdr))
		require.NotNil(t, h)
		require.Equal(t, sgInfo, h.Value.(*object.Header_StorageGroup).StorageGroup)
	})
}
|
||||
|
||||
// Test_sizeLimiter checks payload splitting: a payload within the limit is
// passed through whole, while a larger payload is split into linked child
// objects topped by a zero-length parent carrying the child links.
func Test_sizeLimiter(t *testing.T) {
	ctx := context.TODO()

	t.Run("limit entry", func(t *testing.T) {
		payload := testData(t, 10)
		payloadSize := uint64(len(payload) - 1)

		u := ProcUnit{
			Head: &Object{SystemHeader: object.SystemHeader{
				PayloadLength: payloadSize,
			}},
			Payload: bytes.NewBuffer(payload[:payloadSize]),
		}

		sl := &sizeLimiter{limit: payloadSize}

		t.Run("cut payload", func(t *testing.T) {
			// reader shorter than the declared length must fail
			require.Error(t, sl.Transform(ctx, ProcUnit{
				Head:    &Object{SystemHeader: object.SystemHeader{PayloadLength: payloadSize}},
				Payload: bytes.NewBuffer(payload[:payloadSize-1]),
			}))
		})

		require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
			// payload is fully consumed and moved into the object
			_, err := unit.Payload.Read(make([]byte, 1))
			require.EqualError(t, err, io.EOF.Error())
			require.Equal(t, payload[:payloadSize], unit.Head.Payload)
			_, h := unit.Head.LastHeader(object.HeaderType(object.HomoHashHdr))
			require.NotNil(t, h)
			require.Equal(t, hash.Sum(payload[:payloadSize]), h.Value.(*object.Header_HomoHash).HomoHash)
			return nil
		}))
	})

	t.Run("limit exceed", func(t *testing.T) {
		payload := testData(t, 100)
		sizeLimit := uint64(len(payload)) / 13

		pToken, err := session.NewPrivateToken(0)
		require.NoError(t, err)

		srcObj := &object.Object{
			SystemHeader: object.SystemHeader{
				Version:       12,
				PayloadLength: uint64(len(payload)),
				ID:            testObjectAddress(t).ObjectID,
				OwnerID:       object.OwnerID{1, 2, 3},
				CID:           testObjectAddress(t).CID,
			},
			Headers: []object.Header{
				{Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}},
			},
		}

		u := ProcUnit{
			Head:    srcObj,
			Payload: bytes.NewBuffer(payload),
		}

		epoch := uint64(77)

		sl := &sizeLimiter{
			limit:     sizeLimit,
			epochRecv: &testPutEntity{res: epoch},
		}

		t.Run("no token", func(t *testing.T) {
			require.EqualError(t, sl.Transform(ctx, ProcUnit{
				Head: &Object{
					SystemHeader: object.SystemHeader{
						PayloadLength: uint64(len(payload)),
					},
				},
				Payload: bytes.NewBuffer(payload),
			}), errNoToken.Error())
		})

		// deliberately shadow ctx with a token-carrying child context
		ctx := context.WithValue(ctx, PrivateSessionToken, pToken)

		t.Run("cut payload", func(t *testing.T) {
			require.Error(t, sl.Transform(ctx, ProcUnit{
				Head: &Object{
					SystemHeader: object.SystemHeader{
						PayloadLength: uint64(len(payload)) + 1,
					},
				},
				Payload: bytes.NewBuffer(payload),
			}))
		})

		objs := make([]Object, 0)

		t.Run("handler error", func(t *testing.T) {
			hErr := internal.Error("test error for handler")

			require.EqualError(t, sl.Transform(ctx, ProcUnit{
				Head: &Object{
					SystemHeader: object.SystemHeader{PayloadLength: uint64(len(payload))},
					Headers:      make([]object.Header, 0),
				},
				Payload: bytes.NewBuffer(payload),
			}, func(context.Context, ProcUnit) error { return hErr }), hErr.Error())
		})

		// collect every produced object (children plus final parent)
		require.NoError(t, sl.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
			_, err := unit.Payload.Read(make([]byte, 1))
			require.EqualError(t, err, io.EOF.Error())
			objs = append(objs, *unit.Head.Copy())
			return nil
		}))

		ln := len(objs)

		res := make([]byte, 0, len(payload))

		// the last emitted object is the zero-payload parent that links
		// to every child in emission order
		zObj := objs[ln-1]
		require.Zero(t, zObj.SystemHeader.PayloadLength)
		require.Empty(t, zObj.Payload)
		require.Empty(t, zObj.Links(object.Link_Next))
		require.Empty(t, zObj.Links(object.Link_Previous))
		require.Empty(t, zObj.Links(object.Link_Parent))
		children := zObj.Links(object.Link_Child)
		require.Len(t, children, ln-1)
		for i := range objs[:ln-1] {
			require.Equal(t, objs[i].SystemHeader.ID, children[i])
		}

		// children form a doubly-linked chain; the last child carries the
		// homomorphic hash of the whole payload
		for i := range objs[:ln-1] {
			res = append(res, objs[i].Payload...)
			if i == 0 {
				require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID)
				require.True(t, objs[i].Links(object.Link_Previous)[0].Empty())
			} else if i < ln-2 {
				require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID)
				require.Equal(t, objs[i].Links(object.Link_Next)[0], objs[i+1].SystemHeader.ID)
			} else {
				_, h := objs[i].LastHeader(object.HeaderType(object.HomoHashHdr))
				require.NotNil(t, h)
				require.Equal(t, hash.Sum(payload), h.Value.(*object.Header_HomoHash).HomoHash)
				require.Equal(t, objs[i].Links(object.Link_Previous)[0], objs[i-1].SystemHeader.ID)
				require.True(t, objs[i].Links(object.Link_Next)[0].Empty())
			}
		}

		// concatenated child payloads reproduce the source payload
		require.Equal(t, payload, res)
	})
}
|
||||
|
||||
// testData returns size bytes of random data.
|
||||
func testData(t *testing.T, size int) []byte {
|
||||
res := make([]byte, size)
|
||||
_, err := rand.Read(res)
|
||||
require.NoError(t, err)
|
||||
return res
|
||||
}
|
||||
|
||||
// testObjectAddress returns new random object address.
|
||||
func testObjectAddress(t *testing.T) refs.Address {
|
||||
oid, err := refs.NewObjectID()
|
||||
require.NoError(t, err)
|
||||
return refs.Address{CID: refs.CIDForBytes(testData(t, refs.CIDSize)), ObjectID: oid}
|
||||
}
|
||||
|
||||
// TestIntegration runs a full transform/restore round trip: a signed session
// token is prepared, an object with a payload larger than the size limit is
// transformed (split), and testTransformer checks it restores losslessly.
func TestIntegration(t *testing.T) {
	ownerKey := test.DecodeKey(1)

	ownerID, err := refs.NewOwnerID(&ownerKey.PublicKey)
	require.NoError(t, err)

	privToken, err := session.NewPrivateToken(0)
	require.NoError(t, err)

	pkBytes, err := session.PublicSessionToken(privToken)
	require.NoError(t, err)

	ctx := context.WithValue(context.TODO(), PrivateSessionToken, privToken)

	// build and sign the public session token used by the signer
	pubToken := new(service.Token)
	pubToken.SetID(service.TokenID{1, 2, 3})
	pubToken.SetSessionKey(pkBytes)
	pubToken.SetOwnerID(ownerID)
	pubToken.SetOwnerKey(crypto.MarshalPublicKey(&ownerKey.PublicKey))
	require.NoError(t, service.AddSignatureWithKey(ownerKey, service.NewSignedSessionToken(pubToken)))

	ctx = context.WithValue(ctx, PublicSessionToken, pubToken)

	t.Run("non-SG object", func(t *testing.T) {
		t.Run("with split", func(t *testing.T) {
			tr, err := NewTransformer(Params{
				SGInfoReceiver: new(testPutEntity),
				EpochReceiver:  &testPutEntity{res: uint64(1)},
				SizeLimit:      13,
				Verifier: &testPutEntity{
					err: internal.Error(""), // force verifier to return non-nil error
				},
			})
			require.NoError(t, err)

			// payload (20 bytes) exceeds the limit (13), forcing a split
			payload := make([]byte, 20)
			_, err = rand.Read(payload)
			require.NoError(t, err)

			obj := &Object{
				SystemHeader: object.SystemHeader{
					PayloadLength: uint64(len(payload)),
					CID:           CID{3},
				},
				Headers: []object.Header{
					{Value: &object.Header_UserHeader{UserHeader: &object.UserHeader{Key: "key", Value: "value"}}},
				},
			}

			obj.SystemHeader.OwnerID = ownerID

			obj.SetHeader(&object.Header{
				Value: &object.Header_Token{
					Token: pubToken,
				},
			})

			testTransformer(t, ctx, ProcUnit{
				Head:    obj,
				Payload: bytes.NewBuffer(payload),
			}, tr, payload)
		})
	})
}
|
||||
|
||||
// testTransformer transforms the unit, collects and head-verifies every
// produced object, restores them through the split restorer and asserts the
// restored object passes integrity verification and matches the src payload.
func testTransformer(t *testing.T, ctx context.Context, u ProcUnit, tr Transformer, src []byte) {
	objList := make([]Object, 0)
	verifier, err := implementations.NewLocalHeadIntegrityVerifier(core.NewNeoKeyVerifier())
	require.NoError(t, err)

	require.NoError(t, tr.Transform(ctx, u, func(_ context.Context, unit ProcUnit) error {
		require.NoError(t, verifier.Verify(ctx, unit.Head))
		objList = append(objList, *unit.Head.Copy())
		return nil
	}))

	reverse := NewRestorePipeline(SplitRestorer())

	res, err := reverse.Restore(ctx, objList...)
	require.NoError(t, err)

	integrityVerifier, err := implementations.NewLocalIntegrityVerifier(core.NewNeoKeyVerifier())
	require.NoError(t, err)
	require.NoError(t, integrityVerifier.Verify(ctx, &res[0]))

	// the restored payload and its homomorphic hash must match the source
	require.Equal(t, src, res[0].Payload)
	_, h := res[0].LastHeader(object.HeaderType(object.HomoHashHdr))
	require.True(t, hash.Sum(src).Equal(h.Value.(*object.Header_HomoHash).HomoHash))
}
|
||||
|
||||
func addLink(o *Object, t object.Link_Type, id ObjectID) {
|
||||
o.AddHeader(&object.Header{Value: &object.Header_Link{
|
||||
Link: &object.Link{Type: t, ID: id},
|
||||
}})
|
||||
}
|
126
lib/transformer/restore.go
Normal file
126
lib/transformer/restore.go
Normal file
|
@ -0,0 +1,126 @@
|
|||
package transformer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
	// ObjectRestorer is an interface of object restorer.
	ObjectRestorer interface {
		Type() object.Transform_Type
		Restore(context.Context, ...Object) ([]Object, error)
	}

	// restorePipeline dispatches objects to the restorer registered for
	// each transformation type found in their headers.
	restorePipeline struct {
		ObjectRestorer
		*sync.RWMutex
		// items maps a transformation type to the restorer that undoes it.
		items map[object.Transform_Type]ObjectRestorer
	}

	// splitRestorer undoes the split transformation by reassembling the
	// object chain (see Restore below).
	splitRestorer struct{}
)
|
||||
|
||||
var errEmptyObjList = errors.New("object list is empty")
|
||||
|
||||
var errMissingParentLink = errors.New("missing parent link")
|
||||
|
||||
// Restore iteratively undoes the transformations recorded in the objects'
// headers: while the leading object carries a transform header, the restorer
// registered for that transform type is applied to the current object set.
// Returns the fully restored objects or an error for a missing/failed restorer.
func (s *restorePipeline) Restore(ctx context.Context, srcObjs ...Object) ([]Object, error) {
	if len(srcObjs) == 0 {
		return nil, errEmptyInput
	}

	// the items map is only read here; guard against concurrent mutation
	s.RLock()
	defer s.RUnlock()

	var (
		objs = srcObjs
		err  error
	)

	for {
		// the last transform header of the leading object determines
		// which restorer runs next; no header means restoration is done
		_, th := objs[0].LastHeader(object.HeaderType(object.TransformHdr))
		if th == nil {
			break
		}

		transform := th.Value.(*object.Header_Transform).Transform

		tr, ok := s.items[transform.Type]
		if !ok {
			return nil, errors.Errorf("missing restorer (%s)", transform.Type)
		}

		if objs, err = tr.Restore(ctx, objs...); err != nil {
			return nil, errors.Wrapf(err, "restoration failed (%s)", transform.Type)
		}
	}

	return objs, nil
}
|
||||
|
||||
// NewRestorePipeline is a constructor of the pipeline of object restorers.
|
||||
func NewRestorePipeline(t ...ObjectRestorer) ObjectRestorer {
|
||||
m := make(map[object.Transform_Type]ObjectRestorer, len(t))
|
||||
|
||||
for i := range t {
|
||||
m[t[i].Type()] = t[i]
|
||||
}
|
||||
|
||||
return &restorePipeline{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
items: m,
|
||||
}
|
||||
}
|
||||
|
||||
// Type returns the transformation type handled by this restorer (split).
func (*splitRestorer) Type() object.Transform_Type {
	return object.Transform_Split
}
|
||||
|
||||
// Restore reassembles a chain of split objects into the single parent object:
// payloads are concatenated in chain order, the total length is restored, the
// parent link becomes the object ID and the split transform header is removed.
func (*splitRestorer) Restore(ctx context.Context, objs ...Object) ([]Object, error) {
	if len(objs) == 0 {
		return nil, errEmptyObjList
	}

	// order the chunks by their link headers
	chain, err := GetChain(objs...)
	if err != nil {
		return nil, errors.Wrap(err, "could not get chain of objects")
	}

	// the last chunk in the chain is used as the base of the restored object
	obj := chain[len(chain)-1]

	var (
		size uint64
		// capacity estimate assumes roughly equal-sized chunks
		p = make([]byte, 0, len(chain[0].Payload)*len(chain))
	)

	// concatenate payloads and accumulate the total payload length
	for j := 0; j < len(chain); j++ {
		p = append(p, chain[j].Payload...)
		size += chain[j].SystemHeader.PayloadLength
	}

	obj.SystemHeader.PayloadLength = size
	obj.Payload = p

	// the restored object takes the ID recorded in the parent link
	parent, err := lastLink(&obj, object.Link_Parent)
	if err != nil {
		return nil, errMissingParentLink
	}

	obj.SystemHeader.ID = parent

	// drop the split transform header now that the split is undone
	err = deleteTransformer(&obj, object.Transform_Split)
	if err != nil {
		return nil, err
	}

	return []Object{obj}, nil
}
|
||||
|
||||
// SplitRestorer is a splitted object restorer's constructor.
|
||||
func SplitRestorer() ObjectRestorer {
|
||||
return new(splitRestorer)
|
||||
}
|
528
lib/transformer/transformer.go
Normal file
528
lib/transformer/transformer.go
Normal file
|
@ -0,0 +1,528 @@
|
|||
package transformer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/hash"
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-api-go/session"
|
||||
"github.com/nspcc-dev/neofs-api-go/storagegroup"
|
||||
"github.com/nspcc-dev/neofs-node/internal"
|
||||
"github.com/nspcc-dev/neofs-node/lib/objutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type (
	// Type is a type alias of
	// Type from object package of neofs-api-go.
	Type = object.Transform_Type

	// ProcUnit groups the information about transforming unit.
	ProcUnit struct {
		Head    *Object
		Payload io.Reader
	}

	// ProcUnitHandler is a handling ProcUnit function.
	ProcUnitHandler func(context.Context, ProcUnit) error

	// Transformer is an interface of object transformer.
	Transformer interface {
		Transform(context.Context, ProcUnit, ...ProcUnitHandler) error
	}

	// EpochReceiver is an interface of epoch number container with read access.
	EpochReceiver interface {
		Epoch() uint64
	}

	// transformer chains the preliminary, size-limiting and signing
	// sub-transformers into the complete object transformation.
	transformer struct {
		tPrelim  Transformer
		tSizeLim Transformer
		tSign    Transformer
	}

	// preliminaryTransformer applies the field moulder and then the
	// storage group moulder before payload processing.
	preliminaryTransformer struct {
		fMoulder  Transformer
		sgMoulder Transformer
	}

	// fieldMoulder fills object fields that depend on the current epoch
	// and the session token (see Test_fieldMoulder).
	fieldMoulder struct {
		epochRecv EpochReceiver
	}

	// sgMoulder attaches received storage group info to SG objects.
	sgMoulder struct {
		sgInfoRecv storagegroup.InfoReceiver
	}

	// sizeLimiter splits objects whose payload exceeds limit into a
	// linked chain of smaller objects (see Test_sizeLimiter).
	sizeLimiter struct {
		limit     uint64
		epochRecv EpochReceiver
	}

	// headSigner signs object headers using the session token from the
	// context (see Test_headSigner).
	headSigner struct {
		verifier objutil.Verifier
	}

	// emptyReader is a payload stand-in; presumably reads no data — TODO
	// confirm against its Read implementation (not visible here).
	emptyReader struct{}

	// Params groups the parameters of object transformer's constructor.
	Params struct {
		SGInfoReceiver storagegroup.InfoReceiver
		EpochReceiver  EpochReceiver
		SizeLimit      uint64
		Verifier       objutil.Verifier
	}
)
|
||||
|
||||
// ErrPayloadEOF is returned by Transformer that
// received unexpected end of object payload.
const ErrPayloadEOF = internal.Error("payload EOF")

const (
	verifyHeadersCount = 2 // payload checksum, integrity
	splitHeadersCount  = 4 // flag, parent, left, right

	// errEmptyInput is returned on empty transformation input.
	errEmptyInput = internal.Error("empty input")

	// transformerInstanceFailMsg prefixes constructor validation errors.
	transformerInstanceFailMsg = "could not create transformer instance"
	errEmptySGInfoRecv         = internal.Error("empty storage group info receivers")
	errInvalidSizeLimit        = internal.Error("non-positive object size limit")
	errEmptyEpochReceiver      = internal.Error("empty epoch receiver")
	errEmptyVerifier           = internal.Error("empty object verifier")

	// ErrInvalidSGLinking is returned by Transformer that received
	// an object with broken storage group links.
	ErrInvalidSGLinking = internal.Error("invalid storage group linking")

	// PrivateSessionToken is a context key for session.PrivateToken.
	PrivateSessionToken = "private token"

	// PublicSessionToken is a context key for service.SessionToken.
	PublicSessionToken = "public token"

	// errNoToken is returned when the context carries no session token.
	errNoToken = internal.Error("no token provided")
)
|
||||
|
||||
// errChainNotFound is returned by GetChain when no first
// chain element is present among the input objects.
var errChainNotFound = errors.New("chain not found")

// errCutChain is returned by GetChain when the last reachable
// element still has a non-empty next link.
var errCutChain = errors.New("GetChain failed: chain is not full")

// errMissingTransformHdr is returned by deleteTransformer when the
// topmost transformer header is absent or of an unexpected type.
var errMissingTransformHdr = errors.New("cannot find transformer header")
|
||||
|
||||
// NewTransformer is an object transformer's constructor.
|
||||
func NewTransformer(p Params) (Transformer, error) {
|
||||
switch {
|
||||
case p.SizeLimit <= 0:
|
||||
return nil, errors.Wrap(errInvalidSizeLimit, transformerInstanceFailMsg)
|
||||
case p.EpochReceiver == nil:
|
||||
return nil, errors.Wrap(errEmptyEpochReceiver, transformerInstanceFailMsg)
|
||||
case p.SGInfoReceiver == nil:
|
||||
return nil, errors.Wrap(errEmptySGInfoRecv, transformerInstanceFailMsg)
|
||||
case p.Verifier == nil:
|
||||
return nil, errors.Wrap(errEmptyVerifier, transformerInstanceFailMsg)
|
||||
}
|
||||
|
||||
return &transformer{
|
||||
tPrelim: &preliminaryTransformer{
|
||||
fMoulder: &fieldMoulder{
|
||||
epochRecv: p.EpochReceiver,
|
||||
},
|
||||
sgMoulder: &sgMoulder{
|
||||
sgInfoRecv: p.SGInfoReceiver,
|
||||
},
|
||||
},
|
||||
tSizeLim: &sizeLimiter{
|
||||
limit: p.SizeLimit,
|
||||
epochRecv: p.EpochReceiver,
|
||||
},
|
||||
tSign: &headSigner{
|
||||
verifier: p.Verifier,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *transformer) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
|
||||
if err := s.tPrelim.Transform(ctx, unit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.tSizeLim.Transform(ctx, unit, func(ctx context.Context, unit ProcUnit) error {
|
||||
return s.tSign.Transform(ctx, unit, handlers...)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *preliminaryTransformer) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error {
|
||||
if err := s.fMoulder.Transform(ctx, unit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.sgMoulder.Transform(ctx, unit)
|
||||
}
|
||||
|
||||
// Transform cuts the unit payload into objects of at most s.limit bytes.
//
// If the payload fits into the limit, the head gets a homomorphic hash
// header, the whole payload is read into it and the unit is passed to
// the handlers as a single object. Otherwise the payload is streamed
// into a chain of child objects connected by Previous/Next links and a
// Parent link to the source object; each child is signed and handed to
// the handlers, and finally the parent object (payload dropped, child
// links attached) is processed as well.
//
// TODO: simplify huge function.
func (s *sizeLimiter) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
	if unit.Head.SystemHeader.PayloadLength <= s.limit {
		// Single-object case: homomorphic hash accumulator starts from
		// the hash of the empty byte sequence.
		homoHashHdr := &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}

		unit.Head.AddHeader(&object.Header{Value: homoHashHdr})

		buf := make([]byte, unit.Head.SystemHeader.PayloadLength)

		if err := readChunk(unit, buf, nil, homoHashHdr); err != nil {
			return err
		}

		unit.Head.Payload = buf

		return procHandlers(ctx, EmptyPayloadUnit(unit.Head), handlers...)
	}

	var (
		err       error
		buf       = make([]byte, s.limit) // reusable chunk buffer
		hAcc      = sha256.New()          // running checksum over the whole payload
		srcHdrLen = len(unit.Head.Headers)
		pObj      = unit.Head
		resObj    = ProcUnit{ // template for each produced child object
			Head: &Object{
				SystemHeader: object.SystemHeader{
					Version: pObj.SystemHeader.Version,
					OwnerID: pObj.SystemHeader.OwnerID,
					CID:     pObj.SystemHeader.CID,
					CreatedAt: object.CreationPoint{
						UnixTime: time.Now().Unix(),
						Epoch:    s.epochRecv.Epoch(),
					},
				},
			},
			Payload: unit.Payload,
		}
		left, right         = &object.Link{Type: object.Link_Previous}, &object.Link{Type: object.Link_Next}
		hashAccHdr, hashHdr = new(object.Header_PayloadChecksum), new(object.Header_PayloadChecksum)
		homoHashAccHdr      = &object.Header_HomoHash{HomoHash: hash.Sum(make([]byte, 0))}
		childCount          = pObj.SystemHeader.PayloadLength/s.limit + 1
	)

	// Pre-generate the ID of the first child so the Next link of the
	// object being produced is always known in advance.
	if right.ID, err = refs.NewObjectID(); err != nil {
		return err
	}

	// One backing array holds the source headers, the split/link headers
	// and room for one child link per child; `children` below aliases the
	// tail of this array. NOTE(review): the index arithmetic here relies
	// on verifyHeadersCount/splitHeadersCount matching the literal header
	// list appended next — keep them in sync.
	splitHeaders := make([]object.Header, 0, 3*verifyHeadersCount+splitHeadersCount+childCount)

	splitHeaders = append(splitHeaders, pObj.Headers...)
	splitHeaders = append(splitHeaders, []object.Header{
		{Value: &object.Header_Transform{Transform: &object.Transform{Type: object.Transform_Split}}},
		{Value: &object.Header_Link{Link: &object.Link{
			Type: object.Link_Parent,
			ID:   unit.Head.SystemHeader.ID,
		}}},
		{Value: &object.Header_Link{Link: left}},
		{Value: &object.Header_Link{Link: right}},
		{Value: hashHdr},
		{Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}},
		{Value: homoHashAccHdr},
		{Value: hashAccHdr},
		{Value: &object.Header_Integrity{Integrity: new(object.IntegrityHeader)}},
	}...)

	children := splitHeaders[srcHdrLen+2*verifyHeadersCount+splitHeadersCount+1:]
	pObj.Headers = splitHeaders[:srcHdrLen+2*verifyHeadersCount+splitHeadersCount]

	for tail := pObj.SystemHeader.PayloadLength; tail > 0; tail -= min(tail, s.limit) {
		size := min(tail, s.limit)

		// Expose all headers except the trailing per-parent verification
		// group to the child being produced.
		resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)-verifyHeadersCount-1]
		if err = readChunk(resObj, buf[:size], hAcc, homoHashAccHdr); err != nil {
			return err
		}

		resObj.Head.SystemHeader.PayloadLength = size
		resObj.Head.Payload = buf[:size]
		// Shift the link window: current object becomes the previous one.
		left.ID, resObj.Head.SystemHeader.ID = resObj.Head.SystemHeader.ID, right.ID

		if tail <= s.limit {
			// Last child: terminate the chain and finalize the parent.
			right.ID = ObjectID{}

			temp := make([]object.Header, verifyHeadersCount+1) // +1 for homomorphic hash

			copy(temp, pObj.Headers[srcHdrLen:])

			hashAccHdr.PayloadChecksum = hAcc.Sum(nil)

			// Swap header groups so the parent can be signed with its own
			// verification headers in place, then restore the layout.
			// NOTE(review): order of these copies is load-bearing.
			copy(pObj.Headers[srcHdrLen:srcHdrLen+verifyHeadersCount+1],
				pObj.Headers[len(pObj.Headers)-verifyHeadersCount:])

			resObj.Head.Headers = pObj.Headers[:srcHdrLen+verifyHeadersCount]

			if err = signWithToken(ctx, &Object{
				SystemHeader: pObj.SystemHeader,
				Headers:      resObj.Head.Headers,
			}); err != nil {
				return err
			}

			copy(pObj.Headers[srcHdrLen+2*(verifyHeadersCount+1):],
				pObj.Headers[srcHdrLen+verifyHeadersCount+1:srcHdrLen+verifyHeadersCount+splitHeadersCount])

			copy(pObj.Headers[srcHdrLen+verifyHeadersCount+1:], temp)

			resObj.Head.Headers = pObj.Headers[:len(pObj.Headers)]
		} else if right.ID, err = refs.NewObjectID(); err != nil {
			return err
		}

		if err := procHandlers(ctx, EmptyPayloadUnit(resObj.Head), handlers...); err != nil {
			return err
		}

		// Record the produced child in the parent's child-link list.
		children = append(children, object.Header{Value: &object.Header_Link{Link: &object.Link{
			Type: object.Link_Child,
			ID:   resObj.Head.SystemHeader.ID,
		}}})
	}

	// The parent keeps no payload of its own; only source headers plus
	// the accumulated child links remain.
	pObj.SystemHeader.PayloadLength = 0
	pObj.Headers = append(pObj.Headers[:srcHdrLen], children...)

	// Zero-length read refreshes the parent's payload checksum header.
	if err := readChunk(unit, nil, nil, nil); err != nil {
		return err
	}

	return procHandlers(ctx, EmptyPayloadUnit(pObj), handlers...)
}
|
||||
|
||||
// readChunk fills buf from the unit payload reader and stores the
// SHA-256 of the chunk in the head's payload checksum header (creating
// the header if absent). The chunk is also written to hAcc (running
// payload checksum) when non-nil, and folded into homoHashAcc
// (homomorphic hash accumulator) when non-nil.
//
// ErrPayloadEOF is returned when the payload ends before buf is full;
// a clean io.EOF (empty buf) is tolerated.
func readChunk(unit ProcUnit, buf []byte, hAcc io.Writer, homoHashAcc *object.Header_HomoHash) (err error) {
	var csHdr *object.Header_PayloadChecksum

	// Reuse the existing payload checksum header if the head already
	// carries one; otherwise append a fresh header to be filled below.
	if _, v := unit.Head.LastHeader(object.HeaderType(object.PayloadChecksumHdr)); v == nil {
		csHdr = new(object.Header_PayloadChecksum)

		unit.Head.Headers = append(unit.Head.Headers, object.Header{Value: csHdr})
	} else {
		csHdr = v.Value.(*object.Header_PayloadChecksum)
	}

	// io.ReadFull returns io.EOF only when nothing was read (acceptable
	// for an empty buf) and io.ErrUnexpectedEOF on a short read, which is
	// reported as ErrPayloadEOF.
	if _, err = io.ReadFull(unit.Payload, buf); err != nil && err != io.EOF {
		if errors.Is(err, io.ErrUnexpectedEOF) {
			err = ErrPayloadEOF
		}

		return
	} else if hAcc != nil {
		if _, err = hAcc.Write(buf); err != nil {
			return
		}
	}

	if homoHashAcc != nil {
		// Fold the chunk hash into the running homomorphic hash.
		if homoHashAcc.HomoHash, err = hash.Concat([]hash.Hash{homoHashAcc.HomoHash, hash.Sum(buf)}); err != nil {
			return
		}
	}

	h := sha256.Sum256(buf)
	csHdr.PayloadChecksum = h[:]

	return nil
}
|
||||
|
||||
func (s *headSigner) Transform(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
|
||||
if s.verifier.Verify(ctx, unit.Head) != nil {
|
||||
if err := signWithToken(ctx, unit.Head); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return procHandlers(ctx, unit, handlers...)
|
||||
}
|
||||
|
||||
func signWithToken(ctx context.Context, obj *Object) error {
|
||||
integrityHdr := new(object.IntegrityHeader)
|
||||
|
||||
if pToken, ok := ctx.Value(PrivateSessionToken).(session.PrivateToken); !ok {
|
||||
return errNoToken
|
||||
} else if hdrData, err := objutil.MarshalHeaders(obj, len(obj.Headers)); err != nil {
|
||||
return err
|
||||
} else {
|
||||
cs := sha256.Sum256(hdrData)
|
||||
integrityHdr.SetHeadersChecksum(cs[:])
|
||||
if err = service.AddSignatureWithKey(pToken.PrivateKey(), integrityHdr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
obj.AddHeader(&object.Header{Value: &object.Header_Integrity{Integrity: integrityHdr}})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *fieldMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) (err error) {
|
||||
token, ok := ctx.Value(PublicSessionToken).(*service.Token)
|
||||
if !ok {
|
||||
return errNoToken
|
||||
}
|
||||
|
||||
unit.Head.AddHeader(&object.Header{
|
||||
Value: &object.Header_Token{
|
||||
Token: token,
|
||||
},
|
||||
})
|
||||
|
||||
if unit.Head.SystemHeader.ID.Empty() {
|
||||
if unit.Head.SystemHeader.ID, err = refs.NewObjectID(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if unit.Head.SystemHeader.CreatedAt.UnixTime == 0 {
|
||||
unit.Head.SystemHeader.CreatedAt.UnixTime = time.Now().Unix()
|
||||
}
|
||||
|
||||
if unit.Head.SystemHeader.CreatedAt.Epoch == 0 {
|
||||
unit.Head.SystemHeader.CreatedAt.Epoch = s.epochRecv.Epoch()
|
||||
}
|
||||
|
||||
if unit.Head.SystemHeader.Version == 0 {
|
||||
unit.Head.SystemHeader.Version = 1
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Transform resolves and attaches storage group information to the
// object head. The head must either carry both storage group links and
// a storage group header, or neither; a mismatch yields
// ErrInvalidSGLinking. Heads without storage group data, or with an
// already-filled group, are passed through untouched.
func (s *sgMoulder) Transform(ctx context.Context, unit ProcUnit, _ ...ProcUnitHandler) error {
	sgLinks := unit.Head.Links(object.Link_StorageGroup)

	group, err := unit.Head.StorageGroup()

	// (err == nil) reports the presence of the SG header, nonEmptyList the
	// presence of SG links; the two must agree.
	if nonEmptyList := len(sgLinks) > 0; (err == nil) != nonEmptyList {
		return ErrInvalidSGLinking
	} else if err != nil || !group.Empty() {
		// No storage group at all, or the group is already filled in.
		return nil
	}

	// Deterministic link order before querying the SG info receiver.
	sort.Sort(storagegroup.IDList(sgLinks))

	sgInfo, err := s.sgInfoRecv.GetSGInfo(ctx, unit.Head.SystemHeader.CID, sgLinks)
	if err != nil {
		return err
	}

	unit.Head.SetStorageGroup(sgInfo)

	return nil
}
|
||||
|
||||
func procHandlers(ctx context.Context, unit ProcUnit, handlers ...ProcUnitHandler) error {
|
||||
for i := range handlers {
|
||||
if err := handlers[i](ctx, unit); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader by always returning (0, io.EOF).
func (*emptyReader) Read([]byte) (int, error) {
	return 0, io.EOF
}
|
||||
|
||||
// EmptyPayloadUnit returns ProcUnit with Object from argument and empty payload reader
|
||||
// that always returns (0, io.EOF).
|
||||
func EmptyPayloadUnit(head *Object) ProcUnit { return ProcUnit{Head: head, Payload: new(emptyReader)} }
|
||||
|
||||
// min returns the smaller of two uint64 values.
func min(a, b uint64) uint64 {
	if b < a {
		return b
	}

	return a
}
|
||||
|
||||
// GetChain builds a list of objects in the hereditary chain.
// In case of impossibility to do this, an error is returned.
//
// The first element is the object whose Previous link is present but
// empty; the chain is then walked via Next links. A missing element,
// an absent first element, or a non-empty Next link on the final
// reachable element all produce an error.
func GetChain(srcObjs ...Object) ([]Object, error) {
	var (
		err       error
		first, id ObjectID
		res       = make([]Object, 0, len(srcObjs))
		m         = make(map[ObjectID]*Object, len(srcObjs))
	)

	// Fill map with all objects
	for i := range srcObjs {
		m[srcObjs[i].SystemHeader.ID] = &srcObjs[i]

		// NOTE: `:=` shadows the outer err here; the shadowed err is
		// checked immediately below, so no error escapes unnoticed.
		prev, err := lastLink(&srcObjs[i], object.Link_Previous)
		if err == nil && prev.Empty() { // then it is first
			// The first element must still carry a Next link (possibly
			// empty for a single-object chain).
			id, err = lastLink(&srcObjs[i], object.Link_Next)
			if err != nil {
				return nil, errors.Wrap(err, "GetChain failed: missing first object next links")
			}

			first = srcObjs[i].SystemHeader.ID
		}
	}

	// Check first presence
	if first.Empty() {
		return nil, errChainNotFound
	}

	res = append(res, *m[first])

	// Iterate chain; the count bound also guards against link cycles.
	for count := 0; !id.Empty() && count < len(srcObjs); count++ {
		nextObj, ok := m[id]
		if !ok {
			return nil, errors.Errorf("GetChain failed: missing next object %s", id)
		}

		id, err = lastLink(nextObj, object.Link_Next)
		if err != nil {
			return nil, errors.Wrap(err, "GetChain failed: missing object next links")
		}

		res = append(res, *nextObj)
	}

	// Check last chain element has empty next (prevent cut chain)
	id, err = lastLink(&res[len(res)-1], object.Link_Next)
	if err != nil {
		return nil, errors.Wrap(err, "GetChain failed: missing object next links")
	} else if !id.Empty() {
		return nil, errCutChain
	}

	return res, nil
}
|
||||
|
||||
func deleteTransformer(o *Object, t object.Transform_Type) error {
|
||||
n, th := o.LastHeader(object.HeaderType(object.TransformHdr))
|
||||
if th == nil || th.Value.(*object.Header_Transform).Transform.Type != t {
|
||||
return errMissingTransformHdr
|
||||
}
|
||||
|
||||
o.Headers = o.Headers[:n]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lastLink(o *Object, t object.Link_Type) (res ObjectID, err error) {
|
||||
for i := len(o.Headers) - 1; i >= 0; i-- {
|
||||
if v, ok := o.Headers[i].Value.(*object.Header_Link); ok {
|
||||
if v.Link.GetType() == t {
|
||||
res = v.Link.ID
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = errors.Errorf("object.lastLink: links of type %s not found", t)
|
||||
|
||||
return
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue