slicer: Allow toggling homomorphic hashing
Homomorphic hashing of the object payload is not always necessary, so there should be a way to skip the calculation. It is also worth skipping it by default, since the current implementation of the Tillich-Zemor algorithm is resource-intensive. Do not calculate the homomorphic checksum in `Slicer` methods by default, and provide an option that enables the calculation. Make the tests randomize the calculation flag and assert the results accordingly.

Refs #342.

Signed-off-by: Leonard Lyubich <leonard@morphbits.io>
Parent: ab5ae28fdb
Commit: e2011832eb
4 changed files with 57 additions and 23 deletions
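For callers the change is opt-in: a `Slicer` no longer computes the Tillich-Zemor checksum unless the new option is set. Below is a minimal configuration sketch, assuming the nspcc-dev/neofs-sdk-go module layout for the import paths; `configureSlicerOpts` is a hypothetical helper, and only the `Options` and network-info methods that appear in the diff below are taken from the SDK:

	package example

	import (
		"github.com/nspcc-dev/neofs-sdk-go/netmap"
		"github.com/nspcc-dev/neofs-sdk-go/object/slicer"
	)

	// configureSlicerOpts is an illustrative helper: it builds slicer options
	// from current network settings. The homomorphic (Tillich-Zemor) checksum
	// is no longer computed by default, so it must be requested explicitly.
	func configureSlicerOpts(netInfo netmap.NetworkInfo) slicer.Options {
		var opts slicer.Options
		opts.SetObjectPayloadLimit(netInfo.MaxObjectSize())
		opts.SetCurrentNeoFSEpoch(netInfo.CurrentEpoch())

		// Opt in only when the network has not disabled homomorphic hashing.
		if !netInfo.HomomorphicHashingDisabled() {
			opts.CalculateHomomorphicChecksum()
		}

		return opts
	}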
@@ -342,6 +342,9 @@ func CreateObject(ctx context.Context, cli *Client, signer neofscrypto.Signer, c
 	var opts slicer.Options
 	opts.SetObjectPayloadLimit(netInfo.MaxObjectSize())
 	opts.SetCurrentNeoFSEpoch(netInfo.CurrentEpoch())
+	if !netInfo.HomomorphicHashingDisabled() {
+		opts.CalculateHomomorphicChecksum()
+	}
 
 	s := slicer.New(signer, cnr, owner, &objectWriter{
 		context: ctx,

@@ -5,6 +5,8 @@ type Options struct {
 	objectPayloadLimit uint64
 
 	currentNeoFSEpoch uint64
+
+	withHomoChecksum bool
 }
 
 // SetObjectPayloadLimit specifies data size limit for produced physically

@@ -17,3 +19,9 @@ func (x *Options) SetObjectPayloadLimit(l uint64) {
 func (x *Options) SetCurrentNeoFSEpoch(e uint64) {
 	x.currentNeoFSEpoch = e
 }
+
+// CalculateHomomorphicChecksum makes Slicer to calculate and set homomorphic
+// checksum of the processed objects.
+func (x *Options) CalculateHomomorphicChecksum() {
+	x.withHomoChecksum = true
+}

@@ -143,16 +143,15 @@ func (x *Slicer) Slice(data io.Reader, attributes ...string) (oid.ID, error) {
 
 	var rootID oid.ID
 	var rootHeader object.Object
-	var rootMeta dynamicObjectMetadata
 	var offset uint64
 	var isSplit bool
 	var childMeta dynamicObjectMetadata
 	var writtenChildren []oid.ID
 	var childHeader object.Object
+	rootMeta := newDynamicObjectMetadata(x.opts.withHomoChecksum)
 	bChunk := make([]byte, x.opts.objectPayloadLimit+1)
 
 	x.fillCommonMetadata(&rootHeader)
-	rootMeta.reset()
 
 	for {
 		n, err := data.Read(bChunk[offset:])

@@ -264,7 +263,7 @@ func (x *Slicer) Slice(data io.Reader, attributes ...string) (oid.ID, error) {
 
 		// shift overflow bytes to the beginning
 		if !isSplitCp {
-			childMeta = dynamicObjectMetadata{} // to avoid rootMeta corruption
+			childMeta = newDynamicObjectMetadata(x.opts.withHomoChecksum) // to avoid rootMeta corruption
 		}
 		childMeta.reset()
 		childMeta.accumulateNextPayloadChunk(bChunk[toSend:])

@@ -297,6 +296,8 @@ func (x *Slicer) InitPayloadStream(attributes ...string) (*PayloadWriter, error)
 		currentEpoch: x.opts.currentNeoFSEpoch,
 		sessionToken: x.sessionToken,
 		attributes:   attributes,
+		rootMeta:     newDynamicObjectMetadata(x.opts.withHomoChecksum),
+		childMeta:    newDynamicObjectMetadata(x.opts.withHomoChecksum),
 	}
 
 	res.buf.Grow(int(x.childPayloadSizeLimit()))

@@ -486,11 +487,13 @@ func flushObjectMetadata(signer neofscrypto.Signer, meta dynamicObjectMetadata,
 	cs.SetSHA256(csBytes)
 	header.SetPayloadChecksum(cs)
 
+	if meta.homomorphicChecksum != nil {
 		var csHomoBytes [tz.Size]byte
 		copy(csHomoBytes[:], meta.homomorphicChecksum.Sum(nil))
 
 		cs.SetTillichZemor(csHomoBytes)
 		header.SetPayloadHomomorphicHash(cs)
+	}
 
 	header.SetPayloadSize(meta.length)
 

@@ -552,6 +555,18 @@ type dynamicObjectMetadata struct {
 	homomorphicChecksum hash.Hash
 }
 
+func newDynamicObjectMetadata(withHomoChecksum bool) dynamicObjectMetadata {
+	m := dynamicObjectMetadata{
+		checksum: sha256.New(),
+	}
+
+	if withHomoChecksum {
+		m.homomorphicChecksum = tz.New()
+	}
+
+	return m
+}
+
 func (x *dynamicObjectMetadata) Write(chunk []byte) (int, error) {
 	x.accumulateNextPayloadChunk(chunk)
 	return len(chunk), nil

@@ -562,23 +577,17 @@ func (x *dynamicObjectMetadata) Write(chunk []byte) (int, error) {
 func (x *dynamicObjectMetadata) accumulateNextPayloadChunk(chunk []byte) {
 	x.length += uint64(len(chunk))
 	x.checksum.Write(chunk)
+	if x.homomorphicChecksum != nil {
 		x.homomorphicChecksum.Write(chunk)
+	}
 }
 
 // reset resets all accumulated metadata.
 func (x *dynamicObjectMetadata) reset() {
 	x.length = 0
-
-	if x.checksum != nil {
 	x.checksum.Reset()
-	} else {
-		x.checksum = sha256.New()
-	}
 
 	if x.homomorphicChecksum != nil {
 		x.homomorphicChecksum.Reset()
-	} else {
-		x.homomorphicChecksum = tz.New()
 	}
 }
-

@@ -141,6 +141,7 @@ type input struct {
 	sessionToken *session.Object
 	payload      []byte
 	attributes   []string
+	withHomo     bool
 }
 
 func randomData(size uint64) []byte {

@@ -181,9 +182,14 @@ func randomInput(tb testing.TB, size, sizeLimit uint64) (input, slicer.Options)
 		in.owner = *usertest.ID(tb)
 	}
 
+	in.withHomo = rand.Int()%2 == 0
+
 	var opts slicer.Options
 	opts.SetObjectPayloadLimit(in.payloadLimit)
 	opts.SetCurrentNeoFSEpoch(in.currentEpoch)
+	if in.withHomo {
+		opts.CalculateHomomorphicChecksum()
+	}
 
 	return in, opts
 }

@@ -340,6 +346,9 @@ func checkStaticMetadata(tb testing.TB, header object.Object, in input) {
 	require.Equal(tb, in.sessionToken, header.SessionToken(), "configured session token must be written into objects")
 
 	require.NoError(tb, object.CheckHeaderVerificationFields(&header), "verification fields must be correctly set in header")
+
+	_, ok = header.PayloadHomomorphicHash()
+	require.Equal(tb, in.withHomo, ok)
 }
 
 func (x *chainCollector) handleOutgoingObject(header object.Object, payload io.Reader) {

@@ -404,14 +413,19 @@ func (x *chainCollector) handleOutgoingObject(header object.Object, payload io.R
 	cs, ok := header.PayloadChecksum()
 	require.True(x.tb, ok)
 
-	csHomo, ok := header.PayloadHomomorphicHash()
-	require.True(x.tb, ok)
-
-	x.mPayloads[id] = payloadWithChecksum{
+	pcs := payloadWithChecksum{
 		r:  payload,
-		cs: []checksum.Checksum{cs, csHomo},
-		hs: []hash.Hash{sha256.New(), tz.New()},
+		cs: []checksum.Checksum{cs},
+		hs: []hash.Hash{sha256.New()},
 	}
+
+	csHomo, ok := header.PayloadHomomorphicHash()
+	if ok {
+		pcs.cs = append(pcs.cs, csHomo)
+		pcs.hs = append(pcs.hs, tz.New())
+	}
+
+	x.mPayloads[id] = pcs
 }
 
 func (x *chainCollector) verify(in input, rootID oid.ID) {