Account for pack header overhead at each entry
This will miss the pack header crypto overhead and the length field, which only amount to a few bytes per pack file.
This commit is contained in:
parent
856d5e4303
commit
a6e9e08034
4 changed files with 21 additions and 25 deletions
|
@@ -31,7 +31,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
|
|||
}
|
||||
|
||||
// Add saves the data read from rd as a new blob to the packer. Returned is the
|
||||
// number of bytes written to the pack.
|
||||
// number of bytes written to the pack plus the pack header entry size.
|
||||
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
@@ -44,6 +44,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedL
|
|||
c.UncompressedLength = uint(uncompressedLength)
|
||||
p.bytes += uint(n)
|
||||
p.blobs = append(p.blobs, c)
|
||||
n += CalculateEntrySize(c)
|
||||
|
||||
return n, errors.Wrap(err, "Write")
|
||||
}
|
||||
|
@@ -69,13 +70,11 @@ type compressedHeaderEntry struct {
|
|||
}
|
||||
|
||||
// Finalize writes the header for all added blobs and finalizes the pack.
|
||||
// Returned are the number of bytes written, including the header.
|
||||
func (p *Packer) Finalize() (uint, error) {
|
||||
// Returned are the number of bytes written, not yet reported by Add.
|
||||
func (p *Packer) Finalize() (int, error) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
|
||||
bytesWritten := p.bytes
|
||||
|
||||
header, err := p.makeHeader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
@@ -97,17 +96,14 @@ func (p *Packer) Finalize() (uint, error) {
|
|||
return 0, errors.New("wrong number of bytes written")
|
||||
}
|
||||
|
||||
bytesWritten += uint(hdrBytes)
|
||||
|
||||
// write length
|
||||
err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "binary.Write")
|
||||
}
|
||||
bytesWritten += uint(binary.Size(uint32(0)))
|
||||
p.bytes += uint(hdrBytes + binary.Size(uint32(0)))
|
||||
|
||||
p.bytes = uint(bytesWritten)
|
||||
return bytesWritten, nil
|
||||
return restic.CiphertextLength(0) + binary.Size(uint32(0)), nil
|
||||
}
|
||||
|
||||
// makeHeader constructs the header for p.
|
||||
|
|
|
@@ -106,11 +106,11 @@ func (r *packerManager) insertPacker(p *Packer) {
|
|||
}
|
||||
|
||||
// savePacker stores p in the backend.
|
||||
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error {
|
||||
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) (int, error) {
|
||||
debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
|
||||
_, err := p.Packer.Finalize()
|
||||
hdrOverhead, err := p.Packer.Finalize()
|
||||
if err != nil {
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
id := restic.IDFromHash(p.hw.Sum(nil))
|
||||
|
@@ -122,27 +122,27 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
|
|||
}
|
||||
rd, err := restic.NewFileReader(p.tmpfile, beHash)
|
||||
if err != nil {
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = r.be.Save(ctx, h, rd)
|
||||
if err != nil {
|
||||
debug.Log("Save(%v) error: %v", h, err)
|
||||
return err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
debug.Log("saved as %v", h)
|
||||
|
||||
err = p.tmpfile.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "close tempfile")
|
||||
return 0, errors.Wrap(err, "close tempfile")
|
||||
}
|
||||
|
||||
// on windows the tempfile is automatically deleted on close
|
||||
if runtime.GOOS != "windows" {
|
||||
err = fs.RemoveIfExists(p.tmpfile.Name())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Remove")
|
||||
return 0, errors.Wrap(err, "Remove")
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -152,9 +152,9 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
|
|||
|
||||
// Save index if full
|
||||
if r.noAutoIndexUpdate {
|
||||
return nil
|
||||
return hdrOverhead, nil
|
||||
}
|
||||
return r.idx.SaveFullIndex(ctx, r)
|
||||
return hdrOverhead, r.idx.SaveFullIndex(ctx, r)
|
||||
}
|
||||
|
||||
// countPacker returns the number of open (unfinished) packers.
|
||||
|
|
|
@@ -74,8 +74,8 @@ func fillPacks(t testing.TB, rnd *rand.Rand, be Saver, pm *packerManager, buf []
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != l {
|
||||
t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l)
|
||||
if n != l+37 {
|
||||
t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
|
||||
}
|
||||
bytes += l
|
||||
|
||||
|
@@ -107,7 +107,7 @@ func flushRemainingPacks(t testing.TB, be Saver, pm *packerManager) (bytes int)
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bytes += int(n)
|
||||
bytes += n
|
||||
|
||||
packID := restic.IDFromHash(packer.hw.Sum(nil))
|
||||
var beHash []byte
|
||||
|
|
|
@@ -435,12 +435,12 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
|
|||
}
|
||||
|
||||
// else write the pack to the backend
|
||||
err = r.savePacker(ctx, t, packer)
|
||||
hdrSize, err := r.savePacker(ctx, t, packer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return size, nil
|
||||
return size + hdrSize, nil
|
||||
}
|
||||
|
||||
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
|
||||
|
@@ -551,7 +551,7 @@ func (r *Repository) flushPacks(ctx context.Context) error {
|
|||
|
||||
debug.Log("manually flushing %d packs", len(p.pm.packers))
|
||||
for _, packer := range p.pm.packers {
|
||||
err := r.savePacker(ctx, p.t, packer)
|
||||
_, err := r.savePacker(ctx, p.t, packer)
|
||||
if err != nil {
|
||||
p.pm.pm.Unlock()
|
||||
return err
|
||||
|
|
Loading…
Reference in a new issue