Account for pack header overhead at each entry
This still omits the pack header's crypto overhead and the length field, which together amount to only a few bytes per pack file.
This commit is contained in:
parent
856d5e4303
commit
a6e9e08034
4 changed files with 21 additions and 25 deletions
|
@ -31,7 +31,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add saves the data read from rd as a new blob to the packer. Returned is the
|
// Add saves the data read from rd as a new blob to the packer. Returned is the
|
||||||
// number of bytes written to the pack.
|
// number of bytes written to the pack plus the pack header entry size.
|
||||||
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
|
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
|
||||||
p.m.Lock()
|
p.m.Lock()
|
||||||
defer p.m.Unlock()
|
defer p.m.Unlock()
|
||||||
|
@ -44,6 +44,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedL
|
||||||
c.UncompressedLength = uint(uncompressedLength)
|
c.UncompressedLength = uint(uncompressedLength)
|
||||||
p.bytes += uint(n)
|
p.bytes += uint(n)
|
||||||
p.blobs = append(p.blobs, c)
|
p.blobs = append(p.blobs, c)
|
||||||
|
n += CalculateEntrySize(c)
|
||||||
|
|
||||||
return n, errors.Wrap(err, "Write")
|
return n, errors.Wrap(err, "Write")
|
||||||
}
|
}
|
||||||
|
@ -69,13 +70,11 @@ type compressedHeaderEntry struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Finalize writes the header for all added blobs and finalizes the pack.
|
// Finalize writes the header for all added blobs and finalizes the pack.
|
||||||
// Returned are the number of bytes written, including the header.
|
// Returned are the number of bytes written, not yet reported by Add.
|
||||||
func (p *Packer) Finalize() (uint, error) {
|
func (p *Packer) Finalize() (int, error) {
|
||||||
p.m.Lock()
|
p.m.Lock()
|
||||||
defer p.m.Unlock()
|
defer p.m.Unlock()
|
||||||
|
|
||||||
bytesWritten := p.bytes
|
|
||||||
|
|
||||||
header, err := p.makeHeader()
|
header, err := p.makeHeader()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
|
@ -97,17 +96,14 @@ func (p *Packer) Finalize() (uint, error) {
|
||||||
return 0, errors.New("wrong number of bytes written")
|
return 0, errors.New("wrong number of bytes written")
|
||||||
}
|
}
|
||||||
|
|
||||||
bytesWritten += uint(hdrBytes)
|
|
||||||
|
|
||||||
// write length
|
// write length
|
||||||
err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
|
err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, errors.Wrap(err, "binary.Write")
|
return 0, errors.Wrap(err, "binary.Write")
|
||||||
}
|
}
|
||||||
bytesWritten += uint(binary.Size(uint32(0)))
|
p.bytes += uint(hdrBytes + binary.Size(uint32(0)))
|
||||||
|
|
||||||
p.bytes = uint(bytesWritten)
|
return restic.CiphertextLength(0) + binary.Size(uint32(0)), nil
|
||||||
return bytesWritten, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// makeHeader constructs the header for p.
|
// makeHeader constructs the header for p.
|
||||||
|
|
|
@ -106,11 +106,11 @@ func (r *packerManager) insertPacker(p *Packer) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// savePacker stores p in the backend.
|
// savePacker stores p in the backend.
|
||||||
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error {
|
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) (int, error) {
|
||||||
debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
|
debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
|
||||||
_, err := p.Packer.Finalize()
|
hdrOverhead, err := p.Packer.Finalize()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
id := restic.IDFromHash(p.hw.Sum(nil))
|
id := restic.IDFromHash(p.hw.Sum(nil))
|
||||||
|
@ -122,27 +122,27 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
|
||||||
}
|
}
|
||||||
rd, err := restic.NewFileReader(p.tmpfile, beHash)
|
rd, err := restic.NewFileReader(p.tmpfile, beHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = r.be.Save(ctx, h, rd)
|
err = r.be.Save(ctx, h, rd)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
debug.Log("Save(%v) error: %v", h, err)
|
debug.Log("Save(%v) error: %v", h, err)
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
debug.Log("saved as %v", h)
|
debug.Log("saved as %v", h)
|
||||||
|
|
||||||
err = p.tmpfile.Close()
|
err = p.tmpfile.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "close tempfile")
|
return 0, errors.Wrap(err, "close tempfile")
|
||||||
}
|
}
|
||||||
|
|
||||||
// on windows the tempfile is automatically deleted on close
|
// on windows the tempfile is automatically deleted on close
|
||||||
if runtime.GOOS != "windows" {
|
if runtime.GOOS != "windows" {
|
||||||
err = fs.RemoveIfExists(p.tmpfile.Name())
|
err = fs.RemoveIfExists(p.tmpfile.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "Remove")
|
return 0, errors.Wrap(err, "Remove")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,9 +152,9 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
|
||||||
|
|
||||||
// Save index if full
|
// Save index if full
|
||||||
if r.noAutoIndexUpdate {
|
if r.noAutoIndexUpdate {
|
||||||
return nil
|
return hdrOverhead, nil
|
||||||
}
|
}
|
||||||
return r.idx.SaveFullIndex(ctx, r)
|
return hdrOverhead, r.idx.SaveFullIndex(ctx, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
// countPacker returns the number of open (unfinished) packers.
|
// countPacker returns the number of open (unfinished) packers.
|
||||||
|
|
|
@ -74,8 +74,8 @@ func fillPacks(t testing.TB, rnd *rand.Rand, be Saver, pm *packerManager, buf []
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
if n != l {
|
if n != l+37 {
|
||||||
t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l)
|
t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
|
||||||
}
|
}
|
||||||
bytes += l
|
bytes += l
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ func flushRemainingPacks(t testing.TB, be Saver, pm *packerManager) (bytes int)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
bytes += int(n)
|
bytes += n
|
||||||
|
|
||||||
packID := restic.IDFromHash(packer.hw.Sum(nil))
|
packID := restic.IDFromHash(packer.hw.Sum(nil))
|
||||||
var beHash []byte
|
var beHash []byte
|
||||||
|
|
|
@ -435,12 +435,12 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
|
||||||
}
|
}
|
||||||
|
|
||||||
// else write the pack to the backend
|
// else write the pack to the backend
|
||||||
err = r.savePacker(ctx, t, packer)
|
hdrSize, err := r.savePacker(ctx, t, packer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return size, nil
|
return size + hdrSize, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
|
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
|
||||||
|
@ -551,7 +551,7 @@ func (r *Repository) flushPacks(ctx context.Context) error {
|
||||||
|
|
||||||
debug.Log("manually flushing %d packs", len(p.pm.packers))
|
debug.Log("manually flushing %d packs", len(p.pm.packers))
|
||||||
for _, packer := range p.pm.packers {
|
for _, packer := range p.pm.packers {
|
||||||
err := r.savePacker(ctx, p.t, packer)
|
_, err := r.savePacker(ctx, p.t, packer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
p.pm.pm.Unlock()
|
p.pm.pm.Unlock()
|
||||||
return err
|
return err
|
||||||
|
|
Loading…
Reference in a new issue