commit bb5f196b09
22 changed files with 140 additions and 110 deletions
@@ -23,4 +23,7 @@ repository using `init --repository-version 2 --copy-chunker-params --repo2 path
 Then use the `copy` command to copy all snapshots to the new repository.
 
 https://github.com/restic/restic/issues/21
+https://github.com/restic/restic/issues/3779
 https://github.com/restic/restic/pull/3666
+https://github.com/restic/restic/pull/3704
+https://github.com/restic/restic/pull/3733

@@ -386,7 +386,7 @@ func loadBlobs(ctx context.Context, repo restic.Repository, pack restic.ID, list
 			}
 		}
 		if reuploadBlobs {
-			_, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
+			_, _, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
 			if err != nil {
 				return err
 			}

@@ -130,11 +130,11 @@ func runStats(gopts GlobalOptions, args []string) error {
 	if statsOptions.countMode == countModeRawData {
 		// the blob handles have been collected, but not yet counted
 		for blobHandle := range stats.blobs {
-			blobSize, found := repo.LookupBlobSize(blobHandle.ID, blobHandle.Type)
-			if !found {
+			pbs := repo.Index().Lookup(blobHandle)
+			if len(pbs) == 0 {
 				return fmt.Errorf("blob %v not found", blobHandle)
 			}
-			stats.TotalSize += uint64(blobSize)
+			stats.TotalSize += uint64(pbs[0].Length)
 			stats.TotalBlobCount++
 		}
 	}

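The `stats` hunk above switches raw-data mode from a plaintext size lookup to the length the blob occupies inside its pack file. A minimal self-contained sketch of the same accounting; `PackedBlob` and the map-based index are illustrative stand-ins, not restic's real types:

package main

import "fmt"

// PackedBlob mimics the relevant part of a restic index entry: Length is the
// number of bytes the blob occupies in its pack file, i.e. after compression
// and encryption, not the plaintext size.
type PackedBlob struct{ Length uint }

func totalRawDataSize(index map[string][]PackedBlob, handles []string) (uint64, error) {
	var total uint64
	for _, h := range handles {
		pbs := index[h]
		if len(pbs) == 0 {
			return 0, fmt.Errorf("blob %v not found", h)
		}
		// A blob may be stored in several packs; like the stats code, this
		// counts the first copy only.
		total += uint64(pbs[0].Length)
	}
	return total, nil
}

func main() {
	index := map[string][]PackedBlob{"abc123": {{Length: 38}}}
	total, err := totalRawDataSize(index, []string{"abc123"})
	fmt.Println(total, err) // 38 <nil>
}
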
@@ -31,18 +31,22 @@ type ErrorFunc func(file string, fi os.FileInfo, err error) error
 
 // ItemStats collects some statistics about a particular file or directory.
 type ItemStats struct {
-	DataBlobs int    // number of new data blobs added for this item
-	DataSize  uint64 // sum of the sizes of all new data blobs
-	TreeBlobs int    // number of new tree blobs added for this item
-	TreeSize  uint64 // sum of the sizes of all new tree blobs
+	DataBlobs      int    // number of new data blobs added for this item
+	DataSize       uint64 // sum of the sizes of all new data blobs
+	DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
+	TreeBlobs      int    // number of new tree blobs added for this item
+	TreeSize       uint64 // sum of the sizes of all new tree blobs
+	TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
 }
 
 // Add adds other to the current ItemStats.
 func (s *ItemStats) Add(other ItemStats) {
 	s.DataBlobs += other.DataBlobs
 	s.DataSize += other.DataSize
+	s.DataSizeInRepo += other.DataSizeInRepo
 	s.TreeBlobs += other.TreeBlobs
 	s.TreeSize += other.TreeSize
+	s.TreeSizeInRepo += other.TreeSizeInRepo
 }
 
 // Archiver saves a directory structure to the repo.

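The two new *SizeInRepo fields record what an item actually adds to the repository once compression and encryption are applied, next to the existing plaintext counters. A hedged sketch of reading the pair together (struct trimmed to the fields used, values invented):

package main

import "fmt"

// Trimmed copy of ItemStats with only the fields this sketch uses.
type ItemStats struct {
	DataSize       uint64 // plaintext bytes of new data blobs
	DataSizeInRepo uint64 // bytes those blobs occupy in the repo
}

// Add mirrors the ItemStats.Add semantics above for the two fields shown.
func (s *ItemStats) Add(other ItemStats) {
	s.DataSize += other.DataSize
	s.DataSizeInRepo += other.DataSizeInRepo
}

func main() {
	var total ItemStats
	total.Add(ItemStats{DataSize: 1 << 20, DataSizeInRepo: 400 << 10}) // invented numbers
	fmt.Printf("added %d bytes, stored %d bytes (%.0f%%)\n",
		total.DataSize, total.DataSizeInRepo,
		100*float64(total.DataSizeInRepo)/float64(total.DataSize))
}
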
@@ -183,7 +187,8 @@ func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID,
 	res.Wait(ctx)
 	if !res.Known() {
 		s.TreeBlobs++
-		s.TreeSize += uint64(len(buf))
+		s.TreeSize += uint64(res.Length())
+		s.TreeSizeInRepo += uint64(res.SizeInRepo())
 	}
 	// The context was canceled in the meantime, res.ID() might be invalid
 	if ctx.Err() != nil {

@@ -415,16 +415,16 @@ type blobCountingRepo struct {
 	saved map[restic.BlobHandle]uint
 }
 
-func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
-	id, exists, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
+func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
+	id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
 	if exists {
-		return id, exists, err
+		return id, exists, size, err
 	}
 	h := restic.BlobHandle{ID: id, Type: t}
 	repo.m.Lock()
 	repo.saved[h]++
 	repo.m.Unlock()
-	return id, exists, err
+	return id, exists, size, err
 }
 
 func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {

@@ -1019,7 +1019,7 @@ func TestArchiverSaveTree(t *testing.T) {
 			want: TestDir{
 				"targetfile": TestFile{Content: string("foobar")},
 			},
-			stat: ItemStats{1, 6, 0, 0},
+			stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
 		},
 		{
 			src: TestDir{

@@ -1031,7 +1031,7 @@ func TestArchiverSaveTree(t *testing.T) {
 				"targetfile":  TestFile{Content: string("foobar")},
 				"filesymlink": TestSymlink{Target: "targetfile"},
 			},
-			stat: ItemStats{1, 6, 0, 0},
+			stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
 		},
 		{
 			src: TestDir{

@@ -1051,7 +1051,7 @@ func TestArchiverSaveTree(t *testing.T) {
 					"symlink": TestSymlink{Target: "subdir"},
 				},
 			},
-			stat: ItemStats{0, 0, 1, 0x154},
+			stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a},
 		},
 		{
 			src: TestDir{

@@ -1075,7 +1075,7 @@ func TestArchiverSaveTree(t *testing.T) {
 					},
 				},
 			},
-			stat: ItemStats{1, 6, 3, 0x47f},
+			stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1},
 		},
 	}
 

@@ -1140,7 +1140,8 @@ func TestArchiverSaveTree(t *testing.T) {
 			bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
 			bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
 			bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize)
-			bothZeroOrNeither(t, test.stat.TreeSize, stat.TreeSize)
+			bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo)
+			bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo)
 		})
 	}
 }

@@ -1944,10 +1945,10 @@ type failSaveRepo struct {
 	err       error
 }
 
-func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
+func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
 	val := atomic.AddInt32(&f.cnt, 1)
 	if val >= f.failAfter {
-		return restic.ID{}, false, f.err
+		return restic.ID{}, false, 0, f.err
 	}
 
 	return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)

@@ -10,7 +10,7 @@ import (
 
 // Saver allows saving a blob.
 type Saver interface {
-	SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error)
+	SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error)
 	Index() restic.MasterIndex
 }
 

@@ -86,11 +86,17 @@ func (s *FutureBlob) Known() bool {
 	return s.res.known
 }
 
-// Length returns the length of the blob.
+// Length returns the raw length of the blob.
 func (s *FutureBlob) Length() int {
 	return s.length
 }
 
+// SizeInRepo returns the number of bytes added to the repo (including
+// compression and crypto overhead).
+func (s *FutureBlob) SizeInRepo() int {
+	return s.res.size
+}
+
 type saveBlobJob struct {
 	restic.BlobType
 	buf *Buffer

@@ -100,10 +106,11 @@ type saveBlobJob struct {
 type saveBlobResponse struct {
 	id    restic.ID
 	known bool
+	size  int
 }
 
 func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) {
-	id, known, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
+	id, known, size, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
 
 	if err != nil {
 		return saveBlobResponse{}, err

@@ -112,6 +119,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte)
 	return saveBlobResponse{
 		id:    id,
 		known: known,
+		size:  size,
 	}, nil
 }
 

@@ -21,13 +21,13 @@ type saveFail struct {
 	failAt int32
 }
 
-func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, error) {
+func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, int, error) {
 	val := atomic.AddInt32(&b.cnt, 1)
 	if val == b.failAt {
-		return restic.ID{}, false, errTest
+		return restic.ID{}, false, 0, errTest
 	}
 
-	return id, false, nil
+	return id, false, 0, nil
 }
 
 func (b *saveFail) Index() restic.MasterIndex {

@@ -210,6 +210,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat
 		if !res.Known() {
 			stats.DataBlobs++
 			stats.DataSize += uint64(res.Length())
+			stats.DataSizeInRepo += uint64(res.SizeInRepo())
 		}
 
 		node.Content = append(node.Content, res.ID())

@@ -483,7 +483,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) {
 	buf, err := repo.LoadBlob(ctx, restic.TreeBlob, id, nil)
 	test.OK(t, err)
 
-	_, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
+	_, _, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
 	test.OK(t, err)
 
 	malNode := &restic.Node{

@@ -31,7 +31,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
 }
 
 // Add saves the data read from rd as a new blob to the packer. Returned is the
-// number of bytes written to the pack.
+// number of bytes written to the pack plus the pack header entry size.
 func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
 	p.m.Lock()
 	defer p.m.Unlock()

@@ -44,6 +44,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedL
 	c.UncompressedLength = uint(uncompressedLength)
 	p.bytes += uint(n)
 	p.blobs = append(p.blobs, c)
+	n += CalculateEntrySize(c)
 
 	return n, errors.Wrap(err, "Write")
 }

@@ -69,13 +70,11 @@ type compressedHeaderEntry struct {
 }
 
 // Finalize writes the header for all added blobs and finalizes the pack.
-// Returned are the number of bytes written, including the header.
-func (p *Packer) Finalize() (uint, error) {
+// Returned are the number of bytes written, not yet reported by Add.
+func (p *Packer) Finalize() (int, error) {
 	p.m.Lock()
 	defer p.m.Unlock()
 
-	bytesWritten := p.bytes
-
 	header, err := p.makeHeader()
 	if err != nil {
 		return 0, err

@@ -97,17 +96,14 @@ func (p *Packer) Finalize() (uint, error) {
 		return 0, errors.New("wrong number of bytes written")
 	}
 
-	bytesWritten += uint(hdrBytes)
-
 	// write length
 	err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
 	if err != nil {
 		return 0, errors.Wrap(err, "binary.Write")
 	}
-	bytesWritten += uint(binary.Size(uint32(0)))
+	p.bytes += uint(hdrBytes + binary.Size(uint32(0)))
 
-	p.bytes = uint(bytesWritten)
-	return bytesWritten, nil
+	return restic.CiphertextLength(0) + binary.Size(uint32(0)), nil
 }
 
 // makeHeader constructs the header for p.

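After the pack.go hunks above, Add reports each blob's ciphertext bytes plus its header entry, and Finalize reports only the fixed header overhead not attributable to any single blob. The constants below are inferred from this commit (the ItemStats{1, 6, 32 + 6, ...} values earlier in this diff, the n != l+37 check in the packer manager test further down, and the restic.CiphertextLength(0) + uint32 length field in Finalize) and assume uncompressed blobs; a worked sketch of the accounting:

package main

import "fmt"

func main() {
	const (
		cryptoOverhead = 32 // per encrypted part; inferred from the 32 + 6 test value
		entrySize      = 37 // plain header entry; matches the l+37 test check
		lengthField    = 4  // trailing uint32 holding the header length
	)

	plaintexts := []int{100, 200} // invented blob sizes

	total := 0
	for _, l := range plaintexts {
		// What Add now returns per blob: its ciphertext plus its header entry.
		total += (l + cryptoOverhead) + entrySize
	}
	// What Finalize now returns: the encrypted header's own overhead plus the
	// length field, i.e. the bytes not yet reported by Add.
	total += cryptoOverhead + lengthField

	fmt.Println("expected pack size:", total) // 300 + 2*(32+37) + 36 = 474
}
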
@@ -23,7 +23,7 @@ func FuzzSaveLoadBlob(f *testing.F) {
 		id := restic.Hash(blob)
 		repo, _ := TestRepositoryWithBackend(t, mem.New(), 2)
 
-		_, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
+		_, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
 		if err != nil {
 			t.Fatal(err)
 		}

@@ -106,11 +106,11 @@ func (r *packerManager) insertPacker(p *Packer) {
 }
 
 // savePacker stores p in the backend.
-func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error {
+func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) (int, error) {
 	debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
-	_, err := p.Packer.Finalize()
+	hdrOverhead, err := p.Packer.Finalize()
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	id := restic.IDFromHash(p.hw.Sum(nil))

@@ -122,27 +122,27 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
 	}
 	rd, err := restic.NewFileReader(p.tmpfile, beHash)
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	err = r.be.Save(ctx, h, rd)
 	if err != nil {
 		debug.Log("Save(%v) error: %v", h, err)
-		return err
+		return 0, err
 	}
 
 	debug.Log("saved as %v", h)
 
 	err = p.tmpfile.Close()
 	if err != nil {
-		return errors.Wrap(err, "close tempfile")
+		return 0, errors.Wrap(err, "close tempfile")
 	}
 
 	// on windows the tempfile is automatically deleted on close
 	if runtime.GOOS != "windows" {
 		err = fs.RemoveIfExists(p.tmpfile.Name())
 		if err != nil {
-			return errors.Wrap(err, "Remove")
+			return 0, errors.Wrap(err, "Remove")
 		}
 	}
 

@@ -152,9 +152,9 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
 
 	// Save index if full
 	if r.noAutoIndexUpdate {
-		return nil
+		return hdrOverhead, nil
 	}
-	return r.idx.SaveFullIndex(ctx, r)
+	return hdrOverhead, r.idx.SaveFullIndex(ctx, r)
 }
 
 // countPacker returns the number of open (unfinished) packers.

@@ -74,8 +74,8 @@ func fillPacks(t testing.TB, rnd *rand.Rand, be Saver, pm *packerManager, buf []
 		if err != nil {
 			t.Fatal(err)
 		}
-		if n != l {
-			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l)
+		if n != l+37 {
+			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
 		}
 		bytes += l
 

@@ -107,7 +107,7 @@ func flushRemainingPacks(t testing.TB, be Saver, pm *packerManager) (bytes int)
 		if err != nil {
 			t.Fatal(err)
 		}
-		bytes += int(n)
+		bytes += n
 
 		packID := restic.IDFromHash(packer.hw.Sum(nil))
 		var beHash []byte

@@ -75,7 +75,7 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
 			}
 
 			// We do want to save already saved blobs!
-			_, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
+			_, _, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
 			if err != nil {
 				return err
 			}

@@ -32,7 +32,7 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl
 		buf := make([]byte, length)
 		rand.Read(buf)
 
-		id, exists, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
+		id, exists, _, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
 		if err != nil {
 			t.Fatalf("SaveFrom() error %v", err)
 		}

@@ -62,7 +62,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
 	// invert first data byte
 	buf[0] ^= 0xff
 
-	_, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
+	_, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
 	if err != nil {
 		t.Fatalf("SaveFrom() error %v", err)
 	}

@@ -378,9 +378,10 @@ func (r *Repository) getZstdDecoder() *zstd.Decoder {
 }
 
 // saveAndEncrypt encrypts data and stores it to the backend as type t. If data
-// is small enough, it will be packed together with other small blobs.
-// The caller must ensure that the id matches the data.
-func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
+// is small enough, it will be packed together with other small blobs. The
+// caller must ensure that the id matches the data. Returned is the size data
+// occupies in the repo (compressed or not, including the encryption overhead).
+func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (size int, err error) {
 	debug.Log("save id %v (%v, %d bytes)", id, t, len(data))
 
 	uncompressedLength := 0

@@ -417,24 +418,29 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
 
 	packer, err := pm.findPacker()
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	// save ciphertext
-	_, err = packer.Add(t, id, ciphertext, uncompressedLength)
+	size, err = packer.Add(t, id, ciphertext, uncompressedLength)
 	if err != nil {
-		return err
+		return 0, err
 	}
 
 	// if the pack is not full enough, put back to the list
 	if packer.Size() < minPackSize {
 		debug.Log("pack is not full enough (%d bytes)", packer.Size())
 		pm.insertPacker(packer)
-		return nil
+		return size, nil
 	}
 
 	// else write the pack to the backend
-	return r.savePacker(ctx, t, packer)
+	hdrSize, err := r.savePacker(ctx, t, packer)
+	if err != nil {
+		return 0, err
+	}
+
+	return size + hdrSize, nil
 }
 
 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the

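The effect of the saveAndEncrypt change is that a blob's reported size is its pack footprint, and the blob that happens to trigger a pack flush also absorbs the pack's fixed header overhead. A condensed, self-contained sketch of that control flow with stub types (not restic's API; overhead numbers invented):

package main

import "fmt"

// packer is a stub mirroring the accounting in this diff: Add returns the
// blob's footprint including its header entry, Finalize the residual overhead.
type packer struct{ bytes int }

func (p *packer) Add(ciphertext int) int { p.bytes += ciphertext; return ciphertext + 37 }
func (p *packer) Finalize() int          { return 32 + 4 }
func (p *packer) Size() int              { return p.bytes }

// saveAndEncrypt sketches the two return paths of the changed function.
func saveAndEncrypt(p *packer, ciphertext, minPackSize int) int {
	size := p.Add(ciphertext)
	if p.Size() < minPackSize {
		return size // pack stays open; header overhead is reported by a later flush
	}
	return size + p.Finalize() // the flushing blob also carries the header overhead
}

func main() {
	p := &packer{}
	fmt.Println(saveAndEncrypt(p, 138, 4<<20)) // 175: pack not full, no header share yet
}
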
@@ -545,7 +551,7 @@ func (r *Repository) flushPacks(ctx context.Context) error {
 
 	debug.Log("manually flushing %d packs", len(p.pm.packers))
 	for _, packer := range p.pm.packers {
-		err := r.savePacker(ctx, p.t, packer)
+		_, err := r.savePacker(ctx, p.t, packer)
 		if err != nil {
 			p.pm.pm.Unlock()
 			return err

@@ -815,8 +821,10 @@ func (r *Repository) Close() error {
 // It takes care that no duplicates are saved; this can be overwritten
 // by setting storeDuplicate to true.
 // If id is the null id, it will be computed and returned.
-// Also returns if the blob was already known before
-func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, err error) {
+// Also returns if the blob was already known before.
+// If the blob was not known before, it returns the number of bytes the blob
+// occupies in the repo (compressed or not, including encryption overhead).
+func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {
 
 	// compute plaintext hash if not already set
 	if id.IsNull() {

@@ -830,10 +838,10 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte
 
 	// only save when needed or explicitly told
 	if !known || storeDuplicate {
-		err = r.saveAndEncrypt(ctx, t, buf, newID)
+		size, err = r.saveAndEncrypt(ctx, t, buf, newID)
 	}
 
-	return newID, known, err
+	return newID, known, size, err
 }
 
 // LoadTree loads a tree from the repository.

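Call sites now receive the in-repo size as an extra return value; it is only meaningful when the blob was actually written, since for an already-known blob (with storeDuplicate == false) nothing reaches the repository and size stays 0. A self-contained caller sketch; memSaver and its overhead numbers are invented for illustration:

package main

import (
	"context"
	"fmt"
)

type ID [32]byte

// memSaver is a toy implementation of the new four-value SaveBlob shape.
type memSaver struct{ blobs map[ID][]byte }

func (m *memSaver) SaveBlob(_ context.Context, data []byte, id ID, storeDuplicate bool) (ID, bool, int, error) {
	if _, known := m.blobs[id]; known && !storeDuplicate {
		return id, true, 0, nil // known blob: nothing written, size stays 0
	}
	m.blobs[id] = data
	return id, false, len(data) + 32 + 37, nil // invented crypto + header overhead
}

func main() {
	repo := &memSaver{blobs: map[ID][]byte{}}
	var sizeInRepo uint64

	for i := 0; i < 2; i++ { // save the same blob twice
		_, known, size, err := repo.SaveBlob(context.Background(), []byte("foobar"), ID{1}, false)
		if err == nil && !known {
			sizeInRepo += uint64(size)
		}
	}
	fmt.Println(sizeInRepo) // 75: the duplicate save contributes nothing
}
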
@@ -867,7 +875,7 @@ func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, e
 	// adds a newline after each object)
 	buf = append(buf, '\n')
 
-	id, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
+	id, _, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
 	return id, err
 }
 

@@ -44,7 +44,7 @@ func testSave(t *testing.T, version uint) {
 	id := restic.Hash(data)
 
 	// save
-	sid, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
+	sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
 	rtest.OK(t, err)
 
 	rtest.Equals(t, id, sid)

@@ -83,7 +83,7 @@ func testSaveFrom(t *testing.T, version uint) {
 	id := restic.Hash(data)
 
 	// save
-	id2, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
+	id2, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
 	rtest.OK(t, err)
 	rtest.Equals(t, id, id2)
 

@@ -125,7 +125,7 @@ func benchmarkSaveAndEncrypt(t *testing.B, version uint) {
 	t.SetBytes(int64(size))
 
 	for i := 0; i < t.N; i++ {
-		_, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
+		_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
 		rtest.OK(t, err)
 	}
 }

@@ -187,7 +187,7 @@ func testLoadBlob(t *testing.T, version uint) {
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(t, err)
 
-	id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(t, err)
 	rtest.OK(t, repo.Flush(context.Background()))
 

@@ -220,7 +220,7 @@ func benchmarkLoadBlob(b *testing.B, version uint) {
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(b, err)
 
-	id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(b, err)
 	rtest.OK(b, repo.Flush(context.Background()))
 

@@ -396,7 +396,7 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(t, err)
 
-	_, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(t, err)
 	}
 }

@@ -46,7 +46,7 @@ type Repository interface {
 	LoadUnpacked(ctx context.Context, buf []byte, t FileType, id ID) (data []byte, err error)
 
 	LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
-	SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, error)
+	SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error)
 
 	LoadTree(context.Context, ID) (*Tree, error)
 	SaveTree(context.Context, *Tree) (ID, error)

@@ -52,7 +52,7 @@ func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs
 
 	id := Hash(chunk.Data)
 	if !fs.blobIsKnown(BlobHandle{ID: id, Type: DataBlob}) {
-		_, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
+		_, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
 		if err != nil {
 			fs.t.Fatalf("error saving chunk: %v", err)
 		}

@@ -138,7 +138,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I
 		return id
 	}
 
-	_, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
+	_, _, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
 	if err != nil {
 		fs.t.Fatal(err)
 	}

@@ -41,7 +41,7 @@ func saveFile(t testing.TB, repo restic.Repository, node File) restic.ID {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	id, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -110,12 +110,14 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 	switch messageType {
 	case "dir new":
 		b.print(verboseUpdate{
-			MessageType:  "verbose_status",
-			Action:       "new",
-			Item:         item,
-			Duration:     d.Seconds(),
-			DataSize:     s.DataSize,
-			MetadataSize: s.TreeSize,
+			MessageType:        "verbose_status",
+			Action:             "new",
+			Item:               item,
+			Duration:           d.Seconds(),
+			DataSize:           s.DataSize,
+			DataSizeInRepo:     s.DataSizeInRepo,
+			MetadataSize:       s.TreeSize,
+			MetadataSizeInRepo: s.TreeSizeInRepo,
 		})
 	case "dir unchanged":
 		b.print(verboseUpdate{

@@ -125,20 +127,23 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 		})
 	case "dir modified":
 		b.print(verboseUpdate{
-			MessageType:  "verbose_status",
-			Action:       "modified",
-			Item:         item,
-			Duration:     d.Seconds(),
-			DataSize:     s.DataSize,
-			MetadataSize: s.TreeSize,
+			MessageType:        "verbose_status",
+			Action:             "modified",
+			Item:               item,
+			Duration:           d.Seconds(),
+			DataSize:           s.DataSize,
+			DataSizeInRepo:     s.DataSizeInRepo,
+			MetadataSize:       s.TreeSize,
+			MetadataSizeInRepo: s.TreeSizeInRepo,
 		})
 	case "file new":
 		b.print(verboseUpdate{
-			MessageType: "verbose_status",
-			Action:      "new",
-			Item:        item,
-			Duration:    d.Seconds(),
-			DataSize:    s.DataSize,
+			MessageType:    "verbose_status",
+			Action:         "new",
+			Item:           item,
+			Duration:       d.Seconds(),
+			DataSize:       s.DataSize,
+			DataSizeInRepo: s.DataSizeInRepo,
 		})
 	case "file unchanged":
 		b.print(verboseUpdate{

@@ -148,11 +153,12 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 		})
 	case "file modified":
 		b.print(verboseUpdate{
-			MessageType: "verbose_status",
-			Action:      "modified",
-			Item:        item,
-			Duration:    d.Seconds(),
-			DataSize:    s.DataSize,
+			MessageType:    "verbose_status",
+			Action:         "modified",
+			Item:           item,
+			Duration:       d.Seconds(),
+			DataSize:       s.DataSize,
+			DataSizeInRepo: s.DataSizeInRepo,
 		})
 	}
 }

@@ -216,13 +222,15 @@ type errorUpdate struct {
 }
 
 type verboseUpdate struct {
-	MessageType  string  `json:"message_type"` // "verbose_status"
-	Action       string  `json:"action"`
-	Item         string  `json:"item"`
-	Duration     float64 `json:"duration"` // in seconds
-	DataSize     uint64  `json:"data_size"`
-	MetadataSize uint64  `json:"metadata_size"`
-	TotalFiles   uint    `json:"total_files"`
+	MessageType        string  `json:"message_type"` // "verbose_status"
+	Action             string  `json:"action"`
+	Item               string  `json:"item"`
+	Duration           float64 `json:"duration"` // in seconds
+	DataSize           uint64  `json:"data_size"`
+	DataSizeInRepo     uint64  `json:"data_size_in_repo"`
+	MetadataSize       uint64  `json:"metadata_size"`
+	MetadataSizeInRepo uint64  `json:"metadata_size_in_repo"`
+	TotalFiles         uint    `json:"total_files"`
 }
 
 type summaryOutput struct {

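For consumers of --json output, the two new fields show up on verbose_status messages. A sketch marshaling a trimmed copy of the struct above (all values invented):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of verboseUpdate with one of the new fields included.
type verboseUpdate struct {
	MessageType    string  `json:"message_type"`
	Action         string  `json:"action"`
	Item           string  `json:"item"`
	Duration       float64 `json:"duration"`
	DataSize       uint64  `json:"data_size"`
	DataSizeInRepo uint64  `json:"data_size_in_repo"`
}

func main() {
	out, _ := json.Marshal(verboseUpdate{
		MessageType:    "verbose_status",
		Action:         "new",
		Item:           "/home/user/file", // invented path
		Duration:       0.003,
		DataSize:       1048576,
		DataSizeInRepo: 409600,
	})
	fmt.Println(string(out))
	// {"message_type":"verbose_status","action":"new","item":"/home/user/file",
	//  "duration":0.003,"data_size":1048576,"data_size_in_repo":409600}
}
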
@@ -138,17 +138,17 @@ func formatBytes(c uint64) string {
 func (b *TextProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
 	switch messageType {
 	case "dir new":
-		b.VV("new %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
+		b.VV("new %v, saved in %.3fs (%v added, %v stored, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo), formatBytes(s.TreeSizeInRepo))
 	case "dir unchanged":
 		b.VV("unchanged %v", item)
 	case "dir modified":
-		b.VV("modified %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
+		b.VV("modified %v, saved in %.3fs (%v added, %v stored, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo), formatBytes(s.TreeSizeInRepo))
 	case "file new":
 		b.VV("new %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
 	case "file unchanged":
 		b.VV("unchanged %v", item)
 	case "file modified":
-		b.VV("modified %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
+		b.VV("modified %v, saved in %.3fs (%v added, %v stored)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo))
 	}
 }
 

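With the format strings above, the verbose text output gains a "stored" figure next to "added" for new and modified items. A small sketch rendering the new "dir new" line (sizes invented, formatBytes simplified to MiB only):

package main

import "fmt"

// formatBytes is a simplified stand-in for restic's helper of the same name.
func formatBytes(c uint64) string {
	return fmt.Sprintf("%.3f MiB", float64(c)/(1<<20))
}

func main() {
	var dataSize, dataSizeInRepo, treeSizeInRepo uint64 = 1 << 20, 400 << 10, 2 << 10
	fmt.Printf("new %v, saved in %.3fs (%v added, %v stored, %v metadata)\n",
		"/home/user/work", 0.123,
		formatBytes(dataSize), formatBytes(dataSizeInRepo), formatBytes(treeSizeInRepo))
	// new /home/user/work, saved in 0.123s (1.000 MiB added, 0.391 MiB stored, 0.002 MiB metadata)
}
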
@@ -178,7 +178,7 @@ func (b *TextProgress) Finish(snapshotID restic.ID, start time.Time, summary *Su
 	if dryRun {
 		verb = "Would add"
 	}
-	b.P("%s to the repo: %-5s\n", verb, formatBytes(summary.ItemStats.DataSize+summary.ItemStats.TreeSize))
+	b.P("%s to the repo: %-5s (%-5s stored)\n", verb, formatBytes(summary.ItemStats.DataSize+summary.ItemStats.TreeSize), formatBytes(summary.ItemStats.DataSizeInRepo+summary.ItemStats.TreeSizeInRepo))
 	b.P("\n")
 	b.P("processed %v files, %v in %s",
 		summary.Files.New+summary.Files.Changed+summary.Files.Unchanged,