forked from TrueCloudLab/restic
Rework function for listing packs
parent e07ae7631c
commit 9f752b8306
5 changed files with 13 additions and 30 deletions
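Taken together, the hunks in this commit replace the two-step pattern of constructing an Unpacker and reading its Entries field with a single call to pack.List. Below is a minimal before-and-after sketch of a call site, assembled from the hunks in this diff; r and buf stand in for a repository and a loaded pack buffer as in the surrounding functions, so the fragment is illustrative rather than compilable on its own.

	// Before: build an Unpacker, then read its exported Entries field.
	unpacker, err := pack.NewUnpacker(r.Key(), pack.BufferLoader(buf))
	if err != nil {
		return err
	}
	for _, blob := range unpacker.Entries {
		// blob.ID, blob.Type and blob.Length describe one entry in the pack
	}

	// After: pack.List returns the entries directly.
	blobs, err := pack.List(r.Key(), pack.BufferLoader(buf))
	if err != nil {
		return err
	}
	for _, blob := range blobs {
		// same fields, no intermediate struct to hold on to
	}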
@@ -676,13 +676,13 @@ func checkPack(r *repository.Repository, id backend.ID) error {
 		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
 	}
 
-	unpacker, err := pack.NewUnpacker(r.Key(), pack.BufferLoader(buf))
+	blobs, err := pack.List(r.Key(), pack.BufferLoader(buf))
 	if err != nil {
 		return err
 	}
 
 	var errs []error
-	for i, blob := range unpacker.Entries {
+	for i, blob := range blobs {
 		debug.Log("Checker.checkPack", " check blob %d: %v", i, blob.ID.Str())
 
 		plainBuf := make([]byte, blob.Length)

@@ -228,22 +228,13 @@ func (p *Packer) String() string {
 	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
 }
 
-// Unpacker is used to read individual blobs from a pack.
-type Unpacker struct {
-	rd      io.ReadSeeker
-	Entries []Blob
-	k       *crypto.Key
-}
-
 const (
 	preloadHeaderSize = 2048
 	maxHeaderSize     = 16 * 1024 * 1024
 )
 
-// NewUnpacker returns a pointer to Unpacker which can be used to read
-// individual Blobs from a pack.
-func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) {
-	var err error
+// List returns the list of entries found in a pack file.
+func List(k *crypto.Key, ldr Loader) (entries []Blob, err error) {
 
 	// read the last 2048 byte, this will mostly be enough for the header, so
 	// we do not need another round trip.
@@ -294,8 +285,6 @@ func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) {
 
 	rd := bytes.NewReader(hdr)
 
-	var entries []Blob
-
 	pos := uint(0)
 	for {
 		e := headerEntry{}
@@ -328,11 +317,5 @@ func NewUnpacker(k *crypto.Key, ldr Loader) (*Unpacker, error) {
 		pos += uint(e.Length)
 	}
 
-	up := &Unpacker{
-		rd:      rd,
-		k:       k,
-		Entries: entries,
-	}
-
-	return up, nil
+	return entries, nil
 }

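Read together, the three hunks above keep the existing header-parsing loop and only change what is returned: the Unpacker struct disappears and the accumulated entries slice is returned directly. As an illustration of that loop's shape, here is a small self-contained sketch that decodes fixed-size records from an in-memory header buffer until EOF. The record layout used here (a one-byte type, a four-byte length, a 32-byte ID) and the names entry and listEntries are hypothetical stand-ins, not restic's actual headerEntry definition, which is not shown in this diff.

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
		"io"
	)

	// entry is a hypothetical stand-in for one pack header record.
	type entry struct {
		Type   uint8
		Length uint32
		ID     [32]byte
	}

	// listEntries mirrors the loop shape visible in the hunks above: wrap the
	// raw header bytes in a bytes.Reader and decode records until io.EOF.
	func listEntries(hdr []byte) ([]entry, error) {
		rd := bytes.NewReader(hdr)

		var entries []entry
		for {
			var e entry
			err := binary.Read(rd, binary.LittleEndian, &e)
			if err == io.EOF {
				break
			}
			if err != nil {
				return nil, err
			}
			entries = append(entries, e)
		}

		return entries, nil
	}

	func main() {
		// Build a fake header with two records so the sketch runs end to end.
		buf := new(bytes.Buffer)
		for i := 0; i < 2; i++ {
			e := entry{Type: 1, Length: uint32(100 + i)}
			binary.Write(buf, binary.LittleEndian, &e)
		}

		entries, err := listEntries(buf.Bytes())
		if err != nil {
			panic(err)
		}
		fmt.Println("entries:", len(entries))
	}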
@@ -63,13 +63,13 @@ func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, ldr pack.Loader, packS
 	Equals(t, uint(written), packSize)
 
 	// read and parse it again
-	np, err := pack.NewUnpacker(k, ldr)
+	entries, err := pack.List(k, ldr)
 	OK(t, err)
-	Equals(t, len(np.Entries), len(bufs))
+	Equals(t, len(entries), len(bufs))
 
 	var buf []byte
 	for i, b := range bufs {
-		e := np.Entries[i]
+		e := entries[i]
 		Equals(t, b.id, e.ID)
 
 		if len(buf) < int(e.Length) {

@@ -32,14 +32,14 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err
 
 	debug.Log("Repack", "pack %v loaded (%d bytes)", packID.Str(), len(buf))
 
-	unpck, err := pack.NewUnpacker(repo.Key(), pack.BufferLoader(buf))
+	blobs, err := pack.List(repo.Key(), pack.BufferLoader(buf))
 	if err != nil {
 		return err
 	}
 
-	debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(unpck.Entries))
+	debug.Log("Repack", "processing pack %v, blobs: %v", packID.Str(), len(blobs))
 	var plaintext []byte
-	for _, entry := range unpck.Entries {
+	for _, entry := range blobs {
 		h := pack.Handle{ID: entry.ID, Type: entry.Type}
 		if !keepBlobs.Has(h) {
 			continue

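The Repack hunk above keeps its blob-filtering logic unchanged; only the source of the entries moves to pack.List. A condensed sketch of that flow, assembled from the lines above: repo, buf, and keepBlobs are assumed to be in scope as in the surrounding function, and the re-packing of kept blobs is elided.

	blobs, err := pack.List(repo.Key(), pack.BufferLoader(buf))
	if err != nil {
		return err
	}

	for _, entry := range blobs {
		h := pack.Handle{ID: entry.ID, Type: entry.Type}
		if !keepBlobs.Has(h) {
			// this blob is no longer referenced, skip it
			continue
		}
		// ... load, decrypt and re-pack the kept blob ...
	}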
@@ -556,12 +556,12 @@ func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) {
 
 	ldr := pack.BackendLoader{Backend: r.Backend(), Handle: h}
 
-	unpacker, err := pack.NewUnpacker(r.Key(), ldr)
+	blobs, err := pack.List(r.Key(), ldr)
 	if err != nil {
 		return nil, 0, err
 	}
 
-	return unpacker.Entries, blobInfo.Size, nil
+	return blobs, blobInfo.Size, nil
 }
 
 // Delete calls backend.Delete() if implemented, and returns an error

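Repository.ListPack keeps its signature ([]pack.Blob, int64, error); only its internals now delegate to pack.List. A short, hypothetical caller fragment, where repo and id stand in for an open *Repository and a pack backend.ID:

	blobs, size, err := repo.ListPack(id)
	if err != nil {
		return err
	}
	fmt.Printf("pack %v holds %d blobs in %d bytes\n", id.Str(), len(blobs), size)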