forked from TrueCloudLab/restic
Remove last occurrence of Create()
parent 1a95e48389
commit ea29ad6f96
3 changed files with 47 additions and 66 deletions
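
For orientation before the diff: after this commit the Packer assembles the whole pack in memory and Finalize returns its raw bytes, so no backend Create() call is needed while packing. A minimal sketch of the resulting flow, mirroring the updated TestCreatePack below (buildPack and its error handling are illustrative, not part of the change):

package main

import (
	"bytes"
	"fmt"

	"github.com/restic/restic/backend"
	"github.com/restic/restic/crypto"
	"github.com/restic/restic/pack"
)

// buildPack packs a single blob entirely in memory and returns the
// finished pack, including its encrypted header and length trailer.
func buildPack(k *crypto.Key, blob []byte) ([]byte, error) {
	p := pack.NewPacker(k, nil) // nil: the Packer allocates its own buffer
	id := backend.Hash(blob)
	if _, err := p.Add(pack.Data, id, bytes.NewReader(blob)); err != nil {
		return nil, err
	}
	return p.Finalize()
}

func main() {
	k := crypto.NewRandomKey()
	data, err := buildPack(k, []byte("example blob"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("pack is %d bytes\n", len(data))
}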
pack/pack.go | 76
@@ -1,7 +1,7 @@
 package pack
 
 import (
-	"crypto/sha256"
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -12,8 +12,10 @@ import (
 	"github.com/restic/restic/crypto"
 )
 
+// BlobType specifies what a blob stored in a pack is.
 type BlobType uint8
 
+// These are the blob types that can be stored in a pack.
 const (
 	Data BlobType = 0
 	Tree          = 1
@@ -30,6 +32,7 @@ func (t BlobType) String() string {
 	return fmt.Sprintf("<BlobType %d>", t)
 }
 
+// MarshalJSON encodes the BlobType into JSON.
 func (t BlobType) MarshalJSON() ([]byte, error) {
 	switch t {
 	case Data:
@@ -41,6 +44,7 @@ func (t BlobType) MarshalJSON() ([]byte, error) {
 	return nil, errors.New("unknown blob type")
 }
 
+// UnmarshalJSON decodes the BlobType from JSON.
 func (t *BlobType) UnmarshalJSON(buf []byte) error {
 	switch string(buf) {
 	case `"data"`:
@@ -79,16 +83,15 @@ type Packer struct {
 
 	bytes uint
 	k     *crypto.Key
-	wr    io.Writer
-	hw    *backend.HashingWriter
+	buf   *bytes.Buffer
 
 	m sync.Mutex
 }
 
 // NewPacker returns a new Packer that can be used to pack blobs
 // together.
-func NewPacker(k *crypto.Key, w io.Writer) *Packer {
-	return &Packer{k: k, wr: w, hw: backend.NewHashingWriter(w, sha256.New())}
+func NewPacker(k *crypto.Key, buf []byte) *Packer {
+	return &Packer{k: k, buf: bytes.NewBuffer(buf)}
 }
 
 // Add saves the data read from rd as a new blob to the packer. Returned is the
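
A subtlety in the new constructor: bytes.NewBuffer treats its argument as the buffer's initial contents, not merely preallocated capacity, which is why callers in this commit pass nil. A stdlib-only sketch of the distinction, in case a caller ever wants to reuse an allocation:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The slice becomes the buffer's existing content:
	b1 := bytes.NewBuffer([]byte("abc"))
	fmt.Println(b1.Len()) // 3 — these bytes would precede any packed blob

	// To donate capacity only, pass a zero-length slice:
	scratch := make([]byte, 0, 1024)
	b2 := bytes.NewBuffer(scratch)
	fmt.Println(b2.Len()) // 0 — empty buffer with 1 KiB preallocated
}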
@@ -99,7 +102,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
 
 	c := Blob{Type: t, ID: id}
 
-	n, err := io.Copy(p.hw, rd)
+	n, err := io.Copy(p.buf, rd)
 	c.Length = uint(n)
 	c.Offset = p.bytes
 	p.bytes += uint(n)
@@ -118,45 +121,47 @@ type headerEntry struct {
 }
 
 // Finalize writes the header for all added blobs and finalizes the pack.
-// Returned are the complete number of bytes written, including the header.
-// After Finalize() has finished, the ID of this pack can be obtained by
-// calling ID().
-func (p *Packer) Finalize() (bytesWritten uint, err error) {
+// Returned are all bytes written, including the header.
+func (p *Packer) Finalize() ([]byte, error) {
 	p.m.Lock()
 	defer p.m.Unlock()
 
-	bytesWritten = p.bytes
+	bytesWritten := p.bytes
 
-	// create writer to encrypt header
-	wr := crypto.EncryptTo(p.k, p.hw)
-
-	bytesHeader, err := p.writeHeader(wr)
+	hdrBuf := bytes.NewBuffer(nil)
+	bytesHeader, err := p.writeHeader(hdrBuf)
 	if err != nil {
-		wr.Close()
-		return bytesWritten + bytesHeader, err
+		return nil, err
 	}
 
-	bytesWritten += bytesHeader
-
-	// finalize encrypted header
-	err = wr.Close()
+	encryptedHeader, err := crypto.Encrypt(p.k, nil, hdrBuf.Bytes())
 	if err != nil {
-		return bytesWritten, err
+		return nil, err
 	}
 
-	// account for crypto overhead
-	bytesWritten += crypto.Extension
+	// append the header
+	n, err := p.buf.Write(encryptedHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	hdrBytes := bytesHeader + crypto.Extension
+	if uint(n) != hdrBytes {
+		return nil, errors.New("wrong number of bytes written")
+	}
+
+	bytesWritten += hdrBytes
 
 	// write length
-	err = binary.Write(p.hw, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
+	err = binary.Write(p.buf, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
 	if err != nil {
-		return bytesWritten, err
+		return nil, err
 	}
 	bytesWritten += uint(binary.Size(uint32(0)))
 
 	p.bytes = uint(bytesWritten)
 
-	return bytesWritten, nil
+	return p.buf.Bytes(), nil
 }
 
 // writeHeader constructs and writes the header to wr.
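
Finalize fixes the pack layout as: blob bytes, then the encrypted header, then a little-endian uint32 giving the header's size (entry bytes plus crypto.Extension). Since the whole pack is now a plain byte slice, that trailer can be read back with the standard library alone; headerLength below is a hypothetical helper, not part of the change:

package main

import (
	"encoding/binary"
	"fmt"
)

// headerLength returns the trailing uint32 that Finalize appends:
// the size of the encrypted header at the end of the pack.
func headerLength(packData []byte) (uint32, error) {
	if len(packData) < 4 {
		return 0, fmt.Errorf("pack too short: %d bytes", len(packData))
	}
	return binary.LittleEndian.Uint32(packData[len(packData)-4:]), nil
}

func main() {
	demo := make([]byte, 64)
	binary.LittleEndian.PutUint32(demo[60:], 60) // pretend a 60-byte header precedes the trailer
	n, _ := headerLength(demo)
	fmt.Println(n) // 60
}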
@@ -179,18 +184,6 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
 	return
 }
 
-// ID returns the ID of all data written so far.
-func (p *Packer) ID() backend.ID {
-	p.m.Lock()
-	defer p.m.Unlock()
-
-	hash := p.hw.Sum(nil)
-	id := backend.ID{}
-	copy(id[:], hash)
-
-	return id
-}
-
 // Size returns the number of bytes written so far.
 func (p *Packer) Size() uint {
 	p.m.Lock()
@@ -215,11 +208,6 @@ func (p *Packer) Blobs() []Blob {
 	return p.blobs
 }
 
-// Writer returns the underlying writer.
-func (p *Packer) Writer() io.Writer {
-	return p.wr
-}
-
 func (p *Packer) String() string {
 	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
 }
pack/pack_test.go

@@ -34,23 +34,19 @@ func TestCreatePack(t *testing.T) {
 		bufs = append(bufs, Buf{data: b, id: h})
 	}
 
-	file := bytes.NewBuffer(nil)
-
 	// create random keys
 	k := crypto.NewRandomKey()
 
 	// pack blobs
-	p := pack.NewPacker(k, file)
+	p := pack.NewPacker(k, nil)
 	for _, b := range bufs {
 		p.Add(pack.Tree, b.id, bytes.NewReader(b.data))
 	}
 
-	// write file
-	n, err := p.Finalize()
+	packData, err := p.Finalize()
 	OK(t, err)
 
 	written := 0
-	// data
 	for _, l := range lengths {
 		written += l
 	}
@@ -62,11 +58,11 @@ func TestCreatePack(t *testing.T) {
 	written += crypto.Extension
 
 	// check length
-	Equals(t, uint(written), n)
+	Equals(t, written, len(packData))
 	Equals(t, uint(written), p.Size())
 
 	// read and parse it again
-	rd := bytes.NewReader(file.Bytes())
+	rd := bytes.NewReader(packData)
 	np, err := pack.NewUnpacker(k, rd)
 	OK(t, err)
 	Equals(t, len(np.Entries), len(bufs))
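
The length check follows directly from Finalize's arithmetic: a finalized pack holds the blob bytes, one fixed-size header entry per blob, the crypto overhead on the encrypted header, and a 4-byte length trailer. A sketch of that accounting (expectedPackSize is illustrative; the real entrySize and crypto.Extension values live in the pack and crypto packages):

package main

import "fmt"

// expectedPackSize mirrors the arithmetic checked in TestCreatePack.
func expectedPackSize(blobLengths []int, entrySize, cryptoExtension int) int {
	total := 0
	for _, l := range blobLengths {
		total += l // blob bytes, as copied by Add
	}
	total += len(blobLengths) * entrySize // plaintext header entries
	total += cryptoExtension              // encryption overhead on the header
	total += 4                            // trailing uint32 header length
	return total
}

func main() {
	// illustrative values only, not the real constants
	fmt.Println(expectedPackSize([]int{100, 200, 300}, 37, 32))
}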
packer_manager.go

@@ -42,12 +42,8 @@ func (r *packerManager) findPacker(size uint) (*pack.Packer, error) {
 	}
 
 	// no suitable packer found, return new
-	blob, err := r.be.Create()
-	if err != nil {
-		return nil, err
-	}
-	debug.Log("Repo.findPacker", "create new pack %p for %d bytes", blob, size)
-	return pack.NewPacker(r.key, blob), nil
+	debug.Log("Repo.findPacker", "create new pack for %d bytes", size)
+	return pack.NewPacker(r.key, nil), nil
 }
 
 // insertPacker appends p to s.packs.
@@ -62,28 +58,29 @@ func (r *packerManager) insertPacker(p *pack.Packer) {
 // savePacker stores p in the backend.
 func (r *Repository) savePacker(p *pack.Packer) error {
 	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
-	_, err := p.Finalize()
+	data, err := p.Finalize()
 	if err != nil {
 		return err
 	}
 
-	// move file to the final location
-	sid := p.ID()
-	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
+	id := backend.Hash(data)
+	h := backend.Handle{Type: backend.Data, Name: id.String()}
+
+	err = r.be.Save(h, data)
 	if err != nil {
-		debug.Log("Repo.savePacker", "blob Finalize() error: %v", err)
+		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
 		return err
 	}
 
-	debug.Log("Repo.savePacker", "saved as %v", sid.Str())
+	debug.Log("Repo.savePacker", "saved as %v", h)
 
 	// update blobs in the index
 	for _, b := range p.Blobs() {
-		debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str())
+		debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), id.Str())
 		r.idx.Current().Store(PackedBlob{
 			Type:   b.Type,
 			ID:     b.ID,
-			PackID: sid,
+			PackID: id,
 			Offset: b.Offset,
 			Length: uint(b.Length),
 		})
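
The pack is now stored content-addressed over exactly the bytes Finalize returned; the HashingWriter removed from pack.go used sha256, so backend.Hash is presumably SHA-256 over the same data. A stdlib-only sketch of deriving such a name (packName is hypothetical):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// packName derives a content-addressed name for a finished pack by
// hashing the same bytes that are handed to the backend's Save.
func packName(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(packName([]byte("finished pack bytes")))
}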