From ea29ad6f96f897517259557cccf13cc315c3db49 Mon Sep 17 00:00:00 2001
From: Alexander Neumann <alexander@bumpern.de>
Date: Sun, 24 Jan 2016 19:30:14 +0100
Subject: [PATCH] Remove last occurrence of Create()

---
 pack/pack.go                 | 76 +++++++++++++++---------------------
 pack/pack_test.go            | 12 ++----
 repository/packer_manager.go | 25 ++++++------
 3 files changed, 47 insertions(+), 66 deletions(-)

diff --git a/pack/pack.go b/pack/pack.go
index 697adb8aa..8a96a942d 100644
--- a/pack/pack.go
+++ b/pack/pack.go
@@ -1,7 +1,7 @@
 package pack
 
 import (
-	"crypto/sha256"
+	"bytes"
 	"encoding/binary"
 	"errors"
 	"fmt"
@@ -12,8 +12,10 @@ import (
 	"github.com/restic/restic/crypto"
 )
 
+// BlobType specifies what a blob stored in a pack is.
 type BlobType uint8
 
+// These are the blob types that can be stored in a pack.
 const (
 	Data BlobType = 0
 	Tree          = 1
@@ -30,6 +32,7 @@ func (t BlobType) String() string {
 	return fmt.Sprintf("<BlobType %d>", t)
 }
 
+// MarshalJSON encodes the BlobType into JSON.
 func (t BlobType) MarshalJSON() ([]byte, error) {
 	switch t {
 	case Data:
@@ -41,6 +44,7 @@ func (t BlobType) MarshalJSON() ([]byte, error) {
 	return nil, errors.New("unknown blob type")
 }
 
+// UnmarshalJSON decodes the BlobType from JSON.
 func (t *BlobType) UnmarshalJSON(buf []byte) error {
 	switch string(buf) {
 	case `"data"`:
@@ -79,16 +83,15 @@ type Packer struct {
 
 	bytes uint
 	k     *crypto.Key
-	wr    io.Writer
-	hw    *backend.HashingWriter
+	buf   *bytes.Buffer
 
 	m sync.Mutex
 }
 
 // NewPacker returns a new Packer that can be used to pack blobs
 // together.
-func NewPacker(k *crypto.Key, w io.Writer) *Packer {
-	return &Packer{k: k, wr: w, hw: backend.NewHashingWriter(w, sha256.New())}
+func NewPacker(k *crypto.Key, buf []byte) *Packer {
+	return &Packer{k: k, buf: bytes.NewBuffer(buf)}
 }
 
 // Add saves the data read from rd as a new blob to the packer. Returned is the
@@ -99,7 +102,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
 
 	c := Blob{Type: t, ID: id}
 
-	n, err := io.Copy(p.hw, rd)
+	n, err := io.Copy(p.buf, rd)
 	c.Length = uint(n)
 	c.Offset = p.bytes
 	p.bytes += uint(n)
@@ -118,45 +121,47 @@ type headerEntry struct {
 }
 
 // Finalize writes the header for all added blobs and finalizes the pack.
-// Returned are the complete number of bytes written, including the header.
-// After Finalize() has finished, the ID of this pack can be obtained by
-// calling ID().
-func (p *Packer) Finalize() (bytesWritten uint, err error) {
+// Returned are all bytes written, including the header.
+func (p *Packer) Finalize() ([]byte, error) {
 	p.m.Lock()
 	defer p.m.Unlock()
 
-	bytesWritten = p.bytes
+	bytesWritten := p.bytes
 
-	// create writer to encrypt header
-	wr := crypto.EncryptTo(p.k, p.hw)
-
-	bytesHeader, err := p.writeHeader(wr)
+	hdrBuf := bytes.NewBuffer(nil)
+	bytesHeader, err := p.writeHeader(hdrBuf)
 	if err != nil {
-		wr.Close()
-		return bytesWritten + bytesHeader, err
+		return nil, err
 	}
 
-	bytesWritten += bytesHeader
-
-	// finalize encrypted header
-	err = wr.Close()
+	encryptedHeader, err := crypto.Encrypt(p.k, nil, hdrBuf.Bytes())
 	if err != nil {
-		return bytesWritten, err
+		return nil, err
 	}
 
-	// account for crypto overhead
-	bytesWritten += crypto.Extension
+	// append the header
+	n, err := p.buf.Write(encryptedHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	hdrBytes := bytesHeader + crypto.Extension
+	if uint(n) != hdrBytes {
+		return nil, errors.New("wrong number of bytes written")
+	}
+
+	bytesWritten += hdrBytes
 
 	// write length
-	err = binary.Write(p.hw, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
+	err = binary.Write(p.buf, binary.LittleEndian, uint32(uint(len(p.blobs))*entrySize+crypto.Extension))
 	if err != nil {
-		return bytesWritten, err
+		return nil, err
 	}
 	bytesWritten += uint(binary.Size(uint32(0)))
 
 	p.bytes = uint(bytesWritten)
 
-	return bytesWritten, nil
+	return p.buf.Bytes(), nil
 }
 
 // writeHeader constructs and writes the header to wr.
@@ -179,18 +184,6 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
 	return
 }
 
-// ID returns the ID of all data written so far.
-func (p *Packer) ID() backend.ID {
-	p.m.Lock()
-	defer p.m.Unlock()
-
-	hash := p.hw.Sum(nil)
-	id := backend.ID{}
-	copy(id[:], hash)
-
-	return id
-}
-
 // Size returns the number of bytes written so far.
 func (p *Packer) Size() uint {
 	p.m.Lock()
@@ -215,11 +208,6 @@ func (p *Packer) Blobs() []Blob {
 	return p.blobs
 }
 
-// Writer returns the underlying writer.
-func (p *Packer) Writer() io.Writer {
-	return p.wr
-}
-
 func (p *Packer) String() string {
 	return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
 }
diff --git a/pack/pack_test.go b/pack/pack_test.go
index 28ef4c22c..0d5c1f155 100644
--- a/pack/pack_test.go
+++ b/pack/pack_test.go
@@ -34,23 +34,19 @@ func TestCreatePack(t *testing.T) {
 		bufs = append(bufs, Buf{data: b, id: h})
 	}
 
-	file := bytes.NewBuffer(nil)
-
 	// create random keys
 	k := crypto.NewRandomKey()
 
 	// pack blobs
-	p := pack.NewPacker(k, file)
+	p := pack.NewPacker(k, nil)
 	for _, b := range bufs {
 		p.Add(pack.Tree, b.id, bytes.NewReader(b.data))
 	}
 
-	// write file
-	n, err := p.Finalize()
+	packData, err := p.Finalize()
 	OK(t, err)
 
 	written := 0
-	// data
 	for _, l := range lengths {
 		written += l
 	}
@@ -62,11 +58,11 @@ func TestCreatePack(t *testing.T) {
 	written += crypto.Extension
 
 	// check length
-	Equals(t, uint(written), n)
+	Equals(t, written, len(packData))
 	Equals(t, uint(written), p.Size())
 
 	// read and parse it again
-	rd := bytes.NewReader(file.Bytes())
+	rd := bytes.NewReader(packData)
 	np, err := pack.NewUnpacker(k, rd)
 	OK(t, err)
 	Equals(t, len(np.Entries), len(bufs))
diff --git a/repository/packer_manager.go b/repository/packer_manager.go
index 99b74cea4..42ffe96cb 100644
--- a/repository/packer_manager.go
+++ b/repository/packer_manager.go
@@ -42,12 +42,8 @@ func (r *packerManager) findPacker(size uint) (*pack.Packer, error) {
 	}
 
 	// no suitable packer found, return new
-	blob, err := r.be.Create()
-	if err != nil {
-		return nil, err
-	}
-	debug.Log("Repo.findPacker", "create new pack %p for %d bytes", blob, size)
-	return pack.NewPacker(r.key, blob), nil
+	debug.Log("Repo.findPacker", "create new pack for %d bytes", size)
+	return pack.NewPacker(r.key, nil), nil
 }
 
 // insertPacker appends p to s.packs.
@@ -62,28 +58,29 @@ func (r *packerManager) insertPacker(p *pack.Packer) {
 // savePacker stores p in the backend.
 func (r *Repository) savePacker(p *pack.Packer) error {
 	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
-	_, err := p.Finalize()
+	data, err := p.Finalize()
 	if err != nil {
 		return err
 	}
 
-	// move file to the final location
-	sid := p.ID()
-	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
+	id := backend.Hash(data)
+	h := backend.Handle{Type: backend.Data, Name: id.String()}
+
+	err = r.be.Save(h, data)
 	if err != nil {
-		debug.Log("Repo.savePacker", "blob Finalize() error: %v", err)
+		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
 		return err
 	}
 
-	debug.Log("Repo.savePacker", "saved as %v", sid.Str())
+	debug.Log("Repo.savePacker", "saved as %v", h)
 
 	// update blobs in the index
 	for _, b := range p.Blobs() {
-		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
+		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), id.Str())
 		r.idx.Current().Store(PackedBlob{
 			Type:   b.Type,
 			ID:     b.ID,
-			PackID: sid,
+			PackID: id,
 			Offset: b.Offset,
 			Length: uint(b.Length),
 		})