2015-11-18 19:20:25 +00:00
|
|
|
package repository
|
|
|
|
|
|
|
|
import (
|
2017-01-22 16:53:00 +00:00
|
|
|
"crypto/sha256"
|
2016-03-06 11:26:25 +00:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
2016-08-31 18:29:54 +00:00
|
|
|
"restic"
|
2015-11-18 19:20:25 +00:00
|
|
|
"sync"
|
|
|
|
|
2016-09-01 20:17:37 +00:00
|
|
|
"restic/errors"
|
2017-01-22 16:53:00 +00:00
|
|
|
"restic/hashing"
|
2016-08-21 15:48:36 +00:00
|
|
|
|
2016-02-14 14:29:28 +00:00
|
|
|
"restic/crypto"
|
|
|
|
"restic/debug"
|
Fix 567 (#570)
* Patch for https://github.com/restic/restic/issues/567
Backup also files on windows with longer pathnames than 255 chars (e.g. from node).
as fd0 says "So, as far as I can see, we need to have custom methods for all functions that accept a path, so that on Windows we can substitute the normal (possibly relative) path used within restic by an (absolute) UNC path, and only then call the underlying functions like os.Stat(), os.Lstat(), os.Open() and so on.
I've already thought about adding a generic abstraction for the file system (so we can mock this easier in tests), and this looks like a good opportunity to build it."
* fixed building tests
* Restructured patches
Add Wrapper for filepath.Walk
* using \\?\ requires absolute pathes to be used.
Now all tests run
* used gofmt on the code
* Restructured Code. No patches dir, integrate the file functions into restic/fs/
There is still an issue, because restic.fs.Open has a different api the os.Open, which returns the result of OpenFile, but takes only a string
* Changed the last os.Open() calls to fs.Open() after extending the File interface
* fixed name-clash of restic.fs and fuse.fs detected by travis
* fixed fmt with gofmt
* c&p failure: removed fixpath() call.
* missing include
* fixed includes in linux variant
* Fix for Linux. Fd() is required on File interface
* done gofmt
2016-08-15 19:59:13 +00:00
|
|
|
"restic/fs"
|
2016-02-14 14:29:28 +00:00
|
|
|
"restic/pack"
|
2015-11-18 19:20:25 +00:00
|
|
|
)
|
|
|
|
|
2016-03-05 14:58:39 +00:00
|
|
|
// Saver implements saving data in a backend.
type Saver interface {
	// Save stores the data read from the io.Reader under the given handle.
	Save(restic.Handle, io.Reader) error
}
|
|
|
|
|
2017-01-22 16:53:00 +00:00
|
|
|
// Packer holds a pack.Packer together with a hash writer.
type Packer struct {
	*pack.Packer
	// hw hashes all pack bytes as they are written to tmpfile, so the
	// pack's ID is available without re-reading the file (see savePacker).
	hw *hashing.Writer
	// tmpfile buffers the pack data on disk until it is saved to the backend.
	tmpfile *os.File
}
|
|
|
|
|
2015-11-18 19:20:25 +00:00
|
|
|
// packerManager keeps a list of open packs and creates new on demand.
type packerManager struct {
	be  Saver       // backend used to store finished packs
	key *crypto.Key // key used to encrypt pack contents
	pm  sync.Mutex  // guards packers
	packers []*Packer

	// pool provides reusable byte buffers sized (minPackSize+maxPackSize)/2;
	// initialized in newPackerManager.
	pool sync.Pool
}
|
|
|
|
|
2016-02-22 20:09:21 +00:00
|
|
|
// Limits for pack files and the number of concurrently open packers.
const (
	minPackSize = 4 * 1024 * 1024
	maxPackSize = 16 * 1024 * 1024
	maxPackers  = 200
)
|
|
|
|
|
2016-03-06 13:20:48 +00:00
|
|
|
// newPackerManager returns an new packer manager which writes temporary files
|
2016-03-06 11:26:25 +00:00
|
|
|
// to a temporary directory
|
2016-03-06 13:20:48 +00:00
|
|
|
func newPackerManager(be Saver, key *crypto.Key) *packerManager {
|
2016-03-06 12:14:06 +00:00
|
|
|
return &packerManager{
|
2016-03-06 11:26:25 +00:00
|
|
|
be: be,
|
|
|
|
key: key,
|
2016-03-06 12:14:06 +00:00
|
|
|
pool: sync.Pool{
|
|
|
|
New: func() interface{} {
|
|
|
|
return make([]byte, (minPackSize+maxPackSize)/2)
|
|
|
|
},
|
|
|
|
},
|
2016-03-06 11:26:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-18 19:20:25 +00:00
|
|
|
// findPacker returns a packer for a new blob of size bytes. Either a new one is
|
|
|
|
// created or one is returned that already has some blobs.
|
2017-01-22 16:53:00 +00:00
|
|
|
func (r *packerManager) findPacker(size uint) (packer *Packer, err error) {
|
2015-11-18 19:20:25 +00:00
|
|
|
r.pm.Lock()
|
|
|
|
defer r.pm.Unlock()
|
|
|
|
|
|
|
|
// search for a suitable packer
|
2017-01-22 16:53:00 +00:00
|
|
|
if len(r.packers) > 0 {
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log("searching packer for %d bytes\n", size)
|
2017-01-22 16:53:00 +00:00
|
|
|
for i, p := range r.packers {
|
|
|
|
if p.Packer.Size()+size < maxPackSize {
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log("found packer %v", p)
|
2015-11-18 19:20:25 +00:00
|
|
|
// remove from list
|
2017-01-22 16:53:00 +00:00
|
|
|
r.packers = append(r.packers[:i], r.packers[i+1:]...)
|
2015-11-18 19:20:25 +00:00
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// no suitable packer found, return new
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log("create new pack for %d bytes", size)
|
2016-03-06 12:14:06 +00:00
|
|
|
tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
|
2016-03-06 11:26:25 +00:00
|
|
|
if err != nil {
|
2016-08-29 20:16:58 +00:00
|
|
|
return nil, errors.Wrap(err, "ioutil.TempFile")
|
2016-03-06 11:26:25 +00:00
|
|
|
}
|
|
|
|
|
2017-01-22 16:53:00 +00:00
|
|
|
hw := hashing.NewWriter(tmpfile, sha256.New())
|
|
|
|
p := pack.NewPacker(r.key, hw)
|
|
|
|
packer = &Packer{
|
|
|
|
Packer: p,
|
|
|
|
hw: hw,
|
|
|
|
tmpfile: tmpfile,
|
|
|
|
}
|
|
|
|
|
|
|
|
return packer, nil
|
2015-11-18 19:20:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// insertPacker appends p to s.packs.
|
2017-01-22 16:53:00 +00:00
|
|
|
func (r *packerManager) insertPacker(p *Packer) {
|
2015-11-18 19:20:25 +00:00
|
|
|
r.pm.Lock()
|
|
|
|
defer r.pm.Unlock()
|
|
|
|
|
2017-01-22 16:53:00 +00:00
|
|
|
r.packers = append(r.packers, p)
|
|
|
|
debug.Log("%d packers\n", len(r.packers))
|
2015-11-18 19:20:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// savePacker stores p in the backend.
|
2017-01-22 16:53:00 +00:00
|
|
|
func (r *Repository) savePacker(p *Packer) error {
|
|
|
|
debug.Log("save packer with %d blobs\n", p.Packer.Count())
|
|
|
|
_, err := p.Packer.Finalize()
|
2015-11-18 19:20:25 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-01-23 17:45:15 +00:00
|
|
|
_, err = p.tmpfile.Seek(0, 0)
|
2016-03-06 11:26:25 +00:00
|
|
|
if err != nil {
|
2017-01-23 17:45:15 +00:00
|
|
|
return errors.Wrap(err, "Seek")
|
2016-03-06 11:26:25 +00:00
|
|
|
}
|
|
|
|
|
2017-01-22 16:53:00 +00:00
|
|
|
id := restic.IDFromHash(p.hw.Sum(nil))
|
2016-09-01 19:19:30 +00:00
|
|
|
h := restic.Handle{Type: restic.DataFile, Name: id.String()}
|
2016-01-24 18:30:14 +00:00
|
|
|
|
2017-01-23 17:45:15 +00:00
|
|
|
err = r.be.Save(h, p.tmpfile)
|
2015-11-18 19:20:25 +00:00
|
|
|
if err != nil {
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log("Save(%v) error: %v", h, err)
|
2015-11-18 19:20:25 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log("saved as %v", h)
|
2015-11-18 19:20:25 +00:00
|
|
|
|
2017-01-23 17:45:15 +00:00
|
|
|
err = p.tmpfile.Close()
|
2017-01-22 16:53:00 +00:00
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "close tempfile")
|
|
|
|
}
|
|
|
|
|
|
|
|
err = fs.Remove(p.tmpfile.Name())
|
2016-03-06 12:14:06 +00:00
|
|
|
if err != nil {
|
2016-08-29 20:16:58 +00:00
|
|
|
return errors.Wrap(err, "Remove")
|
2016-03-06 12:14:06 +00:00
|
|
|
}
|
|
|
|
|
2015-11-18 19:20:25 +00:00
|
|
|
// update blobs in the index
|
2017-01-22 16:53:00 +00:00
|
|
|
for _, b := range p.Packer.Blobs() {
|
2016-09-27 20:35:08 +00:00
|
|
|
debug.Log(" updating blob %v to pack %v", b.ID.Str(), id.Str())
|
2017-01-02 13:14:51 +00:00
|
|
|
r.idx.Store(restic.PackedBlob{
|
2016-08-31 20:39:36 +00:00
|
|
|
Blob: restic.Blob{
|
|
|
|
Type: b.Type,
|
|
|
|
ID: b.ID,
|
|
|
|
Offset: b.Offset,
|
|
|
|
Length: uint(b.Length),
|
|
|
|
},
|
2016-01-24 18:30:14 +00:00
|
|
|
PackID: id,
|
2015-11-18 19:20:25 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// countPacker returns the number of open (unfinished) packers.
|
|
|
|
func (r *packerManager) countPacker() int {
|
|
|
|
r.pm.Lock()
|
|
|
|
defer r.pm.Unlock()
|
|
|
|
|
2017-01-22 16:53:00 +00:00
|
|
|
return len(r.packers)
|
2015-11-18 19:20:25 +00:00
|
|
|
}
|