forked from TrueCloudLab/restic
Merge branch 'cleanups'
This commit is contained in:
commit 13a42ec5ec
14 changed files with 81 additions and 157 deletions
archiver.go — 151 changed lines

@@ -20,14 +20,14 @@ import (
 )
 
 const (
     maxConcurrentBlobs = 32
     maxConcurrency     = 10
-    maxConcurrencyPreload = 20
 )
 
 var archiverAbortOnAllErrors = func(str string, fi os.FileInfo, err error) error { return err }
 var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }
 
+// Archiver is used to backup a set of directories.
 type Archiver struct {
     s *server.Server
 
@@ -37,6 +37,7 @@ type Archiver struct {
     Filter func(item string, fi os.FileInfo) bool
 }
 
+// NewArchiver returns a new archiver.
 func NewArchiver(s *server.Server) *Archiver {
     arch := &Archiver{
         s: s,
@@ -53,6 +54,7 @@ func NewArchiver(s *server.Server) *Archiver {
     return arch
 }
 
+// Save stores a blob read from rd in the server.
 func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
     debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())
 
@@ -73,6 +75,7 @@ func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Re
     return nil
 }
 
+// SaveTreeJSON stores a tree in the server.
 func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
     data, err := json.Marshal(item)
     if err != nil {
@@ -209,99 +212,6 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
     return err
 }
 
-func (arch *Archiver) saveTree(p *Progress, t *Tree) (backend.ID, error) {
-    debug.Log("Archiver.saveTree", "saveTree(%v)\n", t)
-    var wg sync.WaitGroup
-
-    // TODO: do all this in parallel
-    for _, node := range t.Nodes {
-        if node.tree != nil {
-            id, err := arch.saveTree(p, node.tree)
-            if err != nil {
-                return nil, err
-            }
-            node.Subtree = id
-            p.Report(Stat{Dirs: 1})
-        } else if node.Type == "file" {
-            if len(node.Content) > 0 {
-                removeContent := false
-
-                // check content
-                for _, id := range node.Content {
-                    packID, _, _, _, err := arch.s.Index().Lookup(id)
-                    if err != nil {
-                        debug.Log("Archiver.saveTree", "unable to find storage id for data blob %v: %v", id.Str(), err)
-                        arch.Error(node.path, nil, fmt.Errorf("unable to find storage id for data blob %v: %v", id.Str(), err))
-                        removeContent = true
-                        continue
-                    }
-
-                    if ok, err := arch.s.Test(backend.Data, packID.String()); !ok || err != nil {
-                        debug.Log("Archiver.saveTree", "pack %v of blob %v not in repository (error is %v)", packID, id, err)
-                        arch.Error(node.path, nil, fmt.Errorf("pack %v of blob %v not in repository (error is %v)", packID, id, err))
-                        removeContent = true
-                    }
-                }
-
-                if removeContent {
-                    debug.Log("Archiver.saveTree", "removing content for %s", node.path)
-                    node.Content = node.Content[:0]
-                }
-            }
-
-            if len(node.Content) == 0 {
-                // start goroutine
-                wg.Add(1)
-                go func(n *Node) {
-                    defer wg.Done()
-
-                    n.err = arch.SaveFile(p, n)
-                    p.Report(Stat{Files: 1})
-                }(node)
-            }
-        }
-    }
-
-    wg.Wait()
-
-    usedIDs := backend.NewIDSet()
-
-    // check for invalid file nodes
-    for _, node := range t.Nodes {
-        if node.Type == "file" && node.Content == nil && node.err == nil {
-            return nil, fmt.Errorf("node %v has empty content", node.Name)
-        }
-
-        // remember used hashes
-        if node.Type == "file" && node.Content != nil {
-            for _, id := range node.Content {
-                usedIDs.Insert(id)
-            }
-        }
-
-        if node.Type == "dir" && node.Subtree != nil {
-            usedIDs.Insert(node.Subtree)
-        }
-
-        if node.err != nil {
-            err := arch.Error(node.path, nil, node.err)
-            if err != nil {
-                return nil, err
-            }
-
-            // save error message in node
-            node.Error = node.err.Error()
-        }
-    }
-
-    id, err := arch.SaveTreeJSON(t)
-    if err != nil {
-        return nil, err
-    }
-
-    return id, nil
-}
-
 func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan struct{}, entCh <-chan pipe.Entry) {
     defer func() {
         debug.Log("Archiver.fileWorker", "done")
@@ -459,36 +369,36 @@ func (arch *Archiver) dirWorker(wg *sync.WaitGroup, p *Progress, done <-chan str
     }
 }
 
-type ArchivePipe struct {
+type archivePipe struct {
     Old <-chan WalkTreeJob
     New <-chan pipe.Job
 }
 
 func copyJobs(done <-chan struct{}, in <-chan pipe.Job, out chan<- pipe.Job) {
-    i := in
-    o := out
-
-    o = nil
-
     var (
-        j  pipe.Job
-        ok bool
+        // disable sending on the outCh until we received a job
+        outCh chan<- pipe.Job
+        // enable receiving from in
+        inCh = in
+        job  pipe.Job
+        ok   bool
     )
     for {
         select {
         case <-done:
             return
-        case j, ok = <-i:
+        case job, ok = <-inCh:
             if !ok {
-                // in ch closed, we're done
-                debug.Log("copyJobs", "in channel closed, we're done")
+                // input channel closed, we're done
+                debug.Log("copyJobs", "input channel closed, we're done")
                 return
             }
-            i = nil
-            o = out
-        case o <- j:
-            o = nil
-            i = in
+            inCh = nil
+            outCh = out
+        case outCh <- job:
+            outCh = nil
+            inCh = in
         }
     }
 }
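
A note on the copyJobs rewrite above: the renamed variables make the nil-channel idiom visible. In Go, a send or receive on a nil channel blocks forever, so setting a channel variable to nil disarms its case in the select; the loop therefore alternates between receiving one job and forwarding it, never holding more than one. A minimal standalone sketch of the same pattern (the relay function and int payload are illustrative, not part of this diff):

    package main

    import "fmt"

    // relay forwards values from in to out one at a time, using nil channels
    // to toggle which select case is armed.
    func relay(done <-chan struct{}, in <-chan int, out chan<- int) {
        var (
            outCh chan<- int // nil: sending disarmed until a value arrives
            inCh  = in       // non-nil: receiving armed
            v     int
            ok    bool
        )
        for {
            select {
            case <-done:
                return
            case v, ok = <-inCh:
                if !ok {
                    return // input closed, nothing left to forward
                }
                inCh = nil  // stop receiving until the value is passed on
                outCh = out // arm the send case
            case outCh <- v:
                outCh = nil // delivered; disarm the send case
                inCh = in   // resume receiving
            }
        }
    }

    func main() {
        done := make(chan struct{})
        in := make(chan int)
        out := make(chan int)
        go relay(done, in, out)
        go func() {
            for i := 1; i <= 3; i++ {
                in <- i
            }
            close(in)
        }()
        for i := 0; i < 3; i++ {
            fmt.Println(<-out) // 1, 2, 3
        }
        close(done)
    }
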
@@ -499,7 +409,7 @@ type archiveJob struct {
     new pipe.Job
 }
 
-func (a *ArchivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
+func (a *archivePipe) compare(done <-chan struct{}, out chan<- pipe.Job) {
     defer func() {
         close(out)
         debug.Log("ArchivePipe.compare", "done")
@@ -619,7 +529,10 @@ func (j archiveJob) Copy() pipe.Job {
     return j.new
 }
 
-func (arch *Archiver) Snapshot(p *Progress, paths []string, pid backend.ID) (*Snapshot, backend.ID, error) {
+// Snapshot creates a snapshot of the given paths. If parentID is set, this is
+// used to compare the files to the ones archived at the time this snapshot was
+// taken.
+func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID backend.ID) (*Snapshot, backend.ID, error) {
     debug.Log("Archiver.Snapshot", "start for %v", paths)
 
     debug.Break("Archiver.Snapshot")
@@ -638,14 +551,14 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, pid backend.ID) (*Sn
         return nil, nil, err
     }
 
-    jobs := ArchivePipe{}
+    jobs := archivePipe{}
 
     // use parent snapshot (if some was given)
-    if pid != nil {
-        sn.Parent = pid
+    if parentID != nil {
+        sn.Parent = parentID
 
         // load parent snapshot
-        parent, err := LoadSnapshot(arch.s, pid)
+        parent, err := LoadSnapshot(arch.s, parentID)
         if err != nil {
             return nil, nil, err
         }
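
The parentID rename carries through the whole function and reads better at call sites. A hedged caller sketch, based only on the signature visible in this hunk and assumed to live inside this package (backupOnce and the nil Progress are placeholders, not part of the diff):

    // backupOnce is a hypothetical helper: pass the previous snapshot's ID as
    // parentID, or nil for a first backup.
    func backupOnce(s *server.Server, paths []string, parentID backend.ID) (backend.ID, error) {
        arch := NewArchiver(s)
        // assumption: a nil *Progress is acceptable here
        _, id, err := arch.Snapshot(nil, paths, parentID)
        if err != nil {
            return nil, err
        }
        return id, nil
    }
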
@@ -745,6 +658,8 @@ func isFile(fi os.FileInfo) bool {
     return fi.Mode()&(os.ModeType|os.ModeCharDevice) == 0
 }
 
+// Scan traverses the dirs to collect Stat information while emitting progress
+// information with p.
 func Scan(dirs []string, p *Progress) (Stat, error) {
     p.Start()
     defer p.Done()

@@ -115,7 +115,7 @@ func TestArchivePipe(t *testing.T) {
     go testTreeWalker(done, treeCh)
     go testPipeWalker(done, pipeCh)
 
-    p := ArchivePipe{Old: treeCh, New: pipeCh}
+    p := archivePipe{Old: treeCh, New: pipeCh}
 
     ch := make(chan pipe.Job)
 

@@ -100,7 +100,7 @@ outer:
 
 // wrap around io.LimitedReader that implements io.ReadCloser
 type blobReader struct {
-    f      io.Closer
+    cl     io.Closer
     rd     io.Reader
     closed bool
 }
@@ -120,7 +120,7 @@ func (l *blobReader) Close() error {
     }
 
     if !l.closed {
-        err := l.f.Close()
+        err := l.cl.Close()
         l.closed = true
         return err
     }
@@ -128,6 +128,8 @@ func (l *blobReader) Close() error {
     return nil
 }
 
-func LimitReader(f io.ReadCloser, n int64) *blobReader {
-    return &blobReader{f: f, rd: io.LimitReader(f, n)}
+// LimitReadCloser returns a new reader wraps r in an io.LimitReader, but also
+// implements the Close() method.
+func LimitReadCloser(r io.ReadCloser, n int64) *blobReader {
+    return &blobReader{cl: r, rd: io.LimitReader(r, n)}
 }

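The f → cl rename and the new LimitReadCloser name both point at the purpose of this wrapper: io.LimitReader returns a plain io.Reader, so the Close method of the underlying file would otherwise be lost. A standalone sketch of the same pattern built from embedding (limitedReadCloser is illustrative, not the diff's type):

    package main

    import (
        "io"
        "os"
        "strings"
    )

    // limitedReadCloser limits reads to n bytes but keeps the original Close.
    type limitedReadCloser struct {
        io.Reader // the limiting reader
        io.Closer // the original closer
    }

    func limitReadCloser(r io.ReadCloser, n int64) io.ReadCloser {
        return &limitedReadCloser{Reader: io.LimitReader(r, n), Closer: r}
    }

    func main() {
        rc := io.NopCloser(strings.NewReader("hello world"))
        lim := limitReadCloser(rc, 5)
        defer lim.Close()
        io.Copy(os.Stdout, lim) // prints "hello"
    }
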
@@ -10,7 +10,7 @@ import (
 // IDSize contains the size of an ID, in bytes.
 const IDSize = hashSize
 
-// References content within a repository.
+// ID references content within a repository.
 type ID []byte
 
 // ParseID converts the given string to an ID.

@@ -312,7 +312,7 @@ func (b *Local) GetReader(t backend.Type, name string, offset, length uint) (io.
         return f, nil
     }
 
-    return backend.LimitReader(f, int64(length)), nil
+    return backend.LimitReadCloser(f, int64(length)), nil
 }
 
 // Test returns true if a blob of the given type and name exists in the backend.

@@ -446,7 +446,7 @@ func (r *SFTP) GetReader(t backend.Type, name string, offset, length uint) (io.R
         return f, nil
     }
 
-    return backend.LimitReader(f, int64(length)), nil
+    return backend.LimitReadCloser(f, int64(length)), nil
 }
 
 // Test returns true if a blob of the given type and name exists in the backend.

cache.go — 22 changed lines

@@ -13,6 +13,7 @@ import (
     "github.com/restic/restic/server"
 )
 
+// Cache is used to locally cache items from a server.
 type Cache struct {
     base string
 }
@@ -29,6 +30,7 @@ func NewCache(be backend.Identifier) (*Cache, error) {
     return &Cache{base: basedir}, nil
 }
 
+// Has checks if the local cache has the id.
 func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error) {
     filename, err := c.filename(t, subtype, id)
     if err != nil {
@@ -51,6 +53,9 @@ func (c *Cache) Has(t backend.Type, subtype string, id backend.ID) (bool, error)
     return true, nil
 }
 
+// Store returns an io.WriteCloser that is used to save new information to the
+// cache. The returned io.WriteCloser must be closed by the caller after all
+// data has been written.
 func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCloser, error) {
     filename, err := c.filename(t, subtype, id)
     if err != nil {
@@ -73,6 +78,8 @@ func (c *Cache) Store(t backend.Type, subtype string, id backend.ID) (io.WriteCl
     return file, nil
 }
 
+// Load returns information from the cache. The returned io.ReadCloser must be
+// closed by the caller.
 func (c *Cache) Load(t backend.Type, subtype string, id backend.ID) (io.ReadCloser, error) {
     filename, err := c.filename(t, subtype, id)
     if err != nil {
@@ -98,8 +105,9 @@ func (c *Cache) purge(t backend.Type, subtype string, id backend.ID) error {
     return err
 }
 
+// Clear removes information from the cache that isn't present in the server any more.
 func (c *Cache) Clear(s *server.Server) error {
-    list, err := c.List(backend.Snapshot)
+    list, err := c.list(backend.Snapshot)
     if err != nil {
         return err
     }
@@ -120,19 +128,19 @@ func (c *Cache) Clear(s *server.Server) error {
     return nil
 }
 
-type CacheEntry struct {
+type cacheEntry struct {
     ID      backend.ID
     Subtype string
 }
 
-func (c CacheEntry) String() string {
+func (c cacheEntry) String() string {
     if c.Subtype != "" {
         return c.ID.Str() + "." + c.Subtype
     }
     return c.ID.Str()
 }
 
-func (c *Cache) List(t backend.Type) ([]CacheEntry, error) {
+func (c *Cache) list(t backend.Type) ([]cacheEntry, error) {
     var dir string
 
     switch t {
@@ -145,7 +153,7 @@ func (c *Cache) List(t backend.Type) ([]CacheEntry, error) {
     fd, err := os.Open(dir)
     if err != nil {
         if os.IsNotExist(err) {
-            return []CacheEntry{}, nil
+            return []cacheEntry{}, nil
         }
         return nil, err
     }
@@ -156,7 +164,7 @@ func (c *Cache) List(t backend.Type) ([]CacheEntry, error) {
         return nil, err
     }
 
-    entries := make([]CacheEntry, 0, len(fis))
+    entries := make([]cacheEntry, 0, len(fis))
 
     for _, fi := range fis {
         parts := strings.SplitN(fi.Name(), ".", 2)
@@ -168,7 +176,7 @@ func (c *Cache) List(t backend.Type) ([]CacheEntry, error) {
             continue
         }
 
-        e := CacheEntry{ID: id}
+        e := cacheEntry{ID: id}
 
         if len(parts) == 2 {
             e.Subtype = parts[1]

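The new Cache doc comments pin down handle ownership: both Store and Load hand the caller something that must be closed. A hedged round-trip sketch based only on the signatures shown, assumed to run inside this package with "io" imported (roundTrip and the empty subtype are illustrative):

    // roundTrip writes data for id into the cache and reads it back.
    func roundTrip(c *Cache, id backend.ID, data []byte) ([]byte, error) {
        wr, err := c.Store(backend.Snapshot, "", id)
        if err != nil {
            return nil, err
        }
        if _, err := wr.Write(data); err != nil {
            wr.Close()
            return nil, err
        }
        if err := wr.Close(); err != nil { // caller must close, per the new comment
            return nil, err
        }

        rd, err := c.Load(backend.Snapshot, "", id)
        if err != nil {
            return nil, err
        }
        defer rd.Close() // likewise owned by the caller
        return io.ReadAll(rd)
    }
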
@@ -12,16 +12,17 @@ const (
     MiB = 1024 * KiB
 
     // WindowSize is the size of the sliding window.
-    WindowSize = 64
+    windowSize = 64
 
     // aim to create chunks of 20 bits or about 1MiB on average.
-    AverageBits = 20
+    averageBits = 20
 
-    // Chunks should be in the range of 512KiB to 8MiB.
+    // MinSize is the minimal size of a chunk.
     MinSize = 512 * KiB
+    // MaxSize is the maximal size of a chunk.
     MaxSize = 8 * MiB
 
-    splitmask = (1 << AverageBits) - 1
+    splitmask = (1 << averageBits) - 1
 )
 
 type tables struct {
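
These constants encode the chunker's target sizes: a boundary is taken when the low averageBits bits of the rolling hash match a fixed pattern, which under a uniform hash happens about once every 2^20 bytes, and MinSize/MaxSize clamp the result. A tiny self-contained check of that arithmetic (the actual boundary test lives in Next() and is not part of this hunk):

    package main

    import "fmt"

    func main() {
        const (
            KiB = 1024
            MiB = 1024 * KiB

            averageBits = 20
            splitmask   = (1 << averageBits) - 1 // 0xfffff
        )

        // One position in 2^averageBits satisfies (digest & splitmask) == pattern,
        // so the expected distance between boundaries is splitmask+1 bytes.
        fmt.Printf("splitmask           = %#x\n", splitmask)
        fmt.Printf("expected chunk size = %d bytes (%d MiB)\n", splitmask+1, (splitmask+1)/MiB)
    }
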
@@ -39,7 +40,7 @@ func init() {
     cache.entries = make(map[Pol]*tables)
 }
 
-// A chunk is one content-dependent chunk of bytes whose end was cut when the
+// Chunk is one content-dependent chunk of bytes whose end was cut when the
 // Rabin Fingerprint had the value stored in Cut.
 type Chunk struct {
     Start uint
@@ -52,7 +53,7 @@ func (c Chunk) Reader(r io.ReaderAt) io.Reader {
     return io.NewSectionReader(r, int64(c.Start), int64(c.Length))
 }
 
-// A chunker internally holds everything needed to split content.
+// Chunker splits content with Rabin Fingerprints.
 type Chunker struct {
     pol      Pol
     polShift uint
@@ -61,7 +62,7 @@ type Chunker struct {
     rd     io.Reader
     closed bool
 
-    window [WindowSize]byte
+    window [windowSize]byte
     wpos   int
 
     buf []byte
@@ -97,7 +98,7 @@ func (c *Chunker) Reset(rd io.Reader, p Pol) {
     c.fillTables()
     c.rd = rd
 
-    for i := 0; i < WindowSize; i++ {
+    for i := 0; i < windowSize; i++ {
         c.window[i] = 0
     }
     c.closed = false
@@ -116,7 +117,7 @@ func (c *Chunker) Reset(rd io.Reader, p Pol) {
     }
 
     // do not start a new chunk unless at least MinSize bytes have been read
-    c.pre = MinSize - WindowSize
+    c.pre = MinSize - windowSize
 }
 
 // Calculate out_table and mod_table for optimization. Must be called only
@@ -154,7 +155,7 @@ func (c *Chunker) fillTables() {
     var h Pol
 
     h = appendByte(h, byte(b), c.pol)
-    for i := 0; i < WindowSize-1; i++ {
+    for i := 0; i < windowSize-1; i++ {
         h = appendByte(h, 0, c.pol)
     }
     c.tables.out[b] = h
@@ -246,7 +247,7 @@ func (c *Chunker) Next() (*Chunk, error) {
     out := c.window[c.wpos]
     c.window[c.wpos] = b
     c.digest ^= uint64(c.tables.out[out])
-    c.wpos = (c.wpos + 1) % WindowSize
+    c.wpos = (c.wpos + 1) % windowSize
 
     // c.append(b)
     index := c.digest >> c.polShift
@@ -284,7 +285,7 @@ func (c *Chunker) Next() (*Chunk, error) {
     c.Reset(c.rd, c.pol)
     c.pos = pos
     c.start = pos
-    c.pre = MinSize - WindowSize
+    c.pre = MinSize - windowSize
 
     return chunk, nil
 }
@@ -330,7 +331,7 @@ func (c *Chunker) slide(b byte) {
     out := c.window[c.wpos]
     c.window[c.wpos] = b
     c.digest ^= uint64(c.tables.out[out])
-    c.wpos = (c.wpos + 1) % WindowSize
+    c.wpos = (c.wpos + 1) % windowSize
 
     c.append(b)
 }

node.go — 3 changed lines

@@ -17,6 +17,7 @@ import (
     "github.com/restic/restic/server"
 )
 
+// Node is a file, directory or other item in a backup.
 type Node struct {
     Name string `json:"name"`
     Type string `json:"type"`
@@ -62,6 +63,7 @@ func (node Node) Tree() *Tree {
     return node.tree
 }
 
+// NodeFromFileInfo returns a new node from the given path and FileInfo.
 func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
     node := &Node{
         path: path,
@@ -100,6 +102,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
     return ""
 }
 
+// CreateAt creates the node at the given path and restores all the meta data.
 func (node *Node) CreateAt(path string, s *server.Server) error {
     switch node.Type {
     case "dir":

@@ -12,6 +12,7 @@ import (
     "github.com/juju/errors"
 )
 
+// Restorer is used to restore a snapshot to a directory.
 type Restorer struct {
     s  *server.Server
     sn *Snapshot
@@ -103,6 +104,7 @@ func (res *Restorer) RestoreTo(dir string) error {
     return res.restoreTo(dir, "", res.sn.Tree)
 }
 
+// Snapshot returns the snapshot this restorer is configured to use.
 func (res *Restorer) Snapshot() *Snapshot {
     return res.sn
 }

@@ -258,13 +258,13 @@ func (k *Key) DecryptFrom(rd io.Reader) (io.ReadCloser, error) {
     return crypto.DecryptFrom(k.master, rd)
 }
 
-// Master() returns the master keys for this repository. Only included for
+// Master returns the master keys for this repository. Only included for
 // debug purposes.
 func (k *Key) Master() *crypto.Key {
     return k.master
 }
 
-// User() returns the user keys for this key. Only included for debug purposes.
+// User returns the user keys for this key. Only included for debug purposes.
 func (k *Key) User() *crypto.Key {
     return k.user
 }

@@ -422,12 +422,10 @@ func (s *Server) Flush() error {
     return nil
 }
 
-// Returns the backend used for this server.
 func (s *Server) Backend() backend.Backend {
     return s.be
 }
 
-// Returns the index of this server.
 func (s *Server) Index() *Index {
     return s.idx
 }

@@ -21,7 +21,7 @@ func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
     }
 }
 
-// ok fails the test if an err is not nil.
+// OK fails the test if an err is not nil.
 func OK(tb testing.TB, err error) {
     if err != nil {
         _, file, line, _ := runtime.Caller(1)
@@ -30,7 +30,7 @@ func OK(tb testing.TB, err error) {
     }
 }
 
-// equals fails the test if exp is not equal to act.
+// Equals fails the test if exp is not equal to act.
 func Equals(tb testing.TB, exp, act interface{}) {
     if !reflect.DeepEqual(exp, act) {
         _, file, line, _ := runtime.Caller(1)

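With the doc comments now matching the exported names, a call site reads cleanly. A small usage sketch, assuming it is placed in a test file where OK and Equals are visible (the add function and test are hypothetical):

    func add(a, b int) int { return a + b }

    func TestAdd(t *testing.T) {
        OK(t, nil)              // fails the test if the error is non-nil
        Equals(t, 4, add(2, 2)) // fails unless exp and act are deeply equal
    }
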
@@ -1,5 +0,0 @@
-package restic
-
-// Add constant O_PATH missing from Go1.3, will be added to Go1.4 according to
-// https://code.google.com/p/go/issues/detail?id=7830
-const O_PATH = 010000000