package khepri

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"

	"github.com/fd0/khepri/backend"
	"github.com/fd0/khepri/chunker"
)
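
// maxConcurrentFiles limits how many files are read and saved concurrently.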
const (
	maxConcurrentFiles = 32
)
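
// Archiver holds the state for creating a single snapshot: the backend to
// write to, the encryption key, the content handler and the storage map of
// all blobs referenced by the snapshot, together with callbacks for error
// handling, filtering and progress reporting.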
type Archiver struct {
	be  backend.Server
	key *Key
	ch  *ContentHandler

	m    sync.Mutex
	smap *StorageMap // blobs used for the current snapshot

	fileToken chan struct{}

	Stats Stats

	Error  func(dir string, fi os.FileInfo, err error) error
	Filter func(item string, fi os.FileInfo) bool

	ScannerUpdate func(stats Stats)
	SaveUpdate    func(stats Stats)

	sum sync.Mutex // for SaveUpdate
}
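
// Stats summarizes what the archiver has scanned or stored so far.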
type Stats struct {
	Files       int
	Directories int
	Other       int
	Bytes       uint64
}
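
// NewArchiver returns a new Archiver that stores data on the given backend,
// encrypted with key. It fills the file token pool and loads the blobs of all
// existing snapshots.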
func NewArchiver(be backend.Server, key *Key) (*Archiver, error) {
	var err error
	arch := &Archiver{
		be:        be,
		key:       key,
		fileToken: make(chan struct{}, maxConcurrentFiles),
	}

	// fill the file token pool
	for i := 0; i < maxConcurrentFiles; i++ {
		arch.fileToken <- struct{}{}
	}

	// abort on all errors
	arch.Error = func(dir string, fi os.FileInfo, err error) error { return err }
	// allow all files
	arch.Filter = func(string, os.FileInfo) bool { return true }
	// do nothing
	arch.ScannerUpdate = func(Stats) {}

	arch.smap = NewStorageMap()
	arch.ch, err = NewContentHandler(be, key)
	if err != nil {
		return nil, err
	}

	// load all blobs from all snapshots
	err = arch.ch.LoadAllSnapshots()
	if err != nil {
		return nil, err
	}

	return arch, nil
}
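
// saveUpdate calls the SaveUpdate callback, if one is set, serialized by the
// sum mutex.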
func (arch *Archiver) saveUpdate(stats Stats) {
	if arch.SaveUpdate != nil {
		arch.sum.Lock()
		defer arch.sum.Unlock()
		arch.SaveUpdate(stats)
	}
}
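
// Save stores data on the backend as a blob of type t via the content handler
// and records the blob in the storage map for the current snapshot.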
func (arch *Archiver) Save(t backend.Type, data []byte) (*Blob, error) {
	blob, err := arch.ch.Save(t, data)
	if err != nil {
		return nil, err
	}

	// store blob in storage map for current snapshot
	arch.m.Lock()
	defer arch.m.Unlock()
	arch.smap.Insert(blob)

	return blob, nil
}
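
// SaveJSON serializes item as JSON and stores it on the backend as a blob of
// type t, recording the blob in the storage map for the current snapshot.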
func (arch *Archiver) SaveJSON(t backend.Type, item interface{}) (*Blob, error) {
	blob, err := arch.ch.SaveJSON(t, item)
	if err != nil {
		return nil, err
	}

	// store blob in storage map for current snapshot
	arch.m.Lock()
	defer arch.m.Unlock()
	arch.smap.Insert(blob)

	return blob, nil
}

// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(node *Node) error {
	file, err := os.Open(node.path)
	if err != nil {
		return err
	}
	defer file.Close()

	var blobs Blobs

	// if the file is small enough, store it directly
	if node.Size < chunker.MinSize {
		buf, err := ioutil.ReadAll(file)
		if err != nil {
			return err
		}

		blob, err := arch.ch.Save(backend.Data, buf)
		if err != nil {
			return err
		}

		arch.saveUpdate(Stats{Bytes: blob.Size})

		blobs = Blobs{blob}
	} else {
		// otherwise store all chunks
		chunker := chunker.New(file)

		for {
			chunk, err := chunker.Next()
			if err == io.EOF {
				break
			}

			if err != nil {
				return err
			}

			blob, err := arch.ch.Save(backend.Data, chunk.Data)
			if err != nil {
				return err
			}

			arch.saveUpdate(Stats{Bytes: blob.Size})

			blobs = append(blobs, blob)
		}
	}

	// remember the blob IDs in the node and in the storage map
	node.Content = make([]backend.ID, len(blobs))
	for i, blob := range blobs {
		node.Content[i] = blob.ID
		arch.m.Lock()
		arch.smap.Insert(blob)
		arch.m.Unlock()
	}

	return nil
}
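
// loadTree recursively reads the directory dir and returns a Tree of nodes,
// updating the scanner statistics along the way.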
func (arch *Archiver) loadTree(dir string) (*Tree, error) {
	// open and list path
	fd, err := os.Open(dir)
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	entries, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}

	tree := Tree{}

	for _, entry := range entries {
		path := filepath.Join(dir, entry.Name())

		if !arch.Filter(path, entry) {
			continue
		}

		node, err := NodeFromFileInfo(path, entry)
		if err != nil {
			// TODO: error processing
			return nil, err
		}

		tree = append(tree, node)

		if entry.IsDir() {
			node.Tree, err = arch.loadTree(path)
			if err != nil {
				return nil, err
			}
		}

		// update scanner statistics
		switch node.Type {
		case "file":
			arch.Stats.Files++
			arch.Stats.Bytes += node.Size
		case "dir":
			arch.Stats.Directories++
		default:
			arch.Stats.Other++
		}
	}

	arch.ScannerUpdate(arch.Stats)

	return &tree, nil
}
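
// LoadTree builds a Tree for path. For a single file the tree contains just
// that node; for a directory the subtree is scanned recursively.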
func (arch *Archiver) LoadTree(path string) (*Tree, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}

	node, err := NodeFromFileInfo(path, fi)
	if err != nil {
		return nil, err
	}

	if node.Type != "dir" {
		arch.Stats.Files = 1
		arch.Stats.Bytes = node.Size
		arch.ScannerUpdate(arch.Stats)
		return &Tree{node}, nil
	}

	arch.Stats.Directories = 1
	node.Tree, err = arch.loadTree(path)
	if err != nil {
		return nil, err
	}

	arch.ScannerUpdate(arch.Stats)

	return &Tree{node}, nil
}
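
// saveTree stores all nodes of the tree t on the backend. Directories are
// saved recursively; files are saved concurrently, limited by the file token
// pool. The serialized tree itself is returned as a blob.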
func (arch *Archiver) saveTree(t *Tree) (*Blob, error) {
	var wg sync.WaitGroup

	for _, node := range *t {
		if node.Tree != nil && node.Subtree == nil {
			b, err := arch.saveTree(node.Tree)
			if err != nil {
				return nil, err
			}
			node.Subtree = b.ID
			arch.saveUpdate(Stats{Directories: 1})
		} else if node.Type == "file" && len(node.Content) == 0 {
			// start goroutine
			wg.Add(1)
			go func(n *Node) {
				defer wg.Done()

				// get token
				token := <-arch.fileToken
				defer func() {
					arch.fileToken <- token
				}()

				// TODO: handle error
				arch.SaveFile(n)
				arch.saveUpdate(Stats{Files: 1})
			}(node)
		} else {
			arch.saveUpdate(Stats{Other: 1})
		}
	}

	wg.Wait()

	blob, err := arch.SaveJSON(backend.Tree, t)
	if err != nil {
		return nil, err
	}

	return blob, nil
}
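
// Snapshot saves the tree t as a new snapshot of dir and returns the snapshot
// together with the storage ID of its blob.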
func (arch *Archiver) Snapshot(dir string, t *Tree) (*Snapshot, backend.ID, error) {
	sn := NewSnapshot(dir)

	blob, err := arch.saveTree(t)
	if err != nil {
		return nil, nil, err
	}

	sn.Content = blob.ID

	// save snapshot
	sn.StorageMap = arch.smap
	blob, err = arch.SaveJSON(backend.Snapshot, sn)
	if err != nil {
		return nil, nil, err
	}

	return sn, blob.Storage, nil
}