Small refactorings

* use uint instead of uint32 in packs/indexes
* use ID.Str() for debug messages
* add ParallelIDWorkFunc
This commit is contained in:
Alexander Neumann 2015-08-08 13:47:08 +02:00
parent 2cb0fbf589
commit d42ff509ba
3 changed files with 25 additions and 8 deletions

View file

@ -57,7 +57,7 @@ func (t *BlobType) UnmarshalJSON(buf []byte) error {
// Blob is a blob within a pack.
type Blob struct {
Type BlobType
Length uint32
Length uint
ID backend.ID
Offset uint
}
@ -100,7 +100,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, rd io.Reader) (int64, error) {
c := Blob{Type: t, ID: id}
n, err := io.Copy(p.hw, rd)
c.Length = uint32(n)
c.Length = uint(n)
c.Offset = p.bytes
p.bytes += uint(n)
p.blobs = append(p.blobs, c)
@ -164,7 +164,7 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
for _, b := range p.blobs {
entry := headerEntry{
Type: b.Type,
Length: b.Length,
Length: uint32(b.Length),
ID: b.ID,
}
@ -276,7 +276,7 @@ func NewUnpacker(k *crypto.Key, entries []Blob, rd io.ReadSeeker) (*Unpacker, er
entries = append(entries, Blob{
Type: e.Type,
Length: e.Length,
Length: uint(e.Length),
ID: e.ID,
Offset: pos,
})

View file

@ -109,7 +109,7 @@ func (idx *Index) Merge(other *Index) {
for k, v := range other.pack {
if _, ok := idx.pack[k]; ok {
debug.Log("Index.Merge", "index already has key %v, updating", k[:8])
debug.Log("Index.Merge", "index already has key %v, updating", k.Str())
}
idx.pack[k] = v
@ -146,7 +146,7 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
ID: id,
Offset: blob.offset,
Type: blob.tpe,
Length: uint32(blob.length),
Length: blob.length,
},
PackID: *blob.packID,
}:
@ -166,7 +166,7 @@ func (idx *Index) Count(t pack.BlobType) (n uint) {
for id, blob := range idx.pack {
if blob.tpe == t {
n++
debug.Log("Index.Count", " blob %v counted: %v", id[:8], blob)
debug.Log("Index.Count", " blob %v counted: %v", id.Str(), blob)
}
}
@ -201,7 +201,7 @@ func (idx *Index) generatePackList(selectFn func(indexEntry) bool) ([]*packJSON,
if blob.packID.IsNull() {
debug.Log("Index.generatePackList", "blob %q has no packID! (type %v, offset %v, length %v)",
id[:8], blob.tpe, blob.offset, blob.length)
id.Str(), blob.tpe, blob.offset, blob.length)
return nil, fmt.Errorf("unable to serialize index: pack for blob %v hasn't been written yet", id)
}

View file

@ -20,6 +20,10 @@ func closeIfOpen(ch chan struct{}) {
// processing stops. If done is closed, the function should return.
type ParallelWorkFunc func(id string, done <-chan struct{}) error
// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return as soon as
// possible without completing its work.
type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error
// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yield. If f returns an error, the process is aborted and the
// first error is returned.
@ -69,3 +73,16 @@ func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWork
return nil
}
// ParallelWorkFuncParseID adapts a ParallelIDWorkFunc into a ParallelWorkFunc.
// The returned function parses its string argument into a backend.ID and
// forwards it to f; if parsing fails, the parse error is returned and f is
// never called.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
	return func(str string, done <-chan struct{}) error {
		parsedID, parseErr := backend.ParseID(str)
		if parseErr != nil {
			return parseErr
		}

		return f(parsedID, done)
	}
}