Refactor Index.Store() to take a PackedBlob

This commit is contained in:
Alexander Neumann 2015-11-02 19:05:19 +01:00
parent f3f84b1544
commit 1fc0d78913
4 changed files with 66 additions and 19 deletions

View file

@@ -90,7 +90,7 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
} }
blobsDone[b] = struct{}{} blobsDone[b] = struct{}{}
combinedIndex.Store(packedBlob.Type, packedBlob.ID, packedBlob.PackID, packedBlob.Offset, packedBlob.Length) combinedIndex.Store(packedBlob)
} }
combinedIndex.AddToSupersedes(indexID) combinedIndex.AddToSupersedes(indexID)
@@ -162,7 +162,13 @@ func (cmd CmdRebuildIndex) RebuildIndex() error {
for _, blob := range up.Entries { for _, blob := range up.Entries {
debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob) debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
combinedIndex.Store(blob.Type, blob.ID, packID, blob.Offset, blob.Length) combinedIndex.Store(repository.PackedBlob{
Type: blob.Type,
ID: blob.ID,
PackID: packID,
Offset: blob.Offset,
Length: blob.Length,
})
} }
err = rd.Close() err = rd.Close()

View file

@@ -40,12 +40,12 @@ func NewIndex() *Index {
} }
} }
func (idx *Index) store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) { func (idx *Index) store(blob PackedBlob) {
idx.pack[id] = indexEntry{ idx.pack[blob.ID] = indexEntry{
tpe: t, tpe: blob.Type,
packID: pack, packID: blob.PackID,
offset: offset, offset: blob.Offset,
length: length, length: blob.Length,
} }
} }
@@ -96,7 +96,7 @@ var IndexFull = func(idx *Index) bool {
// Store remembers the id and pack in the index. An existing entry will be // Store remembers the id and pack in the index. An existing entry will be
// silently overwritten. // silently overwritten.
func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) { func (idx *Index) Store(blob PackedBlob) {
idx.m.Lock() idx.m.Lock()
defer idx.m.Unlock() defer idx.m.Unlock()
@@ -104,10 +104,9 @@ func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset,
panic("store new item in finalized index") panic("store new item in finalized index")
} }
debug.Log("Index.Store", "pack %v contains id %v (%v), offset %v, length %v", debug.Log("Index.Store", "%v", blob)
pack.Str(), id.Str(), t, offset, length)
idx.store(t, id, pack, offset, length) idx.store(blob)
} }
// Lookup queries the index for the blob ID and returns a PackedBlob. // Lookup queries the index for the blob ID and returns a PackedBlob.
@@ -489,7 +488,13 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex() idx = NewIndex()
for _, pack := range idxJSON.Packs { for _, pack := range idxJSON.Packs {
for _, blob := range pack.Blobs { for _, blob := range pack.Blobs {
idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length) idx.store(PackedBlob{
Type: blob.Type,
ID: blob.ID,
Offset: blob.Offset,
Length: blob.Length,
PackID: pack.ID,
})
} }
} }
idx.supersedes = idxJSON.Supersedes idx.supersedes = idxJSON.Supersedes
@@ -514,7 +519,13 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
idx = NewIndex() idx = NewIndex()
for _, pack := range list { for _, pack := range list {
for _, blob := range pack.Blobs { for _, blob := range pack.Blobs {
idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length) idx.store(PackedBlob{
Type: blob.Type,
ID: blob.ID,
PackID: pack.ID,
Offset: blob.Offset,
Length: blob.Length,
})
} }
} }

View file

@@ -41,7 +41,13 @@ func TestIndexSerialize(t *testing.T) {
for j := 0; j < 20; j++ { for j := 0; j < 20; j++ {
id := randomID() id := randomID()
length := uint(i*100 + j) length := uint(i*100 + j)
idx.Store(pack.Data, id, packID, pos, length) idx.Store(repository.PackedBlob{
Type: pack.Data,
ID: id,
PackID: packID,
Offset: pos,
Length: length,
})
tests = append(tests, testEntry{ tests = append(tests, testEntry{
id: id, id: id,
@@ -95,7 +101,13 @@ func TestIndexSerialize(t *testing.T) {
for j := 0; j < 10; j++ { for j := 0; j < 10; j++ {
id := randomID() id := randomID()
length := uint(i*100 + j) length := uint(i*100 + j)
idx.Store(pack.Data, id, packID, pos, length) idx.Store(repository.PackedBlob{
Type: pack.Data,
ID: id,
PackID: packID,
Offset: pos,
Length: length,
})
newtests = append(newtests, testEntry{ newtests = append(newtests, testEntry{
id: id, id: id,
@@ -154,7 +166,13 @@ func TestIndexSize(t *testing.T) {
for j := 0; j < blobs; j++ { for j := 0; j < blobs; j++ {
id := randomID() id := randomID()
length := uint(i*100 + j) length := uint(i*100 + j)
idx.Store(pack.Data, id, packID, pos, length) idx.Store(repository.PackedBlob{
Type: pack.Data,
ID: id,
PackID: packID,
Offset: pos,
Length: length,
})
pos += length pos += length
} }
@@ -361,7 +379,13 @@ func TestIndexPacks(t *testing.T) {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
packID := randomID() packID := randomID()
idx.Store(pack.Data, randomID(), packID, 0, 23) idx.Store(repository.PackedBlob{
Type: pack.Data,
ID: randomID(),
PackID: packID,
Offset: 0,
Length: 23,
})
packs.Insert(packID) packs.Insert(packID)
} }

View file

@@ -270,7 +270,13 @@ func (r *Repository) savePacker(p *pack.Packer) error {
// update blobs in the index // update blobs in the index
for _, b := range p.Blobs() { for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str()) debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str())
r.idx.Current().Store(b.Type, b.ID, sid, b.Offset, uint(b.Length)) r.idx.Current().Store(PackedBlob{
Type: b.Type,
ID: b.ID,
PackID: sid,
Offset: b.Offset,
Length: uint(b.Length),
})
r.idx.RemoveFromInFlight(b.ID) r.idx.RemoveFromInFlight(b.ID)
} }