forked from TrueCloudLab/restic
Remove unneeded pointer to pack id
This commit is contained in:
parent 2710d6399a
commit af0d6f58b9
4 changed files with 24 additions and 24 deletions
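The diff below touches four files, but the whole change is one idea: indexEntry and the Index/MasterIndex APIs carry the pack ID as a backend.ID value instead of a *backend.ID, and the "missing pack" case becomes a zero-value check (IsNull) rather than a nil-pointer check. Here is a minimal, self-contained Go sketch of that idea; the ID type and IsNull helper are stand-ins for illustration, not restic's actual backend package:

package main

import "fmt"

// ID is a stand-in for backend.ID in this sketch: a fixed-size hash held by value.
type ID [32]byte

// IsNull reports whether id is the zero value; this replaces the former nil-pointer check.
func (id ID) IsNull() bool { return id == ID{} }

// indexEntry mirrors the changed struct: packID is a plain value, no longer a pointer.
type indexEntry struct {
    packID ID
    offset uint
    length uint
}

func main() {
    e := indexEntry{packID: ID{0x2a}, offset: 0, length: 100}
    if e.packID.IsNull() {
        panic("null pack id")
    }
    fmt.Printf("blob of length %d lives in pack %x\n", e.length, e.packID[:2])
}

Passing the ID by value costs a small copy but removes a pointer indirection per index entry and makes the zero value a natural "not set" marker.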
@@ -26,7 +26,7 @@ type Index struct {
 
 type indexEntry struct {
     tpe    pack.BlobType
-    packID *backend.ID
+    packID backend.ID
     offset uint
     length uint
 }
@@ -39,7 +39,7 @@ func NewIndex() *Index {
     }
 }
 
-func (idx *Index) store(t pack.BlobType, id backend.ID, pack *backend.ID, offset, length uint) {
+func (idx *Index) store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) {
     idx.pack[id] = indexEntry{
         tpe:    t,
         packID: pack,
@@ -95,7 +95,7 @@ func (idx *Index) Full() bool {
 
 // Store remembers the id and pack in the index. An existing entry will be
 // silently overwritten.
-func (idx *Index) Store(t pack.BlobType, id backend.ID, pack *backend.ID, offset, length uint) {
+func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) {
     idx.m.Lock()
     defer idx.m.Unlock()
 
@@ -110,7 +110,7 @@ func (idx *Index) Store(t pack.BlobType, id backend.ID, pack *backend.ID, offset
 }
 
 // Lookup returns the pack for the id.
-func (idx *Index) Lookup(id backend.ID) (packID *backend.ID, tpe pack.BlobType, offset, length uint, err error) {
+func (idx *Index) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
     idx.m.Lock()
     defer idx.m.Unlock()
 
@@ -121,7 +121,7 @@ func (idx *Index) Lookup(id backend.ID) (packID *backend.ID, tpe pack.BlobType,
     }
 
     debug.Log("Index.Lookup", "id %v not found", id.Str())
-    return nil, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
+    return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
 }
 
 // Has returns true iff the id is listed in the index.
@@ -196,7 +196,7 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
                     Type:   blob.tpe,
                     Length: blob.length,
                 },
-                PackID: *blob.packID,
+                PackID: blob.packID,
             }:
             }
         }
@@ -239,8 +239,8 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
     packs := make(map[backend.ID]*packJSON)
 
     for id, blob := range idx.pack {
-        if blob.packID == nil {
-            panic("nil pack id")
+        if blob.packID.IsNull() {
+            panic("null pack id")
         }
 
         debug.Log("Index.generatePackList", "handle blob %v", id.Str())
@@ -252,10 +252,10 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
         }
 
         // see if pack is already in map
-        p, ok := packs[*blob.packID]
+        p, ok := packs[blob.packID]
         if !ok {
             // else create new pack
-            p = &packJSON{ID: *blob.packID}
+            p = &packJSON{ID: blob.packID}
 
             // and append it to the list and map
             list = append(list, p)
@@ -380,7 +380,7 @@ func DecodeIndex(rd io.Reader) (idx *Index, err error) {
     idx = NewIndex()
     for _, pack := range idxJSON.Packs {
         for _, blob := range pack.Blobs {
-            idx.store(blob.Type, blob.ID, &pack.ID, blob.Offset, blob.Length)
+            idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length)
         }
     }
     idx.supersedes = idxJSON.Supersedes
@@ -405,7 +405,7 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
    idx = NewIndex()
    for _, pack := range list {
        for _, blob := range pack.Blobs {
-           idx.store(blob.Type, blob.ID, &pack.ID, blob.Offset, blob.Length)
+           idx.store(blob.Type, blob.ID, pack.ID, blob.Offset, blob.Length)
        }
    }
 
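With these changes, Lookup reports a miss by returning the zero backend.ID together with an error, and callers compare the returned ID by value instead of dereferencing a pointer (the test updates below follow this pattern). A rough, self-contained sketch of that calling pattern, with a toy lookup function standing in for Index.Lookup and hypothetical data:

package main

import (
    "errors"
    "fmt"
)

// ID stands in for backend.ID; index maps blob IDs to pack IDs (toy data).
type ID [32]byte

var index = map[ID]ID{{1}: {9}}

// lookup mimics the new signature: pack ID by value, zero ID plus an error on a miss.
func lookup(blob ID) (ID, error) {
    if packID, ok := index[blob]; ok {
        return packID, nil
    }
    return ID{}, errors.New("id not found in index")
}

func main() {
    packID, err := lookup(ID{1})
    if err != nil {
        panic(err)
    }
    // No *packID dereference: the returned value is compared directly.
    fmt.Println("blob is in the expected pack:", packID == ID{9})
}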
@@ -41,7 +41,7 @@ func TestIndexSerialize(t *testing.T) {
        for j := 0; j < 20; j++ {
            id := randomID()
            length := uint(i*100 + j)
-           idx.Store(pack.Data, id, &packID, pos, length)
+           idx.Store(pack.Data, id, packID, pos, length)
 
            tests = append(tests, testEntry{
                id:     id,
@@ -72,7 +72,7 @@ func TestIndexSerialize(t *testing.T) {
        packID, tpe, offset, length, err := idx.Lookup(testBlob.id)
        OK(t, err)
 
-       Equals(t, testBlob.pack, *packID)
+       Equals(t, testBlob.pack, packID)
        Equals(t, testBlob.tpe, tpe)
        Equals(t, testBlob.offset, offset)
        Equals(t, testBlob.length, length)
@@ -80,7 +80,7 @@ func TestIndexSerialize(t *testing.T) {
        packID, tpe, offset, length, err = idx2.Lookup(testBlob.id)
        OK(t, err)
 
-       Equals(t, testBlob.pack, *packID)
+       Equals(t, testBlob.pack, packID)
        Equals(t, testBlob.tpe, tpe)
        Equals(t, testBlob.offset, offset)
        Equals(t, testBlob.length, length)
@@ -95,7 +95,7 @@ func TestIndexSerialize(t *testing.T) {
        for j := 0; j < 10; j++ {
            id := randomID()
            length := uint(i*100 + j)
-           idx.Store(pack.Data, id, &packID, pos, length)
+           idx.Store(pack.Data, id, packID, pos, length)
 
            newtests = append(newtests, testEntry{
                id:     id,
@@ -129,7 +129,7 @@ func TestIndexSerialize(t *testing.T) {
        packID, tpe, offset, length, err := idx3.Lookup(testBlob.id)
        OK(t, err)
 
-       Equals(t, testBlob.pack, *packID)
+       Equals(t, testBlob.pack, packID)
        Equals(t, testBlob.tpe, tpe)
        Equals(t, testBlob.offset, offset)
        Equals(t, testBlob.length, length)
@@ -148,7 +148,7 @@ func TestIndexSize(t *testing.T) {
        for j := 0; j < blobs; j++ {
            id := randomID()
            length := uint(i*100 + j)
-           idx.Store(pack.Data, id, &packID, pos, length)
+           idx.Store(pack.Data, id, packID, pos, length)
 
            pos += length
        }
@@ -250,7 +250,7 @@ func TestIndexUnserialize(t *testing.T) {
        packID, tpe, offset, length, err := idx.Lookup(test.id)
        OK(t, err)
 
-       Equals(t, test.packID, *packID)
+       Equals(t, test.packID, packID)
        Equals(t, test.tpe, tpe)
        Equals(t, test.offset, offset)
        Equals(t, test.length, length)
@@ -267,7 +267,7 @@ func TestIndexUnserializeOld(t *testing.T) {
        packID, tpe, offset, length, err := idx.Lookup(test.id)
        OK(t, err)
 
-       Equals(t, test.packID, *packID)
+       Equals(t, test.packID, packID)
        Equals(t, test.tpe, tpe)
        Equals(t, test.offset, offset)
        Equals(t, test.length, length)
@@ -313,7 +313,7 @@ func TestConvertIndex(t *testing.T) {
        packID, tpe, offset, length, err := oldIndex.Lookup(packedBlob.ID)
        OK(t, err)
 
-       Assert(t, *packID == packedBlob.PackID,
+       Assert(t, packID == packedBlob.PackID,
            "Check blob %v: pack ID %v != %v", packedBlob.ID, packID, packedBlob.PackID)
        Assert(t, tpe == packedBlob.Type,
            "Check blob %v: Type %v != %v", packedBlob.ID, tpe, packedBlob.Type)
@@ -33,7 +33,7 @@ func NewMasterIndex() *MasterIndex {
 }
 
 // Lookup queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Lookup(id backend.ID) (packID *backend.ID, tpe pack.BlobType, offset, length uint, err error) {
+func (mi *MasterIndex) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
    mi.idxMutex.RLock()
    defer mi.idxMutex.RUnlock()
 
@@ -50,7 +50,7 @@ func (mi *MasterIndex) Lookup(id backend.ID) (packID *backend.ID, tpe pack.BlobT
    }
 
    debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
-   return nil, pack.Data, 0, 0, fmt.Errorf("id %v not found in any index", id)
+   return backend.ID{}, pack.Data, 0, 0, fmt.Errorf("id %v not found in any index", id)
 }
 
 // LookupSize queries all known Indexes for the ID and returns the first match.
@@ -269,7 +269,7 @@ func (r *Repository) savePacker(p *pack.Packer) error {
    // update blobs in the index
    for _, b := range p.Blobs() {
        debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str())
-       r.idx.Current().Store(b.Type, b.ID, &sid, b.Offset, uint(b.Length))
+       r.idx.Current().Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
        r.idx.RemoveFromInFlight(b.ID)
    }
 