cache: plex integration, refactor chunk storage and worker retries (#1899)
parent b05e472d2e
commit b48b537325

14 changed files with 781 additions and 652 deletions

cache/cache_internal_test.go (vendored): 224 lines changed
@@ -1,4 +1,4 @@
-// +build !plan9
+// +build !plan9,go1.7
 
 package cache_test
 
@@ -24,18 +24,14 @@ import (
 )
 
 var (
-	WrapRemote   = flag.String("wrap-remote", "", "Remote to wrap")
-	RemoteName   = flag.String("remote-name", "TestCacheInternal", "Root remote")
-	SkipTimeouts = flag.Bool("skip-waits", false, "To skip tests that have wait times")
-	rootFs       fs.Fs
-	boltDb       *cache.Persistent
-	metaAge      = time.Second * 30
-	infoAge      = time.Second * 10
-	chunkAge     = time.Second * 10
-	okDiff       = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
-	workers      = 2
-	warmupRate   = 3
-	warmupSec    = 10
+	WrapRemote = flag.String("wrap-remote", "", "Remote to wrap")
+	RemoteName = flag.String("remote-name", "TestCacheInternal", "Root remote")
+	rootFs     fs.Fs
+	boltDb     *cache.Persistent
+	infoAge    = time.Second * 10
+	chunkClean = time.Second
+	okDiff     = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
+	workers    = 2
 )
 
 // prepare the test server and return a function to tidy it up afterwards
@@ -44,7 +40,7 @@ func TestInternalInit(t *testing.T) {
 
 	// delete the default path
 	dbPath := filepath.Join(fs.CacheDir, "cache-backend", *RemoteName+".db")
-	boltDb, err = cache.GetPersistent(dbPath, true)
+	boltDb, err = cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 	fstest.Initialise()
 
@@ -65,17 +61,17 @@ func TestInternalInit(t *testing.T) {
 		fs.ConfigFileSet(*RemoteName, "type", "cache")
 		fs.ConfigFileSet(*RemoteName, "remote", *WrapRemote)
 		fs.ConfigFileSet(*RemoteName, "chunk_size", "1024")
-		fs.ConfigFileSet(*RemoteName, "chunk_age", chunkAge.String())
+		fs.ConfigFileSet(*RemoteName, "chunk_total_size", "2048")
 		fs.ConfigFileSet(*RemoteName, "info_age", infoAge.String())
 	}
 
-	_ = flag.Set("cache-warm-up-age", metaAge.String())
-	_ = flag.Set("cache-warm-up-rps", fmt.Sprintf("%v/%v", warmupRate, warmupSec))
 	_ = flag.Set("cache-chunk-no-memory", "true")
 	_ = flag.Set("cache-workers", strconv.Itoa(workers))
+	_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
 
 	// Instantiate root
 	rootFs, err = fs.NewFs(*RemoteName + ":")
 	require.NoError(t, err)
 	_ = rootFs.Features().Purge()
+	require.NoError(t, err)
 	err = rootFs.Mkdir("")
@@ -305,143 +301,6 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	require.Equal(t, o.ModTime(), co.ModTime())
 }
 
-func TestInternalWarmUp(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
-	reset(t)
-	cfs, err := getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	o1 := writeObjectRandomBytes(t, rootFs, (chunkSize * 3))
-	o2 := writeObjectRandomBytes(t, rootFs, (chunkSize * 4))
-	o3 := writeObjectRandomBytes(t, rootFs, (chunkSize * 6))
-
-	_ = readDataFromObj(t, o1, 0, chunkSize, false)
-	_ = readDataFromObj(t, o2, 0, chunkSize, false)
-
-	// validate a fresh chunk
-	expectedExpiry := time.Now().Add(chunkAge)
-	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), o2.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we entered a warm up state
-	_ = readDataFromObj(t, o3, 0, chunkSize, false)
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(metaAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we cooled down and exit warm up
-	// we wait for the cache to expire
-	t.Logf("Waiting 10 seconds for warm up to expire\n")
-	time.Sleep(time.Second * 10)
-
-	_ = readDataFromObj(t, o3, chunkSize, chunkSize*2, false)
-	require.False(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), chunkSize)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-}
-
-func TestInternalWarmUpInFlight(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
-	reset(t)
-	cfs, err := getCacheFs(rootFs)
-	require.NoError(t, err)
-	chunkSize := cfs.ChunkSize()
-
-	o1 := writeObjectRandomBytes(t, rootFs, (chunkSize * 3))
-	o2 := writeObjectRandomBytes(t, rootFs, (chunkSize * 4))
-	o3 := writeObjectRandomBytes(t, rootFs, (chunkSize * int64(workers) * int64(2)))
-
-	_ = readDataFromObj(t, o1, 0, chunkSize, false)
-	_ = readDataFromObj(t, o2, 0, chunkSize, false)
-	require.False(t, cfs.InWarmUp())
-
-	// validate that we entered a warm up state
-	_ = readDataFromObj(t, o3, 0, chunkSize, false)
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry := time.Now().Add(metaAge)
-	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), 0)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	checkSample := make([]byte, chunkSize)
-	reader, err := o3.Open(&fs.SeekOption{Offset: 0})
-	require.NoError(t, err)
-	rs, ok := reader.(*cache.Handle)
-	require.True(t, ok)
-
-	for i := 0; i <= workers; i++ {
-		_, _ = rs.Seek(int64(i)*chunkSize, 0)
-		_, err = io.ReadFull(reader, checkSample)
-		require.NoError(t, err)
-
-		if i == workers {
-			require.False(t, rs.InWarmUp(), "iteration %v", i)
-		} else {
-			require.True(t, rs.InWarmUp(), "iteration %v", i)
-		}
-	}
-	_ = reader.Close()
-	require.True(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o3.Remote()), chunkSize*int64(workers+1))
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-
-	// validate that we cooled down and exit warm up
-	// we wait for the cache to expire
-	t.Logf("Waiting 10 seconds for warm up to expire\n")
-	time.Sleep(time.Second * 10)
-
-	_ = readDataFromObj(t, o2, chunkSize, chunkSize*2, false)
-	require.False(t, cfs.InWarmUp())
-	expectedExpiry = time.Now().Add(chunkAge)
-	ts, err = boltDb.GetChunkTs(path.Join(rootFs.Root(), o2.Remote()), chunkSize)
-	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
-}
-
-// TODO: this is bugged
-//func TestInternalRateLimiter(t *testing.T) {
-//	reset(t)
-//	_ = flag.Set("cache-rps", "2")
-//	rootFs, err := fs.NewFs(*RemoteName + ":")
-//	require.NoError(t, err)
-//	defer func() {
-//		_ = flag.Set("cache-rps", "-1")
-//		rootFs, err = fs.NewFs(*RemoteName + ":")
-//		require.NoError(t, err)
-//	}()
-//	cfs, err := getCacheFs(rootFs)
-//	require.NoError(t, err)
-//	chunkSize := cfs.ChunkSize()
-//
-//	// create some rand test data
-//	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
-//
-//	doStuff(t, 5, time.Second, func() {
-//		r, err := co.Open(&fs.SeekOption{Offset: chunkSize + 1})
-//		require.NoError(t, err)
-//
-//		buf := make([]byte, chunkSize)
-//		totalRead, err := io.ReadFull(r, buf)
-//		require.NoError(t, err)
-//		require.Equal(t, len(buf), totalRead)
-//		_ = r.Close()
-//	})
-//}
-
 func TestInternalCacheWrites(t *testing.T) {
 	reset(t)
 	_ = flag.Set("cache-writes", "true")
@@ -453,10 +312,10 @@ func TestInternalCacheWrites(t *testing.T) {
 
 	// create some rand test data
 	co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
-	expectedExpiry := time.Now().Add(metaAge)
+	expectedTs := time.Now()
 	ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
 	require.NoError(t, err)
-	require.WithinDuration(t, expectedExpiry, ts, okDiff)
+	require.WithinDuration(t, expectedTs, ts, okDiff)
 
 	// reset fs
 	_ = flag.Set("cache-writes", "false")
@@ -464,43 +323,44 @@ func TestInternalCacheWrites(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestInternalExpiredChunkRemoved(t *testing.T) {
-	t.Skip("FIXME disabled because it is unreliable")
-
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
+func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	reset(t)
+	_ = flag.Set("cache-workers", "1")
+	rootFs, err := fs.NewFs(*RemoteName + ":")
+	require.NoError(t, err)
 	cfs, err := getCacheFs(rootFs)
 	require.NoError(t, err)
 	chunkSize := cfs.ChunkSize()
 	totalChunks := 20
 
 	// create some rand test data
-	co := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
-	remote := co.Remote()
-	// cache all the chunks
-	_ = readDataFromObj(t, co, 0, co.Size(), false)
-
-	// we wait for the cache to expire
-	t.Logf("Waiting %v for cache to expire\n", chunkAge.String())
-	time.Sleep(chunkAge)
-	_, _ = cfs.List("")
-	time.Sleep(time.Second * 2)
-
-	o, err := cfs.NewObject(remote)
-	require.NoError(t, err)
-	co2, ok := o.(*cache.Object)
+	o := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
+	co, ok := o.(*cache.Object)
 	require.True(t, ok)
-	require.False(t, boltDb.HasChunk(co2, 0))
+
+	for i := 0; i < 4; i++ { // read first 4
+		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+	}
+	cfs.CleanUpCache(true)
+	// the last 2 **must** be in the cache
+	require.True(t, boltDb.HasChunk(co, chunkSize*2))
+	require.True(t, boltDb.HasChunk(co, chunkSize*3))
+
+	for i := 4; i < 6; i++ { // read next 2
+		_ = readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
+	}
+	cfs.CleanUpCache(true)
+	// the last 2 **must** be in the cache
+	require.True(t, boltDb.HasChunk(co, chunkSize*4))
+	require.True(t, boltDb.HasChunk(co, chunkSize*5))
+
+	// reset fs
+	_ = flag.Set("cache-workers", strconv.Itoa(workers))
+	rootFs, err = fs.NewFs(*RemoteName + ":")
+	require.NoError(t, err)
 }
 
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
-	if *SkipTimeouts {
-		t.Skip("--skip-waits set")
-	}
-
 	reset(t)
 	cfs, err := getCacheFs(rootFs)
 	require.NoError(t, err)