cache: refactor entries caching pattern for #1904 (#1924)

Author: remusb, 2017-12-18 14:55:37 +02:00, committed by GitHub
parent 29d34426bc
commit 6b5989712f
5 changed files with 559 additions and 310 deletions
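
The refactor replaces the shared global fixture (TestInternalInit building a single rootFs, TestInternalFinalise purging it) with per-test remotes: each test now constructs its own local/cache chain through the new helpers and tears it down with cleanupFs. A minimal sketch of the resulting pattern, assuming only the helpers introduced below; the test name and remote names are placeholders, not part of the commit:

func TestExamplePerTestFixture(t *testing.T) {
    // hypothetical test: "example-local" / "example-cache" are placeholder remote names
    rootFs, boltDb := newLocalCacheFs(t, "example-local", "example-cache", nil)
    // purge the remote and close its bolt DB once the test finishes
    defer cleanupFs(t, rootFs, boltDb)

    // the body then uses rootFs exactly as the old tests used the global one
    obj := writeObjectString(t, rootFs, "one", "content")
    require.NoError(t, obj.Remove())
}

Per-test flag overrides travel in the cfg map (e.g. map[string]string{"cache-writes": "true"}), as TestInternalCacheWrites does below.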


@@ -10,12 +10,20 @@ import (
"math/rand"
"path"
"path/filepath"
"runtime"
"strconv"
"sync"
"testing"
"time"
//"os"
"os/exec"
//"strings"
"github.com/ncw/rclone/cache"
//"github.com/ncw/rclone/cmd/mount"
//_ "github.com/ncw/rclone/cmd/cmount"
//"github.com/ncw/rclone/cmd/mountlib"
_ "github.com/ncw/rclone/drive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/local"
@@ -24,70 +32,21 @@ import (
)
var (
WrapRemote = flag.String("wrap-remote", "", "Remote to wrap")
RemoteName = flag.String("remote-name", "TestCacheInternal", "Root remote")
rootFs fs.Fs
boltDb *cache.Persistent
infoAge = time.Second * 10
chunkClean = time.Second
okDiff = time.Second * 9 // really big diff here but the build machines seem to be slow. need a different way for this
workers = 2
)
- // prepare the test server and return a function to tidy it up afterwards
- func TestInternalInit(t *testing.T) {
- var err error
- // delete the default path
- dbPath := filepath.Join(fs.CacheDir, "cache-backend", *RemoteName+".db")
- boltDb, err = cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})
- require.NoError(t, err)
- fstest.Initialise()
- if len(*WrapRemote) == 0 {
- *WrapRemote = "localInternal:/var/tmp/rclone-cache"
- fs.ConfigFileSet("localInternal", "type", "local")
- fs.ConfigFileSet("localInternal", "nounc", "true")
- }
- remoteExists := false
- for _, s := range fs.ConfigFileSections() {
- if s == *RemoteName {
- remoteExists = true
- }
- }
- if !remoteExists {
- fs.ConfigFileSet(*RemoteName, "type", "cache")
- fs.ConfigFileSet(*RemoteName, "remote", *WrapRemote)
- fs.ConfigFileSet(*RemoteName, "chunk_size", "1024")
- fs.ConfigFileSet(*RemoteName, "chunk_total_size", "2048")
- fs.ConfigFileSet(*RemoteName, "info_age", infoAge.String())
- }
- _ = flag.Set("cache-chunk-no-memory", "true")
- _ = flag.Set("cache-workers", strconv.Itoa(workers))
- _ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
- // Instantiate root
- rootFs, err = fs.NewFs(*RemoteName + ":")
- require.NoError(t, err)
- _ = rootFs.Features().Purge()
- require.NoError(t, err)
- err = rootFs.Mkdir("")
- require.NoError(t, err)
- // flush cache
- _, err = getCacheFs(rootFs)
- require.NoError(t, err)
- }
func TestInternalListRootAndInnerRemotes(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tilrair-local", "tilrair-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs
innerFolder := "inner"
err := rootFs.Mkdir(innerFolder)
require.NoError(t, err)
- innerFs, err := fs.NewFs(*RemoteName + ":" + innerFolder)
+ innerFs, err := fs.NewFs("tilrair-cache:" + innerFolder)
require.NoError(t, err)
obj := writeObjectString(t, innerFs, "one", "content")
@@ -105,14 +64,12 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
err = obj.Remove()
require.NoError(t, err)
err = innerFs.Features().Purge()
require.NoError(t, err)
innerFs = nil
}
func TestInternalObjWrapFsFound(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "tiowff-local", "tiowff-cache", nil)
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
wrappedFs := cfs.UnWrap()
@@ -144,14 +101,18 @@ func TestInternalObjWrapFsFound(t *testing.T) {
}
func TestInternalObjNotFound(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "tionf-local", "tionf-cache", nil)
+ defer cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject("404")
require.Error(t, err)
require.Nil(t, obj)
}
func TestInternalCachedWrittenContentMatches(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "ticwcm-local", "ticwcm-cache", nil)
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
@@ -176,7 +137,8 @@ func TestInternalCachedWrittenContentMatches(t *testing.T) {
}
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "ticucm-local", "ticucm-cache", nil)
+ defer cleanupFs(t, rootFs, boltDb)
// create some rand test data
testData1 := []byte(fstest.RandomString(100))
@@ -196,12 +158,13 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
}
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "tiwwcm-local", "tiwwcm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
- reset(t)
// create some rand test data
testData := make([]byte, (chunkSize*4 + chunkSize/2))
testSize, err := rand.Read(testData)
@@ -230,13 +193,13 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
t.Skip("FIXME disabled because it is unreliable")
rootFs, boltDb := newLocalCacheFs(t, "tilwcm-local", "tilwcm-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
- reset(t)
// create some rand test data
testData := make([]byte, (chunkSize*10 + chunkSize/2))
testSize, err := rand.Read(testData)
@@ -260,8 +223,53 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
}
}
func TestInternalLargeWrittenContentMatches2(t *testing.T) {
t.Skip("FIXME disabled because it is unreliable")
cryptFs, boltDb := newLocalCacheCryptFs(t, "tilwcm2-local", "tilwcm2-cache", "tilwcm2-crypt", true, nil)
defer cleanupFs(t, cryptFs, boltDb)
cfs, err := getCacheFs(cryptFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
fileSize := 87197196
readOffset := 87195648
// create some rand test data
testData := make([]byte, fileSize)
testSize, err := rand.Read(testData)
require.Equal(t, len(testData), testSize)
require.NoError(t, err)
// write the object
o := writeObjectBytes(t, cryptFs, "data.bin", testData)
require.Equal(t, o.Size(), int64(testSize))
o2, err := cryptFs.NewObject("data.bin")
require.NoError(t, err)
require.Equal(t, o2.Size(), o.Size())
// check data from in-file
reader, err := o2.Open(&fs.SeekOption{Offset: int64(readOffset)})
require.NoError(t, err)
rs, ok := reader.(io.Seeker)
require.True(t, ok)
checkOffset, err := rs.Seek(int64(readOffset), 0)
require.NoError(t, err)
require.Equal(t, checkOffset, int64(readOffset))
checkSample, err := ioutil.ReadAll(reader)
require.NoError(t, err)
_ = reader.Close()
require.Equal(t, len(checkSample), fileSize-readOffset)
for i := 0; i < fileSize-readOffset; i++ {
require.Equal(t, testData[readOffset+i], checkSample[i], "byte: %d (%d), chunk: %d", int64(i)%chunkSize, i, int64(i)/chunkSize)
}
}
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "tiwfcns-local", "tiwfcns-cache", nil)
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
@@ -279,33 +287,53 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
co2, err := rootFs.NewObject(o.Remote())
require.NoError(t, err)
- require.NotEqual(t, o.ModTime(), co.ModTime())
- require.NotEqual(t, o.ModTime(), co2.ModTime())
- require.Equal(t, co.ModTime(), co2.ModTime())
+ require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+ require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
+ require.Equal(t, co.ModTime().String(), co2.ModTime().String())
}
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
rootFs, boltDb := newLocalCacheFs(t, "ticsadcf-local", "ticsadcf-cache", nil)
defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
// create some rand test data
co := writeObjectRandomBytes(t, rootFs, (chunkSize*4 + chunkSize/2))
// update in the wrapped fs
o, err := cfs.UnWrap().NewObject(co.Remote())
require.NoError(t, err)
err = o.SetModTime(co.ModTime().Add(-1 * time.Hour))
require.NoError(t, err)
// get a new instance from the cache
co2, err := rootFs.NewObject(o.Remote())
require.NoError(t, err)
require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
require.NotEqual(t, o.ModTime().String(), co2.ModTime().String())
require.Equal(t, co.ModTime().String(), co2.ModTime().String())
cfs.DirCacheFlush() // flush the cache
l, err := cfs.UnWrap().List("")
require.NoError(t, err)
require.Len(t, l, 1)
- o := l[0]
+ o2 := l[0]
// get a new instance from the cache
- co, err := rootFs.NewObject(o.Remote())
+ co, err = rootFs.NewObject(o.Remote())
require.NoError(t, err)
- require.Equal(t, o.ModTime(), co.ModTime())
+ require.Equal(t, o2.ModTime().String(), co.ModTime().String())
}
func TestInternalCacheWrites(t *testing.T) {
- reset(t)
- _ = flag.Set("cache-writes", "true")
- rootFs, err := fs.NewFs(*RemoteName + ":")
- require.NoError(t, err)
+ rootFs, boltDb := newLocalCacheFs(t, "ticw-local", "ticw-cache", map[string]string{"cache-writes": "true"})
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
@@ -316,25 +344,21 @@ func TestInternalCacheWrites(t *testing.T) {
ts, err := boltDb.GetChunkTs(path.Join(rootFs.Root(), co.Remote()), 0)
require.NoError(t, err)
require.WithinDuration(t, expectedTs, ts, okDiff)
- // reset fs
- _ = flag.Set("cache-writes", "false")
- rootFs, err = fs.NewFs(*RemoteName + ":")
- require.NoError(t, err)
}
func TestInternalMaxChunkSizeRespected(t *testing.T) {
- reset(t)
- _ = flag.Set("cache-workers", "1")
- rootFs, err := fs.NewFs(*RemoteName + ":")
- require.NoError(t, err)
+ rootFs, boltDb := newLocalCacheFs(t, "timcsr-local", "timcsr-cache", map[string]string{"cache-workers": "1"})
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
chunkSize := cfs.ChunkSize()
totalChunks := 20
// create some rand test data
- o := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
+ obj := writeObjectRandomBytes(t, cfs, (int64(totalChunks-1)*chunkSize + chunkSize/2))
+ o, err := rootFs.NewObject(obj.Remote())
+ require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
@@ -353,15 +377,12 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
// the last 2 **must** be in the cache
require.True(t, boltDb.HasChunk(co, chunkSize*4))
require.True(t, boltDb.HasChunk(co, chunkSize*5))
- // reset fs
- _ = flag.Set("cache-workers", strconv.Itoa(workers))
- rootFs, err = fs.NewFs(*RemoteName + ":")
- require.NoError(t, err)
}
func TestInternalExpiredEntriesRemoved(t *testing.T) {
- reset(t)
+ rootFs, boltDb := newLocalCacheFs(t, "tieer-local", "tieer-cache", map[string]string{"info_age": "5s"})
+ defer cleanupFs(t, rootFs, boltDb)
cfs, err := getCacheFs(rootFs)
require.NoError(t, err)
@@ -371,26 +392,84 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
require.NoError(t, err)
_ = writeObjectString(t, cfs, "test/second", "second content")
- objOne, err := cfs.NewObject("one")
+ l, err := cfs.List("test")
require.NoError(t, err)
- require.Equal(t, int64(len([]byte("one content"))), objOne.Size())
+ require.Len(t, l, 1)
- waitTime := infoAge + time.Second*2
+ err = cfs.UnWrap().Mkdir("test/test2")
+ require.NoError(t, err)
+ l, err = cfs.List("test")
+ require.NoError(t, err)
+ require.Len(t, l, 1)
+ waitTime := time.Second * 5
t.Logf("Waiting %v seconds for cache to expire\n", waitTime)
- time.Sleep(infoAge)
+ time.Sleep(waitTime)
- _, err = cfs.List("test")
+ l, err = cfs.List("test")
require.NoError(t, err)
- time.Sleep(time.Second * 2)
- require.False(t, boltDb.HasEntry("one"))
+ require.Len(t, l, 2)
}
- func TestInternalFinalise(t *testing.T) {
- var err error
- err = rootFs.Features().Purge()
- require.NoError(t, err)
- }
// FIXME, enable this when mount is sorted out
//func TestInternalFilesMissingInMount1904(t *testing.T) {
// t.Skip("Not yet")
// if runtime.GOOS == "windows" {
// t.Skip("Not yet")
// }
// id := "tifm1904"
// rootFs, _ := newLocalCacheCryptFs(t, "test-local", "test-cache", "test-crypt", false,
// map[string]string{"chunk_size": "5M", "info_age": "1m", "chunk_total_size": "500M", "cache-writes": "true"})
// mntPoint := path.Join("/tmp", "tifm1904-mnt")
// testPoint := path.Join(mntPoint, id)
// checkOutput := "1 10 100 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 27 28 29 3 30 31 32 33 34 35 36 37 38 39 4 40 41 42 43 44 45 46 47 48 49 5 50 51 52 53 54 55 56 57 58 59 6 60 61 62 63 64 65 66 67 68 69 7 70 71 72 73 74 75 76 77 78 79 8 80 81 82 83 84 85 86 87 88 89 9 90 91 92 93 94 95 96 97 98 99 "
//
// _ = os.MkdirAll(mntPoint, os.ModePerm)
//
// list, err := rootFs.List("")
// require.NoError(t, err)
// found := false
// list.ForDir(func(d fs.Directory) {
// if strings.Contains(d.Remote(), id) {
// found = true
// }
// })
//
// if !found {
// t.Skip("Test folder '%v' doesn't exist", id)
// }
//
// mountFs(t, rootFs, mntPoint)
// defer unmountFs(t, mntPoint)
//
// for i := 1; i <= 2; i++ {
// out, err := exec.Command("ls", testPoint).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1))
// t.Logf("root path has all files")
// _ = writeObjectString(t, rootFs, path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"), "one content")
//
// for j := 1; j <= 100; j++ {
// out, err := exec.Command("ls", path.Join(testPoint, strconv.Itoa(j))).Output()
// require.NoError(t, err)
// require.Equal(t, checkOutput, strings.Replace(string(out), "\n", " ", -1), "'%v' doesn't match", j)
// }
// obj, err := rootFs.NewObject(path.Join(id, strconv.Itoa(i), strconv.Itoa(i), "one_file"))
// require.NoError(t, err)
// err = obj.Remove()
// require.NoError(t, err)
// t.Logf("folders contain all the files")
//
// out, err = exec.Command("date").Output()
// require.NoError(t, err)
// t.Logf("check #%v date: '%v'", i, strings.Replace(string(out), "\n", " ", -1))
//
// if i < 2 {
// time.Sleep(time.Second * 60)
// }
// }
//}
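// writeObjectRandomBytes creates an object under a random "<n>.bin" name and fills it with size random bytes.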
func writeObjectRandomBytes(t *testing.T, f fs.Fs, size int64) fs.Object {
remote := strconv.Itoa(rand.Int()) + ".bin"
@@ -454,32 +533,205 @@ func readDataFromObj(t *testing.T, co fs.Object, offset, end int64, useSeek bool
return checkSample
}
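// doStuff runs stuff() concurrently `times` times, each run sandwiched between two maxDuration/2 sleeps, and waits for all of them to finish.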
func doStuff(t *testing.T, times int, maxDuration time.Duration, stuff func()) {
var wg sync.WaitGroup
for i := 0; i < times; i++ {
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(maxDuration / 2)
stuff()
time.Sleep(maxDuration / 2)
}()
}
wg.Wait()
}
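// cleanupFs purges the remote used by a test and closes its bolt cache DB.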
func cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge()
require.NoError(t, err)
b.Close()
}
- func reset(t *testing.T)
- var err error
- err = rootFs.Features().Purge()
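// newLocalCacheCryptFs builds a local -> cache -> crypt remote chain for one test (skipping the test when the crypt remote is not configured), applies cfg overrides to the cache settings and flags, and returns the crypt fs plus the cache's bolt DB.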
func newLocalCacheCryptFs(t *testing.T, localRemote, cacheRemote, cryptRemote string, purge bool, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
boltDb, err := cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
cryptExists := false
for _, s := range fs.ConfigFileSections() {
if s == localRemote {
localExists = true
}
if s == cacheRemote {
cacheExists = true
}
if s == cryptRemote {
cryptExists = true
}
}
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
}
if !cryptExists {
t.Skipf("Skipping due to missing crypt remote: %v", cryptRemote)
}
if c, ok := cfg["cache-chunk-no-memory"]; ok {
_ = flag.Set("cache-chunk-no-memory", c)
} else {
_ = flag.Set("cache-chunk-no-memory", "true")
}
if c, ok := cfg["cache-workers"]; ok {
_ = flag.Set("cache-workers", c)
} else {
_ = flag.Set("cache-workers", strconv.Itoa(workers))
}
if c, ok := cfg["cache-chunk-clean-interval"]; ok {
_ = flag.Set("cache-chunk-clean-interval", c)
} else {
_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
}
if c, ok := cfg["cache-writes"]; ok {
_ = flag.Set("cache-writes", c)
} else {
_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
}
// Instantiate root
- rootFs, err = fs.NewFs(*RemoteName + ":")
+ f, err := fs.NewFs(cryptRemote + ":")
require.NoError(t, err)
- err = rootFs.Mkdir("")
+ if purge {
+ _ = f.Features().Purge()
+ require.NoError(t, err)
+ }
+ err = f.Mkdir("")
require.NoError(t, err)
return f, boltDb
}
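// newLocalCacheFs builds an isolated local remote wrapped by a cache remote for one test, applies cfg overrides to the cache settings and flags, and returns the cache fs plus its bolt DB.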
func newLocalCacheFs(t *testing.T, localRemote, cacheRemote string, cfg map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
dbPath := filepath.Join(fs.CacheDir, "cache-backend", cacheRemote+".db")
boltDb, err := cache.GetPersistent(dbPath, &cache.Features{PurgeDb: true})
require.NoError(t, err)
localExists := false
cacheExists := false
for _, s := range fs.ConfigFileSections() {
if s == localRemote {
localExists = true
}
if s == cacheRemote {
cacheExists = true
}
}
localRemoteWrap := ""
if !localExists {
localRemoteWrap = localRemote + ":/var/tmp/" + localRemote
fs.ConfigFileSet(localRemote, "type", "local")
fs.ConfigFileSet(localRemote, "nounc", "true")
}
if !cacheExists {
fs.ConfigFileSet(cacheRemote, "type", "cache")
fs.ConfigFileSet(cacheRemote, "remote", localRemoteWrap)
}
if c, ok := cfg["chunk_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_size", "1m")
}
if c, ok := cfg["chunk_total_size"]; ok {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", c)
} else {
fs.ConfigFileSet(cacheRemote, "chunk_total_size", "2m")
}
if c, ok := cfg["info_age"]; ok {
fs.ConfigFileSet(cacheRemote, "info_age", c)
} else {
fs.ConfigFileSet(cacheRemote, "info_age", infoAge.String())
}
if c, ok := cfg["cache-chunk-no-memory"]; ok {
_ = flag.Set("cache-chunk-no-memory", c)
} else {
_ = flag.Set("cache-chunk-no-memory", "true")
}
if c, ok := cfg["cache-workers"]; ok {
_ = flag.Set("cache-workers", c)
} else {
_ = flag.Set("cache-workers", strconv.Itoa(workers))
}
if c, ok := cfg["cache-chunk-clean-interval"]; ok {
_ = flag.Set("cache-chunk-clean-interval", c)
} else {
_ = flag.Set("cache-chunk-clean-interval", chunkClean.String())
}
if c, ok := cfg["cache-writes"]; ok {
_ = flag.Set("cache-writes", c)
} else {
_ = flag.Set("cache-writes", strconv.FormatBool(cache.DefCacheWrites))
}
// Instantiate root
f, err := fs.NewFs(cacheRemote + ":")
require.NoError(t, err)
_ = f.Features().Purge()
require.NoError(t, err)
err = f.Mkdir("")
require.NoError(t, err)
return f, boltDb
}
//func mountFs(t *testing.T, f fs.Fs, mntPoint string) {
// if runtime.GOOS == "windows" {
// t.Skip("Skipping test cause on windows")
// return
// }
//
// _ = flag.Set("debug-fuse", "false")
//
// go func() {
// mountlib.DebugFUSE = false
// mountlib.AllowOther = true
// mount.Mount(f, mntPoint)
// }()
//
// time.Sleep(time.Second * 3)
//}
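// unmountFs unmounts mntPoint with the platform unmount tool (fusermount -u on Linux, diskutil unmount on macOS); skipped on Windows.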
func unmountFs(t *testing.T, mntPoint string) {
var out []byte
var err error
if runtime.GOOS == "windows" {
t.Skip("Skipping test cause on windows")
return
} else if runtime.GOOS == "linux" {
out, err = exec.Command("fusermount", "-u", mntPoint).Output()
} else if runtime.GOOS == "darwin" {
out, err = exec.Command("diskutil", "unmount", mntPoint).Output()
}
t.Logf("Unmount output: %v", string(out))
require.NoError(t, err)
}
@@ -499,20 +751,6 @@ func getCacheFs(f fs.Fs) (*cache.Fs, error) {
return nil, fmt.Errorf("didn't found a cache fs")
}
- func getSourceFs(f fs.Fs) (fs.Fs, error) {
- if f.Features().UnWrap != nil {
- sfs := f.Features().UnWrap()
- _, ok := sfs.(*cache.Fs)
- if !ok {
- return sfs, nil
- }
- return getSourceFs(sfs)
- }
- return nil, fmt.Errorf("didn't found a source fs")
- }
var (
_ fs.Fs = (*cache.Fs)(nil)
_ fs.Fs = (*local.Fs)(nil)