From 979bb07c86f9680466e9421da116aaf3af94c1da Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood <nick@craig-wood.com>
Date: Tue, 11 Aug 2020 15:09:00 +0100
Subject: [PATCH] filefabric: Implement the Enterprise File Fabric backend

Missing features
- M-Stream support
- Oauth-like flow (soon being changed to oauth)
---
 README.md                             |    1 +
 backend/all/all.go                    |    1 +
 backend/filefabric/api/types.go       |  391 +++++++
 backend/filefabric/filefabric.go      | 1347 +++++++++++++++++++++++++
 backend/filefabric/filefabric_test.go |   17 +
 bin/make_manual.py                    |    1 +
 docs/content/_index.md                |    1 +
 docs/content/docs.md                  |    1 +
 docs/content/filefabric.md            |  260 +++++
 docs/content/overview.md              |    2 +
 docs/layouts/chrome/navbar.html       |    1 +
 fstest/test_all/config.yaml           |    6 +
 12 files changed, 2029 insertions(+)
 create mode 100644 backend/filefabric/api/types.go
 create mode 100644 backend/filefabric/filefabric.go
 create mode 100644 backend/filefabric/filefabric_test.go
 create mode 100644 docs/content/filefabric.md

diff --git a/README.md b/README.md
index 8cd853701..75d7a94b0 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
   * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
   * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
   * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
+  * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
   * FTP [:page_facing_up:](https://rclone.org/ftp/)
   * GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
   * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
diff --git a/backend/all/all.go b/backend/all/all.go
index 17fb6ddf2..3ee70f4b0 100644
--- a/backend/all/all.go
+++ b/backend/all/all.go
@@ -14,6 +14,7 @@ import (
 	_ "github.com/rclone/rclone/backend/drive"
 	_ "github.com/rclone/rclone/backend/dropbox"
 	_ "github.com/rclone/rclone/backend/fichier"
+	_ "github.com/rclone/rclone/backend/filefabric"
 	_ "github.com/rclone/rclone/backend/ftp"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
 	_ "github.com/rclone/rclone/backend/googlephotos"
diff --git a/backend/filefabric/api/types.go b/backend/filefabric/api/types.go
new file mode 100644
index 000000000..e3b80e6c0
--- /dev/null
+++ b/backend/filefabric/api/types.go
@@ -0,0 +1,391 @@
+// Package api has type definitions for filefabric
+//
+// Converted from the API responses with help from https://mholt.github.io/json-to-go/
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+const (
+	// TimeFormat for parameters (UTC)
+	timeFormatParameters = `2006-01-02 15:04:05`
+	// "2020-08-11 10:10:04" for JSON parsing
+	timeFormatJSON = `"` + timeFormatParameters + `"`
+)
+
+// Time represents date and time information for the
+// filefabric API
+type Time time.Time
+
+// MarshalJSON turns a Time into JSON (in UTC)
+func (t *Time) MarshalJSON() (out []byte, err error) {
+	timeString := (*time.Time)(t).UTC().Format(timeFormatJSON)
+	return []byte(timeString), nil
+}
+
+var zeroTime = []byte(`"0000-00-00 00:00:00"`)
+
+// UnmarshalJSON turns JSON into a Time (in UTC)
+func (t *Time) UnmarshalJSON(data []byte) error {
+	// Set a Zero time.Time if we receive a zero time input
+	if bytes.Equal(data, zeroTime) {
+		*t = Time(time.Time{})
+		return nil
+	}
+	newT, err := time.Parse(timeFormatJSON, string(data))
+	if err != nil {
+		return err
+	}
+	*t = Time(newT)
+	return nil
+}
+
+// String turns a Time into a string in UTC suitable for the API
+// parameters
+func (t Time) String() string {
+	return time.Time(t).UTC().Format(timeFormatParameters)
+}
+
+// Status is returned in all status responses
+type Status struct {
+	Code    string `json:"status"`
+	Message string `json:"statusmessage"`
+	TaskID  string `json:"taskid"`
+	// Warning string `json:"warning"` // obsolete
+}
+
+// Status satisfies the error interface
+func (e *Status) Error() string {
+	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
+}
+
+// OK returns true if the status is all good
+func (e *Status) OK() bool {
+	return e.Code == "ok"
+}
+
+// GetCode returns the status code if any
+func (e *Status) GetCode() string {
+	return e.Code
+}
+
+// OKError defines an interface for items which can be OK or be an error
+type OKError interface {
+	error
+	OK() bool
+	GetCode() string
+}
+
+// Check Status satisfies the OKError interface
+var _ OKError = (*Status)(nil)
+
+// EmptyResponse is a response which just returns the error condition
+type EmptyResponse struct {
+	Status
+}
+
+// GetTokenByAuthTokenResponse is the response to getTokenByAuthToken
+type GetTokenByAuthTokenResponse struct {
+	Status
+	Token              string `json:"token"`
+	UserID             string `json:"userid"`
+	AllowLoginRemember string `json:"allowloginremember"`
+	LastLogin          Time   `json:"lastlogin"`
+	AutoLoginCode      string `json:"autologincode"`
+}
+
+// ApplianceInfo is the response to getApplianceInfo
+type ApplianceInfo struct {
+	Status
+	Sitetitle            string `json:"sitetitle"`
+	OauthLoginSupport    string `json:"oauthloginsupport"`
+	IsAppliance          string `json:"isappliance"`
+	SoftwareVersion      string `json:"softwareversion"`
+	SoftwareVersionLabel string `json:"softwareversionlabel"`
+}
+
+// GetFolderContentsResponse is returned from getFolderContents
+type GetFolderContentsResponse struct {
+	Status
+	Total  int    `json:"total,string"`
+	Items  []Item `json:"filelist"`
+	Folder Item   `json:"folder"`
+	From   int    `json:"from,string"`
+	//Count         int    `json:"count"`
+	Pid           string `json:"pid"`
+	RefreshResult Status `json:"refreshresult"`
+	// Curfolder         Item              `json:"curfolder"` - sometimes returned as "ROOT"?
+	Parents           []Item            `json:"parents"`
+	CustomPermissions CustomPermissions `json:"custompermissions"`
+}
+
+// ItemType determines whether an item is a file or a folder
+type ItemType uint8
+
+// Types of things in Item
+const (
+	ItemTypeFile   ItemType = 0
+	ItemTypeFolder ItemType = 1
+)
+
+// Item is a File or a Folder
+type Item struct {
+	ID  string `json:"fi_id"`
+	PID string `json:"fi_pid"`
+	// UID             string   `json:"fi_uid"`
+	Name string `json:"fi_name"`
+	// S3Name          string   `json:"fi_s3name"`
+	// Extension       string   `json:"fi_extension"`
+	// Description     string   `json:"fi_description"`
+	Type ItemType `json:"fi_type,string"`
+	// Created         Time     `json:"fi_created"`
+	Size        int64  `json:"fi_size,string"`
+	ContentType string `json:"fi_contenttype"`
+	// Tags            string   `json:"fi_tags"`
+	// MainCode        string   `json:"fi_maincode"`
+	// Public          int      `json:"fi_public,string"`
+	// Provider        string   `json:"fi_provider"`
+	// ProviderFolder  string   `json:"fi_providerfolder"` // folder
+	// Encrypted       int      `json:"fi_encrypted,string"`
+	// StructType      string   `json:"fi_structtype"`
+	// Bname           string   `json:"fi_bname"` // folder
+	// OrgID           string   `json:"fi_orgid"`
+	// Favorite        int      `json:"fi_favorite,string"`
+	// IspartOf        string   `json:"fi_ispartof"` // folder
+	Modified Time `json:"fi_modified"`
+	// LastAccessed    Time     `json:"fi_lastaccessed"`
+	// Hits            int64    `json:"fi_hits,string"`
+	// IP              string   `json:"fi_ip"` // folder
+	// BigDescription  string   `json:"fi_bigdescription"`
+	LocalTime Time `json:"fi_localtime"`
+	// OrgfolderID     string   `json:"fi_orgfolderid"`
+	// StorageIP       string   `json:"fi_storageip"` // folder
+	// RemoteTime      Time     `json:"fi_remotetime"`
+	// ProviderOptions string   `json:"fi_provideroptions"`
+	// Access          string   `json:"fi_access"`
+	// Hidden          string   `json:"fi_hidden"` // folder
+	// VersionOf       string   `json:"fi_versionof"`
+	Trash bool `json:"trash"`
+	// Isbucket        string   `json:"isbucket"` // filelist
+	SubFolders int64 `json:"subfolders"` // folder
+}
+
+// ItemFields is a | separated list of fields in Item
+var ItemFields = mustFields(Item{})
+
+// fields returns the JSON fields in use by opt as a | separated
+// string.
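+//
+// For example, with the Item struct above, fields(Item{}) returns
+// "fi_id|fi_pid|fi_name|fi_type|fi_size|fi_contenttype|fi_modified|fi_localtime|trash|subfolders".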
+func fields(opt interface{}) (pipeTags string, err error) {
+	var tags []string
+	def := reflect.ValueOf(opt)
+	defType := def.Type()
+	for i := 0; i < def.NumField(); i++ {
+		field := defType.Field(i)
+		tag, ok := field.Tag.Lookup("json")
+		if !ok {
+			continue
+		}
+		if comma := strings.IndexRune(tag, ','); comma >= 0 {
+			tag = tag[:comma]
+		}
+		if tag == "" {
+			continue
+		}
+		tags = append(tags, tag)
+	}
+	return strings.Join(tags, "|"), nil
+}
+
+// mustFields returns the JSON fields in use by opt as a | separated
+// string. It panics on failure.
+func mustFields(opt interface{}) string {
+	tags, err := fields(opt)
+	if err != nil {
+		panic(err)
+	}
+	return tags
+}
+
+// CustomPermissions is returned as part of GetFolderContentsResponse
+type CustomPermissions struct {
+	Upload            string `json:"upload"`
+	CreateSubFolder   string `json:"createsubfolder"`
+	Rename            string `json:"rename"`
+	Delete            string `json:"delete"`
+	Move              string `json:"move"`
+	ManagePermissions string `json:"managepermissions"`
+	ListOnly          string `json:"listonly"`
+	VisibleInTrash    string `json:"visibleintrash"`
+}
+
+// DoCreateNewFolderResponse is the response from doCreateNewFolder
+type DoCreateNewFolderResponse struct {
+	Status
+	Item Item `json:"file"`
+}
+
+// DoInitUploadResponse is the response from doInitUpload
+type DoInitUploadResponse struct {
+	Status
+	ProviderID          string `json:"providerid"`
+	UploadCode          string `json:"uploadcode"`
+	FileType            string `json:"filetype"`
+	DirectUploadSupport string `json:"directuploadsupport"`
+	ResumeAllowed       string `json:"resumeallowed"`
+}
+
+// UploaderResponse is returned from /cgi-bin/uploader/uploader1.cgi
+//
+// Sometimes the response is returned as XML and sometimes as JSON
+type UploaderResponse struct {
+	FileSize int64  `xml:"filesize" json:"filesize,string"`
+	MD5      string `xml:"md5" json:"md5"`
+	Success  string `xml:"success" json:"success"`
+}
+
+// UploadStatus is returned from getUploadStatus
+type UploadStatus struct {
+	Status
+	UploadCode     string `json:"uploadcode"`
+	Metafile       string `json:"metafile"`
+	Percent        int    `json:"percent,string"`
+	Uploaded       int64  `json:"uploaded,string"`
+	Size           int64  `json:"size,string"`
+	Filename       string `json:"filename"`
+	Nofile         string `json:"nofile"`
+	Completed      string `json:"completed"`
+	Completsuccess string `json:"completsuccess"`
+	Completerror   string `json:"completerror"`
+}
+
+// DoCompleteUploadResponse is the response to doCompleteUpload
+type DoCompleteUploadResponse struct {
+	Status
+	UploadedSize int64  `json:"uploadedsize,string"`
+	StorageIP    string `json:"storageip"`
+	UploadedName string `json:"uploadedname"`
+	// Versioned    []interface{} `json:"versioned"`
+	// VersionedID  int           `json:"versionedid"`
+	// Comment      interface{}           `json:"comment"`
+	File Item `json:"file"`
+	// UsSize       string        `json:"us_size"`
+	// PaSize       string        `json:"pa_size"`
+	// SpaceInfo    SpaceInfo     `json:"spaceinfo"`
+}
+
+// Providers is returned as part of UploadResponse
+type Providers struct {
+	Max     string `json:"max"`
+	Used    string `json:"used"`
+	ID      string `json:"id"`
+	Private string `json:"private"`
+	Limit   string `json:"limit"`
+	Percent int    `json:"percent"`
+}
+
+// Total is returned as part of UploadResponse
+type Total struct {
+	Max        string `json:"max"`
+	Used       string `json:"used"`
+	ID         string `json:"id"`
+	Priused    string `json:"priused"`
+	Primax     string `json:"primax"`
+	Limit      string `json:"limit"`
+	Percent    int    `json:"percent"`
+	Pripercent int    `json:"pripercent"`
+}
+
+// UploadResponse is returned as part of SpaceInfo
+type UploadResponse struct {
+	Providers []Providers `json:"providers"`
+	Total     Total       `json:"total"`
+}
+
+// SpaceInfo is returned as part of DoCompleteUploadResponse
+type SpaceInfo struct {
+	Response UploadResponse `json:"response"`
+	Status   string         `json:"status"`
+}
+
+// DeleteResponse is returned from doDeleteFile
+type DeleteResponse struct {
+	Status
+	Deleted        []string      `json:"deleted"`
+	Errors         []interface{} `json:"errors"`
+	ID             string        `json:"fi_id"`
+	BackgroundTask int           `json:"backgroundtask"`
+	UsSize         string        `json:"us_size"`
+	PaSize         string        `json:"pa_size"`
+	//SpaceInfo      SpaceInfo     `json:"spaceinfo"`
+}
+
+// FileResponse is returned from doRenameFile
+type FileResponse struct {
+	Status
+	Item   Item   `json:"file"`
+	Exists string `json:"exists"`
+}
+
+// MoveFilesResponse is returned from doMoveFiles
+type MoveFilesResponse struct {
+	Status
+	Filesleft         string   `json:"filesleft"`
+	Addedtobackground string   `json:"addedtobackground"`
+	Moved             string   `json:"moved"`
+	Item              Item     `json:"file"`
+	IDs               []string `json:"fi_ids"`
+	Length            int      `json:"length"`
+	DirID             string   `json:"dir_id"`
+	MovedObjects      []Item   `json:"movedobjects"`
+	// FolderTasks       []interface{}  `json:"foldertasks"`
+}
+
+// TasksResponse is the response to getUserBackgroundTasks
+type TasksResponse struct {
+	Status
+	Tasks []Task `json:"tasks"`
+	Total string `json:"total"`
+}
+
+// BtData is part of TasksResponse
+type BtData struct {
+	Callback string `json:"callback"`
+}
+
+// Task describes a task returned in TasksResponse
+type Task struct {
+	BtID             string `json:"bt_id"`
+	UsID             string `json:"us_id"`
+	BtType           string `json:"bt_type"`
+	BtData           BtData `json:"bt_data"`
+	BtStatustext     string `json:"bt_statustext"`
+	BtStatusdata     string `json:"bt_statusdata"`
+	BtMessage        string `json:"bt_message"`
+	BtProcent        string `json:"bt_procent"`
+	BtAdded          string `json:"bt_added"`
+	BtStatus         string `json:"bt_status"`
+	BtCompleted      string `json:"bt_completed"`
+	BtTitle          string `json:"bt_title"`
+	BtCredentials    string `json:"bt_credentials"`
+	BtHidden         string `json:"bt_hidden"`
+	BtAutoremove     string `json:"bt_autoremove"`
+	BtDevsite        string `json:"bt_devsite"`
+	BtPriority       string `json:"bt_priority"`
+	BtReport         string `json:"bt_report"`
+	BtSitemarker     string `json:"bt_sitemarker"`
+	BtExecuteafter   string `json:"bt_executeafter"`
+	BtCompletestatus string `json:"bt_completestatus"`
+	BtSubtype        string `json:"bt_subtype"`
+	BtCanceled       string `json:"bt_canceled"`
+	Callback         string `json:"callback"`
+	CanBeCanceled    bool   `json:"canbecanceled"`
+	CanBeRestarted   bool   `json:"canberestarted"`
+	Type             string `json:"type"`
+	Status           string `json:"status"`
+	Settings         string `json:"settings"`
+}
diff --git a/backend/filefabric/filefabric.go b/backend/filefabric/filefabric.go
new file mode 100644
index 000000000..877b6d07c
--- /dev/null
+++ b/backend/filefabric/filefabric.go
@@ -0,0 +1,1347 @@
+// Package filefabric provides an interface to Storage Made Easy's
+// Enterprise File Fabric storage system.
+package filefabric
+
+/*
+Docs: https://product-demo.smestorage.com/?p=apidoc
+
+Missing features:
+- M-Stream support
+- Oauth-like flow (soon being changed to oauth)
+
+// TestFileFabric
+maxFileLength = 14094
+*/
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rclone/rclone/lib/atexit"
+	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/random"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/filefabric/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/log"
+	"github.com/rclone/rclone/lib/dircache"
+	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+const (
+	minSleep      = 20 * time.Millisecond
+	maxSleep      = 10 * time.Second
+	decayConstant = 2                // bigger for slower decay, exponential
+	listChunks    = 1000             // chunk size to read directory listings
+	tokenLifeTime = 55 * time.Minute // 1 hour minus a bit of leeway
+	defaultRootID = ""               // default root ID
+	emptyMimeType = "application/vnd.rclone.empty.file"
+)
+
+// Register with Fs
+func init() {
+	fs.Register(&fs.RegInfo{
+		Name:        "filefabric",
+		Description: "Enterprise File Fabric",
+		NewFs:       NewFs,
+		Options: []fs.Option{{
+			Name:     "url",
+			Help:     "URL of the Enterprise File Fabric to connect to",
+			Required: true,
+			Examples: []fs.OptionExample{{
+				Value: "https://storagemadeeasy.com",
+				Help:  "Storage Made Easy US",
+			}, {
+				Value: "https://eu.storagemadeeasy.com",
+				Help:  "Storage Made Easy EU",
+			}, {
+				Value: "https://yourfabric.smestorage.com",
+				Help:  "Connect to your Enterprise File Fabric",
+			}},
+		}, {
+			Name: "root_folder_id",
+			Help: `ID of the root folder
+Leave blank normally.
+
+Fill in to make rclone start with directory of a given ID.
+`,
+		}, {
+			Name: "permanent_token",
+			Help: `Permanent Authentication Token
+
+A Permanent Authentication Token can be created in the Enterprise File
+Fabric on the user's Dashboard under Security; there is an entry there
+called "My Authentication Tokens". Click the Manage button to create
+one.
+
+These tokens are normally valid for several years.
+
+For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
+`,
+		}, {
+			Name: "token",
+			Help: `Session Token
+
+This is a session token which rclone caches in the config file. It is
+usually valid for 1 hour.
+
+Don't set this value - rclone will set it automatically.
+`,
+			Advanced: true,
+		}, {
+			Name: "token_expiry",
+			Help: `Token expiry time
+
+Don't set this value - rclone will set it automatically.
+`,
+			Advanced: true,
+		}, {
+			Name: "version",
+			Help: `Version read from the file fabric
+
+Don't set this value - rclone will set it automatically.
+`,
+			Advanced: true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			Default: (encoder.Display |
+				encoder.EncodeInvalidUtf8),
+		}},
+	})
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+	URL            string               `config:"url"`
+	RootFolderID   string               `config:"root_folder_id"`
+	PermanentToken string               `config:"permanent_token"`
+	Token          string               `config:"token"`
+	TokenExpiry    string               `config:"token_expiry"`
+	Version        string               `config:"version"`
+	Enc            encoder.MultiEncoder `config:"encoding"`
+}
+
+// Fs represents a remote filefabric
+type Fs struct {
+	name            string             // name of this remote
+	root            string             // the path we are working on
+	opt             Options            // parsed options
+	features        *fs.Features       // optional features
+	m               configmap.Mapper   // to save config
+	srv             *rest.Client       // the connection to the file fabric server
+	dirCache        *dircache.DirCache // Map of directory path to directory id
+	pacer           *fs.Pacer          // pacer for API calls
+	tokenMu         sync.Mutex         // hold when reading the token
+	token           string             // current access token
+	tokenExpiry     time.Time          // time the current token expires
+	tokenExpired    int32              // read and written with atomic
+	canCopyWithName bool               // set if detected that can use fi_name in copy
+	precision       time.Duration      // precision reported
+}
+
+// Object describes a filefabric object
+//
+// Will definitely have info but maybe not meta
+type Object struct {
+	fs          *Fs       // what this object is part of
+	remote      string    // The remote path
+	hasMetaData bool      // whether info below has been set
+	size        int64     // size of the object
+	modTime     time.Time // modification time of the object
+	id          string    // ID of the object
+	contentType string    // ContentType of object
+}
+
+// ------------------------------------------------------------
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+	return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+	return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+	return fmt.Sprintf("filefabric root '%s'", f.root)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+	return f.features
+}
+
+// parsePath parses a filefabric 'url'
+func parsePath(path string) (root string) {
+	root = strings.Trim(path, "/")
+	return
+}
+
+// retryErrorCodes is a slice of error codes that we will retry
+var retryErrorCodes = []int{
+	429, // Too Many Requests.
+	500, // Internal Server Error
+	502, // Bad Gateway
+	503, // Service Unavailable
+	504, // Gateway Timeout
+	509, // Bandwidth Limit Exceeded
+}
+
+// Retry any of these
+var retryStatusCodes = []struct {
+	code  string
+	sleep time.Duration
+}{
+	{
+		// Can not create folder now. We are not able to complete the
+		// requested operation with such name. We are processing
+		// delete in that folder. Please try again later or use
+		// another name. (error_background)
+		code:  "error_background",
+		sleep: 6 * time.Second,
+	},
+}
+
+// shouldRetry returns a boolean as to whether this resp and err
+// deserve to be retried.  It returns the err as a convenience
+func (f *Fs) shouldRetry(resp *http.Response, err error, status api.OKError) (bool, error) {
+	if err != nil {
+		return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+	}
+	if status != nil && !status.OK() {
+		err = status // return the error from the RPC
+		code := status.GetCode()
+		if code == "login_token_expired" {
+			atomic.AddInt32(&f.tokenExpired, 1)
+		} else {
+			for _, retryCode := range retryStatusCodes {
+				if code == retryCode.code {
+					if retryCode.sleep > 0 {
+						// make this thread only sleep extra time
+						fs.Debugf(f, "Sleeping for %v to wait for %q error to clear", retryCode.sleep, retryCode.code)
+						time.Sleep(retryCode.sleep)
+					}
+					return true, err
+				}
+			}
+		}
+	}
+	return false, err
+}
+
+// readMetaDataForPath reads the metadata from the path
+func (f *Fs) readMetaDataForPath(ctx context.Context, rootID string, path string) (info *api.Item, err error) {
+	var resp api.FileResponse
+	_, err = f.rpc(ctx, "checkPathExists", params{
+		"path": f.opt.Enc.FromStandardPath(path),
+		"pid":  rootID,
+	}, &resp, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to check path exists")
+	}
+	if resp.Exists != "y" {
+		return nil, fs.ErrorObjectNotFound
+	}
+	return &resp.Item, nil
+
+	/*
+		// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
+		leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
+		if err != nil {
+			if err == fs.ErrorDirNotFound {
+				return nil, fs.ErrorObjectNotFound
+			}
+			return nil, err
+		}
+
+		found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
+			if item.Name == leaf {
+				info = item
+				return true
+			}
+			return false
+		})
+		if err != nil {
+			return nil, err
+		}
+		if !found {
+			return nil, fs.ErrorObjectNotFound
+		}
+		return info, nil
+	*/
+}
+
+// Get the appliance info so we can set Version
+func (f *Fs) getApplianceInfo(ctx context.Context) error {
+	var applianceInfo api.ApplianceInfo
+	_, err := f.rpc(ctx, "getApplianceInfo", params{
+		"token": "*",
+	}, &applianceInfo, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to read appliance version")
+	}
+	f.opt.Version = applianceInfo.SoftwareVersionLabel
+	f.m.Set("version", f.opt.Version)
+	return nil
+}
+
+// Gets the token or gets a new one if necessary
+func (f *Fs) getToken(ctx context.Context) (token string, err error) {
+	f.tokenMu.Lock()
+	var refreshed = false
+	defer func() {
+		if refreshed {
+			atomic.StoreInt32(&f.tokenExpired, 0)
+		}
+		f.tokenMu.Unlock()
+	}()
+
+	expired := atomic.LoadInt32(&f.tokenExpired) != 0
+	if expired {
+		fs.Debugf(f, "Token invalid - refreshing")
+	}
+	if f.token == "" {
+		fs.Debugf(f, "Empty token - refreshing")
+		expired = true
+	}
+	now := time.Now()
+	if f.tokenExpiry.IsZero() || now.After(f.tokenExpiry) {
+		fs.Debugf(f, "Token expired - refreshing")
+		expired = true
+	}
+	if !expired {
+		return f.token, nil
+	}
+
+	var info api.GetTokenByAuthTokenResponse
+	_, err = f.rpc(ctx, "getTokenByAuthToken", params{
+		"token":     "*",
+		"authtoken": f.opt.PermanentToken,
+	}, &info, nil)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to get session token")
+	}
+	refreshed = true
+	now = now.Add(tokenLifeTime)
+	f.token = info.Token
+	f.tokenExpiry = now
+	f.m.Set("token", f.token)
+	f.m.Set("token_expiry", now.Format(time.RFC3339))
+
+	// Read appliance info when we update the token
+	err = f.getApplianceInfo(ctx)
+	if err != nil {
+		return "", err
+	}
+	f.setCapabilities()
+
+	return f.token, nil
+}
+
+// params for rpc
+type params map[string]interface{}
+
+// rpc calls the rpc.php method of the SME file fabric
+//
+// This is the entry point for all the method calls
+//
+// If result is nil then resp.Body will need closing
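+//
+// A typical call (as used by CleanUp below) looks like:
+//
+//	var info api.EmptyResponse
+//	_, err := f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)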
+func (f *Fs) rpc(ctx context.Context, function string, p params, result api.OKError, options []fs.OpenOption) (resp *http.Response, err error) {
+	defer log.Trace(f, "%s(%+v) options=%+v", function, p, options)("result=%+v, err=%v", &result, &err)
+
+	// Get the token from params if present otherwise call getToken
+	var token string
+	if tokenI, ok := p["token"]; !ok {
+		token, err = f.getToken(ctx)
+		if err != nil {
+			return resp, err
+		}
+	} else {
+		token = tokenI.(string)
+	}
+	var data = url.Values{
+		"function":  {function},
+		"token":     {token},
+		"apiformat": {"json"},
+	}
+	for k, v := range p {
+		data.Set(k, fmt.Sprint(v))
+	}
+	opts := rest.Opts{
+		Method:      "POST",
+		Path:        "/api/rpc.php",
+		ContentType: "application/x-www-form-urlencoded",
+		Options:     options,
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		// Refresh the body each retry
+		opts.Body = strings.NewReader(data.Encode())
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, result)
+		return f.shouldRetry(resp, err, result)
+	})
+	if err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+// NewFs constructs an Fs from the path, container:path
+func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+
+	opt.URL = strings.TrimSuffix(opt.URL, "/")
+	if opt.URL == "" {
+		return nil, errors.New("url must be set")
+	}
+
+	root = parsePath(root)
+
+	client := fshttp.NewClient(fs.Config)
+
+	f := &Fs{
+		name:  name,
+		root:  root,
+		opt:   *opt,
+		m:     m,
+		srv:   rest.NewClient(client).SetRoot(opt.URL),
+		pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		token: opt.Token,
+	}
+	f.features = (&fs.Features{
+		CaseInsensitive:         true,
+		CanHaveEmptyDirectories: true,
+	}).Fill(ctx, f)
+	if f.opt.Version == "" {
+		err = f.getApplianceInfo(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+	f.setCapabilities()
+
+	if opt.TokenExpiry != "" {
+		tokenExpiry, err := time.Parse(time.RFC3339, opt.TokenExpiry)
+		if err != nil {
+			fs.Errorf(nil, "Failed to parse token_expiry option: %v", err)
+		} else {
+			f.tokenExpiry = tokenExpiry
+		}
+	}
+
+	if opt.RootFolderID == "" {
+		opt.RootFolderID = defaultRootID
+	}
+
+	f.dirCache = dircache.New(f.root, opt.RootFolderID, f)
+
+	// Find out whether the root is a file or a directory or doesn't exist
+	var errReturn error
+	if f.root != "" {
+		info, err := f.readMetaDataForPath(ctx, f.opt.RootFolderID, f.root)
+		if err == nil && info != nil {
+			if info.Type == api.ItemTypeFile {
+				// Root is a file
+				// Point the root to the parent directory
+				f.root, _ = dircache.SplitPath(root)
+				f.dirCache = dircache.New(f.root, opt.RootFolderID, f)
+				errReturn = fs.ErrorIsFile
+				// Cache the ID of the parent of the file as the root ID
+				f.dirCache.Put(f.root, info.PID)
+			} else if info.Type == api.ItemTypeFolder {
+				// Root is a dir - cache its ID
+				f.dirCache.Put(f.root, info.ID)
+			}
+		} else {
+			// Root is not found so assume it is a directory
+		}
+	}
+	return f, errReturn
+}
+
+// set the capabilities of this version of software
+func (f *Fs) setCapabilities() {
+	version := f.opt.Version
+	if version == "" {
+		version = "0000.00"
+	}
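+	// Note: assuming versions are zero padded (eg "0000.00",
+	// "2006.02"), a plain string comparison is sufficient here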
+	if version >= "2006.02" {
+		f.precision = time.Second
+		f.canCopyWithName = true
+	} else {
+		// times can be altered this much on renames
+		f.precision = 1 * time.Hour
+		f.canCopyWithName = false
+	}
+}
+
+// Return an Object from a path
+//
+// If it can't be found it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
+	o := &Object{
+		fs:     f,
+		remote: remote,
+	}
+	var err error
+	if info != nil {
+		// Set info
+		err = o.setMetaData(info)
+	} else {
+		err = o.readMetaData(ctx) // reads info and meta, returning an error
+	}
+	if err != nil {
+		return nil, err
+	}
+	return o, nil
+}
+
+// NewObject finds the Object at remote.  If it can't be found
+// it returns the error fs.ErrorObjectNotFound.
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
+}
+
+// FindLeaf finds a directory of name leaf in the folder with ID pathID
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
+	// Find the leaf in pathID
+	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
+		if item.Name == leaf {
+			pathIDOut = item.ID
+			return true
+		}
+		return false
+	})
+	return pathIDOut, found, err
+}
+
+// CreateDir makes a directory with pathID as parent and name leaf
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+	//fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
+	var info api.DoCreateNewFolderResponse
+	_, err = f.rpc(ctx, "doCreateNewFolder", params{
+		"fi_pid":  pathID,
+		"fi_name": f.opt.Enc.FromStandardName(leaf),
+	}, &info, nil)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to create directory")
+	}
+	// fmt.Printf("...Id %q\n", *info.Id)
+	return info.Item.ID, nil
+}
+
+// listAllFn is a user function to process a File item from listAll
+//
+// Should return true to finish processing
+type listAllFn func(*api.Item) bool
+
+// Lists the directory required, calling the user function on each item found
+//
+// If directoriesOnly is set it only sends directories (and likewise
+// filesOnly only sends files).  If the user fn ever returns true then
+// it early exits with found = true
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+	var (
+		p = params{
+			"fi_pid":     dirID,
+			"count":      listChunks,
+			"subfolders": "y",
+			// Cut down the things that are returned
+			"options": "filelist|" + api.ItemFields,
+		}
+		n = 0
+	)
+OUTER:
+	for {
+		var info api.GetFolderContentsResponse
+		_, err = f.rpc(ctx, "getFolderContents", p, &info, nil)
+		if err != nil {
+			return false, errors.Wrap(err, "failed to list directory")
+		}
+		for i := range info.Items {
+			item := &info.Items[i]
+			if item.Type == api.ItemTypeFolder {
+				if filesOnly {
+					continue
+				}
+			} else if item.Type == api.ItemTypeFile {
+				if directoriesOnly {
+					continue
+				}
+			} else {
+				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
+				continue
+			}
+			if item.Trash {
+				continue
+			}
+			item.Name = f.opt.Enc.ToStandardName(item.Name)
+			if fn(item) {
+				found = true
+				break OUTER
+			}
+		}
+		// if we didn't get any items then exit
+		if len(info.Items) == 0 {
+			break
+		}
+		n += len(info.Items)
+		if n >= info.Total {
+			break
+		}
+		p["from"] = n
+	}
+
+	return found, nil
+}
+
+// List the objects and directories in dir into entries.  The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
+	if err != nil {
+		return nil, err
+	}
+	var iErr error
+	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
+		remote := path.Join(dir, info.Name)
+		if info.Type == api.ItemTypeFolder {
+			// cache the directory ID for later lookups
+			f.dirCache.Put(remote, info.ID)
+			d := fs.NewDir(remote, time.Time(info.Modified)).SetID(info.ID).SetItems(info.SubFolders)
+			entries = append(entries, d)
+		} else if info.Type == api.ItemTypeFile {
+			o, err := f.newObjectWithInfo(ctx, remote, info)
+			if err != nil {
+				iErr = err
+				return true
+			}
+			entries = append(entries, o)
+		}
+		return false
+	})
+	if err != nil {
+		return nil, err
+	}
+	if iErr != nil {
+		return nil, iErr
+	}
+	return entries, nil
+}
+
+// Creates from the parameters passed in a half finished Object which
+// must have setMetaData called on it
+//
+// Returns the object, leaf, directoryID and error
+//
+// Used to create new objects
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+	// Create the directory for the object if it doesn't exist
+	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return
+	}
+	// Temporary Object under construction
+	o = &Object{
+		fs:     f,
+		remote: remote,
+	}
+	return o, leaf, directoryID, nil
+}
+
+// Put the object
+//
+// Copy the reader in to the new object which is returned
+//
+// The new object may have been created if an error is returned
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	remote := src.Remote()
+	size := src.Size()
+	modTime := src.ModTime(ctx)
+
+	o, _, _, err := f.createObject(ctx, remote, modTime, size)
+	if err != nil {
+		return nil, err
+	}
+	return o, o.Update(ctx, in, src, options...)
+}
+
+// Mkdir creates the container if it doesn't exist
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	_, err := f.dirCache.FindDir(ctx, dir, true)
+	return err
+}
+
+// deleteObject removes an object by ID
+func (f *Fs) deleteObject(ctx context.Context, id string) (err error) {
+	var info api.DeleteResponse
+	_, err = f.rpc(ctx, "doDeleteFile", params{
+		"fi_id":            id,
+		"completedeletion": "n",
+	}, &info, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to delete file")
+	}
+	return nil
+}
+
+// purgeCheck removes the root directory; if check is set then it
+// refuses to do so if it has anything in it
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+	root := path.Join(f.root, dir)
+	if root == "" {
+		return errors.New("can't purge root directory")
+	}
+	dc := f.dirCache
+	rootID, err := dc.FindDir(ctx, dir, false)
+	if err != nil {
+		return err
+	}
+
+	if check {
+		found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
+			fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
+			return true
+		})
+		if err != nil {
+			return err
+		}
+		if found {
+			return fs.ErrorDirectoryNotEmpty
+		}
+	}
+
+	var info api.EmptyResponse
+	_, err = f.rpc(ctx, "doDeleteFolder", params{
+		"fi_id": rootID,
+	}, &info, nil)
+	f.dirCache.FlushDir(dir)
+	if err != nil {
+		return errors.Wrap(err, "failed to remove directory")
+	}
+	return nil
+}
+
+// Rmdir deletes the root folder
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	//fs.Debugf(f, "Rmdir(%q)\n", dir)
+	return f.purgeCheck(ctx, dir, true)
+}
+
+// Precision return the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	return f.precision
+}
+
+// Copy src to this remote using server side copy operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't copy - not same remote type")
+		return nil, fs.ErrorCantCopy
+	}
+	err := srcObj.readMetaData(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create temporary object
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	if err != nil {
+		return nil, err
+	}
+
+	if !f.canCopyWithName && leaf != path.Base(srcObj.remote) {
+		fs.Debugf(src, "Can't copy - can't change the name of files")
+		return nil, fs.ErrorCantCopy
+	}
+
+	// Copy the object
+	var info api.FileResponse
+	p := params{
+		"fi_id":   srcObj.id,
+		"fi_pid":  directoryID,
+		"force":   "y",
+		"options": "allownoextension", // without this the filefabric adds extensions to files without one
+	}
+	if f.canCopyWithName {
+		p["fi_name"] = f.opt.Enc.FromStandardName(leaf)
+	}
+	_, err = f.rpc(ctx, "doCopyFile", p, &info, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to copy file")
+	}
+	err = dstObj.setMetaData(&info.Item)
+	if err != nil {
+		return nil, err
+	}
+	return dstObj, nil
+}
+
+// Purge deletes all the files and the container
+//
+// Optional interface: Only implement this if you have a way of
+// deleting all the files quicker than just running Remove() on the
+// result of List()
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, false)
+}
+
+// Wait for the background task to complete if necessary
+func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID string) (err error) {
+	if taskID == "" || taskID == "0" {
+		// No task to wait for
+		return nil
+	}
+	start := time.Now()
+	sleepTime := time.Second
+	for {
+		var info api.TasksResponse
+		_, err = f.rpc(ctx, "getUserBackgroundTasks", params{
+			"taskid": taskID,
+		}, &info, nil)
+		if err != nil {
+			return errors.Wrapf(err, "failed to wait for task %s to complete", taskID)
+		}
+		if len(info.Tasks) == 0 {
+			// task has finished
+			break
+		}
+		if len(info.Tasks) > 1 {
+			fs.Errorf(f, "Unexpected number of tasks returned %d", len(info.Tasks))
+		}
+		task := info.Tasks[0]
+		if task.BtStatus == "c" {
+			// task completed
+			break
+		}
+		dt := time.Since(start)
+		fs.Debugf(f, "Waiting for task ID %s (%s) to complete, sleeping %v - waited %v so far", task.BtID, task.BtTitle, sleepTime, dt)
+		time.Sleep(sleepTime)
+	}
+	return nil
+}
+
+// Rename the leaf of a file or directory in a directory
+func (f *Fs) renameLeaf(ctx context.Context, isDir bool, id string, newLeaf string) (item *api.Item, err error) {
+	var info api.FileResponse
+	method := "doRenameFile"
+	if isDir {
+		method = "doRenameFolder"
+	}
+	_, err = f.rpc(ctx, method, params{
+		"fi_id":   id,
+		"fi_name": newLeaf,
+	}, &info, nil)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to rename leaf")
+	}
+	err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
+	if err != nil {
+		return nil, err
+	}
+	return &info.Item, nil
+}
+
+// move a file or folder
+//
+// This is complicated by the fact that there is an API to move files
+// between directories and a separate one to rename them.  We try to
+// make the minimum number of API calls.
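+//
+// In the worst case the sequence of operations is:
+//
+//	1. rename the leaf to a temporary random name
+//	2. move the object to the new directory
+//	3. rename the leaf to its final name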
+func (f *Fs) move(ctx context.Context, isDir bool, id, oldLeaf, newLeaf, oldDirectoryID, newDirectoryID string) (item *api.Item, err error) {
+	newLeaf = f.opt.Enc.FromStandardName(newLeaf)
+	oldLeaf = f.opt.Enc.FromStandardName(oldLeaf)
+	doRenameLeaf := oldLeaf != newLeaf
+	doMove := oldDirectoryID != newDirectoryID
+
+	// Now rename the leaf to a temporary name if we are moving to
+	// another directory to make sure we don't overwrite something
+	// in the destination directory by accident
+	if doRenameLeaf && doMove {
+		tmpLeaf := newLeaf + "." + random.String(8)
+		item, err = f.renameLeaf(ctx, isDir, id, tmpLeaf)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Move the object to a new directory (with the existing name)
+	// if required
+	if doMove {
+		var info api.MoveFilesResponse
+		method := "doMoveFiles"
+		if isDir {
+			method = "doMoveFolders"
+		}
+		_, err = f.rpc(ctx, method, params{
+			"fi_ids": id,
+			"dir_id": newDirectoryID,
+		}, &info, nil)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to move file to new directory")
+		}
+		item = &info.Item
+		err = f.waitForBackgroundTask(ctx, info.Status.TaskID)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Rename the leaf to its final name if required
+	if doRenameLeaf {
+		item, err = f.renameLeaf(ctx, isDir, id, newLeaf)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return item, nil
+}
+
+// Move src to this remote using server side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+
+	// find the source directoryID
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create temporary object
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	if err != nil {
+		return nil, err
+	}
+
+	// Do the move
+	item, err := f.move(ctx, false, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the metadata from what was returned or read it fresh
+	if item == nil {
+		err = dstObj.readMetaData(ctx)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		err = dstObj.setMetaData(item)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return dstObj, nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+
+	srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+
+	// Do the move
+	_, err = f.move(ctx, true, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
+	if err != nil {
+		return err
+	}
+	srcFs.dirCache.FlushDir(srcRemote)
+	return nil
+}
+
+// CleanUp empties the trash
+func (f *Fs) CleanUp(ctx context.Context) (err error) {
+	var info api.EmptyResponse
+	_, err = f.rpc(ctx, "emptyTrashInBackground", params{}, &info, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to empty trash")
+	}
+	return nil
+}
+
+// DirCacheFlush resets the directory cache - used in testing as an
+// optional interface
+func (f *Fs) DirCacheFlush() {
+	f.dirCache.ResetRoot()
+}
+
+// Hashes returns the supported hash sets.
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.None)
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+	if o == nil {
+		return "<nil>"
+	}
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// Hash of the object in the requested format as a lowercase hex string
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	return "", hash.ErrUnsupported
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	err := o.readMetaData(context.TODO())
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return 0
+	}
+	if o.contentType == emptyMimeType {
+		return 0
+	}
+	return o.size
+}
+
+// setMetaData sets the metadata from info
+func (o *Object) setMetaData(info *api.Item) (err error) {
+	if info.Type != api.ItemTypeFile {
+		return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
+	}
+	o.hasMetaData = true
+	o.size = info.Size
+	o.modTime = time.Time(info.Modified)
+	if !time.Time(info.LocalTime).IsZero() {
+		o.modTime = time.Time(info.LocalTime)
+	}
+	o.id = info.ID
+	o.contentType = info.ContentType
+	return nil
+}
+
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData(ctx context.Context) (err error) {
+	if o.hasMetaData {
+		return nil
+	}
+	rootID, err := o.fs.dirCache.RootID(ctx, false)
+	if err != nil {
+		if err == fs.ErrorDirNotFound {
+			err = fs.ErrorObjectNotFound
+		}
+		return err
+	}
+	info, err := o.fs.readMetaDataForPath(ctx, rootID, o.remote)
+	if err != nil {
+		if apiErr, ok := err.(*api.Status); ok {
+			if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
+				return fs.ErrorObjectNotFound
+			}
+		}
+		return err
+	}
+	return o.setMetaData(info)
+}
+
+// ModTime returns the modification time of the object
+//
+// It attempts to read the object's mtime and if that isn't present the
+// LastModified returned in the http headers
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	err := o.readMetaData(ctx)
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return time.Now()
+	}
+	return o.modTime
+}
+
+// modifyFile updates file metadata
+//
+// keyValues should be key, value pairs
+func (o *Object) modifyFile(ctx context.Context, keyValues [][2]string) error {
+	var info api.FileResponse
+	var data strings.Builder
+	for _, keyValue := range keyValues {
+		data.WriteString(keyValue[0])
+		data.WriteRune('=')
+		data.WriteString(keyValue[1])
+		data.WriteRune('\n')
+	}
+	_, err := o.fs.rpc(ctx, "doModifyFile", params{
+		"fi_id": o.id,
+		"data":  data.String(),
+	}, &info, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to update metadata")
+	}
+	return o.setMetaData(&info.Item)
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+	return o.modifyFile(ctx, [][2]string{
+		{"fi_localtime", api.Time(modTime).String()},
+	})
+}
+
+// Storable returns a boolean showing whether this object is storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.id == "" {
+		return nil, errors.New("can't download - no id")
+	}
+	if o.contentType == emptyMimeType {
+		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+	}
+	fs.FixRangeOption(options, o.size)
+	resp, err := o.fs.rpc(ctx, "getFile", params{
+		"fi_id": o.id,
+	}, nil, options)
+	if err != nil {
+		return nil, err
+	}
+	return resp.Body, nil
+}
+
+// Update the object with the contents of the io.Reader, modTime and size
+//
+// If existing is set then it updates the object rather than creating a new one
+//
+// The new object may have been created if an error is returned
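+//
+// The upload here is a three step process:
+//
+//	1. doInitUpload to get an upload code
+//	2. PUT the data to /cgi-bin/uploader/uploader1.cgi using the upload code
+//	3. doCompleteUpload to finalize the upload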
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	modTime := src.ModTime(ctx)
+	remote := o.remote
+	size := src.Size()
+
+	// Can't upload 0 length files - these upload as a single
+	// space.
+	// if size == 0 {
+	// 	return fs.ErrorCantUploadEmptyFiles
+	// }
+
+	// Create the directory for the object if it doesn't exist
+	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return err
+	}
+
+	// Initialise the upload
+	var upload api.DoInitUploadResponse
+	timestamp := api.Time(modTime).String()
+	encodedLeaf := o.fs.opt.Enc.FromStandardName(leaf)
+	base64EncodedLeaf := base64.StdEncoding.EncodeToString([]byte(encodedLeaf))
+	contentType := fs.MimeType(ctx, src)
+	if size == 0 {
+		contentType = emptyMimeType
+	}
+	p := params{
+		"fi_name":             encodedLeaf,
+		"fi_pid":              directoryID,
+		"fi_filename":         encodedLeaf,
+		"fi_localtime":        timestamp,
+		"fi_modified":         timestamp,
+		"fi_contenttype":      contentType,
+		"responsetype":        "json", // make the upload.cgi return JSON
+		"directuploadsupport": "n",    // FIXME should we support this?
+		// "chunkifbig": "n",	    // FIXME multipart?
+	}
+	// Set the size if known
+	if size >= 0 {
+		p["fi_size"] = size
+	}
+	_, err = o.fs.rpc(ctx, "doInitUpload", p, &upload, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize upload")
+	}
+
+	// Cancel the upload if aborted or it fails
+	finalized := false
+	defer atexit.OnError(&err, func() {
+		if finalized {
+			return
+		}
+		fs.Debugf(o, "Cancelling upload %s", upload.UploadCode)
+		var cancel api.EmptyResponse
+		_, fErr := o.fs.rpc(ctx, "doAbortUpload", params{
+			"uploadcode": upload.UploadCode,
+		}, &cancel, nil)
+		if fErr != nil {
+			fs.Errorf(o, "failed to cancel upload: %v", fErr)
+		}
+	})()
+
+	// Post the file with the upload code
+	var uploader api.UploaderResponse
+	opts := rest.Opts{
+		//Method: "POST",
+		Method:      "PUT",
+		Path:        "/cgi-bin/uploader/uploader1.cgi/" + base64EncodedLeaf + "?" + upload.UploadCode,
+		Body:        in,
+		ContentType: contentType,
+		// MultipartParams:      url.Values{},
+		// MultipartContentName: "file",
+		// MultipartFileName:    "datafile",
+	}
+	// Set the size if known
+	if size >= 0 {
+		var contentLength = size
+		opts.ContentLength = &contentLength // NB CallJSON scribbles on this which is naughty
+	}
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploader)
+		return o.fs.shouldRetry(resp, err, nil)
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to upload")
+	}
+	if uploader.Success != "y" {
+		return errors.Errorf("upload failed")
+	}
+	if size > 0 && uploader.FileSize != size {
+		return errors.Errorf("upload failed: size mismatch: want %d got %d", size, uploader.FileSize)
+	}
+
+	// Now finalize the file
+	var finalize api.DoCompleteUploadResponse
+	p = params{
+		"uploadcode": upload.UploadCode,
+		"remotetime": timestamp,
+		"fi_size":    uploader.FileSize,
+	}
+	_, err = o.fs.rpc(ctx, "doCompleteUpload", p, &finalize, nil)
+	if err != nil {
+		return errors.Wrap(err, "failed to finalize upload")
+	}
+	finalized = true
+
+	err = o.setMetaData(&finalize.File)
+	if err != nil {
+		return err
+	}
+
+	// Make sure content type is correct
+	if o.contentType != contentType {
+		fs.Debugf(o, "Correcting mime type from %q to %q", o.contentType, contentType)
+		return o.modifyFile(ctx, [][2]string{
+			{"fi_contenttype", contentType},
+		})
+	}
+
+	return nil
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	return o.fs.deleteObject(ctx, o.id)
+}
+
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+	return o.id
+}
+
+// MimeType returns the content type of the Object if
+// known, or "" if not
+func (o *Object) MimeType(ctx context.Context) string {
+	return o.contentType
+}
+
+// Check the interfaces are satisfied
+var (
+	_ fs.Fs              = (*Fs)(nil)
+	_ fs.Purger          = (*Fs)(nil)
+	_ fs.Copier          = (*Fs)(nil)
+	_ fs.Mover           = (*Fs)(nil)
+	_ fs.DirMover        = (*Fs)(nil)
+	_ fs.DirCacheFlusher = (*Fs)(nil)
+	_ fs.CleanUpper      = (*Fs)(nil)
+	_ fs.Object          = (*Object)(nil)
+	_ fs.IDer            = (*Object)(nil)
+	_ fs.MimeTyper       = (*Object)(nil)
+)
diff --git a/backend/filefabric/filefabric_test.go b/backend/filefabric/filefabric_test.go
new file mode 100644
index 000000000..554639212
--- /dev/null
+++ b/backend/filefabric/filefabric_test.go
@@ -0,0 +1,17 @@
+// Test filefabric filesystem interface
+package filefabric_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/filefabric"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestFileFabric:",
+		NilObject:  (*filefabric.Object)(nil),
+	})
+}
diff --git a/bin/make_manual.py b/bin/make_manual.py
index 725d99b73..210893afd 100755
--- a/bin/make_manual.py
+++ b/bin/make_manual.py
@@ -36,6 +36,7 @@ docs = [
     "sharefile.md",
     "crypt.md",
     "dropbox.md",
+    "filefabric.md",
     "ftp.md",
     "googlecloudstorage.md",
     "drive.md",
diff --git a/docs/content/_index.md b/docs/content/_index.md
index 2ed8383cb..bc9042b66 100644
--- a/docs/content/_index.md
+++ b/docs/content/_index.md
@@ -115,6 +115,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
 {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
 {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
+{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
 {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
 {{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
 {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
diff --git a/docs/content/docs.md b/docs/content/docs.md
index 898794668..9671686f9 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -30,6 +30,7 @@ See the following for detailed instructions for
   * [Crypt](/crypt/) - to encrypt other remotes
   * [DigitalOcean Spaces](/s3/#digitalocean-spaces)
   * [Dropbox](/dropbox/)
+  * [Enterprise File Fabric](/filefabric/)
   * [FTP](/ftp/)
   * [Google Cloud Storage](/googlecloudstorage/)
   * [Google Drive](/drive/)
diff --git a/docs/content/filefabric.md b/docs/content/filefabric.md
new file mode 100644
index 000000000..50c4cfafd
--- /dev/null
+++ b/docs/content/filefabric.md
@@ -0,0 +1,260 @@
+---
+title: "Enterprise File Fabric"
+description: "Rclone docs for the Enterprise File Fabric backend"
+---
+
+{{< icon "fa fa-cloud" >}} Enterprise File Fabric
+-----------------------------------------
+
+This backend supports [Storage Made Easy's Enterprise File
+Fabric™](https://storagemadeeasy.com/about/) which provides a software
+solution to integrate and unify File and Object Storage accessible
+through a global file system.
+
+The initial setup for the Enterprise File Fabric backend involves
+getting a token from the Enterprise File Fabric which you need to
+do in your browser.  `rclone config` walks you through it.
+
+Here is an example of how to make a remote called `remote`.  First run:
+
+    rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found - make a new one
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / Enterprise File Fabric
+   \ "filefabric"
+[snip]
+Storage> filefabric
+** See help for filefabric backend at: https://rclone.org/filefabric/ **
+
+URL of the Enterprise File Fabric to connect to
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Storage Made Easy US
+   \ "https://storagemadeeasy.com"
+ 2 / Storage Made Easy EU
+   \ "https://eu.storagemadeeasy.com"
+ 3 / Connect to your Enterprise File Fabric
+   \ "https://yourfabric.smestorage.com"
+url> https://yourfabric.smestorage.com/
+ID of the root folder
+Leave blank normally.
+
+Fill in to make rclone start in the directory with the given ID.
+
+Enter a string value. Press Enter for the default ("").
+root_folder_id> 
+Permanent Authentication Token
+
+A Permanent Authentication Token can be created in the Enterprise File
+Fabric. On the user's Dashboard under Security there is an entry
+called "My Authentication Tokens". Click the Manage button to create
+one.
+
+These tokens are normally valid for several years.
+
+For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
+
+Enter a string value. Press Enter for the default ("").
+permanent_token> xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
+Edit advanced config? (y/n)
+y) Yes
+n) No (default)
+y/n> n
+Remote config
+--------------------
+[remote]
+type = filefabric
+url = https://yourfabric.smestorage.com/
+permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
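+
+Rclone can also pick up the remote's settings from environment variables
+instead of the interactive flow. This uses standard rclone behaviour, with
+`myfabric` as a placeholder remote name and the same placeholder token as
+above:
+
+    export RCLONE_CONFIG_MYFABRIC_TYPE=filefabric
+    export RCLONE_CONFIG_MYFABRIC_URL=https://yourfabric.smestorage.com/
+    export RCLONE_CONFIG_MYFABRIC_PERMANENT_TOKEN=xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx
+    rclone lsd myfabric: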
+
+Once configured you can then use `rclone` like this,
+
+List directories in top level of your Enterprise File Fabric
+
+    rclone lsd remote:
+
+List all the files in your Enterprise File Fabric
+
+    rclone ls remote:
+
+To copy a local directory to an Enterprise File Fabric directory called backup
+
+    rclone copy /home/source remote:backup
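+
+To sync `/home/source` to that directory, deleting any extraneous files
+on the remote (standard rclone behaviour, not specific to this backend):
+
+    rclone sync /home/source remote:backup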
+
+### Modified time and hashes
+
+The Enterprise File Fabric allows modification times to be set on
+files accurate to 1 second.  These will be used to detect whether
+objects need syncing or not.
+
+The Enterprise File Fabric does not support any data hashes at this time.
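+
+As there are no hashes, `rclone check` can only compare file sizes on
+this remote. To verify the actual contents you can make it download and
+compare the data using the standard `--download` flag (not specific to
+this backend):
+
+    rclone check /home/source remote:backup --download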
+
+### Restricted filename characters
+
+The [default restricted characters set](/overview/#restricted-characters)
+will be replaced.
+
+Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
+as they can't be used in JSON strings.
+
+### Empty files
+
+Empty files aren't supported by the Enterprise File Fabric. Rclone will therefore
+upload an empty file as a single space with a mime type of
+`application/vnd.rclone.empty.file`, and files with that mime type are
+treated as empty when downloaded.
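+
+As a rough sketch of this convention in Go (illustrative only, not the
+actual backend code):
+
+```
+package main
+
+import "fmt"
+
+// emptyMimeType marks the single-space upload that stands in for an empty file.
+const emptyMimeType = "application/vnd.rclone.empty.file"
+
+// encodeEmpty substitutes a one-byte body for zero-length content on upload.
+func encodeEmpty(data []byte) (body []byte, mimeType string) {
+	if len(data) == 0 {
+		return []byte(" "), emptyMimeType
+	}
+	return data, ""
+}
+
+// isEmptySentinel reports whether a downloaded object should be treated as empty.
+func isEmptySentinel(mimeType string) bool {
+	return mimeType == emptyMimeType
+}
+
+func main() {
+	body, mimeType := encodeEmpty(nil)
+	fmt.Printf("body=%q mime=%q empty=%v\n", body, mimeType, isEmptySentinel(mimeType))
+}
+```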
+
+### Root folder ID
+
+You can set the `root_folder_id` for rclone.  This is the directory
+(identified by its `Folder ID`) that rclone considers to be the root
+of your Enterprise File Fabric.
+
+Normally you will leave this blank and rclone will determine the
+correct root to use itself.
+
+However you can set this to restrict rclone to a specific folder
+hierarchy.
+
+In order to do this you will have to find the `Folder ID` of the
+directory you wish rclone to display.  Folder IDs aren't shown in the
+web interface, but you can use `rclone lsf` to find them, for example:
+
+```
+$ rclone lsf --dirs-only -Fip --csv filefabric:
+120673758,Burnt PDFs/
+120673759,My Quick Uploads/
+120673755,My Syncs/
+120673756,My backups/
+120673757,My contacts/
+120673761,S3 Storage/
+```
+
+The ID for "S3 Storage" would be `120673761`.
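+
+You could then use this ID as the root, either by setting
+`root_folder_id` in the config file or, for a one off, with the backend
+flag described in the options below:
+
+    rclone lsd filefabric: --filefabric-root-folder-id 120673761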
+
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/filefabric/filefabric.go then run make backenddocs" >}}
+### Standard Options
+
+Here are the standard options specific to filefabric (Enterprise File Fabric).
+
+#### --filefabric-url
+
+URL of the Enterprise File Fabric to connect to
+
+- Config:      url
+- Env Var:     RCLONE_FILEFABRIC_URL
+- Type:        string
+- Default:     ""
+- Examples:
+    - "https://storagemadeeasy.com"
+        - Storage Made Easy US
+    - "https://eu.storagemadeeasy.com"
+        - Storage Made Easy EU
+    - "https://yourfabric.smestorage.com"
+        - Connect to your Enterprise File Fabric
+
+#### --filefabric-root-folder-id
+
+ID of the root folder
+Leave blank normally.
+
+Fill in to make rclone start in the directory with the given ID.
+
+
+- Config:      root_folder_id
+- Env Var:     RCLONE_FILEFABRIC_ROOT_FOLDER_ID
+- Type:        string
+- Default:     ""
+
+#### --filefabric-permanent-token
+
+Permanent Authentication Token
+
+A Permanent Authentication Token can be created in the Enterprise File
+Fabric. On the user's Dashboard under Security there is an entry
+called "My Authentication Tokens". Click the Manage button to create
+one.
+
+These tokens are normally valid for several years.
+
+For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
+
+
+- Config:      permanent_token
+- Env Var:     RCLONE_FILEFABRIC_PERMANENT_TOKEN
+- Type:        string
+- Default:     ""
+
+### Advanced Options
+
+Here are the advanced options specific to filefabric (Enterprise File Fabric).
+
+#### --filefabric-token
+
+Session Token
+
+This is a session token which rclone caches in the config file. It is
+usually valid for 1 hour.
+
+Don't set this value - rclone will set it automatically.
+
+
+- Config:      token
+- Env Var:     RCLONE_FILEFABRIC_TOKEN
+- Type:        string
+- Default:     ""
+
+#### --filefabric-token-expiry
+
+Token expiry time
+
+Don't set this value - rclone will set it automatically.
+
+
+- Config:      token_expiry
+- Env Var:     RCLONE_FILEFABRIC_TOKEN_EXPIRY
+- Type:        string
+- Default:     ""
+
+#### --filefabric-version
+
+Version read from the file fabric
+
+Don't set this value - rclone will set it automatically.
+
+
+- Config:      version
+- Env Var:     RCLONE_FILEFABRIC_VERSION
+- Type:        string
+- Default:     ""
+
+#### --filefabric-encoding
+
+This sets the encoding for the backend.
+
+See: the [encoding section in the overview](/overview/#encoding) for more info.
+
+- Config:      encoding
+- Env Var:     RCLONE_FILEFABRIC_ENCODING
+- Type:        MultiEncoder
+- Default:     Slash,Del,Ctl,InvalidUtf8,Dot
+
+{{< rem autogenerated options stop >}}
diff --git a/docs/content/overview.md b/docs/content/overview.md
index 10f6867c2..9cffb8cf6 100644
--- a/docs/content/overview.md
+++ b/docs/content/overview.md
@@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
 | Box                          | SHA1        | Yes     | Yes              | No              | -         |
 | Citrix ShareFile             | MD5         | Yes     | Yes              | No              | -         |
 | Dropbox                      | DBHASH ¹    | Yes     | Yes              | No              | -         |
+| Enterprise File Fabric       | -           | Yes     | Yes              | No              | R/W       |
 | FTP                          | -           | No      | No               | No              | -         |
 | Google Cloud Storage         | MD5         | Yes     | No               | No              | R/W       |
 | Google Drive                 | MD5         | Yes     | No               | Yes             | R/W       |
@@ -334,6 +335,7 @@ upon backend specific capabilities.
 | Box                          | Yes   | Yes  | Yes  | Yes     | Yes ‡‡  | No    | Yes          | Yes          | No  | Yes |
 | Citrix ShareFile             | Yes   | Yes  | Yes  | Yes     | No      | No    | Yes          | No          | No  | Yes |
 | Dropbox                      | Yes   | Yes  | Yes  | Yes     | No [#575](https://github.com/rclone/rclone/issues/575) | No  | Yes | Yes | Yes | Yes |
+| Enterprise File Fabric       | Yes   | Yes  | Yes  | Yes     | No      | No    | No           | No          | No  | Yes |
 | FTP                          | No    | No   | Yes  | Yes     | No      | No    | Yes          | No [#2178](https://github.com/rclone/rclone/issues/2178) | No  | Yes |
 | Google Cloud Storage         | Yes   | Yes  | No   | No      | No      | Yes   | Yes          | No [#2178](https://github.com/rclone/rclone/issues/2178) | No  | No |
 | Google Drive                 | Yes   | Yes  | Yes  | Yes     | Yes     | Yes   | Yes          | Yes         | Yes | Yes |
diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html
index de9eef0ff..1625ba58c 100644
--- a/docs/layouts/chrome/navbar.html
+++ b/docs/layouts/chrome/navbar.html
@@ -72,6 +72,7 @@
           <a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square"></i> Citrix ShareFile</a>
           <a class="dropdown-item" href="/crypt/"><i class="fa fa-lock"></i> Crypt (encrypts the others)</a>
           <a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox"></i> Dropbox</a>
+          <a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud"></i> Enterprise File Fabric</a>
           <a class="dropdown-item" href="/ftp/"><i class="fa fa-file"></i> FTP</a>
           <a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google"></i> Google Cloud Storage</a>
           <a class="dropdown-item" href="/drive/"><i class="fab fa-google"></i> Google Drive</a>
diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml
index 00ed13251..ecb4702ea 100644
--- a/fstest/test_all/config.yaml
+++ b/fstest/test_all/config.yaml
@@ -94,6 +94,9 @@ backends:
  - backend:  "dropbox"
    remote:   "TestDropbox:"
    fastlist: false
+ - backend:  "filefabric"
+   remote:   "TestFileFabric:"
+   fastlist: false
  - backend:  "googlecloudstorage"
    remote:   "TestGoogleCloudStorage:"
    fastlist: true