diff --git a/backend/alias/alias.go b/backend/alias/alias.go
index 283ab1c09..33ba5d664 100644
--- a/backend/alias/alias.go
+++ b/backend/alias/alias.go
@@ -20,7 +20,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Help:     "Remote or path to alias.\n\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
 			Required: true,
 		}},
 	}
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index 84eeebedd..966bf264b 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -75,7 +75,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "account",
-			Help: "Storage Account Name (leave blank to use SAS URL or Emulator)",
+			Help: "Storage Account Name.\n\nLeave blank to use SAS URL or Emulator.",
 		}, {
 			Name: "service_principal_file",
 			Help: `Path to file containing credentials for use with a service principal.
@@ -91,13 +91,13 @@ See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/a
 `,
 		}, {
 			Name: "key",
-			Help: "Storage Account Key (leave blank to use SAS URL or Emulator)",
+			Help: "Storage Account Key.\n\nLeave blank to use SAS URL or Emulator.",
 		}, {
 			Name: "sas_url",
-			Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)",
+			Help: "SAS URL for container level access only.\n\nLeave blank if using account/key or Emulator.",
 		}, {
 			Name: "use_msi",
-			Help: `Use a managed service identity to authenticate (only works in Azure)
+			Help: `Use a managed service identity to authenticate (only works in Azure).
 
 When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
 to authenticate to Azure Storage instead of a SAS token or account key.
@@ -110,27 +110,27 @@ msi_client_id, or msi_mi_res_id parameters.`,
 			Default: false,
 		}, {
 			Name:     "msi_object_id",
-			Help:     "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.",
+			Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
 			Advanced: true,
 		}, {
 			Name:     "msi_client_id",
-			Help:     "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.",
+			Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
 			Advanced: true,
 		}, {
 			Name:     "msi_mi_res_id",
-			Help:     "Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.",
+			Help:     "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
 			Advanced: true,
 		}, {
 			Name:    "use_emulator",
-			Help:    "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)",
+			Help:    "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
 			Default: false,
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for the service\nLeave blank normally.",
+			Help:     "Endpoint for the service.\n\nLeave blank normally.",
 			Advanced: true,
 		}, {
 			Name:     "upload_cutoff",
-			Help:     "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
+			Help:     "Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).",
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -201,6 +201,7 @@ to start uploading.`,
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
+
 Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
 This option controls how often unused buffers will be removed from the pool.`,
 		}, {
@@ -220,12 +221,12 @@ This option controls how often unused buffers will be removed from the pool.`,
 				encoder.EncodeRightPeriod),
 		}, {
 			Name:    "public_access",
-			Help:    "Public access level of a container: blob, container.",
+			Help:    "Public access level of a container: blob or container.",
 			Default: string(azblob.PublicAccessNone),
 			Examples: []fs.OptionExample{
 				{
 					Value: string(azblob.PublicAccessNone),
-					Help:  "The container and its blobs can be accessed only with an authorized request. It's a default value",
+					Help:  "The container and its blobs can be accessed only with an authorized request.\nIt's the default value.",
 				}, {
 					Value: string(azblob.PublicAccessBlob),
 					Help:  "Blob data within this container can be read via anonymous request.",
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index fb3a8bbc5..32c38211d 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -75,15 +75,15 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "account",
-			Help:     "Account ID or Application Key ID",
+			Help:     "Account ID or Application Key ID.",
 			Required: true,
 		}, {
 			Name:     "key",
-			Help:     "Application Key",
+			Help:     "Application Key.",
 			Required: true,
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for the service.\nLeave blank normally.",
+			Help:     "Endpoint for the service.\n\nLeave blank normally.",
 			Advanced: true,
 		}, {
 			Name: "test_mode",
@@ -103,7 +103,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
 			Advanced: true,
 		}, {
 			Name:     "versions",
-			Help:     "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
+			Help:     "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -121,7 +121,7 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
 			Advanced: true,
 		}, {
 			Name: "copy_cutoff",
-			Help: `Cutoff for switching to multipart copy
+			Help: `Cutoff for switching to multipart copy.
 
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
@@ -131,17 +131,19 @@ The minimum is 0 and the maximum is 4.6 GiB.`,
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
-			Help: `Upload chunk size. Must fit in memory.
+			Help: `Upload chunk size.
 
-When uploading large files, chunk the file into this size.  Note that
-these chunks are buffered in memory and there might a maximum of
-"--transfers" chunks in progress at once.  5,000,000 Bytes is the
-minimum size.`,
+When uploading large files, chunk the file into this size.
+
+Must fit in memory. These chunks are buffered in memory and there
+might be a maximum of "--transfers" chunks in progress at once.
+
+5,000,000 Bytes is the minimum size.`,
 			Default:  defaultChunkSize,
 			Advanced: true,
 		}, {
 			Name: "disable_checksum",
-			Help: `Disable checksums for large (> upload cutoff) files
+			Help: `Disable checksums for large (> upload cutoff) files.
 
 Normally rclone will calculate the SHA1 checksum of the input before
 uploading it so it can add it to metadata on the object. This is great
diff --git a/backend/box/box.go b/backend/box/box.go
index 93bd373e4..4281a46ca 100644
--- a/backend/box/box.go
+++ b/backend/box/box.go
@@ -110,19 +110,19 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "box_config_file",
-			Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
+			Help: "Box App config.json location.\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
 			Name: "access_token",
-			Help: "Box App Primary Access Token\nLeave blank normally.",
+			Help: "Box App Primary Access Token.\n\nLeave blank normally.",
 		}, {
 			Name:    "box_sub_type",
 			Default: "user",
 			Examples: []fs.OptionExample{{
 				Value: "user",
-				Help:  "Rclone should act on behalf of a user",
+				Help:  "Rclone should act on behalf of a user.",
 			}, {
 				Value: "enterprise",
-				Help:  "Rclone should act on behalf of a service account",
+				Help:  "Rclone should act on behalf of a service account.",
 			}},
 		}, {
 			Name:     "upload_cutoff",
diff --git a/backend/cache/cache.go b/backend/cache/cache.go
index 161df7118..9962406f7 100644
--- a/backend/cache/cache.go
+++ b/backend/cache/cache.go
@@ -69,26 +69,26 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote to cache.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to cache.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 			Required: true,
 		}, {
 			Name: "plex_url",
-			Help: "The URL of the Plex server",
+			Help: "The URL of the Plex server.",
 		}, {
 			Name: "plex_username",
-			Help: "The username of the Plex user",
+			Help: "The username of the Plex user.",
 		}, {
 			Name:       "plex_password",
-			Help:       "The password of the Plex user",
+			Help:       "The password of the Plex user.",
 			IsPassword: true,
 		}, {
 			Name:     "plex_token",
-			Help:     "The plex token for authentication - auto set normally",
+			Help:     "The plex token for authentication - auto set normally.",
 			Hide:     fs.OptionHideBoth,
 			Advanced: true,
 		}, {
 			Name:     "plex_insecure",
-			Help:     "Skip all certificate verification when connecting to the Plex server",
+			Help:     "Skip all certificate verification when connecting to the Plex server.",
 			Advanced: true,
 		}, {
 			Name: "chunk_size",
@@ -144,7 +144,7 @@ oldest chunks until it goes under this value.`,
 		}, {
 			Name:     "db_path",
 			Default:  filepath.Join(config.GetCacheDir(), "cache-backend"),
-			Help:     "Directory to store file structure metadata DB.\nThe remote name is used as the DB file name.",
+			Help:     "Directory to store file structure metadata DB.\n\nThe remote name is used as the DB file name.",
 			Advanced: true,
 		}, {
 			Name:    "chunk_path",
@@ -168,6 +168,7 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".`,
 			Name:    "chunk_clean_interval",
 			Default: DefCacheChunkCleanInterval,
 			Help: `How often should the cache perform cleanups of the chunk storage.
+
 The default value should be ok for most people. If you find that the
 cache goes over "cache-chunk-total-size" too often then try to lower
 this value to force it to perform cleanups more often.`,
@@ -221,7 +222,7 @@ available on the local machine.`,
 		}, {
 			Name:    "rps",
 			Default: int(DefCacheRps),
-			Help: `Limits the number of requests per second to the source FS (-1 to disable)
+			Help: `Limits the number of requests per second to the source FS (-1 to disable).
 
 This setting places a hard limit on the number of requests per second
 that cache will be doing to the cloud provider remote and try to
@@ -242,7 +243,7 @@ still pass.`,
 		}, {
 			Name:    "writes",
 			Default: DefCacheWrites,
-			Help: `Cache file data on writes through the FS
+			Help: `Cache file data on writes through the FS.
 
 If you need to read files immediately after you upload them through
 cache you can enable this flag to have their data stored in the
@@ -263,7 +264,7 @@ provider`,
 		}, {
 			Name:    "tmp_wait_time",
 			Default: DefCacheTmpWaitTime,
-			Help: `How long should files be stored in local cache before being uploaded
+			Help: `How long should files be stored in local cache before being uploaded.
 
 This is the duration that a file must wait in the temporary location
 _cache-tmp-upload-path_ before it is selected for upload.
@@ -274,7 +275,7 @@ to start the upload if a queue formed for this purpose.`,
 		}, {
 			Name:    "db_wait_time",
 			Default: DefCacheDbWaitTime,
-			Help: `How long to wait for the DB to be available - 0 is unlimited
+			Help: `How long to wait for the DB to be available - 0 is unlimited.
 
 Only one process can have the DB open at any one time, so rclone waits
 for this duration for the DB to become available before it gives an
diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go
index 3a1a16657..3d4de1b85 100644
--- a/backend/chunker/chunker.go
+++ b/backend/chunker/chunker.go
@@ -150,6 +150,7 @@ func init() {
 			Name:     "remote",
 			Required: true,
 			Help: `Remote to chunk/unchunk.
+
 Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 "myremote:bucket" or maybe "myremote:" (not recommended).`,
 		}, {
@@ -163,6 +164,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
 			Hide:     fs.OptionHideCommandLine,
 			Default:  `*.rclone_chunk.###`,
 			Help: `String format of chunk file names.
+
 The two placeholders are: base file name (*) and chunk number (#...).
 There must be one and only one asterisk and one or more consecutive hash characters.
 If chunk number has less digits than the number of hashes, it is left-padded by zeros.
@@ -174,48 +176,57 @@ Possible chunk files are ignored if their name does not match given format.`,
 			Hide:     fs.OptionHideCommandLine,
 			Default:  1,
 			Help: `Minimum valid chunk number. Usually 0 or 1.
+
 By default chunk numbers start from 1.`,
 		}, {
 			Name:     "meta_format",
 			Advanced: true,
 			Hide:     fs.OptionHideCommandLine,
 			Default:  "simplejson",
-			Help: `Format of the metadata object or "none". By default "simplejson".
+			Help: `Format of the metadata object or "none".
+
+By default "simplejson".
 Metadata is a small JSON file named after the composite file.`,
 			Examples: []fs.OptionExample{{
 				Value: "none",
-				Help:  `Do not use metadata files at all. Requires hash type "none".`,
+				Help: `Do not use metadata files at all.
+Requires hash type "none".`,
 			}, {
 				Value: "simplejson",
 				Help: `Simple JSON supports hash sums and chunk validation.
+
 It has the following fields: ver, size, nchunks, md5, sha1.`,
 			}},
 		}, {
 			Name:     "hash_type",
 			Advanced: false,
 			Default:  "md5",
-			Help:     `Choose how chunker handles hash sums. All modes but "none" require metadata.`,
+			Help: `Choose how chunker handles hash sums.
+
+All modes but "none" require metadata.`,
 			Examples: []fs.OptionExample{{
 				Value: "none",
-				Help:  `Pass any hash supported by wrapped remote for non-chunked files, return nothing otherwise`,
+				Help: `Pass any hash supported by wrapped remote for non-chunked files.
+Return nothing otherwise.`,
 			}, {
 				Value: "md5",
-				Help:  `MD5 for composite files`,
+				Help:  `MD5 for composite files.`,
 			}, {
 				Value: "sha1",
-				Help:  `SHA1 for composite files`,
+				Help:  `SHA1 for composite files.`,
 			}, {
 				Value: "md5all",
-				Help:  `MD5 for all files`,
+				Help:  `MD5 for all files.`,
 			}, {
 				Value: "sha1all",
-				Help:  `SHA1 for all files`,
+				Help:  `SHA1 for all files.`,
 			}, {
 				Value: "md5quick",
-				Help:  `Copying a file to chunker will request MD5 from the source falling back to SHA1 if unsupported`,
+				Help: `Copying a file to chunker will request MD5 from the source.
+Falling back to SHA1 if unsupported.`,
 			}, {
 				Value: "sha1quick",
-				Help:  `Similar to "md5quick" but prefers SHA1 over MD5`,
+				Help:  `Similar to "md5quick" but prefers SHA1 over MD5.`,
 			}},
 		}, {
 			Name:     "fail_hard",
diff --git a/backend/compress/compress.go b/backend/compress/compress.go
index e5f9451c6..6fc04a8bd 100644
--- a/backend/compress/compress.go
+++ b/backend/compress/compress.go
@@ -83,23 +83,23 @@ func init() {
 			Name: "level",
 			Help: `GZIP compression level (-2 to 9).
 
-			Generally -1 (default, equivalent to 5) is recommended.
-			Levels 1 to 9 increase compression at the cost of speed. Going past 6 
-			generally offers very little return.
-			
-			Level -2 uses Huffmann encoding only. Only use if you know what you
-			are doing.
-			Level 0 turns off compression.`,
+Generally -1 (default, equivalent to 5) is recommended.
+Levels 1 to 9 increase compression at the cost of speed. Going past 6
+generally offers very little return.
+
+Level -2 uses Huffmann encoding only. Only use if you know what you
+are doing.
+Level 0 turns off compression.`,
 			Default:  sgzip.DefaultCompression,
 			Advanced: true,
 		}, {
 			Name: "ram_cache_limit",
 			Help: `Some remotes don't allow the upload of files with unknown size.
-				   In this case the compressed file will need to be cached to determine
-				   it's size.
-				   
-				   Files smaller than this limit will be cached in RAM, files larger than 
-				   this limit will be cached on disk.`,
+In this case the compressed file will need to be cached to determine
+its size.
+
+Files smaller than this limit will be cached in RAM, files larger than
+this limit will be cached on disk.`,
 			Default:  fs.SizeSuffix(20 * 1024 * 1024),
 			Advanced: true,
 		}},
diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go
index 2c40a1835..57f2858bd 100644
--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -30,7 +30,7 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "remote",
-			Help:     "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Help:     "Remote to encrypt/decrypt.\n\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
 			Required: true,
 		}, {
 			Name:    "filename_encryption",
@@ -39,13 +39,13 @@ func init() {
 			Examples: []fs.OptionExample{
 				{
 					Value: "standard",
-					Help:  "Encrypt the filenames. See the docs for the details.",
+					Help:  "Encrypt the filenames.\nSee the docs for the details.",
 				}, {
 					Value: "obfuscate",
 					Help:  "Very simple filename obfuscation.",
 				}, {
 					Value: "off",
-					Help:  "Don't encrypt the file names.  Adds a \".bin\" extension only.",
+					Help:  "Don't encrypt the file names.\nAdds a \".bin\" extension only.",
 				},
 			},
 		}, {
@@ -71,7 +71,7 @@ NB If filename_encryption is "off" then this option will do nothing.`,
 			Required:   true,
 		}, {
 			Name:       "password2",
-			Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
+			Help:       "Password or pass phrase for salt.\n\nOptional but recommended.\nShould be different to the previous password.",
 			IsPassword: true,
 		}, {
 			Name:    "server_side_across_configs",
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 12282889e..5be87962b 100755
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -270,7 +270,7 @@ func init() {
 			}},
 		}, {
 			Name: "root_folder_id",
-			Help: `ID of the root folder
+			Help: `ID of the root folder.
 Leave blank normally.
 
 Fill in to access "Computers" folders (see docs), or for rclone to use
@@ -278,15 +278,15 @@ a non root folder as its starting point.
 `,
 		}, {
 			Name: "service_account_file",
-			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
+			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
 		}, {
 			Name:     "service_account_credentials",
-			Help:     "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+			Help:     "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
 			Hide:     fs.OptionHideConfigurator,
 			Advanced: true,
 		}, {
 			Name:     "team_drive",
-			Help:     "ID of the Shared Drive (Team Drive)",
+			Help:     "ID of the Shared Drive (Team Drive).",
 			Hide:     fs.OptionHideConfigurator,
 			Advanced: true,
 		}, {
@@ -297,12 +297,12 @@ a non root folder as its starting point.
 		}, {
 			Name:     "use_trash",
 			Default:  true,
-			Help:     "Send files to the trash instead of deleting permanently.\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
+			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
 			Advanced: true,
 		}, {
 			Name:     "skip_gdocs",
 			Default:  false,
-			Help:     "Skip google documents in all listings.\nIf given, gdocs practically become invisible to rclone.",
+			Help:     "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
 			Advanced: true,
 		}, {
 			Name:    "skip_checksum_gphotos",
@@ -335,7 +335,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
 		}, {
 			Name:     "trashed_only",
 			Default:  false,
-			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
+			Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
 			Advanced: true,
 		}, {
 			Name:     "starred_only",
@@ -345,7 +345,7 @@ commands (copy, sync, etc.), and with all other commands too.`,
 		}, {
 			Name:     "formats",
 			Default:  "",
-			Help:     "Deprecated: see export_formats",
+			Help:     "Deprecated: See export_formats.",
 			Advanced: true,
 			Hide:     fs.OptionHideConfigurator,
 		}, {
@@ -361,12 +361,12 @@ commands (copy, sync, etc.), and with all other commands too.`,
 		}, {
 			Name:     "allow_import_name_change",
 			Default:  false,
-			Help:     "Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.",
+			Help:     "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
 			Advanced: true,
 		}, {
 			Name:    "use_created_date",
 			Default: false,
-			Help: `Use file created date instead of modified date.,
+			Help: `Use file created date instead of modified date.
 
 Useful when downloading data and you want the creation date used in
 place of the last modified date.
@@ -400,7 +400,7 @@ date is used.`,
 		}, {
 			Name:     "list_chunk",
 			Default:  1000,
-			Help:     "Size of listing chunk 100-1000. 0 to disable.",
+			Help:     "Size of listing chunk 100-1000, 0 to disable.",
 			Advanced: true,
 		}, {
 			Name:     "impersonate",
@@ -410,17 +410,19 @@ date is used.`,
 		}, {
 			Name:    "alternate_export",
 			Default: false,
-			Help:    "Deprecated: no longer needed",
+			Help:    "Deprecated: No longer needed.",
 			Hide:    fs.OptionHideBoth,
 		}, {
 			Name:     "upload_cutoff",
 			Default:  defaultChunkSize,
-			Help:     "Cutoff for switching to chunked upload",
+			Help:     "Cutoff for switching to chunked upload.",
 			Advanced: true,
 		}, {
 			Name:    "chunk_size",
 			Default: defaultChunkSize,
-			Help: `Upload chunk size. Must a power of 2 >= 256k.
+			Help: `Upload chunk size.
+
+Must be a power of 2 >= 256k.
 
 Making this larger will improve performance, but note that each chunk
 is buffered in memory one per transfer.
@@ -490,7 +492,7 @@ configurations.`,
 		}, {
 			Name:    "disable_http2",
 			Default: true,
-			Help: `Disable drive using http2
+			Help: `Disable drive using http2.
 
 There is currently an unsolved issue with the google drive backend and
 HTTP/2.  HTTP/2 is therefore disabled by default for the drive backend
@@ -504,7 +506,7 @@ See: https://github.com/rclone/rclone/issues/3631
 		}, {
 			Name:    "stop_on_upload_limit",
 			Default: false,
-			Help: `Make upload limit errors be fatal
+			Help: `Make upload limit errors be fatal.
 
 At the time of writing it is only possible to upload 750 GiB of data to
 Google Drive a day (this is an undocumented limit). When this limit is
@@ -521,7 +523,7 @@ See: https://github.com/rclone/rclone/issues/3857
 		}, {
 			Name:    "stop_on_download_limit",
 			Default: false,
-			Help: `Make download limit errors be fatal
+			Help: `Make download limit errors be fatal.
 
 At the time of writing it is only possible to download 10 TiB of data from
 Google Drive a day (this is an undocumented limit). When this limit is
@@ -535,7 +537,7 @@ Google don't document so it may break in the future.
 			Advanced: true,
 		}, {
 			Name: "skip_shortcuts",
-			Help: `If set skip shortcut files
+			Help: `If set skip shortcut files.
 
 Normally rclone dereferences shortcut files making them appear as if
 they are the original file (see [the shortcuts section](#shortcuts)).
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index 8fd80f835..b8c69a3be 100755
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -154,7 +154,7 @@ func init() {
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "chunk_size",
-			Help: fmt.Sprintf(`Upload chunk size. (< %v).
+			Help: fmt.Sprintf(`Upload chunk size (< %v).
 
 Any files larger than this will be uploaded in chunks of this size.
 
@@ -252,7 +252,7 @@ maximise throughput.
 			Advanced: true,
 		}, {
 			Name: "batch_timeout",
-			Help: `Max time to allow an idle upload batch before uploading
+			Help: `Max time to allow an idle upload batch before uploading.
 
 If an upload batch is idle for more than this long then it will be
 uploaded.
diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go
index ad9311c23..92a41432c 100644
--- a/backend/fichier/fichier.go
+++ b/backend/fichier/fichier.go
@@ -37,21 +37,21 @@ func init() {
 		Description: "1Fichier",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
+			Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
 			Name: "api_key",
 		}, {
-			Help:     "If you want to download a shared folder, add this parameter",
+			Help:     "If you want to download a shared folder, add this parameter.",
 			Name:     "shared_folder",
 			Required: false,
 			Advanced: true,
 		}, {
-			Help:       "If you want to download a shared file that is password protected, add this parameter",
+			Help:       "If you want to download a shared file that is password protected, add this parameter.",
 			Name:       "file_password",
 			Required:   false,
 			Advanced:   true,
 			IsPassword: true,
 		}, {
-			Help:       "If you want to list the files in a shared folder that is password protected, add this parameter",
+			Help:       "If you want to list the files in a shared folder that is password protected, add this parameter.",
 			Name:       "folder_password",
 			Required:   false,
 			Advanced:   true,
diff --git a/backend/filefabric/filefabric.go b/backend/filefabric/filefabric.go
index b5132a241..b4b5e8487 100644
--- a/backend/filefabric/filefabric.go
+++ b/backend/filefabric/filefabric.go
@@ -65,7 +65,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "url",
-			Help:     "URL of the Enterprise File Fabric to connect to",
+			Help:     "URL of the Enterprise File Fabric to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://storagemadeeasy.com",
@@ -79,14 +79,15 @@ func init() {
 			}},
 		}, {
 			Name: "root_folder_id",
-			Help: `ID of the root folder
+			Help: `ID of the root folder.
+
 Leave blank normally.
 
 Fill in to make rclone start with directory of a given ID.
 `,
 		}, {
 			Name: "permanent_token",
-			Help: `Permanent Authentication Token
+			Help: `Permanent Authentication Token.
 
 A Permanent Authentication Token can be created in the Enterprise File
 Fabric, on the users Dashboard under Security, there is an entry
@@ -99,7 +100,7 @@ For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
 `,
 		}, {
 			Name: "token",
-			Help: `Session Token
+			Help: `Session Token.
 
 This is a session token which rclone caches in the config file. It is
 usually valid for 1 hour.
@@ -109,14 +110,14 @@ Don't set this value - rclone will set it automatically.
 			Advanced: true,
 		}, {
 			Name: "token_expiry",
-			Help: `Token expiry time
+			Help: `Token expiry time.
 
 Don't set this value - rclone will set it automatically.
 `,
 			Advanced: true,
 		}, {
 			Name: "version",
-			Help: `Version read from the file fabric
+			Help: `Version read from the file fabric.
 
 Don't set this value - rclone will set it automatically.
 `,
diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go
index ee021a308..f7a5992de 100644
--- a/backend/ftp/ftp.go
+++ b/backend/ftp/ftp.go
@@ -48,7 +48,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "host",
-			Help:     "FTP host to connect to",
+			Help:     "FTP host to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "ftp.example.com",
@@ -56,18 +56,19 @@ func init() {
 			}},
 		}, {
 			Name: "user",
-			Help: "FTP username, leave blank for current username, " + currentUser,
+			Help: "FTP username, leave blank for current username, " + currentUser + ".",
 		}, {
 			Name: "port",
-			Help: "FTP port, leave blank to use default (21)",
+			Help: "FTP port, leave blank to use default (21).",
 		}, {
 			Name:       "pass",
-			Help:       "FTP password",
+			Help:       "FTP password.",
 			IsPassword: true,
 			Required:   true,
 		}, {
 			Name: "tls",
-			Help: `Use Implicit FTPS (FTP over TLS)
+			Help: `Use Implicit FTPS (FTP over TLS).
+
 When using implicit FTP over TLS the client connects using TLS
 right from the start which breaks compatibility with
 non-TLS-aware servers. This is usually served over port 990 rather
@@ -75,35 +76,36 @@ than port 21. Cannot be used in combination with explicit FTP.`,
 			Default: false,
 		}, {
 			Name: "explicit_tls",
-			Help: `Use Explicit FTPS (FTP over TLS)
+			Help: `Use Explicit FTPS (FTP over TLS).
+
 When using explicit FTP over TLS the client explicitly requests
 security from the server in order to upgrade a plain text connection
 to an encrypted one. Cannot be used in combination with implicit FTP.`,
 			Default: false,
 		}, {
 			Name:     "concurrency",
-			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
+			Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited.",
 			Default:  0,
 			Advanced: true,
 		}, {
 			Name:     "no_check_certificate",
-			Help:     "Do not verify the TLS certificate of the server",
+			Help:     "Do not verify the TLS certificate of the server.",
 			Default:  false,
 			Advanced: true,
 		}, {
 			Name:     "disable_epsv",
-			Help:     "Disable using EPSV even if server advertises support",
+			Help:     "Disable using EPSV even if server advertises support.",
 			Default:  false,
 			Advanced: true,
 		}, {
 			Name:     "disable_mlsd",
-			Help:     "Disable using MLSD even if server advertises support",
+			Help:     "Disable using MLSD even if server advertises support.",
 			Default:  false,
 			Advanced: true,
 		}, {
 			Name:    "idle_timeout",
 			Default: fs.Duration(60 * time.Second),
-			Help: `Max time before closing idle connections
+			Help: `Max time before closing idle connections.
 
 If no connections have been returned to the connection pool in the time
 given, rclone will empty the connection pool.
diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go
index 7b7cc5297..2e8b5125c 100644
--- a/backend/googlecloudstorage/googlecloudstorage.go
+++ b/backend/googlecloudstorage/googlecloudstorage.go
@@ -89,58 +89,58 @@ func init() {
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
 			Name: "project_number",
-			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
+			Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
 		}, {
 			Name: "service_account_file",
-			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
+			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login." + env.ShellExpandHelp,
 		}, {
 			Name: "service_account_credentials",
-			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+			Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
 			Hide: fs.OptionHideBoth,
 		}, {
 			Name:    "anonymous",
-			Help:    "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
+			Help:    "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
 			Default: false,
 		}, {
 			Name: "object_acl",
 			Help: "Access Control List for new objects.",
 			Examples: []fs.OptionExample{{
 				Value: "authenticatedRead",
-				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
+				Help:  "Object owner gets OWNER access.\nAll Authenticated Users get READER access.",
 			}, {
 				Value: "bucketOwnerFullControl",
-				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
+				Help:  "Object owner gets OWNER access.\nProject team owners get OWNER access.",
 			}, {
 				Value: "bucketOwnerRead",
-				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
+				Help:  "Object owner gets OWNER access.\nProject team owners get READER access.",
 			}, {
 				Value: "private",
-				Help:  "Object owner gets OWNER access [default if left blank].",
+				Help:  "Object owner gets OWNER access.\nDefault if left blank.",
 			}, {
 				Value: "projectPrivate",
-				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
+				Help:  "Object owner gets OWNER access.\nProject team members get access according to their roles.",
 			}, {
 				Value: "publicRead",
-				Help:  "Object owner gets OWNER access, and all Users get READER access.",
+				Help:  "Object owner gets OWNER access.\nAll Users get READER access.",
 			}},
 		}, {
 			Name: "bucket_acl",
 			Help: "Access Control List for new buckets.",
 			Examples: []fs.OptionExample{{
 				Value: "authenticatedRead",
-				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
+				Help:  "Project team owners get OWNER access.\nAll Authenticated Users get READER access.",
 			}, {
 				Value: "private",
-				Help:  "Project team owners get OWNER access [default if left blank].",
+				Help:  "Project team owners get OWNER access.\nDefault if left blank.",
 			}, {
 				Value: "projectPrivate",
 				Help:  "Project team members get access according to their roles.",
 			}, {
 				Value: "publicRead",
-				Help:  "Project team owners get OWNER access, and all Users get READER access.",
+				Help:  "Project team owners get OWNER access.\nAll Users get READER access.",
 			}, {
 				Value: "publicReadWrite",
-				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
+				Help:  "Project team owners get OWNER access.\nAll Users get WRITER access.",
 			}},
 		}, {
 			Name: "bucket_policy_only",
@@ -163,64 +163,64 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 			Help: "Location for the newly created buckets.",
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  "Empty for default location (US).",
+				Help:  "Empty for default location (US)",
 			}, {
 				Value: "asia",
-				Help:  "Multi-regional location for Asia.",
+				Help:  "Multi-regional location for Asia",
 			}, {
 				Value: "eu",
-				Help:  "Multi-regional location for Europe.",
+				Help:  "Multi-regional location for Europe",
 			}, {
 				Value: "us",
-				Help:  "Multi-regional location for United States.",
+				Help:  "Multi-regional location for United States",
 			}, {
 				Value: "asia-east1",
-				Help:  "Taiwan.",
+				Help:  "Taiwan",
 			}, {
 				Value: "asia-east2",
-				Help:  "Hong Kong.",
+				Help:  "Hong Kong",
 			}, {
 				Value: "asia-northeast1",
-				Help:  "Tokyo.",
+				Help:  "Tokyo",
 			}, {
 				Value: "asia-south1",
-				Help:  "Mumbai.",
+				Help:  "Mumbai",
 			}, {
 				Value: "asia-southeast1",
-				Help:  "Singapore.",
+				Help:  "Singapore",
 			}, {
 				Value: "australia-southeast1",
-				Help:  "Sydney.",
+				Help:  "Sydney",
 			}, {
 				Value: "europe-north1",
-				Help:  "Finland.",
+				Help:  "Finland",
 			}, {
 				Value: "europe-west1",
-				Help:  "Belgium.",
+				Help:  "Belgium",
 			}, {
 				Value: "europe-west2",
-				Help:  "London.",
+				Help:  "London",
 			}, {
 				Value: "europe-west3",
-				Help:  "Frankfurt.",
+				Help:  "Frankfurt",
 			}, {
 				Value: "europe-west4",
-				Help:  "Netherlands.",
+				Help:  "Netherlands",
 			}, {
 				Value: "us-central1",
-				Help:  "Iowa.",
+				Help:  "Iowa",
 			}, {
 				Value: "us-east1",
-				Help:  "South Carolina.",
+				Help:  "South Carolina",
 			}, {
 				Value: "us-east4",
-				Help:  "Northern Virginia.",
+				Help:  "Northern Virginia",
 			}, {
 				Value: "us-west1",
-				Help:  "Oregon.",
+				Help:  "Oregon",
 			}, {
 				Value: "us-west2",
-				Help:  "California.",
+				Help:  "California",
 			}},
 		}, {
 			Name: "storage_class",
diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go
index a8b704ba7..8c557fe13 100644
--- a/backend/googlephotos/googlephotos.go
+++ b/backend/googlephotos/googlephotos.go
@@ -132,7 +132,7 @@ you want to read the media.`,
 		}, {
 			Name:     "start_year",
 			Default:  2000,
-			Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year`,
+			Help:     `Year limits the photos to be downloaded to those which are uploaded after the given year.`,
 			Advanced: true,
 		}, {
 			Name:    "include_archived",
diff --git a/backend/hdfs/hdfs.go b/backend/hdfs/hdfs.go
index 878217c65..c19971095 100644
--- a/backend/hdfs/hdfs.go
+++ b/backend/hdfs/hdfs.go
@@ -19,23 +19,23 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "namenode",
-			Help:     "hadoop name node and port",
+			Help:     "Hadoop name node and port.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "namenode:8020",
-				Help:  "Connect to host namenode at port 8020",
+				Help:  "Connect to host namenode at port 8020.",
 			}},
 		}, {
 			Name:     "username",
-			Help:     "hadoop user name",
+			Help:     "Hadoop user name.",
 			Required: false,
 			Examples: []fs.OptionExample{{
 				Value: "root",
-				Help:  "Connect to hdfs as root",
+				Help:  "Connect to hdfs as root.",
 			}},
 		}, {
 			Name: "service_principal_name",
-			Help: `Kerberos service principal name for the namenode
+			Help: `Kerberos service principal name for the namenode.
 
 Enables KERBEROS authentication. Specifies the Service Principal Name
 (SERVICE/FQDN) for the namenode.`,
@@ -47,7 +47,7 @@ Enables KERBEROS authentication. Specifies the Service Principal Name
 			Advanced: true,
 		}, {
 			Name: "data_transfer_protection",
-			Help: `Kerberos data transfer protection: authentication|integrity|privacy
+			Help: `Kerberos data transfer protection: authentication|integrity|privacy.
 
 Specifies whether or not authentication, data signature integrity
 checks, and wire encryption is required when communicating the the
diff --git a/backend/http/http.go b/backend/http/http.go
index a5c367402..54bdd89e3 100644
--- a/backend/http/http.go
+++ b/backend/http/http.go
@@ -38,20 +38,20 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "url",
-			Help:     "URL of http host to connect to",
+			Help:     "URL of http host to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://example.com",
-				Help:  "Connect to example.com",
+				Help:  "Connect to example.com.",
 			}, {
 				Value: "https://user:pass@example.com",
-				Help:  "Connect to example.com using a username and password",
+				Help:  "Connect to example.com using a username and password.",
 			}},
 		}, {
 			Name: "headers",
-			Help: `Set HTTP headers for all transactions
+			Help: `Set HTTP headers for all transactions.
 
-Use this to set additional HTTP headers for all transactions
+Use this to set additional HTTP headers for all transactions.
 
 The input format is comma separated list of key,value pairs.  Standard
 [CSV encoding](https://godoc.org/encoding/csv) may be used.
@@ -64,7 +64,7 @@ You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'
 			Advanced: true,
 		}, {
 			Name: "no_slash",
-			Help: `Set this if the site doesn't end directories with /
+			Help: `Set this if the site doesn't end directories with /.
 
 Use this if your target website does not use / on the end of
 directories.
@@ -80,7 +80,7 @@ directories.`,
 			Advanced: true,
 		}, {
 			Name: "no_head",
-			Help: `Don't use HEAD requests to find file sizes in dir listing
+			Help: `Don't use HEAD requests to find file sizes in dir listing.
 
 If your site is being very slow to load then you can try this option.
 Normally rclone does a HEAD request for each potential file in a
diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go
index 7f6d1972d..64898d9f6 100644
--- a/backend/jottacloud/jottacloud.go
+++ b/backend/jottacloud/jottacloud.go
@@ -86,7 +86,7 @@ func init() {
 			Advanced: true,
 		}, {
 			Name:     "trashed_only",
-			Help:     "Only show files that are in the trash.\nThis will show trashed files in their original directory structure.",
+			Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
 			Default:  false,
 			Advanced: true,
 		}, {
@@ -122,15 +122,15 @@ func init() {
 func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
 	switch config.State {
 	case "":
-		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
+		return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type.`, []fs.OptionExample{{
 			Value: "standard",
-			Help:  "Standard authentication - use this if you're a normal Jottacloud user.",
+			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
 		}, {
 			Value: "legacy",
-			Help:  "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
+			Help:  "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
 		}, {
 			Value: "telia",
-			Help:  "Telia Cloud authentication - use this if you are using Telia Cloud.",
+			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
diff --git a/backend/koofr/koofr.go b/backend/koofr/koofr.go
index d2d6339c0..26dbcc9e7 100644
--- a/backend/koofr/koofr.go
+++ b/backend/koofr/koofr.go
@@ -32,29 +32,29 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "endpoint",
-			Help:     "The Koofr API endpoint to use",
+			Help:     "The Koofr API endpoint to use.",
 			Default:  "https://app.koofr.net",
 			Required: true,
 			Advanced: true,
 		}, {
 			Name:     "mountid",
-			Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
+			Help:     "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
 			Required: false,
 			Default:  "",
 			Advanced: true,
 		}, {
 			Name:     "setmtime",
-			Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
+			Help:     "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
 			Default:  true,
 			Required: true,
 			Advanced: true,
 		}, {
 			Name:     "user",
-			Help:     "Your Koofr user name",
+			Help:     "Your Koofr user name.",
 			Required: true,
 		}, {
 			Name:       "password",
-			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
+			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
 			IsPassword: true,
 			Required:   true,
 		}, {
diff --git a/backend/local/local.go b/backend/local/local.go
index c932d605c..90f3e9a3f 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -44,11 +44,11 @@ func init() {
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
 			Name:     "nounc",
-			Help:     "Disable UNC (long path names) conversion on Windows",
+			Help:     "Disable UNC (long path names) conversion on Windows.",
 			Advanced: runtime.GOOS != "windows",
 			Examples: []fs.OptionExample{{
 				Value: "true",
-				Help:  "Disables long file names",
+				Help:  "Disables long file names.",
 			}},
 		}, {
 			Name:     "copy_links",
@@ -59,7 +59,7 @@ func init() {
 			Advanced: true,
 		}, {
 			Name:     "links",
-			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
+			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
 			Default:  false,
 			NoPrefix: true,
 			ShortOpt: "l",
@@ -67,6 +67,7 @@ func init() {
 		}, {
 			Name: "skip_links",
 			Help: `Don't warn about skipped symlinks.
+
 This flag disables warning messages on skipped symlinks or junction
 points, as you explicitly acknowledge that they should be skipped.`,
 			Default:  false,
@@ -74,21 +75,21 @@ points, as you explicitly acknowledge that they should be skipped.`,
 			Advanced: true,
 		}, {
 			Name: "zero_size_links",
-			Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
+			Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
 
-Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
+Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
 
 - Windows
 - On some virtual filesystems (such ash LucidLink)
 - Android
 
-So rclone now always reads the link
+So rclone now always reads the link.
 `,
 			Default:  false,
 			Advanced: true,
 		}, {
 			Name: "unicode_normalization",
-			Help: `Apply unicode NFC normalization to paths and filenames
+			Help: `Apply unicode NFC normalization to paths and filenames.
 
 This flag can be used to normalize file names into unicode NFC form
 that are read from the local filesystem.
@@ -106,7 +107,7 @@ routine so this flag shouldn't normally be used.`,
 			Advanced: true,
 		}, {
 			Name: "no_check_updated",
-			Help: `Don't check to see if the files change during upload
+			Help: `Don't check to see if the files change during upload.
 
 Normally rclone checks the size and modification time of files as they
 are being uploaded and aborts with a message which starts "can't copy
@@ -152,7 +153,7 @@ to override the default choice.`,
 			Advanced: true,
 		}, {
 			Name: "case_insensitive",
-			Help: `Force the filesystem to report itself as case insensitive
+			Help: `Force the filesystem to report itself as case insensitive.
 
 Normally the local backend declares itself as case insensitive on
 Windows/macOS and case sensitive for everything else.  Use this flag
@@ -161,7 +162,7 @@ to override the default choice.`,
 			Advanced: true,
 		}, {
 			Name: "no_preallocate",
-			Help: `Disable preallocation of disk space for transferred files
+			Help: `Disable preallocation of disk space for transferred files.
 
 Preallocation of disk space helps prevent filesystem fragmentation.
 However, some virtual filesystem layers (such as Google Drive File
@@ -172,7 +173,7 @@ Use this flag to disable preallocation.`,
 			Advanced: true,
 		}, {
 			Name: "no_sparse",
-			Help: `Disable sparse files for multi-thread downloads
+			Help: `Disable sparse files for multi-thread downloads.
 
 On Windows platforms rclone will make sparse files when doing
 multi-thread downloads. This avoids long pauses on large files where
@@ -182,7 +183,7 @@ cause disk fragmentation and can be slow to work with.`,
 			Advanced: true,
 		}, {
 			Name: "no_set_modtime",
-			Help: `Disable setting modtime
+			Help: `Disable setting modtime.
 
 Normally rclone updates modification time of files after they are done
 uploading. This can cause permissions issues on Linux platforms when 
diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go
index 74eb73444..3ab0c0cfc 100644
--- a/backend/mailru/mailru.go
+++ b/backend/mailru/mailru.go
@@ -87,11 +87,11 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "user",
-			Help:     "User name (usually email)",
+			Help:     "User name (usually email).",
 			Required: true,
 		}, {
 			Name:       "pass",
-			Help:       "Password",
+			Help:       "Password.",
 			Required:   true,
 			IsPassword: true,
 		}, {
@@ -99,6 +99,7 @@ func init() {
 			Default:  true,
 			Advanced: false,
 			Help: `Skip full upload if there is another file with same data hash.
+
 This feature is called "speedup" or "put by hash". It is especially efficient
 in case of generally available files like popular books, video or audio clips,
 because files are searched by hash in all accounts of all mailru users.
@@ -119,6 +120,7 @@ streaming or partial uploads), it will not even try this optimization.`,
 			Default:  "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf",
 			Advanced: true,
 			Help: `Comma separated list of file name patterns eligible for speedup (put by hash).
+
 Patterns are case insensitive and can contain '*' or '?' meta characters.`,
 			Examples: []fs.OptionExample{{
 				Value: "",
@@ -137,8 +139,9 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
 			Name:     "speedup_max_disk",
 			Default:  fs.SizeSuffix(3 * 1024 * 1024 * 1024),
 			Advanced: true,
-			Help: `This option allows you to disable speedup (put by hash) for large files
-(because preliminary hashing can exhaust you RAM or disk space)`,
+			Help: `This option allows you to disable speedup (put by hash) for large files.
+
+This is because preliminary hashing can exhaust your RAM or disk space.`,
 			Examples: []fs.OptionExample{{
 				Value: "0",
 				Help:  "Completely disable speedup (put by hash).",
@@ -168,7 +171,7 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
 			Name:     "check_hash",
 			Default:  true,
 			Advanced: true,
-			Help:     "What should copy do if file checksum is mismatched or invalid",
+			Help:     "What should copy do if file checksum is mismatched or invalid.",
 			Examples: []fs.OptionExample{{
 				Value: "true",
 				Help:  "Fail with error.",
@@ -182,6 +185,7 @@ Patterns are case insensitive and can contain '*' or '?' meta characters.`,
 			Advanced: true,
 			Hide:     fs.OptionHideBoth,
 			Help: `HTTP user agent used internally by client.
+
 Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
 		}, {
 			Name:     "quirks",
@@ -189,6 +193,7 @@ Defaults to "rclone/VERSION" or "--user-agent" provided on command line.`,
 			Advanced: true,
 			Hide:     fs.OptionHideBoth,
 			Help: `Comma separated list of internal maintenance flags.
+
 This option must not be used by an ordinary user. It is intended only to
 facilitate remote troubleshooting of backend issues. Strict meaning of
 flags is not documented and not guaranteed to persist between releases.
diff --git a/backend/mega/mega.go b/backend/mega/mega.go
index d732d6559..269c15695 100644
--- a/backend/mega/mega.go
+++ b/backend/mega/mega.go
@@ -59,7 +59,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "user",
-			Help:     "User name",
+			Help:     "User name.",
 			Required: true,
 		}, {
 			Name:       "pass",
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 2cd47706c..48e1570e2 100755
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -129,12 +129,12 @@ Note that the chunks will be buffered into memory.`,
 			Advanced: true,
 		}, {
 			Name:     "drive_id",
-			Help:     "The ID of the drive to use",
+			Help:     "The ID of the drive to use.",
 			Default:  "",
 			Advanced: true,
 		}, {
 			Name:     "drive_type",
-			Help:     "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
+			Help:     "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
 			Default:  "",
 			Advanced: true,
 		}, {
@@ -165,7 +165,7 @@ fall back to normal copy (which will be slightly slower).`,
 		}, {
 			Name:    "no_versions",
 			Default: false,
-			Help: `Remove all versions on modifying operations
+			Help: `Remove all versions on modifying operations.
 
 Onedrive for business creates versions when rclone uploads new files
 overwriting an existing one and when it sets the modification time.
@@ -186,10 +186,10 @@ this flag there.
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "anonymous",
-				Help:  "Anyone with the link has access, without needing to sign in. This may include people outside of your organization. Anonymous link support may be disabled by an administrator.",
+				Help:  "Anyone with the link has access, without needing to sign in.\nThis may include people outside of your organization.\nAnonymous link support may be disabled by an administrator.",
 			}, {
 				Value: "organization",
-				Help:  "Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.",
+				Help:  "Anyone signed into your organization (tenant) can use the link to get access.\nOnly available in OneDrive for Business and SharePoint.",
 			}},
 		}, {
 			Name:     "link_type",
@@ -399,7 +399,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 			Help:  "Root Sharepoint site",
 		}, {
 			Value: "url",
-			Help:  "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
+			Help:  "Sharepoint site name or URL\nE.g. mysite or https://contoso.sharepoint.com/sites/mysite",
 		}, {
 			Value: "search",
 			Help:  "Search for a Sharepoint site",
@@ -411,7 +411,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 			Help:  "Type in SiteID (advanced)",
 		}, {
 			Value: "path",
-			Help:  "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
+			Help:  "Sharepoint server-relative path (advanced)\nE.g. /teams/hr",
 		}})
 	case "choose_type_done":
 		// Jump to next state according to config chosen
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 75bea04f7..00d83357e 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -42,7 +42,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "username",
-			Help:     "Username",
+			Help:     "Username.",
 			Required: true,
 		}, {
 			Name:       "password",
diff --git a/backend/qingstor/qingstor.go b/backend/qingstor/qingstor.go
index 7e06c036a..da9581edc 100644
--- a/backend/qingstor/qingstor.go
+++ b/backend/qingstor/qingstor.go
@@ -40,36 +40,36 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:    "env_auth",
-			Help:    "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
+			Help:    "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key are blank.",
 			Default: false,
 			Examples: []fs.OptionExample{{
 				Value: "false",
-				Help:  "Enter QingStor credentials in the next step",
+				Help:  "Enter QingStor credentials in the next step.",
 			}, {
 				Value: "true",
-				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
+				Help:  "Get QingStor credentials from the environment (env vars or IAM).",
 			}},
 		}, {
 			Name: "access_key_id",
-			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "endpoint",
-			Help: "Enter an endpoint URL to connection QingStor API.\nLeave blank will use the default value \"https://qingstor.com:443\"",
+			Help: "Enter an endpoint URL to connect to QingStor API.\n\nLeave blank to use the default value \"https://qingstor.com:443\".",
 		}, {
 			Name: "zone",
-			Help: "Zone to connect to.\nDefault is \"pek3a\".",
+			Help: "Zone to connect to.\n\nDefault is \"pek3a\".",
 			Examples: []fs.OptionExample{{
 				Value: "pek3a",
-				Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
+				Help:  "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.",
 			}, {
 				Value: "sh1a",
-				Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
+				Help:  "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.",
 			}, {
 				Value: "gd2a",
-				Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
+				Help:  "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.",
 			}},
 		}, {
 			Name:     "connection_retries",
@@ -78,7 +78,7 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "upload_cutoff",
-			Help: `Cutoff for switching to chunked upload
+			Help: `Cutoff for switching to chunked upload.
 
 Any files larger than this will be uploaded in chunks of chunk_size.
 The minimum is 0 and the maximum is 5 GiB.`,
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index f93d66633..aab0d8df8 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -109,21 +109,21 @@ func init() {
 			}},
 		}, {
 			Name:    "env_auth",
-			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
+			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\n\nOnly applies if access_key_id and secret_access_key are blank.",
 			Default: false,
 			Examples: []fs.OptionExample{{
 				Value: "false",
-				Help:  "Enter AWS credentials in the next step",
+				Help:  "Enter AWS credentials in the next step.",
 			}, {
 				Value: "true",
-				Help:  "Get AWS credentials from the environment (env vars or IAM)",
+				Help:  "Get AWS credentials from the environment (env vars or IAM).",
 			}},
 		}, {
 			Name: "access_key_id",
-			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
+			Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
+			Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			// References:
 			// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
@@ -136,76 +136,76 @@ func init() {
 				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
 			}, {
 				Value: "us-east-2",
-				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
+				Help:  "US East (Ohio) Region.\nNeeds location constraint us-east-2.",
 			}, {
 				Value: "us-west-1",
-				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
+				Help:  "US West (Northern California) Region.\nNeeds location constraint us-west-1.",
 			}, {
 				Value: "us-west-2",
-				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
+				Help:  "US West (Oregon) Region.\nNeeds location constraint us-west-2.",
 			}, {
 				Value: "ca-central-1",
-				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
+				Help:  "Canada (Central) Region.\nNeeds location constraint ca-central-1.",
 			}, {
 				Value: "eu-west-1",
-				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
+				Help:  "EU (Ireland) Region.\nNeeds location constraint EU or eu-west-1.",
 			}, {
 				Value: "eu-west-2",
-				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
+				Help:  "EU (London) Region.\nNeeds location constraint eu-west-2.",
 			}, {
 				Value: "eu-west-3",
-				Help:  "EU (Paris) Region\nNeeds location constraint eu-west-3.",
+				Help:  "EU (Paris) Region.\nNeeds location constraint eu-west-3.",
 			}, {
 				Value: "eu-north-1",
-				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
+				Help:  "EU (Stockholm) Region.\nNeeds location constraint eu-north-1.",
 			}, {
 				Value: "eu-south-1",
-				Help:  "EU (Milan) Region\nNeeds location constraint eu-south-1.",
+				Help:  "EU (Milan) Region.\nNeeds location constraint eu-south-1.",
 			}, {
 				Value: "eu-central-1",
-				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
+				Help:  "EU (Frankfurt) Region.\nNeeds location constraint eu-central-1.",
 			}, {
 				Value: "ap-southeast-1",
-				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
+				Help:  "Asia Pacific (Singapore) Region.\nNeeds location constraint ap-southeast-1.",
 			}, {
 				Value: "ap-southeast-2",
-				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
+				Help:  "Asia Pacific (Sydney) Region.\nNeeds location constraint ap-southeast-2.",
 			}, {
 				Value: "ap-northeast-1",
-				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
+				Help:  "Asia Pacific (Tokyo) Region.\nNeeds location constraint ap-northeast-1.",
 			}, {
 				Value: "ap-northeast-2",
-				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
+				Help:  "Asia Pacific (Seoul).\nNeeds location constraint ap-northeast-2.",
 			}, {
 				Value: "ap-northeast-3",
-				Help:  "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
+				Help:  "Asia Pacific (Osaka-Local).\nNeeds location constraint ap-northeast-3.",
 			}, {
 				Value: "ap-south-1",
-				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
+				Help:  "Asia Pacific (Mumbai).\nNeeds location constraint ap-south-1.",
 			}, {
 				Value: "ap-east-1",
-				Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
+				Help:  "Asia Pacific (Hong Kong) Region.\nNeeds location constraint ap-east-1.",
 			}, {
 				Value: "sa-east-1",
-				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
+				Help:  "South America (Sao Paulo) Region.\nNeeds location constraint sa-east-1.",
 			}, {
 				Value: "me-south-1",
-				Help:  "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
+				Help:  "Middle East (Bahrain) Region.\nNeeds location constraint me-south-1.",
 			}, {
 				Value: "af-south-1",
-				Help:  "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
+				Help:  "Africa (Cape Town) Region.\nNeeds location constraint af-south-1.",
 			}, {
 				Value: "cn-north-1",
-				Help:  "China (Beijing) Region\nNeeds location constraint cn-north-1.",
+				Help:  "China (Beijing) Region.\nNeeds location constraint cn-north-1.",
 			}, {
 				Value: "cn-northwest-1",
-				Help:  "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
+				Help:  "China (Ningxia) Region.\nNeeds location constraint cn-northwest-1.",
 			}, {
 				Value: "us-gov-east-1",
-				Help:  "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
+				Help:  "AWS GovCloud (US-East) Region.\nNeeds location constraint us-gov-east-1.",
 			}, {
 				Value: "us-gov-west-1",
-				Help:  "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
+				Help:  "AWS GovCloud (US) Region.\nNeeds location constraint us-gov-west-1.",
 			}},
 		}, {
 			Name:     "region",
@@ -220,22 +220,22 @@ func init() {
 			}},
 		}, {
 			Name:     "region",
-			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
+			Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
 			Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
+				Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
 			}, {
 				Value: "other-v2-signature",
-				Help:  "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.",
+				Help:  "Use this only if v4 signatures don't work.\nE.g. pre Jewel/v10 CEPH.",
 			}},
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
+			Help:     "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
 			Provider: "AWS",
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
+			Help:     "Endpoint for IBM COS S3 API.\n\nSpecify if using an IBM COS On Premise.",
 			Provider: "IBMCOS",
 			Examples: []fs.OptionExample{{
 				Value: "s3.us.cloud-object-storage.appdomain.cloud",
@@ -537,65 +537,65 @@ func init() {
 			Provider: "TencentCOS",
 			Examples: []fs.OptionExample{{
 				Value: "cos.ap-beijing.myqcloud.com",
-				Help:  "Beijing Region.",
+				Help:  "Beijing Region",
 			}, {
 				Value: "cos.ap-nanjing.myqcloud.com",
-				Help:  "Nanjing Region.",
+				Help:  "Nanjing Region",
 			}, {
 				Value: "cos.ap-shanghai.myqcloud.com",
-				Help:  "Shanghai Region.",
+				Help:  "Shanghai Region",
 			}, {
 				Value: "cos.ap-guangzhou.myqcloud.com",
-				Help:  "Guangzhou Region.",
+				Help:  "Guangzhou Region",
 			}, {
 				Value: "cos.ap-nanjing.myqcloud.com",
-				Help:  "Nanjing Region.",
+				Help:  "Nanjing Region",
 			}, {
 				Value: "cos.ap-chengdu.myqcloud.com",
-				Help:  "Chengdu Region.",
+				Help:  "Chengdu Region",
 			}, {
 				Value: "cos.ap-chongqing.myqcloud.com",
-				Help:  "Chongqing Region.",
+				Help:  "Chongqing Region",
 			}, {
 				Value: "cos.ap-hongkong.myqcloud.com",
-				Help:  "Hong Kong (China) Region.",
+				Help:  "Hong Kong (China) Region",
 			}, {
 				Value: "cos.ap-singapore.myqcloud.com",
-				Help:  "Singapore Region.",
+				Help:  "Singapore Region",
 			}, {
 				Value: "cos.ap-mumbai.myqcloud.com",
-				Help:  "Mumbai Region.",
+				Help:  "Mumbai Region",
 			}, {
 				Value: "cos.ap-seoul.myqcloud.com",
-				Help:  "Seoul Region.",
+				Help:  "Seoul Region",
 			}, {
 				Value: "cos.ap-bangkok.myqcloud.com",
-				Help:  "Bangkok Region.",
+				Help:  "Bangkok Region",
 			}, {
 				Value: "cos.ap-tokyo.myqcloud.com",
-				Help:  "Tokyo Region.",
+				Help:  "Tokyo Region",
 			}, {
 				Value: "cos.na-siliconvalley.myqcloud.com",
-				Help:  "Silicon Valley Region.",
+				Help:  "Silicon Valley Region",
 			}, {
 				Value: "cos.na-ashburn.myqcloud.com",
-				Help:  "Virginia Region.",
+				Help:  "Virginia Region",
 			}, {
 				Value: "cos.na-toronto.myqcloud.com",
-				Help:  "Toronto Region.",
+				Help:  "Toronto Region",
 			}, {
 				Value: "cos.eu-frankfurt.myqcloud.com",
-				Help:  "Frankfurt Region.",
+				Help:  "Frankfurt Region",
 			}, {
 				Value: "cos.eu-moscow.myqcloud.com",
-				Help:  "Moscow Region.",
+				Help:  "Moscow Region",
 			}, {
 				Value: "cos.accelerate.myqcloud.com",
-				Help:  "Use Tencent COS Accelerate Endpoint.",
+				Help:  "Use Tencent COS Accelerate Endpoint",
 			}},
 		}, {
 			Name:     "endpoint",
-			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
+			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
 			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
 			Examples: []fs.OptionExample{{
 				Value:    "objects-us-east-1.dream.io",
@@ -636,87 +636,87 @@ func init() {
 			}},
 		}, {
 			Name:     "location_constraint",
-			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
+			Help:     "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
 			Provider: "AWS",
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  "Empty for US Region, Northern Virginia, or Pacific Northwest.",
+				Help:  "Empty for US Region, Northern Virginia, or Pacific Northwest",
 			}, {
 				Value: "us-east-2",
-				Help:  "US East (Ohio) Region.",
+				Help:  "US East (Ohio) Region",
 			}, {
 				Value: "us-west-1",
-				Help:  "US West (Northern California) Region.",
+				Help:  "US West (Northern California) Region",
 			}, {
 				Value: "us-west-2",
-				Help:  "US West (Oregon) Region.",
+				Help:  "US West (Oregon) Region",
 			}, {
 				Value: "ca-central-1",
-				Help:  "Canada (Central) Region.",
+				Help:  "Canada (Central) Region",
 			}, {
 				Value: "eu-west-1",
-				Help:  "EU (Ireland) Region.",
+				Help:  "EU (Ireland) Region",
 			}, {
 				Value: "eu-west-2",
-				Help:  "EU (London) Region.",
+				Help:  "EU (London) Region",
 			}, {
 				Value: "eu-west-3",
-				Help:  "EU (Paris) Region.",
+				Help:  "EU (Paris) Region",
 			}, {
 				Value: "eu-north-1",
-				Help:  "EU (Stockholm) Region.",
+				Help:  "EU (Stockholm) Region",
 			}, {
 				Value: "eu-south-1",
-				Help:  "EU (Milan) Region.",
+				Help:  "EU (Milan) Region",
 			}, {
 				Value: "EU",
-				Help:  "EU Region.",
+				Help:  "EU Region",
 			}, {
 				Value: "ap-southeast-1",
-				Help:  "Asia Pacific (Singapore) Region.",
+				Help:  "Asia Pacific (Singapore) Region",
 			}, {
 				Value: "ap-southeast-2",
-				Help:  "Asia Pacific (Sydney) Region.",
+				Help:  "Asia Pacific (Sydney) Region",
 			}, {
 				Value: "ap-northeast-1",
-				Help:  "Asia Pacific (Tokyo) Region.",
+				Help:  "Asia Pacific (Tokyo) Region",
 			}, {
 				Value: "ap-northeast-2",
-				Help:  "Asia Pacific (Seoul) Region.",
+				Help:  "Asia Pacific (Seoul) Region",
 			}, {
 				Value: "ap-northeast-3",
-				Help:  "Asia Pacific (Osaka-Local) Region.",
+				Help:  "Asia Pacific (Osaka-Local) Region",
 			}, {
 				Value: "ap-south-1",
-				Help:  "Asia Pacific (Mumbai) Region.",
+				Help:  "Asia Pacific (Mumbai) Region",
 			}, {
 				Value: "ap-east-1",
-				Help:  "Asia Pacific (Hong Kong) Region.",
+				Help:  "Asia Pacific (Hong Kong) Region",
 			}, {
 				Value: "sa-east-1",
-				Help:  "South America (Sao Paulo) Region.",
+				Help:  "South America (Sao Paulo) Region",
 			}, {
 				Value: "me-south-1",
-				Help:  "Middle East (Bahrain) Region.",
+				Help:  "Middle East (Bahrain) Region",
 			}, {
 				Value: "af-south-1",
-				Help:  "Africa (Cape Town) Region.",
+				Help:  "Africa (Cape Town) Region",
 			}, {
 				Value: "cn-north-1",
 				Help:  "China (Beijing) Region",
 			}, {
 				Value: "cn-northwest-1",
-				Help:  "China (Ningxia) Region.",
+				Help:  "China (Ningxia) Region",
 			}, {
 				Value: "us-gov-east-1",
-				Help:  "AWS GovCloud (US-East) Region.",
+				Help:  "AWS GovCloud (US-East) Region",
 			}, {
 				Value: "us-gov-west-1",
-				Help:  "AWS GovCloud (US) Region.",
+				Help:  "AWS GovCloud (US) Region",
 			}},
 		}, {
 			Name:     "location_constraint",
-			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
+			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
 			Provider: "IBMCOS",
 			Examples: []fs.OptionExample{{
 				Value: "us-standard",
@@ -817,7 +817,7 @@ func init() {
 			}},
 		}, {
 			Name:     "location_constraint",
-			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
+			Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
 			Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
 		}, {
 			Name: "acl",
@@ -831,27 +831,27 @@ Note that this ACL is applied when server-side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.`,
 			Examples: []fs.OptionExample{{
 				Value:    "default",
-				Help:     "Owner gets Full_CONTROL. No one else has access rights (default).",
+				Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
 				Provider: "TencentCOS",
 			}, {
 				Value:    "private",
-				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
+				Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
 				Provider: "!IBMCOS,TencentCOS",
 			}, {
 				Value:    "public-read",
-				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
+				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
 				Provider: "!IBMCOS",
 			}, {
 				Value:    "public-read-write",
-				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
+				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
 				Provider: "!IBMCOS",
 			}, {
 				Value:    "authenticated-read",
-				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
+				Help:     "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
 				Provider: "!IBMCOS",
 			}, {
 				Value:    "bucket-owner-read",
-				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
+				Help:     "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
 				Provider: "!IBMCOS",
 			}, {
 				Value:    "bucket-owner-full-control",
@@ -859,19 +859,19 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
 				Provider: "!IBMCOS",
 			}, {
 				Value:    "private",
-				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
+				Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
 				Provider: "IBMCOS",
 			}, {
 				Value:    "public-read",
-				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
+				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.",
 				Provider: "IBMCOS",
 			}, {
 				Value:    "public-read-write",
-				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
+				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nThis acl is available on IBM Cloud (Infra), On-Premise IBM COS.",
 				Provider: "IBMCOS",
 			}, {
 				Value:    "authenticated-read",
-				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
+				Help:     "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.\nNot supported on Buckets.\nThis acl is available on IBM Cloud (Infra) and On-Premise IBM COS.",
 				Provider: "IBMCOS",
 			}},
 		}, {
@@ -885,16 +885,16 @@ isn't set then "acl" is used instead.`,
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "private",
-				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
+				Help:  "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
 			}, {
 				Value: "public-read",
-				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
+				Help:  "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
 			}, {
 				Value: "public-read-write",
-				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
+				Help:  "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
 			}, {
 				Value: "authenticated-read",
-				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
+				Help:  "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
 			}},
 		}, {
 			Name:     "requester_pays",
@@ -1002,10 +1002,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 				Help:  "Standard storage class",
 			}, {
 				Value: "GLACIER",
-				Help:  "Archive storage mode.",
+				Help:  "Archive storage mode",
 			}, {
 				Value: "STANDARD_IA",
-				Help:  "Infrequent access storage mode.",
+				Help:  "Infrequent access storage mode",
 			}},
 		}, {
 			// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -1020,10 +1020,10 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 				Help:  "Standard storage class",
 			}, {
 				Value: "ARCHIVE",
-				Help:  "Archive storage mode.",
+				Help:  "Archive storage mode",
 			}, {
 				Value: "STANDARD_IA",
-				Help:  "Infrequent access storage mode.",
+				Help:  "Infrequent access storage mode",
 			}},
 		}, {
 			// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
@@ -1032,17 +1032,17 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 			Provider: "Scaleway",
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  "Default",
+				Help:  "Default.",
 			}, {
 				Value: "STANDARD",
-				Help:  "The Standard class for any upload; suitable for on-demand content like streaming or CDN.",
+				Help:  "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.",
 			}, {
 				Value: "GLACIER",
-				Help:  "Archived storage; prices are lower, but it needs to be restored first to be accessed.",
+				Help:  "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
 			}},
 		}, {
 			Name: "upload_cutoff",
-			Help: `Cutoff for switching to chunked upload
+			Help: `Cutoff for switching to chunked upload.
 
 Any files larger than this will be uploaded in chunks of chunk_size.
 The minimum is 0 and the maximum is 5 GiB.`,
@@ -1090,7 +1090,7 @@ large file of a known size to stay below this number of chunks limit.
 			Advanced: true,
 		}, {
 			Name: "copy_cutoff",
-			Help: `Cutoff for switching to multipart copy
+			Help: `Cutoff for switching to multipart copy.
 
 Any files larger than this that need to be server-side copied will be
 copied in chunks of this size.
@@ -1100,7 +1100,7 @@ The minimum is 0 and the maximum is 5 GiB.`,
 			Advanced: true,
 		}, {
 			Name: "disable_checksum",
-			Help: `Don't store MD5 checksum with object metadata
+			Help: `Don't store MD5 checksum with object metadata.
 
 Normally rclone will calculate the MD5 checksum of the input before
 uploading it so it can add it to metadata on the object. This is great
@@ -1110,7 +1110,7 @@ to start uploading.`,
 			Advanced: true,
 		}, {
 			Name: "shared_credentials_file",
-			Help: `Path to the shared credentials file
+			Help: `Path to the shared credentials file.
 
 If env_auth = true then rclone can use a shared credentials file.
 
@@ -1124,7 +1124,7 @@ it will default to the current user's home directory.
 			Advanced: true,
 		}, {
 			Name: "profile",
-			Help: `Profile to use in the shared credentials file
+			Help: `Profile to use in the shared credentials file.
 
 If env_auth = true then rclone can use a shared credentials file. This
 variable controls which profile is used in that file.
@@ -1135,7 +1135,7 @@ If empty it will default to the environment variable "AWS_PROFILE" or
 			Advanced: true,
 		}, {
 			Name:     "session_token",
-			Help:     "An AWS session token",
+			Help:     "An AWS session token.",
 			Advanced: true,
 		}, {
 			Name: "upload_concurrency",
@@ -1205,7 +1205,7 @@ In Ceph, this can be increased with the "rgw list buckets max chunk" option.
 			Advanced: true,
 		}, {
 			Name: "no_check_bucket",
-			Help: `If set, don't attempt to check the bucket exists or create it
+			Help: `If set, don't attempt to check the bucket exists or create it.
 
 This can be useful when trying to minimise the number of transactions
 rclone does if you know the bucket exists already.
@@ -1218,7 +1218,7 @@ due to a bug.
 			Advanced: true,
 		}, {
 			Name: "no_head",
-			Help: `If set, don't HEAD uploaded objects to check integrity
+			Help: `If set, don't HEAD uploaded objects to check integrity.
 
 This can be useful when trying to minimise the number of transactions
 rclone does.
@@ -1276,6 +1276,7 @@ very small even with this flag.
 			Default:  memoryPoolFlushTime,
 			Advanced: true,
 			Help: `How often internal memory buffer pools will be flushed.
+
 Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
 This option controls how often unused buffers will be removed from the pool.`,
 		}, {
@@ -1287,7 +1288,7 @@ This option controls how often unused buffers will be removed from the pool.`,
 			Name:     "disable_http2",
 			Default:  false,
 			Advanced: true,
-			Help: `Disable usage of http2 for S3 backends
+			Help: `Disable usage of http2 for S3 backends.
 
 There is currently an unsolved issue with the s3 (specifically minio) backend
 and HTTP/2.  HTTP/2 is enabled by default for the s3 backend but can be
diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go
index 3445c6e3b..e6100ab00 100644
--- a/backend/seafile/seafile.go
+++ b/backend/seafile/seafile.go
@@ -60,41 +60,41 @@ func init() {
 		Config:      Config,
 		Options: []fs.Option{{
 			Name:     configURL,
-			Help:     "URL of seafile host to connect to",
+			Help:     "URL of seafile host to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://cloud.seafile.com/",
-				Help:  "Connect to cloud.seafile.com",
+				Help:  "Connect to cloud.seafile.com.",
 			}},
 		}, {
 			Name:     configUser,
-			Help:     "User name (usually email address)",
+			Help:     "User name (usually email address).",
 			Required: true,
 		}, {
 			// Password is not required, it will be left blank for 2FA
 			Name:       configPassword,
-			Help:       "Password",
+			Help:       "Password.",
 			IsPassword: true,
 		}, {
 			Name:    config2FA,
-			Help:    "Two-factor authentication ('true' if the account has 2FA enabled)",
+			Help:    "Two-factor authentication ('true' if the account has 2FA enabled).",
 			Default: false,
 		}, {
 			Name: configLibrary,
-			Help: "Name of the library. Leave blank to access all non-encrypted libraries.",
+			Help: "Name of the library.\n\nLeave blank to access all non-encrypted libraries.",
 		}, {
 			Name:       configLibraryKey,
-			Help:       "Library password (for encrypted libraries only). Leave blank if you pass it through the command line.",
+			Help:       "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
 			IsPassword: true,
 		}, {
 			Name:     configCreateLibrary,
-			Help:     "Should rclone create a library if it doesn't exist",
+			Help:     "Should rclone create a library if it doesn't exist.",
 			Advanced: true,
 			Default:  false,
 		}, {
 			// Keep the authentication token after entering the 2FA code
 			Name: configAuthToken,
-			Help: "Authentication token",
+			Help: "Authentication token.",
 			Hide: fs.OptionHideBoth,
 		}, {
 			Name:     config.ConfigEncoding,
diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go
index 7b6e2ae3b..45031da78 100644
--- a/backend/sftp/sftp.go
+++ b/backend/sftp/sftp.go
@@ -56,28 +56,28 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "host",
-			Help:     "SSH host to connect to",
+			Help:     "SSH host to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "example.com",
-				Help:  "Connect to example.com",
+				Help:  "Connect to example.com.",
 			}},
 		}, {
 			Name: "user",
-			Help: "SSH username, leave blank for current username, " + currentUser,
+			Help: "SSH username, leave blank for current username, " + currentUser + ".",
 		}, {
 			Name: "port",
-			Help: "SSH port, leave blank to use default (22)",
+			Help: "SSH port, leave blank to use default (22).",
 		}, {
 			Name:       "pass",
 			Help:       "SSH password, leave blank to use ssh-agent.",
 			IsPassword: true,
 		}, {
 			Name: "key_pem",
-			Help: "Raw PEM-encoded private key, If specified, will override key_file parameter.",
+			Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
 		}, {
 			Name: "key_file",
-			Help: "Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
+			Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
 		}, {
 			Name: "key_file_pass",
 			Help: `The passphrase to decrypt the PEM-encoded private key file.
@@ -98,7 +98,7 @@ Set this value to enable server host key validation.` + env.ShellExpandHelp,
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "~/.ssh/known_hosts",
-				Help:  "Use OpenSSH's known_hosts file",
+				Help:  "Use OpenSSH's known_hosts file.",
 			}},
 		}, {
 			Name: "key_use_agent",
@@ -135,7 +135,7 @@ Those algorithms are insecure and may allow plaintext data to be recovered by an
 		}, {
 			Name:    "disable_hashcheck",
 			Default: false,
-			Help:    "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
+			Help:    "Disable the execution of SSH commands to determine if remote file hashing is available.\n\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
 		}, {
 			Name:    "ask_password",
 			Default: false,
@@ -170,12 +170,12 @@ Home directory can be found in a shared folder called "home"
 		}, {
 			Name:     "md5sum_command",
 			Default:  "",
-			Help:     "The command used to read md5 hashes. Leave blank for autodetect.",
+			Help:     "The command used to read md5 hashes.\n\nLeave blank for autodetect.",
 			Advanced: true,
 		}, {
 			Name:     "sha1sum_command",
 			Default:  "",
-			Help:     "The command used to read sha1 hashes. Leave blank for autodetect.",
+			Help:     "The command used to read sha1 hashes.\n\nLeave blank for autodetect.",
 			Advanced: true,
 		}, {
 			Name:     "skip_links",
@@ -197,7 +197,7 @@ The subsystem option is ignored when server_command is defined.`,
 		}, {
 			Name:    "use_fstat",
 			Default: false,
-			Help: `If set use fstat instead of stat
+			Help: `If set use fstat instead of stat.
 
 Some servers limit the amount of open files and calling Stat after opening
 the file will throw an error from the server. Setting this flag will call
@@ -211,7 +211,7 @@ any given time.
 		}, {
 			Name:    "disable_concurrent_reads",
 			Default: false,
-			Help: `If set don't use concurrent reads
+			Help: `If set don't use concurrent reads.
 
 Normally concurrent reads are safe to use and not using them will
 degrade performance, so this option is disabled by default.
@@ -230,7 +230,7 @@ If concurrent reads are disabled, the use_fstat option is ignored.
 		}, {
 			Name:    "disable_concurrent_writes",
 			Default: false,
-			Help: `If set don't use concurrent writes
+			Help: `If set don't use concurrent writes.
 
 Normally rclone uses concurrent writes to upload files. This improves
 the performance greatly, especially for distant servers.
@@ -241,7 +241,7 @@ This option disables concurrent writes should that be necessary.
 		}, {
 			Name:    "idle_timeout",
 			Default: fs.Duration(60 * time.Second),
-			Help: `Max time before closing idle connections
+			Help: `Max time before closing idle connections.
 
 If no connections have been returned to the connection pool in the time
 given, rclone will empty the connection pool.
diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go
index c8cbdf06c..62378d9f3 100644
--- a/backend/sharefile/sharefile.go
+++ b/backend/sharefile/sharefile.go
@@ -163,13 +163,13 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "root_folder_id",
-			Help: `ID of the root folder
+			Help: `ID of the root folder.
 
 Leave blank to access "Personal Folders".  You can use one of the
 standard values here or any folder ID (long hex number ID).`,
 			Examples: []fs.OptionExample{{
 				Value: "",
-				Help:  `Access the Personal Folders. (Default)`,
+				Help:  `Access the Personal Folders (default).`,
 			}, {
 				Value: "favorites",
 				Help:  "Access the Favorites folder.",
@@ -186,7 +186,9 @@ standard values here or any folder ID (long hex number ID).`,
 		}, {
 			Name:    "chunk_size",
 			Default: defaultChunkSize,
-			Help: `Upload chunk size. Must a power of 2 >= 256k.
+			Help: `Upload chunk size.
+
+Must be a power of 2 >= 256k.
 
 Making this larger will improve performance, but note that each chunk
 is buffered in memory one per transfer.
diff --git a/backend/sugarsync/sugarsync.go b/backend/sugarsync/sugarsync.go
index 306ad0037..35801090b 100644
--- a/backend/sugarsync/sugarsync.go
+++ b/backend/sugarsync/sugarsync.go
@@ -139,34 +139,34 @@ func init() {
 			Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
 		}, {
 			Name: "private_access_key",
-			Help: "Sugarsync Private Access Key\n\nLeave blank to use rclone's.",
+			Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
 		}, {
 			Name:    "hard_delete",
 			Help:    "Permanently delete files if true\notherwise put them in the deleted files.",
 			Default: false,
 		}, {
 			Name:     "refresh_token",
-			Help:     "Sugarsync refresh token\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     "authorization",
-			Help:     "Sugarsync authorization\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     "authorization_expiry",
-			Help:     "Sugarsync authorization expiry\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     "user",
-			Help:     "Sugarsync user\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     "root_id",
-			Help:     "Sugarsync root id\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     "deleted_id",
-			Help:     "Sugarsync deleted folder id\n\nLeave blank normally, will be auto configured by rclone.",
+			Help:     "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
 			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index 10d2bb8e3..0dcead728 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -84,10 +84,10 @@ func init() {
 			Examples: []fs.OptionExample{
 				{
 					Value: "false",
-					Help:  "Enter swift credentials in the next step",
+					Help:  "Enter swift credentials in the next step.",
 				}, {
 					Value: "true",
-					Help:  "Get swift credentials from environment vars. Leave other fields blank if using this.",
+					Help:  "Get swift credentials from environment vars.\nLeave other fields blank if using this.",
 				},
 			},
 		}, {
@@ -100,23 +100,23 @@ func init() {
 			Name: "auth",
 			Help: "Authentication URL for server (OS_AUTH_URL).",
 			Examples: []fs.OptionExample{{
-				Help:  "Rackspace US",
 				Value: "https://auth.api.rackspacecloud.com/v1.0",
+				Help:  "Rackspace US",
 			}, {
-				Help:  "Rackspace UK",
 				Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
+				Help:  "Rackspace UK",
 			}, {
-				Help:  "Rackspace v2",
 				Value: "https://identity.api.rackspacecloud.com/v2.0",
+				Help:  "Rackspace v2",
 			}, {
-				Help:  "Memset Memstore UK",
 				Value: "https://auth.storage.memset.com/v1.0",
+				Help:  "Memset Memstore UK",
 			}, {
-				Help:  "Memset Memstore UK v2",
 				Value: "https://auth.storage.memset.com/v2.0",
+				Help:  "Memset Memstore UK v2",
 			}, {
-				Help:  "OVH",
 				Value: "https://auth.cloud.ovh.net/v3",
+				Help:  "OVH",
 			}},
 		}, {
 			Name: "user_id",
@@ -126,57 +126,59 @@ func init() {
 			Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
 		}, {
 			Name: "tenant",
-			Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)",
+			Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
 		}, {
 			Name: "tenant_id",
-			Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)",
+			Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
 		}, {
 			Name: "tenant_domain",
-			Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)",
+			Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
 		}, {
 			Name: "region",
-			Help: "Region name - optional (OS_REGION_NAME)",
+			Help: "Region name - optional (OS_REGION_NAME).",
 		}, {
 			Name: "storage_url",
-			Help: "Storage URL - optional (OS_STORAGE_URL)",
+			Help: "Storage URL - optional (OS_STORAGE_URL).",
 		}, {
 			Name: "auth_token",
-			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
+			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
 		}, {
 			Name: "application_credential_id",
-			Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
+			Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
 		}, {
 			Name: "application_credential_name",
-			Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
+			Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
 		}, {
 			Name: "application_credential_secret",
-			Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
+			Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
 		}, {
 			Name:    "auth_version",
-			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
+			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
 			Default: 0,
 		}, {
 			Name:    "endpoint_type",
-			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
+			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).",
 			Default: "public",
 			Examples: []fs.OptionExample{{
-				Help:  "Public (default, choose this if not sure)",
 				Value: "public",
+				Help:  "Public (default, choose this if not sure)",
 			}, {
-				Help:  "Internal (use internal service net)",
 				Value: "internal",
+				Help:  "Internal (use internal service net)",
 			}, {
-				Help:  "Admin",
 				Value: "admin",
+				Help:  "Admin",
 			}},
 		}, {
-			Name:     "leave_parts_on_error",
-			Help:     `If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.`,
+			Name: "leave_parts_on_error",
+			Help: `If true avoid calling abort upload on a failure.
+
+It should be set to true for resuming uploads across different sessions.`,
 			Default:  false,
 			Advanced: true,
 		}, {
 			Name: "storage_policy",
-			Help: `The storage policy to use when creating a new container
+			Help: `The storage policy to use when creating a new container.
 
 This applies the specified storage policy when creating a new
 container. The policy cannot be changed afterwards. The allowed
@@ -184,14 +186,14 @@ configuration values and their meaning depend on your Swift storage
 provider.`,
 			Default: "",
 			Examples: []fs.OptionExample{{
-				Help:  "Default",
 				Value: "",
+				Help:  "Default",
 			}, {
-				Help:  "OVH Public Cloud Storage",
 				Value: "pcs",
+				Help:  "OVH Public Cloud Storage",
 			}, {
-				Help:  "OVH Public Cloud Archive",
 				Value: "pca",
+				Help:  "OVH Public Cloud Archive",
 			}},
 		}}, SharedOptions...),
 	})
diff --git a/backend/tardigrade/fs.go b/backend/tardigrade/fs.go
index 3c981737c..0613ca2d6 100644
--- a/backend/tardigrade/fs.go
+++ b/backend/tardigrade/fs.go
@@ -98,13 +98,13 @@ func init() {
 				}},
 			{
 				Name:     "access_grant",
-				Help:     "Access Grant.",
+				Help:     "Access grant.",
 				Required: false,
 				Provider: "existing",
 			},
 			{
 				Name:     "satellite_address",
-				Help:     "Satellite Address. Custom satellite address should match the format: `<nodeid>@<address>:<port>`.",
+				Help:     "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
 				Required: false,
 				Provider: newProvider,
 				Default:  "us-central-1.tardigrade.io",
@@ -122,13 +122,13 @@ func init() {
 			},
 			{
 				Name:     "api_key",
-				Help:     "API Key.",
+				Help:     "API key.",
 				Required: false,
 				Provider: newProvider,
 			},
 			{
 				Name:     "passphrase",
-				Help:     "Encryption Passphrase. To access existing objects enter passphrase used for uploading.",
+				Help:     "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
 				Required: false,
 				Provider: newProvider,
 			},
diff --git a/backend/union/union.go b/backend/union/union.go
index 966f8ccc1..7c3951771 100644
--- a/backend/union/union.go
+++ b/backend/union/union.go
@@ -30,7 +30,7 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "upstreams",
-			Help:     "List of space separated upstreams.\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.\n",
+			Help:     "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
 			Required: true,
 		}, {
 			Name:     "action_policy",
@@ -49,7 +49,7 @@ func init() {
 			Default:  "ff",
 		}, {
 			Name:     "cache_time",
-			Help:     "Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.",
+			Help:     "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
 			Required: true,
 			Default:  120,
 		}},
diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go
index 2e641e9c1..3f1448a30 100644
--- a/backend/uptobox/uptobox.go
+++ b/backend/uptobox/uptobox.go
@@ -43,7 +43,7 @@ func init() {
 		Description: "Uptobox",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
-			Help: "Your access Token, get it from https://uptobox.com/my_account",
+			Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
 			Name: "access_token",
 		}, {
 			Name:     config.ConfigEncoding,
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 76dabff0c..284c73487 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -70,15 +70,15 @@ func init() {
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "url",
-			Help:     "URL of http host to connect to",
+			Help:     "URL of http host to connect to.",
 			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://example.com",
-				Help:  "Connect to example.com",
+				Help:  "Connect to example.com.",
 			}},
 		}, {
 			Name: "vendor",
-			Help: "Name of the Webdav site/service/software you are using",
+			Help: "Name of the Webdav site/service/software you are using.",
 			Examples: []fs.OptionExample{{
 				Value: "nextcloud",
 				Help:  "Nextcloud",
@@ -87,27 +87,27 @@ func init() {
 				Help:  "Owncloud",
 			}, {
 				Value: "sharepoint",
-				Help:  "Sharepoint Online, authenticated by Microsoft account.",
+				Help:  "Sharepoint Online, authenticated by Microsoft account",
 			}, {
 				Value: "sharepoint-ntlm",
-				Help:  "Sharepoint with NTLM authentication. Usually self-hosted or on-premises.",
+				Help:  "Sharepoint with NTLM authentication, usually self-hosted or on-premises",
 			}, {
 				Value: "other",
 				Help:  "Other site/service or software",
 			}},
 		}, {
 			Name: "user",
-			Help: "User name. In case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
+			Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
 		}, {
 			Name:       "pass",
 			Help:       "Password.",
 			IsPassword: true,
 		}, {
 			Name: "bearer_token",
-			Help: "Bearer token instead of user/pass (e.g. a Macaroon)",
+			Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
 		}, {
 			Name:     "bearer_token_command",
-			Help:     "Command to run to get a bearer token",
+			Help:     "Command to run to get a bearer token.",
 			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
@@ -115,7 +115,7 @@ func init() {
 			Advanced: true,
 		}, {
 			Name: "headers",
-			Help: `Set HTTP headers for all transactions
+			Help: `Set HTTP headers for all transactions.
 
 Use this to set additional HTTP headers for all transactions
 
diff --git a/cmd/backend/backend.go b/cmd/backend/backend.go
index bdfe600d1..0b00d2389 100644
--- a/cmd/backend/backend.go
+++ b/cmd/backend/backend.go
@@ -24,8 +24,8 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name.")
-	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format.")
+	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
+	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/cat/cat.go b/cmd/cat/cat.go
index 8cbe43361..4f90f0478 100644
--- a/cmd/cat/cat.go
+++ b/cmd/cat/cat.go
@@ -26,11 +26,11 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters.")
-	flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters.")
-	flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
-	flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters.")
-	flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing.")
+	flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters")
+	flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters")
+	flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)")
+	flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters")
+	flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/check/check.go b/cmd/check/check.go
index d9dc1b1ad..c17f6e1c0 100644
--- a/cmd/check/check.go
+++ b/cmd/check/check.go
@@ -32,7 +32,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
+	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash")
 	flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
 	AddFlags(cmdFlags)
 }
diff --git a/cmd/checksum/checksum.go b/cmd/checksum/checksum.go
index 9b9865b18..1083e3800 100644
--- a/cmd/checksum/checksum.go
+++ b/cmd/checksum/checksum.go
@@ -18,7 +18,7 @@ var download = false
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents.")
+	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents")
 	check.AddFlags(cmdFlags)
 }
 
diff --git a/cmd/cmd.go b/cmd/cmd.go
index 8c42e0ceb..5ce9dbf6a 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -49,11 +49,11 @@ var (
 	// Flags
 	cpuProfile      = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
 	memProfile      = flags.StringP("memprofile", "", "", "Write memory profile to file")
-	statsInterval   = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
+	statsInterval   = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)")
 	dataRateUnit    = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
 	version         bool
 	retries         = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
-	retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)")
+	retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)")
 	// Errors
 	errorCommandNotFound    = errors.New("command not found")
 	errorUncategorized      = errors.New("uncategorized error")
diff --git a/cmd/config/config.go b/cmd/config/config.go
index 5a28f3aae..3b27c2042 100644
--- a/cmd/config/config.go
+++ b/cmd/config/config.go
@@ -260,13 +260,13 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co
 
 func init() {
 	for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
-		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
-		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
-		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
-		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
-		flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
-		flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
-		flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer")
+		flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions")
+		flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue")
+		flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue")
 	}
 }
 
diff --git a/cmd/dedupe/dedupe.go b/cmd/dedupe/dedupe.go
index f6999b1c9..088cd78a4 100644
--- a/cmd/dedupe/dedupe.go
+++ b/cmd/dedupe/dedupe.go
@@ -19,7 +19,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlag := commandDefinition.Flags()
-	flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename.")
+	flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename")
 	flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find indentical hashes rather than names")
 }
 
diff --git a/cmd/listremotes/listremotes.go b/cmd/listremotes/listremotes.go
index 40b4ea12d..9d1e7f0b8 100644
--- a/cmd/listremotes/listremotes.go
+++ b/cmd/listremotes/listremotes.go
@@ -18,7 +18,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names.")
+	flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/lsd/lsd.go b/cmd/lsd/lsd.go
index 8bb6fcf2d..75db3bb73 100644
--- a/cmd/lsd/lsd.go
+++ b/cmd/lsd/lsd.go
@@ -19,7 +19,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
+	flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/lsf/lsf.go b/cmd/lsf/lsf.go
index 90b43d031..b9450f7ef 100644
--- a/cmd/lsf/lsf.go
+++ b/cmd/lsf/lsf.go
@@ -32,14 +32,14 @@ func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see  help for details")
-	flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format.")
-	flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
+	flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format")
+	flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names")
 	flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
-	flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files.")
-	flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories.")
-	flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format.")
-	flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names.")
-	flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
+	flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files")
+	flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories")
+	flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format")
+	flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names")
+	flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go
index 850d2b036..1ff4cf069 100644
--- a/cmd/lsjson/lsjson.go
+++ b/cmd/lsjson/lsjson.go
@@ -23,16 +23,16 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
-	flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
-	flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
-	flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up).")
-	flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
-	flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
-	flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
-	flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
-	flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated).")
-	flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file.")
+	flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing")
+	flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer)")
+	flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up)")
+	flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up)")
+	flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names")
+	flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object")
+	flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing")
+	flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing")
+	flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated)")
+	flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go
index c3581f7b8..3a20540ea 100644
--- a/cmd/mountlib/mount.go
+++ b/cmd/mountlib/mount.go
@@ -111,29 +111,29 @@ var Opt Options
 // AddFlags adds the non filing system specific flags to the command
 func AddFlags(flagSet *pflag.FlagSet) {
 	rc.AddOption("mount", &Opt)
-	flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v.")
-	flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached.")
-	flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
-	flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
+	flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v")
+	flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached")
+	flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp (repeat if required)")
+	flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)")
 	// Non-Windows only
-	flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process. Not supported on Windows. As background output is suppressed, use --log-file with --log-format=pid,... to monitor.")
-	flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads. Not supported on Windows.")
-	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads. Not supported on Windows.")
-	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.")
+	flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)")
+	flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
+	flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
+	flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
 	// Windows and OSX
-	flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name. Supported on Windows and OSX only.")
+	flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
 	// OSX only
-	flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files. Supported on OSX only.")
-	flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes. Supported on OSX only.")
+	flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)")
+	flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)")
 	// Windows only
-	flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive. Supported on Windows only")
+	flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)")
 	// Unix only
-	flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD). Ignored on Windows.")
+	flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)")
 }
 
 // NewMountCommand makes a mount command with the given name and Mount function
diff --git a/cmd/mountlib/rc.go b/cmd/mountlib/rc.go
index 3042c48fa..1f613da38 100644
--- a/cmd/mountlib/rc.go
+++ b/cmd/mountlib/rc.go
@@ -57,22 +57,22 @@ Rclone's cloud storage systems as a file system with FUSE.
 
 If no mountType is provided, the priority is given as follows: 1. mount 2.cmount 3.mount2
 
-This takes the following parameters
+This takes the following parameters:
 
 - fs - a remote path to be mounted (required)
 - mountPoint: valid path on the local machine (required)
-- mountType: One of the values (mount, cmount, mount2) specifies the mount implementation to use
+- mountType: one of the values (mount, cmount, mount2) specifies the mount implementation to use
 - mountOpt: a JSON object with Mount options in.
 - vfsOpt: a JSON object with VFS options in.
 
-Eg
+Example:
 
     rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
     rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
     rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
 
 The vfsOpt are as described in options/get and can be seen in the the
-"vfs" section when running and the mountOpt can be seen in the "mount" section.
+"vfs" section when running and the mountOpt can be seen in the "mount" section:
 
     rclone rc options/get
 `,
@@ -150,11 +150,11 @@ rclone allows Linux, FreeBSD, macOS and Windows to
 mount any of Rclone's cloud storage systems as a file system with
 FUSE.
 
-This takes the following parameters
+This takes the following parameters:
 
 - mountPoint: valid path on the local machine where the mount was created (required)
 
-Eg
+Example:
 
     rclone rc mount/unmount mountPoint=/home/<user>/mountPoint
 `,
@@ -222,7 +222,7 @@ func init() {
 		AuthRequired: true,
 		Fn:           listMountsRc,
 		Title:        "Show current mount points",
-		Help: `This shows currently mounted points, which can be used for performing an unmount
+		Help: `This shows currently mounted points, which can be used for performing an unmount.
 
 This takes no parameters and returns
 
@@ -272,7 +272,7 @@ func init() {
 		AuthRequired: true,
 		Fn:           unmountAll,
 		Title:        "Show current mount points",
-		Help: `This shows currently mounted points, which can be used for performing an unmount
+		Help: `This unmounts all mounted points.
 
 This takes no parameters and returns error if unmount does not succeed.
 
diff --git a/cmd/rc/rc.go b/cmd/rc/rc.go
index bea9df92d..92ae1fdf9 100644
--- a/cmd/rc/rc.go
+++ b/cmd/rc/rc.go
@@ -35,14 +35,14 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result.")
-	flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control.")
-	flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
-	flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control.")
-	flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
-	flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
-	flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array.")
-	flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array.")
+	flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result")
+	flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control")
+	flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args")
+	flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control")
+	flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control")
+	flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP")
+	flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array")
+	flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/selfupdate/selfupdate.go b/cmd/selfupdate/selfupdate.go
index 0e354e1a7..c87561c45 100644
--- a/cmd/selfupdate/selfupdate.go
+++ b/cmd/selfupdate/selfupdate.go
@@ -51,10 +51,10 @@ var Opt = Options{}
 func init() {
 	cmd.Root.AddCommand(cmdSelfUpdate)
 	cmdFlags := cmdSelfUpdate.Flags()
-	flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download.")
+	flags.BoolVarP(cmdFlags, &Opt.Check, "check", "", Opt.Check, "Check for latest release, do not download")
 	flags.StringVarP(cmdFlags, &Opt.Output, "output", "", Opt.Output, "Save the downloaded binary at a given path (default: replace running binary)")
 	flags.BoolVarP(cmdFlags, &Opt.Stable, "stable", "", Opt.Stable, "Install stable release (this is the default)")
-	flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release.")
+	flags.BoolVarP(cmdFlags, &Opt.Beta, "beta", "", Opt.Beta, "Install beta release")
 	flags.StringVarP(cmdFlags, &Opt.Version, "version", "", Opt.Version, "Install the given rclone version (default: latest)")
 	flags.StringVarP(cmdFlags, &Opt.Package, "package", "", Opt.Package, "Package format: zip|deb|rpm (default: zip)")
 }
diff --git a/cmd/serve/dlna/dlnaflags/dlnaflags.go b/cmd/serve/dlna/dlnaflags/dlnaflags.go
index 520c76f33..c8c1e6f05 100644
--- a/cmd/serve/dlna/dlnaflags/dlnaflags.go
+++ b/cmd/serve/dlna/dlnaflags/dlnaflags.go
@@ -42,9 +42,9 @@ var (
 
 func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
 	rc.AddOption("dlna", &Opt)
-	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "ip:port or :port to bind the DLNA http server to.")
-	flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "name of DLNA server")
-	flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "enable trace logging of SOAP traffic")
+	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "The ip:port or :port to bind the DLNA http server to")
+	flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "Name of DLNA server")
+	flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "Enable trace logging of SOAP traffic")
 }
 
 // AddFlags add the command line flags for DLNA serving.
diff --git a/cmd/serve/docker/docker.go b/cmd/serve/docker/docker.go
index 7b7725180..f9f6fcd93 100644
--- a/cmd/serve/docker/docker.go
+++ b/cmd/serve/docker/docker.go
@@ -33,11 +33,11 @@ var (
 func init() {
 	cmdFlags := Command.Flags()
 	// Add command specific flags
-	flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "base directory for volumes")
-	flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "<host:port> or absolute path (default: /run/docker/plugins/rclone.sock)")
+	flags.StringVarP(cmdFlags, &baseDir, "base-dir", "", baseDir, "Base directory for volumes")
+	flags.StringVarP(cmdFlags, &socketAddr, "socket-addr", "", socketAddr, "Address <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)")
 	flags.IntVarP(cmdFlags, &socketGid, "socket-gid", "", socketGid, "GID for unix socket (default: current process GID)")
-	flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "skip restoring previous state")
-	flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "do not write spec file")
+	flags.BoolVarP(cmdFlags, &forgetState, "forget-state", "", forgetState, "Skip restoring previous state")
+	flags.BoolVarP(cmdFlags, &noSpec, "no-spec", "", noSpec, "Do not write spec file")
 	// Add common mount/vfs flags
 	mountlib.AddFlags(cmdFlags)
 	vfsflags.AddFlags(cmdFlags)
diff --git a/cmd/serve/ftp/ftp.go b/cmd/serve/ftp/ftp.go
index 843ee9ef4..de7bc4ac6 100644
--- a/cmd/serve/ftp/ftp.go
+++ b/cmd/serve/ftp/ftp.go
@@ -59,11 +59,11 @@ var Opt = DefaultOpt
 // AddFlags adds flags for ftp
 func AddFlags(flagSet *pflag.FlagSet) {
 	rc.AddOption("ftp", &Opt)
-	flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
-	flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
-	flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use.")
-	flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication.")
-	flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")
+	flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
+	flags.StringVarP(flagSet, &Opt.PublicIP, "public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections")
+	flags.StringVarP(flagSet, &Opt.PassivePorts, "passive-port", "", Opt.PassivePorts, "Passive port range to use")
+	flags.StringVarP(flagSet, &Opt.BasicUser, "user", "", Opt.BasicUser, "User name for authentication")
+	flags.StringVarP(flagSet, &Opt.BasicPass, "pass", "", Opt.BasicPass, "Password for authentication (empty value allows every password)")
 	flags.StringVarP(flagSet, &Opt.TLSCert, "cert", "", Opt.TLSCert, "TLS PEM key (concatenation of certificate and CA certificate)")
 	flags.StringVarP(flagSet, &Opt.TLSKey, "key", "", Opt.TLSKey, "TLS PEM Private key")
 }
diff --git a/cmd/serve/http/data/data.go b/cmd/serve/http/data/data.go
index e4840f8ed..461e91663 100644
--- a/cmd/serve/http/data/data.go
+++ b/cmd/serve/http/data/data.go
@@ -47,7 +47,7 @@ type Options struct {
 
 // AddFlags for the templating functionality
 func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
-	flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")
+	flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
 }
 
 // AfterEpoch returns the time since the epoch for the given time
diff --git a/cmd/serve/httplib/httpflags/httpflags.go b/cmd/serve/httplib/httpflags/httpflags.go
index 073715504..499d63092 100644
--- a/cmd/serve/httplib/httpflags/httpflags.go
+++ b/cmd/serve/httplib/httpflags/httpflags.go
@@ -15,7 +15,7 @@ var (
 // AddFlagsPrefix adds flags for the httplib
 func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options) {
 	rc.AddOption(prefix+"http", &Opt)
-	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
+	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
 	flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
 	flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")
 	flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header")
@@ -24,10 +24,10 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
 	flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
 	flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
 	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
-	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
-	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
-	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.")
-	flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")
+	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
+	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
+	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
+	flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
 
 }
 
diff --git a/cmd/serve/proxy/proxyflags/proxyflags.go b/cmd/serve/proxy/proxyflags/proxyflags.go
index aff2af328..043cc6e44 100644
--- a/cmd/serve/proxy/proxyflags/proxyflags.go
+++ b/cmd/serve/proxy/proxyflags/proxyflags.go
@@ -14,5 +14,5 @@ var (
 
 // AddFlags adds the non filing system specific flags to the command
 func AddFlags(flagSet *pflag.FlagSet) {
-	flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth.")
+	flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth")
 }
diff --git a/cmd/serve/restic/restic.go b/cmd/serve/restic/restic.go
index e1bd75533..43bcadc87 100644
--- a/cmd/serve/restic/restic.go
+++ b/cmd/serve/restic/restic.go
@@ -37,10 +37,10 @@ var (
 func init() {
 	httpflags.AddFlags(Command.Flags())
 	flagSet := Command.Flags()
-	flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "run an HTTP2 server on stdin/stdout")
-	flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "disallow deletion of repository data")
-	flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "users can only access their private repo")
-	flags.BoolVarP(flagSet, &cacheObjects, "cache-objects", "", true, "cache listed objects")
+	flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout")
+	flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "Disallow deletion of repository data")
+	flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "Users can only access their private repo")
+	flags.BoolVarP(flagSet, &cacheObjects, "cache-objects", "", true, "Cache listed objects")
 }
 
 // Command definition for cobra
diff --git a/cmd/serve/sftp/sftp.go b/cmd/serve/sftp/sftp.go
index 42325e1d5..ef7a8f983 100644
--- a/cmd/serve/sftp/sftp.go
+++ b/cmd/serve/sftp/sftp.go
@@ -43,12 +43,12 @@ var Opt = DefaultOpt
 // AddFlags adds flags for the sftp
 func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
 	rc.AddOption("sftp", &Opt)
-	flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
+	flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
 	flags.StringArrayVarP(flagSet, &Opt.HostKeys, "key", "", Opt.HostKeys, "SSH private host key file (Can be multi-valued, leave blank to auto generate)")
 	flags.StringVarP(flagSet, &Opt.AuthorizedKeys, "authorized-keys", "", Opt.AuthorizedKeys, "Authorized keys file")
-	flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.")
-	flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.")
-	flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.")
+	flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication")
+	flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication")
+	flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set")
 	flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on run stdin/stdout")
 }
 
diff --git a/cmd/size/size.go b/cmd/size/size.go
index c68038519..be4ef2d5e 100644
--- a/cmd/size/size.go
+++ b/cmd/size/size.go
@@ -18,7 +18,7 @@ var jsonOutput bool
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "format output as JSON")
+	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/test/changenotify/changenotify.go b/cmd/test/changenotify/changenotify.go
index 7e79fa941..70eab98f3 100644
--- a/cmd/test/changenotify/changenotify.go
+++ b/cmd/test/changenotify/changenotify.go
@@ -20,7 +20,7 @@ var (
 func init() {
 	test.Command.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes.")
+	flags.DurationVarP(cmdFlags, &pollInterval, "poll-interval", "", pollInterval, "Time to wait between polling for changes")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/test/info/info.go b/cmd/test/info/info.go
index 377353c36..6b6b35f07 100644
--- a/cmd/test/info/info.go
+++ b/cmd/test/info/info.go
@@ -47,13 +47,13 @@ var (
 func init() {
 	test.Command.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
-	flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization.")
-	flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters.")
-	flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
-	flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length.")
-	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size.")
-	flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests.")
+	flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file")
+	flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", false, "Check UTF-8 Normalization")
+	flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", false, "Check control characters")
+	flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file")
+	flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", false, "Check max filename length")
+	flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", false, "Check uploads with indeterminate file size")
+	flags.BoolVarP(cmdFlags, &all, "all", "", false, "Run all tests")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go
index 82d49eec2..e61987c47 100644
--- a/cmd/touch/touch.go
+++ b/cmd/touch/touch.go
@@ -30,10 +30,10 @@ const (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist. Implied with --recursive.")
-	flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day.")
-	flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC.")
-	flags.BoolVarP(cmdFlags, &recursive, "recursive", "R", false, "Recursively touch all files.")
+	flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist (implied with --recursive)")
+	flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Use specified time instead of the current time of day")
+	flags.BoolVarP(cmdFlags, &localTime, "localtime", "", false, "Use localtime for timestamp, not UTC")
+	flags.BoolVarP(cmdFlags, &recursive, "recursive", "R", false, "Recursively touch all files")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go
index f473a7699..8fd75e53f 100644
--- a/cmd/tree/tree.go
+++ b/cmd/tree/tree.go
@@ -32,18 +32,19 @@ func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
 	// List
-	flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too).")
-	flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only.")
-	flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.")
-	//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.")
-	flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.")
-	// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.")
-	flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep.")
-	// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.")
-	// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.")
-	flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout.")
+	flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too)")
+	flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only")
+	flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file")
+	//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching")
+	flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing")
+	// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories")
+	flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep")
+	// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given")
+	// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern")
+	flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout")
 	// Files
 	flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
+	flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
 	flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
 	// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
 	// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
@@ -52,16 +53,16 @@ func init() {
 	// flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.")
 	// flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
 	// Sort
-	flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted.")
-	flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version.")
-	flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.")
-	flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.")
-	flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.")
-	flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
-	flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
+	flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted")
+	flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version")
+	flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time")
+	flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time")
+	flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort")
+	flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables)")
+	flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime")
 	// Graphics
-	flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines.")
-	flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always.")
+	flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines")
+	flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/cmd/version/version.go b/cmd/version/version.go
index 1f72b178a..4ccca007a 100644
--- a/cmd/version/version.go
+++ b/cmd/version/version.go
@@ -22,7 +22,7 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version.")
+	flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version")
 }
 
 var commandDefinition = &cobra.Command{
diff --git a/docs/content/flags.md b/docs/content/flags.md
index c5d8d1d37..95453bc62 100644
--- a/docs/content/flags.md
+++ b/docs/content/flags.md
@@ -13,35 +13,35 @@ split into two groups, non backend and backend flags.
 These flags are available for every command.
 
 ```
-      --ask-password                         Allow prompt for password for encrypted configuration. (default true)
-      --auto-confirm                         If enabled, do not request console confirmation.
-      --backup-dir string                    Make backups into hierarchy based in DIR.
-      --bind string                          Local address to bind to for outgoing connections, IPv4, IPv6 or name.
-      --buffer-size SizeSuffix               In memory buffer size when reading files for each --transfer. (default 16Mi)
-      --bwlimit BwTimetable                  Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.
-      --bwlimit-file BwTimetable             Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.
+      --ask-password                         Allow prompt for password for encrypted configuration (default true)
+      --auto-confirm                         If enabled, do not request console confirmation
+      --backup-dir string                    Make backups into hierarchy based in DIR
+      --bind string                          Local address to bind to for outgoing connections, IPv4, IPv6 or name
+      --buffer-size SizeSuffix               In memory buffer size when reading files for each --transfer (default 16Mi)
+      --bwlimit BwTimetable                  Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
+      --bwlimit-file BwTimetable             Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable
       --ca-cert string                       CA certificate used to verify servers
-      --temp-dir string                      Directory rclone will use for temporary files. (default "$TMPDIR")
-      --cache-dir string                     Directory rclone will use for caching. (default "$HOME/.cache/rclone")
-      --check-first                          Do all the checks before starting transfers.
-      --checkers int                         Number of checkers to run in parallel. (default 8)
+      --temp-dir string                      Directory rclone will use for temporary files (default "$TMPDIR")
+      --cache-dir string                     Directory rclone will use for caching (default "$HOME/.cache/rclone")
+      --check-first                          Do all the checks before starting transfers
+      --checkers int                         Number of checkers to run in parallel (default 8)
   -c, --checksum                             Skip based on checksum (if available) & size, not mod-time & size
       --client-cert string                   Client SSL certificate (PEM) for mutual TLS auth
       --client-key string                    Client SSL private key (PEM) for mutual TLS auth
-      --compare-dest stringArray             Include additional comma separated server-side paths during comparison.
-      --config string                        Config file. (default "$HOME/.config/rclone/rclone.conf")
+      --compare-dest stringArray             Include additional comma separated server-side paths during comparison
+      --config string                        Config file (default "$HOME/.config/rclone/rclone.conf")
       --contimeout duration                  Connect timeout (default 1m0s)
-      --copy-dest stringArray                Implies --compare-dest but also copies files from paths into destination.
+      --copy-dest stringArray                Implies --compare-dest but also copies files from paths into destination
       --cpuprofile string                    Write cpu profile to file
       --cutoff-mode string                   Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default "HARD")
       --delete-after                         When synchronizing, delete files on destination after transferring (default)
       --delete-before                        When synchronizing, delete files on destination before transferring
       --delete-during                        When synchronizing, delete files during transfer
       --delete-excluded                      Delete files on dest excluded from sync
-      --disable string                       Disable a comma separated list of features.  Use --disable help to see a list.
-      --disable-http2                        Disable HTTP/2 in the global transport.
+      --disable string                       Disable a comma separated list of features (use --disable help to see a list)
+      --disable-http2                        Disable HTTP/2 in the global transport
   -n, --dry-run                              Do a trial run with no permanent changes
-      --dscp string                          Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.
+      --dscp string                          Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21
       --dump DumpFlags                       List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
       --dump-bodies                          Dump HTTP headers and bodies - may contain sensitive info
       --dump-headers                         Dump HTTP headers - may contain sensitive info
@@ -50,7 +50,7 @@ These flags are available for every command.
       --exclude-from stringArray             Read exclude patterns from file (use - to read from stdin)
       --exclude-if-present string            Exclude directories if filename is present
       --expect-continue-timeout duration     Timeout when using expect / 100-continue in HTTP (default 1s)
-      --fast-list                            Use recursive list if available. Uses more memory but fewer transactions.
+      --fast-list                            Use recursive list if available; uses more memory but fewer transactions
       --files-from stringArray               Read list of source-file names from file (use - to read from stdin)
       --files-from-raw stringArray           Read list of source-file names from file without any processing of lines (use - to read from stdin)
   -f, --filter stringArray                   Add a file-filtering rule
@@ -62,12 +62,12 @@ These flags are available for every command.
       --header-upload stringArray            Set HTTP header for upload transactions
       --ignore-case                          Ignore case in filters (case insensitive)
       --ignore-case-sync                     Ignore case when synchronizing
-      --ignore-checksum                      Skip post copy check of checksums.
+      --ignore-checksum                      Skip post copy check of checksums
       --ignore-errors                        delete even if there are I/O errors
       --ignore-existing                      Skip all files that exist on destination
-      --ignore-size                          Ignore size when skipping use mod-time or checksum.
+      --ignore-size                          Ignore size when skipping use mod-time or checksum
   -I, --ignore-times                         Don't skip files that match size and time - transfer all files
-      --immutable                            Do not modify files. Fail if existing files have been modified.
+      --immutable                            Do not modify files, fail if existing files have been modified
       --include stringArray                  Include files matching pattern
       --include-from stringArray             Read include patterns from file (use - to read from stdin)
   -i, --interactive                          Enable interactive mode
@@ -75,88 +75,88 @@ These flags are available for every command.
       --log-file string                      Log everything to this file
       --log-format string                    Comma separated list of log format options (default "date,time")
       --log-level string                     Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
-      --log-systemd                          Activate systemd integration for the logger.
-      --low-level-retries int                Number of low level retries to do. (default 10)
+      --log-systemd                          Activate systemd integration for the logger
+      --low-level-retries int                Number of low level retries to do (default 10)
       --max-age Duration                     Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
-      --max-backlog int                      Maximum number of objects in sync or check backlog. (default 10000)
+      --max-backlog int                      Maximum number of objects in sync or check backlog (default 10000)
       --max-delete int                       When synchronizing, limit the number of deletes (default -1)
-      --max-depth int                        If set limits the recursion depth to this. (default -1)
-      --max-duration duration                Maximum duration rclone will transfer data for.
+      --max-depth int                        If set limits the recursion depth to this (default -1)
+      --max-duration duration                Maximum duration rclone will transfer data for
       --max-size SizeSuffix                  Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
-      --max-stats-groups int                 Maximum number of stats groups to keep in memory. On max oldest is discarded. (default 1000)
-      --max-transfer SizeSuffix              Maximum size of data to transfer. (default off)
+      --max-stats-groups int                 Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
+      --max-transfer SizeSuffix              Maximum size of data to transfer (default off)
       --memprofile string                    Write memory profile to file
       --min-age Duration                     Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
       --min-size SizeSuffix                  Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
       --modify-window duration               Max time diff to be considered the same (default 1ns)
-      --multi-thread-cutoff SizeSuffix       Use multi-thread downloads for files above this size. (default 250Mi)
-      --multi-thread-streams int             Max number of streams to use for multi-thread downloads. (default 4)
-      --no-check-certificate                 Do not verify the server SSL certificate. Insecure.
-      --no-check-dest                        Don't check the destination, copy regardless.
-      --no-console                           Hide console window. Supported on Windows only.
-      --no-gzip-encoding                     Don't set Accept-Encoding: gzip.
-      --no-traverse                          Don't traverse destination file system on copy.
-      --no-unicode-normalization             Don't normalize unicode characters in filenames.
-      --no-update-modtime                    Don't update destination mod-time if files identical.
+      --multi-thread-cutoff SizeSuffix       Use multi-thread downloads for files above this size (default 250Mi)
+      --multi-thread-streams int             Max number of streams to use for multi-thread downloads (default 4)
+      --no-check-certificate                 Do not verify the server SSL certificate (insecure)
+      --no-check-dest                        Don't check the destination, copy regardless
+      --no-console                           Hide console window (supported on Windows only)
+      --no-gzip-encoding                     Don't set Accept-Encoding: gzip
+      --no-traverse                          Don't traverse destination file system on copy
+      --no-unicode-normalization             Don't normalize unicode characters in filenames
+      --no-update-modtime                    Don't update destination mod-time if files identical
       --order-by string                      Instructions on how to order the transfers, e.g. 'size,descending'
-      --password-command SpaceSepList        Command for supplying password for encrypted configuration.
-  -P, --progress                             Show progress during transfer.
-      --progress-terminal-title              Show progress on the terminal title. Requires -P/--progress.
+      --password-command SpaceSepList        Command for supplying password for encrypted configuration
+  -P, --progress                             Show progress during transfer
+      --progress-terminal-title              Show progress on the terminal title (requires -P/--progress)
   -q, --quiet                                Print as little stuff as possible
-      --rc                                   Enable the remote control server.
-      --rc-addr string                       IPaddress:Port or :Port to bind server to. (default "localhost:5572")
-      --rc-allow-origin string               Set the allowed origin for CORS.
-      --rc-baseurl string                    Prefix for URLs - leave blank for root.
+      --rc                                   Enable the remote control server
+      --rc-addr string                       IPaddress:Port or :Port to bind server to (default "localhost:5572")
+      --rc-allow-origin string               Set the allowed origin for CORS
+      --rc-baseurl string                    Prefix for URLs - leave blank for root
       --rc-cert string                       SSL PEM key (concatenation of certificate and CA certificate)
       --rc-client-ca string                  Client certificate authority to verify clients with
       --rc-enable-metrics                    Enable prometheus metrics on /metrics
-      --rc-files string                      Path to local files to serve on the HTTP server.
+      --rc-files string                      Path to local files to serve on the HTTP server
       --rc-htpasswd string                   htpasswd file - if not provided no authentication is done
       --rc-job-expire-duration duration      expire finished async jobs older than this value (default 1m0s)
       --rc-job-expire-interval duration      interval to check for expired async jobs (default 10s)
       --rc-key string                        SSL PEM Private key
       --rc-max-header-bytes int              Maximum size of request header (default 4096)
-      --rc-no-auth                           Don't require auth for certain methods.
-      --rc-pass string                       Password for authentication.
+      --rc-no-auth                           Don't require auth for certain methods
+      --rc-pass string                       Password for authentication
       --rc-realm string                      realm for authentication (default "rclone")
-      --rc-serve                             Enable the serving of remote objects.
+      --rc-serve                             Enable the serving of remote objects
       --rc-server-read-timeout duration      Timeout for server reading data (default 1h0m0s)
       --rc-server-write-timeout duration     Timeout for server writing data (default 1h0m0s)
-      --rc-template string                   User Specified Template.
-      --rc-user string                       User name for authentication.
-      --rc-web-fetch-url string              URL to fetch the releases for webgui. (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
+      --rc-template string                   User Specified Template
+      --rc-user string                       User name for authentication
+      --rc-web-fetch-url string              URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
       --rc-web-gui                           Launch WebGUI on localhost
       --rc-web-gui-force-update              Force update to latest version of web gui
       --rc-web-gui-no-open-browser           Don't open the browser automatically
       --rc-web-gui-update                    Check and update to latest version of web gui
-      --refresh-times                        Refresh the modtime of remote files.
+      --refresh-times                        Refresh the modtime of remote files
       --retries int                          Retry operations this many times if they fail (default 3)
-      --retries-sleep duration               Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
+      --retries-sleep duration               Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
       --size-only                            Skip based on size only, not mod-time or checksum
       --stats duration                       Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
-      --stats-file-name-length int           Max file name length in stats. 0 for no limit (default 45)
+      --stats-file-name-length int           Max file name length in stats, 0 for no limit (default 45)
       --stats-log-level string               Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
-      --stats-one-line                       Make the stats fit on one line.
-      --stats-one-line-date                  Enables --stats-one-line and add current date/time prefix.
-      --stats-one-line-date-format string    Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes ("). See https://golang.org/pkg/time/#Time.Format
+      --stats-one-line                       Make the stats fit on one line
+      --stats-one-line-date                  Enable --stats-one-line and add current date/time prefix
+      --stats-one-line-date-format string    Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
       --stats-unit string                    Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
-      --streaming-upload-cutoff SizeSuffix   Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100Ki)
-      --suffix string                        Suffix to add to changed files.
-      --suffix-keep-extension                Preserve the extension when using --suffix.
+      --streaming-upload-cutoff SizeSuffix   Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
+      --suffix string                        Suffix to add to changed files
+      --suffix-keep-extension                Preserve the extension when using --suffix
       --syslog                               Use Syslog for logging
       --syslog-facility string               Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
       --timeout duration                     IO idle timeout (default 5m0s)
-      --tpslimit float                       Limit HTTP transactions per second to this.
-      --tpslimit-burst int                   Max burst of transactions for --tpslimit. (default 1)
+      --tpslimit float                       Limit HTTP transactions per second to this
+      --tpslimit-burst int                   Max burst of transactions for --tpslimit (default 1)
       --track-renames                        When synchronizing, track file renames and do a server-side move if possible
       --track-renames-strategy string        Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash")
-      --transfers int                        Number of file transfers to run in parallel. (default 4)
-  -u, --update                               Skip files that are newer on the destination.
-      --use-cookies                          Enable session cookiejar.
-      --use-json-log                         Use json log format.
-      --use-mmap                             Use mmap allocator (see docs).
+      --transfers int                        Number of file transfers to run in parallel (default 4)
+  -u, --update                               Skip files that are newer on the destination
+      --use-cookies                          Enable session cookiejar
+      --use-json-log                         Use json log format
+      --use-mmap                             Use mmap allocator (see docs)
       --use-server-modtime                   Use server modified time instead of object metadata
-      --user-agent string                    Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0")
+      --user-agent string                    Set the user-agent to a specified string (default "rclone/v1.56.0")
   -v, --verbose count                        Print lots more stuff (repeat for more)
 ```
 
@@ -166,168 +166,168 @@ These flags are available for every command. They control the backends
 and may be set in the config file.
 
 ```
-      --acd-auth-url string                                      Auth server URL.
+      --acd-auth-url string                                      Auth server URL
       --acd-client-id string                                     OAuth Client Id
       --acd-client-secret string                                 OAuth Client Secret
-      --acd-encoding MultiEncoder                                This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
-      --acd-templink-threshold SizeSuffix                        Files >= this size will be downloaded via their tempLink. (default 9Gi)
-      --acd-token string                                         OAuth Access Token as a JSON blob.
-      --acd-token-url string                                     Token server url.
-      --acd-upload-wait-per-gb Duration                          Additional time per GiB to wait after a failed complete upload to see if it appears. (default 3m0s)
-      --alias-remote string                                      Remote or path to alias.
-      --azureblob-access-tier string                             Access tier of blob: hot, cool or archive.
+      --acd-encoding MultiEncoder                                This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
+      --acd-templink-threshold SizeSuffix                        Files >= this size will be downloaded via their tempLink (default 9Gi)
+      --acd-token string                                         OAuth Access Token as a JSON blob
+      --acd-token-url string                                     Token server url
+      --acd-upload-wait-per-gb Duration                          Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+      --alias-remote string                                      Remote or path to alias
+      --azureblob-access-tier string                             Access tier of blob: hot, cool or archive
       --azureblob-account string                                 Storage Account Name (leave blank to use SAS URL or Emulator)
-      --azureblob-archive-tier-delete                            Delete archive tier blobs before overwriting.
-      --azureblob-chunk-size SizeSuffix                          Upload chunk size (<= 100 MiB). (default 4Mi)
-      --azureblob-disable-checksum                               Don't store MD5 checksum with object metadata.
-      --azureblob-encoding MultiEncoder                          This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
+      --azureblob-archive-tier-delete                            Delete archive tier blobs before overwriting
+      --azureblob-chunk-size SizeSuffix                          Upload chunk size (<= 100 MiB) (default 4Mi)
+      --azureblob-disable-checksum                               Don't store MD5 checksum with object metadata
+      --azureblob-encoding MultiEncoder                          This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
       --azureblob-endpoint string                                Endpoint for the service
       --azureblob-key string                                     Storage Account Key (leave blank to use SAS URL or Emulator)
-      --azureblob-list-chunk int                                 Size of blob list. (default 5000)
-      --azureblob-memory-pool-flush-time Duration                How often internal memory buffer pools will be flushed. (default 1m0s)
-      --azureblob-memory-pool-use-mmap                           Whether to use mmap buffers in internal memory pool.
-      --azureblob-msi-client-id string                           Object ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
-      --azureblob-msi-mi-res-id string                           Azure resource ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_object_id specified.
-      --azureblob-msi-object-id string                           Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
-      --azureblob-public-access string                           Public access level of a container: blob, container.
+      --azureblob-list-chunk int                                 Size of blob list (default 5000)
+      --azureblob-memory-pool-flush-time Duration                How often internal memory buffer pools will be flushed (default 1m0s)
+      --azureblob-memory-pool-use-mmap                           Whether to use mmap buffers in internal memory pool
+      --azureblob-msi-client-id string                           Object ID of the user-assigned MSI to use, if any
+      --azureblob-msi-mi-res-id string                           Azure resource ID of the user-assigned MSI to use, if any
+      --azureblob-msi-object-id string                           Object ID of the user-assigned MSI to use, if any
+      --azureblob-public-access string                           Public access level of a container: blob, container
       --azureblob-sas-url string                                 SAS URL for container level access only
-      --azureblob-service-principal-file string                  Path to file containing credentials for use with a service principal.
-      --azureblob-upload-cutoff string                           Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)
+      --azureblob-service-principal-file string                  Path to file containing credentials for use with a service principal
+      --azureblob-upload-cutoff string                           Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
       --azureblob-use-emulator                                   Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
       --azureblob-use-msi                                        Use a managed service identity to authenticate (only works in Azure)
       --b2-account string                                        Account ID or Application Key ID
-      --b2-chunk-size SizeSuffix                                 Upload chunk size. Must fit in memory. (default 96Mi)
+      --b2-chunk-size SizeSuffix                                 Upload chunk size (default 96Mi)
       --b2-copy-cutoff SizeSuffix                                Cutoff for switching to multipart copy (default 4Gi)
       --b2-disable-checksum                                      Disable checksums for large (> upload cutoff) files
-      --b2-download-auth-duration Duration                       Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default 1w)
-      --b2-download-url string                                   Custom endpoint for downloads.
-      --b2-encoding MultiEncoder                                 This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
-      --b2-endpoint string                                       Endpoint for the service.
-      --b2-hard-delete                                           Permanently delete files on remote removal, otherwise hide files.
+      --b2-download-auth-duration Duration                       Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+      --b2-download-url string                                   Custom endpoint for downloads
+      --b2-encoding MultiEncoder                                 This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --b2-endpoint string                                       Endpoint for the service
+      --b2-hard-delete                                           Permanently delete files on remote removal, otherwise hide files
       --b2-key string                                            Application Key
-      --b2-memory-pool-flush-time Duration                       How often internal memory buffer pools will be flushed. (default 1m0s)
-      --b2-memory-pool-use-mmap                                  Whether to use mmap buffers in internal memory pool.
-      --b2-test-mode string                                      A flag string for X-Bz-Test-Mode header for debugging.
-      --b2-upload-cutoff SizeSuffix                              Cutoff for switching to chunked upload. (default 200Mi)
-      --b2-versions                                              Include old versions in directory listings.
+      --b2-memory-pool-flush-time Duration                       How often internal memory buffer pools will be flushed (default 1m0s)
+      --b2-memory-pool-use-mmap                                  Whether to use mmap buffers in internal memory pool
+      --b2-test-mode string                                      A flag string for X-Bz-Test-Mode header for debugging
+      --b2-upload-cutoff SizeSuffix                              Cutoff for switching to chunked upload (default 200Mi)
+      --b2-versions                                              Include old versions in directory listings
       --box-access-token string                                  Box App Primary Access Token
-      --box-auth-url string                                      Auth server URL.
+      --box-auth-url string                                      Auth server URL
       --box-box-config-file string                               Box App config.json location
-      --box-box-sub-type string                                   (default "user")
+      --box-box-sub-type string                                  (default "user")
       --box-client-id string                                     OAuth Client Id
       --box-client-secret string                                 OAuth Client Secret
-      --box-commit-retries int                                   Max number of times to try committing a multipart file. (default 100)
-      --box-encoding MultiEncoder                                This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
-      --box-root-folder-id string                                Fill in for rclone to use a non root folder as its starting point.
-      --box-token string                                         OAuth Access Token as a JSON blob.
-      --box-token-url string                                     Token server url.
-      --box-upload-cutoff SizeSuffix                             Cutoff for switching to multipart upload (>= 50 MiB). (default 50Mi)
-      --cache-chunk-clean-interval Duration                      How often should the cache perform cleanups of the chunk storage. (default 1m0s)
-      --cache-chunk-no-memory                                    Disable the in-memory cache for storing chunks during streaming.
-      --cache-chunk-path string                                  Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
-      --cache-chunk-size SizeSuffix                              The size of a chunk (partial file data). (default 5Mi)
-      --cache-chunk-total-size SizeSuffix                        The total size that the chunks can take up on the local disk. (default 10Gi)
-      --cache-db-path string                                     Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
-      --cache-db-purge                                           Clear all the cached data for this remote on start.
+      --box-commit-retries int                                   Max number of times to try committing a multipart file (default 100)
+      --box-encoding MultiEncoder                                This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
+      --box-root-folder-id string                                Fill in for rclone to use a non root folder as its starting point
+      --box-token string                                         OAuth Access Token as a JSON blob
+      --box-token-url string                                     Token server url
+      --box-upload-cutoff SizeSuffix                             Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi)
+      --cache-chunk-clean-interval Duration                      How often should the cache perform cleanups of the chunk storage (default 1m0s)
+      --cache-chunk-no-memory                                    Disable the in-memory cache for storing chunks during streaming
+      --cache-chunk-path string                                  Directory to cache chunk files (default "$HOME/.cache/rclone/cache-backend")
+      --cache-chunk-size SizeSuffix                              The size of a chunk (partial file data) (default 5Mi)
+      --cache-chunk-total-size SizeSuffix                        The total size that the chunks can take up on the local disk (default 10Gi)
+      --cache-db-path string                                     Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
+      --cache-db-purge                                           Clear all the cached data for this remote on start
       --cache-db-wait-time Duration                              How long to wait for the DB to be available - 0 is unlimited (default 1s)
-      --cache-info-age Duration                                  How long to cache file structure information (directory listings, file size, times, etc.). (default 6h0m0s)
+      --cache-info-age Duration                                  How long to cache file structure information (directory listings, file size, times, etc) (default 6h0m0s)
       --cache-plex-insecure string                               Skip all certificate verification when connecting to the Plex server
       --cache-plex-password string                               The password of the Plex user (obscured)
       --cache-plex-url string                                    The URL of the Plex server
       --cache-plex-username string                               The username of the Plex user
-      --cache-read-retries int                                   How many times to retry a read from a cache storage. (default 10)
-      --cache-remote string                                      Remote to cache.
+      --cache-read-retries int                                   How many times to retry a read from a cache storage (default 10)
+      --cache-remote string                                      Remote to cache
       --cache-rps int                                            Limits the number of requests per second to the source FS (-1 to disable) (default -1)
-      --cache-tmp-upload-path string                             Directory to keep temporary files until they are uploaded.
+      --cache-tmp-upload-path string                             Directory to keep temporary files until they are uploaded
       --cache-tmp-wait-time Duration                             How long should files be stored in local cache before being uploaded (default 15s)
-      --cache-workers int                                        How many workers should run in parallel to download chunks. (default 4)
+      --cache-workers int                                        How many workers should run in parallel to download chunks (default 4)
       --cache-writes                                             Cache file data on writes through the FS
-      --chunker-chunk-size SizeSuffix                            Files larger than chunk size will be split in chunks. (default 2Gi)
-      --chunker-fail-hard                                        Choose how chunker should handle files with missing or invalid chunks.
-      --chunker-hash-type string                                 Choose how chunker handles hash sums. All modes but "none" require metadata. (default "md5")
-      --chunker-remote string                                    Remote to chunk/unchunk.
-      --compress-level int                                       GZIP compression level (-2 to 9). (default -1)
-      --compress-mode string                                     Compression mode. (default "gzip")
-      --compress-ram-cache-limit SizeSuffix                      Some remotes don't allow the upload of files with unknown size. (default 20Mi)
-      --compress-remote string                                   Remote to compress.
-  -L, --copy-links                                               Follow symlinks and copy the pointed to item.
-      --crypt-directory-name-encryption                          Option to either encrypt directory names or leave them intact. (default true)
-      --crypt-filename-encryption string                         How to encrypt the filenames. (default "standard")
-      --crypt-no-data-encryption                                 Option to either encrypt file data or leave it unencrypted.
-      --crypt-password string                                    Password or pass phrase for encryption. (obscured)
-      --crypt-password2 string                                   Password or pass phrase for salt. Optional but recommended. (obscured)
-      --crypt-remote string                                      Remote to encrypt/decrypt.
-      --crypt-server-side-across-configs                         Allow server-side operations (e.g. copy) to work across different crypt configs.
-      --crypt-show-mapping                                       For all files listed show how the names encrypt.
-      --drive-acknowledge-abuse                                  Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
-      --drive-allow-import-name-change                           Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
-      --drive-auth-owner-only                                    Only consider files owned by the authenticated user.
-      --drive-auth-url string                                    Auth server URL.
-      --drive-chunk-size SizeSuffix                              Upload chunk size. Must a power of 2 >= 256k. (default 8Mi)
+      --chunker-chunk-size SizeSuffix                            Files larger than chunk size will be split in chunks (default 2Gi)
+      --chunker-fail-hard                                        Choose how chunker should handle files with missing or invalid chunks
+      --chunker-hash-type string                                 Choose how chunker handles hash sums (default "md5")
+      --chunker-remote string                                    Remote to chunk/unchunk
+      --compress-level int                                       GZIP compression level (-2 to 9) (default -1)
+      --compress-mode string                                     Compression mode (default "gzip")
+      --compress-ram-cache-limit SizeSuffix                      Some remotes don't allow the upload of files with unknown size (default 20Mi)
+      --compress-remote string                                   Remote to compress
+  -L, --copy-links                                               Follow symlinks and copy the pointed to item
+      --crypt-directory-name-encryption                          Option to either encrypt directory names or leave them intact (default true)
+      --crypt-filename-encryption string                         How to encrypt the filenames (default "standard")
+      --crypt-no-data-encryption                                 Option to either encrypt file data or leave it unencrypted
+      --crypt-password string                                    Password or pass phrase for encryption (obscured)
+      --crypt-password2 string                                   Password or pass phrase for salt (obscured)
+      --crypt-remote string                                      Remote to encrypt/decrypt
+      --crypt-server-side-across-configs                         Allow server-side operations (e.g. copy) to work across different crypt configs
+      --crypt-show-mapping                                       For all files listed show how the names encrypt
+      --drive-acknowledge-abuse                                  Set to allow files which return cannotDownloadAbusiveFile to be downloaded
+      --drive-allow-import-name-change                           Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx)
+      --drive-auth-owner-only                                    Only consider files owned by the authenticated user
+      --drive-auth-url string                                    Auth server URL
+      --drive-chunk-size SizeSuffix                              Upload chunk size (default 8Mi)
       --drive-client-id string                                   Google Application Client Id
       --drive-client-secret string                               OAuth Client Secret
       --drive-disable-http2                                      Disable drive using http2 (default true)
-      --drive-encoding MultiEncoder                              This sets the encoding for the backend. (default InvalidUtf8)
-      --drive-export-formats string                              Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
+      --drive-encoding MultiEncoder                              This sets the encoding for the backend (default InvalidUtf8)
+      --drive-export-formats string                              Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
       --drive-formats string                                     Deprecated: see export_formats
-      --drive-impersonate string                                 Impersonate this user when using a service account.
-      --drive-import-formats string                              Comma separated list of preferred formats for uploading Google docs.
-      --drive-keep-revision-forever                              Keep new head revision of each file forever.
-      --drive-list-chunk int                                     Size of listing chunk 100-1000. 0 to disable. (default 1000)
-      --drive-pacer-burst int                                    Number of API calls to allow without sleeping. (default 100)
-      --drive-pacer-min-sleep Duration                           Minimum time to sleep between API calls. (default 100ms)
+      --drive-impersonate string                                 Impersonate this user when using a service account
+      --drive-import-formats string                              Comma separated list of preferred formats for uploading Google docs
+      --drive-keep-revision-forever                              Keep new head revision of each file forever
+      --drive-list-chunk int                                     Size of listing chunk 100-1000, 0 to disable (default 1000)
+      --drive-pacer-burst int                                    Number of API calls to allow without sleeping (default 100)
+      --drive-pacer-min-sleep Duration                           Minimum time to sleep between API calls (default 100ms)
       --drive-root-folder-id string                              ID of the root folder
-      --drive-scope string                                       Scope that rclone should use when requesting access from drive.
-      --drive-server-side-across-configs                         Allow server-side operations (e.g. copy) to work across different drive configs.
+      --drive-scope string                                       Scope that rclone should use when requesting access from drive
+      --drive-server-side-across-configs                         Allow server-side operations (e.g. copy) to work across different drive configs
       --drive-service-account-credentials string                 Service Account Credentials JSON blob
       --drive-service-account-file string                        Service Account Credentials JSON file path
-      --drive-shared-with-me                                     Only show files that are shared with me.
-      --drive-size-as-quota                                      Show sizes as storage quota usage, not actual size.
-      --drive-skip-checksum-gphotos                              Skip MD5 checksum on Google photos and videos only.
-      --drive-skip-gdocs                                         Skip google documents in all listings.
+      --drive-shared-with-me                                     Only show files that are shared with me
+      --drive-size-as-quota                                      Show sizes as storage quota usage, not actual size
+      --drive-skip-checksum-gphotos                              Skip MD5 checksum on Google photos and videos only
+      --drive-skip-gdocs                                         Skip google documents in all listings
       --drive-skip-shortcuts                                     If set skip shortcut files
-      --drive-starred-only                                       Only show files that are starred.
+      --drive-starred-only                                       Only show files that are starred
       --drive-stop-on-download-limit                             Make download limit errors be fatal
       --drive-stop-on-upload-limit                               Make upload limit errors be fatal
       --drive-team-drive string                                  ID of the Shared Drive (Team Drive)
-      --drive-token string                                       OAuth Access Token as a JSON blob.
-      --drive-token-url string                                   Token server url.
-      --drive-trashed-only                                       Only show files that are in the trash.
+      --drive-token string                                       OAuth Access Token as a JSON blob
+      --drive-token-url string                                   Token server url
+      --drive-trashed-only                                       Only show files that are in the trash
       --drive-upload-cutoff SizeSuffix                           Cutoff for switching to chunked upload (default 8Mi)
-      --drive-use-created-date                                   Use file created date instead of modified date.,
-      --drive-use-shared-date                                    Use date file was shared instead of modified date.
-      --drive-use-trash                                          Send files to the trash instead of deleting permanently. (default true)
-      --drive-v2-download-min-size SizeSuffix                    If Object's are greater, use drive v2 API to download. (default off)
-      --dropbox-auth-url string                                  Auth server URL.
-      --dropbox-batch-mode string                                Upload file batching sync|async|off. (default "sync")
-      --dropbox-batch-size int                                   Max number of files in upload batch.
+      --drive-use-created-date                                   Use file created date instead of modified date
+      --drive-use-shared-date                                    Use date file was shared instead of modified date
+      --drive-use-trash                                          Send files to the trash instead of deleting permanently (default true)
+      --drive-v2-download-min-size SizeSuffix                    If Object's are greater, use drive v2 API to download (default off)
+      --dropbox-auth-url string                                  Auth server URL
+      --dropbox-batch-mode string                                Upload file batching sync|async|off (default "sync")
+      --dropbox-batch-size int                                   Max number of files in upload batch
       --dropbox-batch-timeout Duration                           Max time to allow an idle upload batch before uploading (default 0s)
-      --dropbox-chunk-size SizeSuffix                            Upload chunk size. (< 150Mi). (default 48Mi)
+      --dropbox-chunk-size SizeSuffix                            Upload chunk size (< 150Mi) (default 48Mi)
       --dropbox-client-id string                                 OAuth Client Id
       --dropbox-client-secret string                             OAuth Client Secret
-      --dropbox-encoding MultiEncoder                            This sets the encoding for the backend. (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
-      --dropbox-impersonate string                               Impersonate this user when using a business account.
-      --dropbox-shared-files                                     Instructs rclone to work on individual shared files.
-      --dropbox-shared-folders                                   Instructs rclone to work on shared folders.
-      --dropbox-token string                                     OAuth Access Token as a JSON blob.
-      --dropbox-token-url string                                 Token server url.
+      --dropbox-encoding MultiEncoder                            This sets the encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
+      --dropbox-impersonate string                               Impersonate this user when using a business account
+      --dropbox-shared-files                                     Instructs rclone to work on individual shared files
+      --dropbox-shared-folders                                   Instructs rclone to work on shared folders
+      --dropbox-token string                                     OAuth Access Token as a JSON blob
+      --dropbox-token-url string                                 Token server url
       --fichier-api-key string                                   Your API Key, get it from https://1fichier.com/console/params.pl
-      --fichier-encoding MultiEncoder                            This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
+      --fichier-encoding MultiEncoder                            This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
       --fichier-file-password string                             If you want to download a shared file that is password protected, add this parameter (obscured)
       --fichier-folder-password string                           If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
       --fichier-shared-folder string                             If you want to download a shared folder, add this parameter
-      --filefabric-encoding MultiEncoder                         This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
+      --filefabric-encoding MultiEncoder                         This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
       --filefabric-permanent-token string                        Permanent Authentication Token
       --filefabric-root-folder-id string                         ID of the root folder
       --filefabric-token string                                  Session Token
       --filefabric-token-expiry string                           Token expiry time
       --filefabric-url string                                    URL of the Enterprise File Fabric to connect to
       --filefabric-version string                                Version read from the file fabric
-      --ftp-close-timeout Duration                               Maximum time to wait for a response to close. (default 1m0s)
+      --ftp-close-timeout Duration                               Maximum time to wait for a response to close (default 1m0s)
       --ftp-concurrency int                                      Maximum number of FTP simultaneous connections, 0 for unlimited
       --ftp-disable-epsv                                         Disable using EPSV even if server advertises support
       --ftp-disable-mlsd                                         Disable using MLSD even if server advertises support
-      --ftp-encoding MultiEncoder                                This sets the encoding for the backend. (default Slash,Del,Ctl,RightSpace,Dot)
+      --ftp-encoding MultiEncoder                                This sets the encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
       --ftp-explicit-tls                                         Use Explicit FTPS (FTP over TLS)
       --ftp-host string                                          FTP host to connect to
       --ftp-idle-timeout Duration                                Max time before closing idle connections (default 1m0s)
@@ -337,34 +337,34 @@ and may be set in the config file.
       --ftp-tls                                                  Use Implicit FTPS (FTP over TLS)
       --ftp-user string                                          FTP username, leave blank for current username, $USER
       --gcs-anonymous                                            Access public buckets and objects without credentials
-      --gcs-auth-url string                                      Auth server URL.
-      --gcs-bucket-acl string                                    Access Control List for new buckets.
-      --gcs-bucket-policy-only                                   Access checks should use bucket-level IAM policies.
+      --gcs-auth-url string                                      Auth server URL
+      --gcs-bucket-acl string                                    Access Control List for new buckets
+      --gcs-bucket-policy-only                                   Access checks should use bucket-level IAM policies
       --gcs-client-id string                                     OAuth Client Id
       --gcs-client-secret string                                 OAuth Client Secret
-      --gcs-encoding MultiEncoder                                This sets the encoding for the backend. (default Slash,CrLf,InvalidUtf8,Dot)
-      --gcs-location string                                      Location for the newly created buckets.
-      --gcs-object-acl string                                    Access Control List for new objects.
-      --gcs-project-number string                                Project number.
+      --gcs-encoding MultiEncoder                                This sets the encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
+      --gcs-location string                                      Location for the newly created buckets
+      --gcs-object-acl string                                    Access Control List for new objects
+      --gcs-project-number string                                Project number
       --gcs-service-account-file string                          Service Account Credentials JSON file path
-      --gcs-storage-class string                                 The storage class to use when storing objects in Google Cloud Storage.
-      --gcs-token string                                         OAuth Access Token as a JSON blob.
-      --gcs-token-url string                                     Token server url.
-      --gphotos-auth-url string                                  Auth server URL.
+      --gcs-storage-class string                                 The storage class to use when storing objects in Google Cloud Storage
+      --gcs-token string                                         OAuth Access Token as a JSON blob
+      --gcs-token-url string                                     Token server url
+      --gphotos-auth-url string                                  Auth server URL
       --gphotos-client-id string                                 OAuth Client Id
       --gphotos-client-secret string                             OAuth Client Secret
-      --gphotos-include-archived                                 Also view and download archived media.
-      --gphotos-read-only                                        Set to make the Google Photos backend read only.
-      --gphotos-read-size                                        Set to read the size of media items.
+      --gphotos-include-archived                                 Also view and download archived media
+      --gphotos-read-only                                        Set to make the Google Photos backend read only
+      --gphotos-read-size                                        Set to read the size of media items
       --gphotos-start-year int                                   Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
-      --gphotos-token string                                     OAuth Access Token as a JSON blob.
+      --gphotos-token string                                     OAuth Access Token as a JSON blob
-      --gphotos-token-url string                                 Token server url.
-      --hasher-auto-size SizeSuffix                              Auto-update checksum for files smaller than this size (disabled by default).
-      --hasher-hashes CommaSepList                               Comma separated list of supported checksum types. (default md5,sha1)
-      --hasher-max-age Duration                                  Maximum time to keep checksums in cache (0 = no cache, off = cache forever). (default off)
-      --hasher-remote string                                     Remote to cache checksums for (e.g. myRemote:path).
+      --gphotos-token-url string                                 Token server url
+      --hasher-auto-size SizeSuffix                              Auto-update checksum for files smaller than this size (disabled by default)
+      --hasher-hashes CommaSepList                               Comma separated list of supported checksum types (default md5,sha1)
+      --hasher-max-age Duration                                  Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
+      --hasher-remote string                                     Remote to cache checksums for (e.g. myRemote:path)
       --hdfs-data-transfer-protection string                     Kerberos data transfer protection: authentication|integrity|privacy
-      --hdfs-encoding MultiEncoder                               This sets the encoding for the backend. (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
+      --hdfs-encoding MultiEncoder                               This sets the encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
       --hdfs-namenode string                                     hadoop name node and port
       --hdfs-service-principal-name string                       Kerberos service principal name for the namenode
       --hdfs-username string                                     hadoop user name
@@ -372,173 +372,173 @@ and may be set in the config file.
       --http-no-head                                             Don't use HEAD requests to find file sizes in dir listing
       --http-no-slash                                            Set this if the site doesn't end directories with /
       --http-url string                                          URL of http host to connect to
-      --hubic-auth-url string                                    Auth server URL.
-      --hubic-chunk-size SizeSuffix                              Above this size files will be chunked into a _segments container. (default 5Gi)
+      --hubic-auth-url string                                    Auth server URL
+      --hubic-chunk-size SizeSuffix                              Above this size files will be chunked into a _segments container (default 5Gi)
       --hubic-client-id string                                   OAuth Client Id
       --hubic-client-secret string                               OAuth Client Secret
-      --hubic-encoding MultiEncoder                              This sets the encoding for the backend. (default Slash,InvalidUtf8)
-      --hubic-no-chunk                                           Don't chunk files during streaming upload.
-      --hubic-token string                                       OAuth Access Token as a JSON blob.
-      --hubic-token-url string                                   Token server url.
-      --jottacloud-encoding MultiEncoder                         This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
-      --jottacloud-hard-delete                                   Delete files permanently rather than putting them into the trash.
-      --jottacloud-md5-memory-limit SizeSuffix                   Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10Mi)
-      --jottacloud-no-versions                                   Avoid server side versioning by deleting files and recreating files instead of overwriting them.
-      --jottacloud-trashed-only                                  Only show files that are in the trash.
-      --jottacloud-upload-resume-limit SizeSuffix                Files bigger than this can be resumed if the upload fail's. (default 10Mi)
-      --koofr-encoding MultiEncoder                              This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --hubic-encoding MultiEncoder                              This sets the encoding for the backend (default Slash,InvalidUtf8)
+      --hubic-no-chunk                                           Don't chunk files during streaming upload
+      --hubic-token string                                       OAuth Access Token as a JSON blob
+      --hubic-token-url string                                   Token server url
+      --jottacloud-encoding MultiEncoder                         This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
+      --jottacloud-hard-delete                                   Delete files permanently rather than putting them into the trash
+      --jottacloud-md5-memory-limit SizeSuffix                   Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
+      --jottacloud-no-versions                                   Avoid server side versioning by deleting files and recreating files instead of overwriting them
+      --jottacloud-trashed-only                                  Only show files that are in the trash
+      --jottacloud-upload-resume-limit SizeSuffix                Files bigger than this can be resumed if the upload fails (default 10Mi)
+      --koofr-encoding MultiEncoder                              This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
       --koofr-endpoint string                                    The Koofr API endpoint to use (default "https://app.koofr.net")
-      --koofr-mountid string                                     Mount ID of the mount to use. If omitted, the primary mount is used.
+      --koofr-mountid string                                     Mount ID of the mount to use
       --koofr-password string                                    Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
-      --koofr-setmtime                                           Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend. (default true)
+      --koofr-setmtime                                           Does the backend support setting modification time (default true)
       --koofr-user string                                        Your Koofr user name
   -l, --links                                                    Translate symlinks to/from regular files with a '.rclonelink' extension
       --local-case-insensitive                                   Force the filesystem to report itself as case insensitive
-      --local-case-sensitive                                     Force the filesystem to report itself as case sensitive.
-      --local-encoding MultiEncoder                              This sets the encoding for the backend. (default Slash,Dot)
+      --local-case-sensitive                                     Force the filesystem to report itself as case sensitive
+      --local-encoding MultiEncoder                              This sets the encoding for the backend (default Slash,Dot)
       --local-no-check-updated                                   Don't check to see if the files change during upload
       --local-no-preallocate                                     Disable preallocation of disk space for transferred files
       --local-no-set-modtime                                     Disable setting modtime
       --local-no-sparse                                          Disable sparse files for multi-thread downloads
       --local-nounc string                                       Disable UNC (long path names) conversion on Windows
       --local-unicode-normalization                              Apply unicode NFC normalization to paths and filenames
-      --local-zero-size-links                                    Assume the Stat size of links is zero (and read them instead) (Deprecated)
+      --local-zero-size-links                                    Assume the Stat size of links is zero (and read them instead) (deprecated)
       --mailru-check-hash                                        What should copy do if file checksum is mismatched or invalid (default true)
-      --mailru-encoding MultiEncoder                             This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --mailru-encoding MultiEncoder                             This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
       --mailru-pass string                                       Password (obscured)
-      --mailru-speedup-enable                                    Skip full upload if there is another file with same data hash. (default true)
-      --mailru-speedup-file-patterns string                      Comma separated list of file name patterns eligible for speedup (put by hash). (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
+      --mailru-speedup-enable                                    Skip full upload if there is another file with same data hash (default true)
+      --mailru-speedup-file-patterns string                      Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
       --mailru-speedup-max-disk SizeSuffix                       This option allows you to disable speedup (put by hash) for large files (default 3Gi)
-      --mailru-speedup-max-memory SizeSuffix                     Files larger than the size given below will always be hashed on disk. (default 32Mi)
+      --mailru-speedup-max-memory SizeSuffix                     Files larger than the size given below will always be hashed on disk (default 32Mi)
       --mailru-user string                                       User name (usually email)
-      --mega-debug                                               Output more debug from Mega.
-      --mega-encoding MultiEncoder                               This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
-      --mega-hard-delete                                         Delete files permanently rather than putting them into the trash.
-      --mega-pass string                                         Password. (obscured)
+      --mega-debug                                               Output more debug from Mega
+      --mega-encoding MultiEncoder                               This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
+      --mega-hard-delete                                         Delete files permanently rather than putting them into the trash
+      --mega-pass string                                         Password (obscured)
       --mega-user string                                         User name
-  -x, --one-file-system                                          Don't cross filesystem boundaries (unix/macOS only).
-      --onedrive-auth-url string                                 Auth server URL.
-      --onedrive-chunk-size SizeSuffix                           Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default 10Mi)
+  -x, --one-file-system                                          Don't cross filesystem boundaries (unix/macOS only)
+      --onedrive-auth-url string                                 Auth server URL
+      --onedrive-chunk-size SizeSuffix                           Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
       --onedrive-client-id string                                OAuth Client Id
       --onedrive-client-secret string                            OAuth Client Secret
       --onedrive-drive-id string                                 The ID of the drive to use
-      --onedrive-drive-type string                               The type of the drive ( personal | business | documentLibrary )
-      --onedrive-encoding MultiEncoder                           This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
-      --onedrive-expose-onenote-files                            Set to make OneNote files show up in directory listings.
-      --onedrive-link-password string                            Set the password for links created by the link command.
-      --onedrive-link-scope string                               Set the scope of the links created by the link command. (default "anonymous")
-      --onedrive-link-type string                                Set the type of the links created by the link command. (default "view")
-      --onedrive-list-chunk int                                  Size of listing chunk. (default 1000)
+      --onedrive-drive-type string                               The type of the drive (personal | business | documentLibrary)
+      --onedrive-encoding MultiEncoder                           This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
+      --onedrive-expose-onenote-files                            Set to make OneNote files show up in directory listings
+      --onedrive-link-password string                            Set the password for links created by the link command
+      --onedrive-link-scope string                               Set the scope of the links created by the link command (default "anonymous")
+      --onedrive-link-type string                                Set the type of the links created by the link command (default "view")
+      --onedrive-list-chunk int                                  Size of listing chunk (default 1000)
       --onedrive-no-versions                                     Remove all versions on modifying operations
-      --onedrive-region string                                   Choose national cloud region for OneDrive. (default "global")
-      --onedrive-server-side-across-configs                      Allow server-side operations (e.g. copy) to work across different onedrive configs.
-      --onedrive-token string                                    OAuth Access Token as a JSON blob.
-      --onedrive-token-url string                                Token server url.
-      --opendrive-chunk-size SizeSuffix                          Files will be uploaded in chunks this size. (default 10Mi)
-      --opendrive-encoding MultiEncoder                          This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
-      --opendrive-password string                                Password. (obscured)
+      --onedrive-region string                                   Choose national cloud region for OneDrive (default "global")
+      --onedrive-server-side-across-configs                      Allow server-side operations (e.g. copy) to work across different onedrive configs
+      --onedrive-token string                                    OAuth Access Token as a JSON blob
+      --onedrive-token-url string                                Token server url
+      --opendrive-chunk-size SizeSuffix                          Files will be uploaded in chunks this size (default 10Mi)
+      --opendrive-encoding MultiEncoder                          This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+      --opendrive-password string                                Password (obscured)
       --opendrive-username string                                Username
-      --pcloud-auth-url string                                   Auth server URL.
+      --pcloud-auth-url string                                   Auth server URL
       --pcloud-client-id string                                  OAuth Client Id
       --pcloud-client-secret string                              OAuth Client Secret
-      --pcloud-encoding MultiEncoder                             This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
-      --pcloud-hostname string                                   Hostname to connect to. (default "api.pcloud.com")
-      --pcloud-root-folder-id string                             Fill in for rclone to use a non root folder as its starting point. (default "d0")
-      --pcloud-token string                                      OAuth Access Token as a JSON blob.
-      --pcloud-token-url string                                  Token server url.
-      --premiumizeme-encoding MultiEncoder                       This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
-      --putio-encoding MultiEncoder                              This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --pcloud-encoding MultiEncoder                             This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --pcloud-hostname string                                   Hostname to connect to (default "api.pcloud.com")
+      --pcloud-root-folder-id string                             Fill in for rclone to use a non root folder as its starting point (default "d0")
+      --pcloud-token string                                      OAuth Access Token as a JSON blob
+      --pcloud-token-url string                                  Token server url
+      --premiumizeme-encoding MultiEncoder                       This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
+      --putio-encoding MultiEncoder                              This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
       --qingstor-access-key-id string                            QingStor Access Key ID
-      --qingstor-chunk-size SizeSuffix                           Chunk size to use for uploading. (default 4Mi)
-      --qingstor-connection-retries int                          Number of connection retries. (default 3)
-      --qingstor-encoding MultiEncoder                           This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8)
-      --qingstor-endpoint string                                 Enter an endpoint URL to connection QingStor API.
-      --qingstor-env-auth                                        Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
+      --qingstor-chunk-size SizeSuffix                           Chunk size to use for uploading (default 4Mi)
+      --qingstor-connection-retries int                          Number of connection retries (default 3)
+      --qingstor-encoding MultiEncoder                           This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8)
+      --qingstor-endpoint string                                 Enter an endpoint URL to connection QingStor API
+      --qingstor-env-auth                                        Get QingStor credentials from runtime
       --qingstor-secret-access-key string                        QingStor Secret Access Key (password)
-      --qingstor-upload-concurrency int                          Concurrency for multipart uploads. (default 1)
+      --qingstor-upload-concurrency int                          Concurrency for multipart uploads (default 1)
       --qingstor-upload-cutoff SizeSuffix                        Cutoff for switching to chunked upload (default 200Mi)
-      --qingstor-zone string                                     Zone to connect to.
-      --s3-access-key-id string                                  AWS Access Key ID.
-      --s3-acl string                                            Canned ACL used when creating buckets and storing or copying objects.
-      --s3-bucket-acl string                                     Canned ACL used when creating buckets.
-      --s3-chunk-size SizeSuffix                                 Chunk size to use for uploading. (default 5Mi)
+      --qingstor-zone string                                     Zone to connect to
+      --s3-access-key-id string                                  AWS Access Key ID
+      --s3-acl string                                            Canned ACL used when creating buckets and storing or copying objects
+      --s3-bucket-acl string                                     Canned ACL used when creating buckets
+      --s3-chunk-size SizeSuffix                                 Chunk size to use for uploading (default 5Mi)
       --s3-copy-cutoff SizeSuffix                                Cutoff for switching to multipart copy (default 4.656Gi)
       --s3-disable-checksum                                      Don't store MD5 checksum with object metadata
       --s3-disable-http2                                         Disable usage of http2 for S3 backends
-      --s3-encoding MultiEncoder                                 This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
-      --s3-endpoint string                                       Endpoint for S3 API.
-      --s3-env-auth                                              Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
-      --s3-force-path-style                                      If true use path style access if false use virtual hosted style. (default true)
-      --s3-leave-parts-on-error                                  If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
-      --s3-list-chunk int                                        Size of listing chunk (response list for each ListObject S3 request). (default 1000)
-      --s3-location-constraint string                            Location constraint - must be set to match the Region.
-      --s3-max-upload-parts int                                  Maximum number of parts in a multipart upload. (default 10000)
-      --s3-memory-pool-flush-time Duration                       How often internal memory buffer pools will be flushed. (default 1m0s)
-      --s3-memory-pool-use-mmap                                  Whether to use mmap buffers in internal memory pool.
+      --s3-encoding MultiEncoder                                 This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
+      --s3-endpoint string                                       Endpoint for S3 API
+      --s3-env-auth                                              Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
+      --s3-force-path-style                                      If true use path style access if false use virtual hosted style (default true)
+      --s3-leave-parts-on-error                                  If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
+      --s3-list-chunk int                                        Size of listing chunk (response list for each ListObject S3 request) (default 1000)
+      --s3-location-constraint string                            Location constraint - must be set to match the Region
+      --s3-max-upload-parts int                                  Maximum number of parts in a multipart upload (default 10000)
+      --s3-memory-pool-flush-time Duration                       How often internal memory buffer pools will be flushed (default 1m0s)
+      --s3-memory-pool-use-mmap                                  Whether to use mmap buffers in internal memory pool
       --s3-no-check-bucket                                       If set, don't attempt to check the bucket exists or create it
       --s3-no-head                                               If set, don't HEAD uploaded objects to check integrity
       --s3-no-head-object                                        If set, don't HEAD objects
       --s3-profile string                                        Profile to use in the shared credentials file
-      --s3-provider string                                       Choose your S3 provider.
-      --s3-region string                                         Region to connect to.
-      --s3-requester-pays                                        Enables requester pays option when interacting with S3 bucket.
+      --s3-provider string                                       Choose your S3 provider
+      --s3-region string                                         Region to connect to
+      --s3-requester-pays                                        Enables requester pays option when interacting with S3 bucket
       --s3-secret-access-key string                              AWS Secret Access Key (password)
-      --s3-server-side-encryption string                         The server-side encryption algorithm used when storing this object in S3.
+      --s3-server-side-encryption string                         The server-side encryption algorithm used when storing this object in S3
       --s3-session-token string                                  An AWS session token
       --s3-shared-credentials-file string                        Path to the shared credentials file
-      --s3-sse-customer-algorithm string                         If using SSE-C, the server-side encryption algorithm used when storing this object in S3.
-      --s3-sse-customer-key string                               If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.
-      --s3-sse-customer-key-md5 string                           If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
-      --s3-sse-kms-key-id string                                 If using KMS ID you must provide the ARN of Key.
-      --s3-storage-class string                                  The storage class to use when storing new objects in S3.
-      --s3-upload-concurrency int                                Concurrency for multipart uploads. (default 4)
+      --s3-sse-customer-algorithm string                         If using SSE-C, the server-side encryption algorithm used when storing this object in S3
+      --s3-sse-customer-key string                               If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data
+      --s3-sse-customer-key-md5 string                           If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
+      --s3-sse-kms-key-id string                                 If using KMS ID you must provide the ARN of Key
+      --s3-storage-class string                                  The storage class to use when storing new objects in S3
+      --s3-upload-concurrency int                                Concurrency for multipart uploads (default 4)
       --s3-upload-cutoff SizeSuffix                              Cutoff for switching to chunked upload (default 200Mi)
-      --s3-use-accelerate-endpoint                               If true use the AWS S3 accelerated endpoint.
-      --s3-v2-auth                                               If true use v2 authentication.
+      --s3-use-accelerate-endpoint                               If true use the AWS S3 accelerated endpoint
+      --s3-v2-auth                                               If true use v2 authentication
       --seafile-2fa                                              Two-factor authentication ('true' if the account has 2FA enabled)
       --seafile-create-library                                   Should rclone create a library if it doesn't exist
-      --seafile-encoding MultiEncoder                            This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
-      --seafile-library string                                   Name of the library. Leave blank to access all non-encrypted libraries.
-      --seafile-library-key string                               Library password (for encrypted libraries only). Leave blank if you pass it through the command line. (obscured)
+      --seafile-encoding MultiEncoder                            This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
+      --seafile-library string                                   Name of the library
+      --seafile-library-key string                               Library password (for encrypted libraries only) (obscured)
       --seafile-pass string                                      Password (obscured)
       --seafile-url string                                       URL of seafile host to connect to
       --seafile-user string                                      User name (usually email address)
-      --sftp-ask-password                                        Allow asking for SFTP password when needed.
+      --sftp-ask-password                                        Allow asking for SFTP password when needed
       --sftp-disable-concurrent-reads                            If set don't use concurrent reads
       --sftp-disable-concurrent-writes                           If set don't use concurrent writes
-      --sftp-disable-hashcheck                                   Disable the execution of SSH commands to determine if remote file hashing is available.
+      --sftp-disable-hashcheck                                   Disable the execution of SSH commands to determine if remote file hashing is available
       --sftp-host string                                         SSH host to connect to
       --sftp-idle-timeout Duration                               Max time before closing idle connections (default 1m0s)
-      --sftp-key-file string                                     Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
-      --sftp-key-file-pass string                                The passphrase to decrypt the PEM-encoded private key file. (obscured)
-      --sftp-key-pem string                                      Raw PEM-encoded private key, If specified, will override key_file parameter.
-      --sftp-key-use-agent                                       When set forces the usage of the ssh-agent.
-      --sftp-known-hosts-file string                             Optional path to known_hosts file.
-      --sftp-md5sum-command string                               The command used to read md5 hashes. Leave blank for autodetect.
-      --sftp-pass string                                         SSH password, leave blank to use ssh-agent. (obscured)
-      --sftp-path-override string                                Override path used by SSH connection.
+      --sftp-key-file string                                     Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent
+      --sftp-key-file-pass string                                The passphrase to decrypt the PEM-encoded private key file (obscured)
+      --sftp-key-pem string                                      Raw PEM-encoded private key, if specified, will override key_file parameter
+      --sftp-key-use-agent                                       When set forces the usage of the ssh-agent
+      --sftp-known-hosts-file string                             Optional path to known_hosts file
+      --sftp-md5sum-command string                               The command used to read md5 hashes
+      --sftp-pass string                                         SSH password, leave blank to use ssh-agent (obscured)
+      --sftp-path-override string                                Override path used by SSH connection
       --sftp-port string                                         SSH port, leave blank to use default (22)
-      --sftp-pubkey-file string                                  Optional path to public key file.
-      --sftp-server-command string                               Specifies the path or command to run a sftp server on the remote host.
-      --sftp-set-modtime                                         Set the modified time on the remote if set. (default true)
-      --sftp-sha1sum-command string                              The command used to read sha1 hashes. Leave blank for autodetect.
-      --sftp-skip-links                                          Set to skip any symlinks and any other non regular files.
-      --sftp-subsystem string                                    Specifies the SSH2 subsystem on the remote host. (default "sftp")
+      --sftp-pubkey-file string                                  Optional path to public key file
+      --sftp-server-command string                               Specifies the path or command to run a sftp server on the remote host
+      --sftp-set-modtime                                         Set the modified time on the remote if set (default true)
+      --sftp-sha1sum-command string                              The command used to read sha1 hashes
+      --sftp-skip-links                                          Set to skip any symlinks and any other non regular files
+      --sftp-subsystem string                                    Specifies the SSH2 subsystem on the remote host (default "sftp")
       --sftp-use-fstat                                           If set use fstat instead of stat
-      --sftp-use-insecure-cipher                                 Enable the use of insecure ciphers and key exchange methods.
+      --sftp-use-insecure-cipher                                 Enable the use of insecure ciphers and key exchange methods
       --sftp-user string                                         SSH username, leave blank for current username, $USER
-      --sharefile-chunk-size SizeSuffix                          Upload chunk size. Must a power of 2 >= 256k. (default 64Mi)
-      --sharefile-encoding MultiEncoder                          This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
-      --sharefile-endpoint string                                Endpoint for API calls.
+      --sharefile-chunk-size SizeSuffix                          Upload chunk size (default 64Mi)
+      --sharefile-encoding MultiEncoder                          This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
+      --sharefile-endpoint string                                Endpoint for API calls
       --sharefile-root-folder-id string                          ID of the root folder
-      --sharefile-upload-cutoff SizeSuffix                       Cutoff for switching to multipart upload. (default 128Mi)
-      --skip-links                                               Don't warn about skipped symlinks.
-      --sugarsync-access-key-id string                           Sugarsync Access Key ID.
-      --sugarsync-app-id string                                  Sugarsync App ID.
+      --sharefile-upload-cutoff SizeSuffix                       Cutoff for switching to multipart upload (default 128Mi)
+      --skip-links                                               Don't warn about skipped symlinks
+      --sugarsync-access-key-id string                           Sugarsync Access Key ID
+      --sugarsync-app-id string                                  Sugarsync App ID
       --sugarsync-authorization string                           Sugarsync authorization
       --sugarsync-authorization-expiry string                    Sugarsync authorization expiry
       --sugarsync-deleted-id string                              Sugarsync deleted folder id
-      --sugarsync-encoding MultiEncoder                          This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8,Dot)
+      --sugarsync-encoding MultiEncoder                          This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
       --sugarsync-hard-delete                                    Permanently delete files if true
       --sugarsync-private-access-key string                      Sugarsync Private Access Key
       --sugarsync-refresh-token string                           Sugarsync refresh token
@@ -547,56 +547,56 @@ and may be set in the config file.
       --swift-application-credential-id string                   Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
       --swift-application-credential-name string                 Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
       --swift-application-credential-secret string               Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
-      --swift-auth string                                        Authentication URL for server (OS_AUTH_URL).
+      --swift-auth string                                        Authentication URL for server (OS_AUTH_URL)
       --swift-auth-token string                                  Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
       --swift-auth-version int                                   AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
-      --swift-chunk-size SizeSuffix                              Above this size files will be chunked into a _segments container. (default 5Gi)
+      --swift-chunk-size SizeSuffix                              Above this size files will be chunked into a _segments container (default 5Gi)
       --swift-domain string                                      User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
-      --swift-encoding MultiEncoder                              This sets the encoding for the backend. (default Slash,InvalidUtf8)
+      --swift-encoding MultiEncoder                              This sets the encoding for the backend (default Slash,InvalidUtf8)
       --swift-endpoint-type string                               Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
-      --swift-env-auth                                           Get swift credentials from environment variables in standard OpenStack form.
-      --swift-key string                                         API key or password (OS_PASSWORD).
-      --swift-leave-parts-on-error                               If true avoid calling abort upload on a failure. It should be set to true for resuming uploads across different sessions.
-      --swift-no-chunk                                           Don't chunk files during streaming upload.
+      --swift-env-auth                                           Get swift credentials from environment variables in standard OpenStack form
+      --swift-key string                                         API key or password (OS_PASSWORD)
+      --swift-leave-parts-on-error                               If true avoid calling abort upload on a failure
+      --swift-no-chunk                                           Don't chunk files during streaming upload
       --swift-region string                                      Region name - optional (OS_REGION_NAME)
       --swift-storage-policy string                              The storage policy to use when creating a new container
       --swift-storage-url string                                 Storage URL - optional (OS_STORAGE_URL)
       --swift-tenant string                                      Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
       --swift-tenant-domain string                               Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
       --swift-tenant-id string                                   Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
-      --swift-user string                                        User name to log in (OS_USERNAME).
-      --swift-user-id string                                     User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
-      --tardigrade-access-grant string                           Access Grant.
-      --tardigrade-api-key string                                API Key.
-      --tardigrade-passphrase string                             Encryption Passphrase. To access existing objects enter passphrase used for uploading.
-      --tardigrade-provider string                               Choose an authentication method. (default "existing")
-      --tardigrade-satellite-address <nodeid>@<address>:<port>   Satellite Address. Custom satellite address should match the format: <nodeid>@<address>:<port>. (default "us-central-1.tardigrade.io")
-      --union-action-policy string                               Policy to choose upstream on ACTION category. (default "epall")
-      --union-cache-time int                                     Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used. (default 120)
-      --union-create-policy string                               Policy to choose upstream on CREATE category. (default "epmfs")
-      --union-search-policy string                               Policy to choose upstream on SEARCH category. (default "ff")
-      --union-upstreams string                                   List of space separated upstreams.
+      --swift-user string                                        User name to log in (OS_USERNAME)
+      --swift-user-id string                                     User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
+      --tardigrade-access-grant string                           Access grant
+      --tardigrade-api-key string                                API key
+      --tardigrade-passphrase string                             Encryption passphrase
+      --tardigrade-provider string                               Choose an authentication method (default "existing")
+      --tardigrade-satellite-address <nodeid>@<address>:<port>   Satellite address (default "us-central-1.tardigrade.io")
+      --union-action-policy string                               Policy to choose upstream on ACTION category (default "epall")
+      --union-cache-time int                                     Cache time of usage and free space (in seconds) (default 120)
+      --union-create-policy string                               Policy to choose upstream on CREATE category (default "epmfs")
+      --union-search-policy string                               Policy to choose upstream on SEARCH category (default "ff")
+      --union-upstreams string                                   List of space separated upstreams
       --uptobox-access-token string                              Your access Token, get it from https://uptobox.com/my_account
-      --uptobox-encoding MultiEncoder                            This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
+      --uptobox-encoding MultiEncoder                            This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
       --webdav-bearer-token string                               Bearer token instead of user/pass (e.g. a Macaroon)
       --webdav-bearer-token-command string                       Command to run to get a bearer token
-      --webdav-encoding string                                   This sets the encoding for the backend.
+      --webdav-encoding string                                   This sets the encoding for the backend
       --webdav-headers CommaSepList                              Set HTTP headers for all transactions
-      --webdav-pass string                                       Password. (obscured)
+      --webdav-pass string                                       Password (obscured)
       --webdav-url string                                        URL of http host to connect to
-      --webdav-user string                                       User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.
+      --webdav-user string                                       User name
       --webdav-vendor string                                     Name of the Webdav site/service/software you are using
-      --yandex-auth-url string                                   Auth server URL.
+      --yandex-auth-url string                                   Auth server URL
       --yandex-client-id string                                  OAuth Client Id
       --yandex-client-secret string                              OAuth Client Secret
-      --yandex-encoding MultiEncoder                             This sets the encoding for the backend. (default Slash,Del,Ctl,InvalidUtf8,Dot)
-      --yandex-token string                                      OAuth Access Token as a JSON blob.
-      --yandex-token-url string                                  Token server url.
-      --zoho-auth-url string                                     Auth server URL.
+      --yandex-encoding MultiEncoder                             This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
+      --yandex-token string                                      OAuth Access Token as a JSON blob
+      --yandex-token-url string                                  Token server URL
+      --zoho-auth-url string                                     Auth server URL
       --zoho-client-id string                                    OAuth Client Id
       --zoho-client-secret string                                OAuth Client Secret
-      --zoho-encoding MultiEncoder                               This sets the encoding for the backend. (default Del,Ctl,InvalidUtf8)
-      --zoho-region string                                       Zoho region to connect to.
-      --zoho-token string                                        OAuth Access Token as a JSON blob.
-      --zoho-token-url string                                    Token server url.
+      --zoho-encoding MultiEncoder                               This sets the encoding for the backend (default Del,Ctl,InvalidUtf8)
+      --zoho-region string                                       Zoho region to connect to
+      --zoho-token string                                        OAuth Access Token as a JSON blob
+      --zoho-token-url string                                    Token server URL
 ```
diff --git a/fs/accounting/stats_groups.go b/fs/accounting/stats_groups.go
index cde662645..4c87bded4 100644
--- a/fs/accounting/stats_groups.go
+++ b/fs/accounting/stats_groups.go
@@ -235,7 +235,7 @@ func init() {
 		Fn:    rcDeleteStats,
 		Title: "Delete stats group.",
 		Help: `
-This deletes entire stats group
+This deletes entire stats group.
 
 Parameters
 
diff --git a/fs/config/config.go b/fs/config/config.go
index 5ce802224..a2db8ddfd 100644
--- a/fs/config/config.go
+++ b/fs/config/config.go
@@ -51,7 +51,7 @@ const (
 	ConfigEncoding = "encoding"
 
 	// ConfigEncodingHelp is the help for ConfigEncoding
-	ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info."
+	ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee the [encoding section in the overview](/overview/#encoding) for more info."
 
 	// ConfigAuthorize indicates that we just want "rclone authorize"
 	ConfigAuthorize = "config_authorize"
diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go
index f6ba1c41a..7c2c8ade9 100644
--- a/fs/config/configflags/configflags.go
+++ b/fs/config/configflags/configflags.go
@@ -46,16 +46,16 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
 	flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
 	flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
 	flags.DurationVarP(flagSet, &ci.ModifyWindow, "modify-window", "", ci.ModifyWindow, "Max time diff to be considered the same")
-	flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel.")
-	flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel.")
-	flags.StringVarP(flagSet, &configPath, "config", "", config.GetConfigPath(), "Config file.")
-	flags.StringVarP(flagSet, &cacheDir, "cache-dir", "", config.GetCacheDir(), "Directory rclone will use for caching.")
-	flags.StringVarP(flagSet, &tempDir, "temp-dir", "", os.TempDir(), "Directory rclone will use for temporary files.")
+	flags.IntVarP(flagSet, &ci.Checkers, "checkers", "", ci.Checkers, "Number of checkers to run in parallel")
+	flags.IntVarP(flagSet, &ci.Transfers, "transfers", "", ci.Transfers, "Number of file transfers to run in parallel")
+	flags.StringVarP(flagSet, &configPath, "config", "", config.GetConfigPath(), "Config file")
+	flags.StringVarP(flagSet, &cacheDir, "cache-dir", "", config.GetCacheDir(), "Directory rclone will use for caching")
+	flags.StringVarP(flagSet, &tempDir, "temp-dir", "", os.TempDir(), "Directory rclone will use for temporary files")
 	flags.BoolVarP(flagSet, &ci.CheckSum, "checksum", "c", ci.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
 	flags.BoolVarP(flagSet, &ci.SizeOnly, "size-only", "", ci.SizeOnly, "Skip based on size only, not mod-time or checksum")
 	flags.BoolVarP(flagSet, &ci.IgnoreTimes, "ignore-times", "I", ci.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
 	flags.BoolVarP(flagSet, &ci.IgnoreExisting, "ignore-existing", "", ci.IgnoreExisting, "Skip all files that exist on destination")
-	flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "delete even if there are I/O errors")
+	flags.BoolVarP(flagSet, &ci.IgnoreErrors, "ignore-errors", "", ci.IgnoreErrors, "Delete even if there are I/O errors")
 	flags.BoolVarP(flagSet, &ci.DryRun, "dry-run", "n", ci.DryRun, "Do a trial run with no permanent changes")
 	flags.BoolVarP(flagSet, &ci.Interactive, "interactive", "i", ci.Interactive, "Enable interactive mode")
 	flags.DurationVarP(flagSet, &ci.ConnectTimeout, "contimeout", "", ci.ConnectTimeout, "Connect timeout")
@@ -63,79 +63,79 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
 	flags.DurationVarP(flagSet, &ci.ExpectContinueTimeout, "expect-continue-timeout", "", ci.ExpectContinueTimeout, "Timeout when using expect / 100-continue in HTTP")
 	flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
 	flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
-	flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
-	flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration.")
-	flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration.")
+	flags.BoolVarP(flagSet, &ci.InsecureSkipVerify, "no-check-certificate", "", ci.InsecureSkipVerify, "Do not verify the server SSL certificate (insecure)")
+	flags.BoolVarP(flagSet, &ci.AskPassword, "ask-password", "", ci.AskPassword, "Allow prompt for password for encrypted configuration")
+	flags.FVarP(flagSet, &ci.PasswordCommand, "password-command", "", "Command for supplying password for encrypted configuration")
 	flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
 	flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
 	flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
 	flags.Int64VarP(flagSet, &ci.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
 	flags.BoolVarP(flagSet, &ci.TrackRenames, "track-renames", "", ci.TrackRenames, "When synchronizing, track file renames and do a server-side move if possible")
 	flags.StringVarP(flagSet, &ci.TrackRenamesStrategy, "track-renames-strategy", "", ci.TrackRenamesStrategy, "Strategies to use when synchronizing using track-renames hash|modtime|leaf")
-	flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do.")
-	flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination.")
+	flags.IntVarP(flagSet, &ci.LowLevelRetries, "low-level-retries", "", ci.LowLevelRetries, "Number of low level retries to do")
+	flags.BoolVarP(flagSet, &ci.UpdateOlder, "update", "u", ci.UpdateOlder, "Skip files that are newer on the destination")
 	flags.BoolVarP(flagSet, &ci.UseServerModTime, "use-server-modtime", "", ci.UseServerModTime, "Use server modified time instead of object metadata")
-	flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip.")
-	flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this.")
-	flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum.")
-	flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums.")
+	flags.BoolVarP(flagSet, &ci.NoGzip, "no-gzip-encoding", "", ci.NoGzip, "Don't set Accept-Encoding: gzip")
+	flags.IntVarP(flagSet, &ci.MaxDepth, "max-depth", "", ci.MaxDepth, "If set limits the recursion depth to this")
+	flags.BoolVarP(flagSet, &ci.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping use mod-time or checksum")
+	flags.BoolVarP(flagSet, &ci.IgnoreChecksum, "ignore-checksum", "", ci.IgnoreChecksum, "Skip post copy check of checksums")
 	flags.BoolVarP(flagSet, &ci.IgnoreCaseSync, "ignore-case-sync", "", ci.IgnoreCaseSync, "Ignore case when synchronizing")
-	flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy.")
-	flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers.")
-	flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless.")
-	flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames.")
-	flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical.")
-	flags.StringArrayVarP(flagSet, &ci.CompareDest, "compare-dest", "", nil, "Include additional comma separated server-side paths during comparison.")
-	flags.StringArrayVarP(flagSet, &ci.CopyDest, "copy-dest", "", nil, "Implies --compare-dest but also copies files from paths into destination.")
-	flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR.")
-	flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files.")
-	flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix.")
-	flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
-	flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this.")
-	flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
-	flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
-	flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features.  Use --disable help to see a list.")
-	flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
-	flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files. Fail if existing files have been modified.")
-	flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation.")
-	flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
+	flags.BoolVarP(flagSet, &ci.NoTraverse, "no-traverse", "", ci.NoTraverse, "Don't traverse destination file system on copy")
+	flags.BoolVarP(flagSet, &ci.CheckFirst, "check-first", "", ci.CheckFirst, "Do all the checks before starting transfers")
+	flags.BoolVarP(flagSet, &ci.NoCheckDest, "no-check-dest", "", ci.NoCheckDest, "Don't check the destination, copy regardless")
+	flags.BoolVarP(flagSet, &ci.NoUnicodeNormalization, "no-unicode-normalization", "", ci.NoUnicodeNormalization, "Don't normalize unicode characters in filenames")
+	flags.BoolVarP(flagSet, &ci.NoUpdateModTime, "no-update-modtime", "", ci.NoUpdateModTime, "Don't update destination mod-time if files identical")
+	flags.StringArrayVarP(flagSet, &ci.CompareDest, "compare-dest", "", nil, "Include additional comma separated server-side paths during comparison")
+	flags.StringArrayVarP(flagSet, &ci.CopyDest, "copy-dest", "", nil, "Implies --compare-dest but also copies files from paths into destination")
+	flags.StringVarP(flagSet, &ci.BackupDir, "backup-dir", "", ci.BackupDir, "Make backups into hierarchy based in DIR")
+	flags.StringVarP(flagSet, &ci.Suffix, "suffix", "", ci.Suffix, "Suffix to add to changed files")
+	flags.BoolVarP(flagSet, &ci.SuffixKeepExtension, "suffix-keep-extension", "", ci.SuffixKeepExtension, "Preserve the extension when using --suffix")
+	flags.BoolVarP(flagSet, &ci.UseListR, "fast-list", "", ci.UseListR, "Use recursive list if available; uses more memory but fewer transactions")
+	flags.Float64VarP(flagSet, &ci.TPSLimit, "tpslimit", "", ci.TPSLimit, "Limit HTTP transactions per second to this")
+	flags.IntVarP(flagSet, &ci.TPSLimitBurst, "tpslimit-burst", "", ci.TPSLimitBurst, "Max burst of transactions for --tpslimit")
+	flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name")
+	flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features (use --disable help to see a list)")
+	flags.StringVarP(flagSet, &ci.UserAgent, "user-agent", "", ci.UserAgent, "Set the user-agent to a specified string")
+	flags.BoolVarP(flagSet, &ci.Immutable, "immutable", "", ci.Immutable, "Do not modify files, fail if existing files have been modified")
+	flags.BoolVarP(flagSet, &ci.AutoConfirm, "auto-confirm", "", ci.AutoConfirm, "If enabled, do not request console confirmation")
+	flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats (0 for no limit)")
 	flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
 	flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
-	flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.")
-	flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable.")
-	flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
-	flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
+	flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable")
+	flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable")
+	flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer")
+	flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends")
 	flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
-	flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
-	flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for.")
+	flags.FVarP(flagSet, &ci.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer")
+	flags.DurationVarP(flagSet, &ci.MaxDuration, "max-duration", "", 0, "Maximum duration rclone will transfer data for")
 	flags.FVarP(flagSet, &ci.CutoffMode, "cutoff-mode", "", "Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS")
-	flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog.")
-	flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
-	flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line.")
-	flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.")
-	flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
+	flags.IntVarP(flagSet, &ci.MaxBacklog, "max-backlog", "", ci.MaxBacklog, "Maximum number of objects in sync or check backlog")
+	flags.IntVarP(flagSet, &ci.MaxStatsGroups, "max-stats-groups", "", ci.MaxStatsGroups, "Maximum number of stats groups to keep in memory, on max oldest is discarded")
+	flags.BoolVarP(flagSet, &ci.StatsOneLine, "stats-one-line", "", ci.StatsOneLine, "Make the stats fit on one line")
+	flags.BoolVarP(flagSet, &ci.StatsOneLineDate, "stats-one-line-date", "", ci.StatsOneLineDate, "Enable --stats-one-line and add current date/time prefix")
+	flags.StringVarP(flagSet, &ci.StatsOneLineDateFormat, "stats-one-line-date-format", "", ci.StatsOneLineDateFormat, "Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\"), see https://golang.org/pkg/time/#Time.Format")
 	flags.BoolVarP(flagSet, &ci.ErrorOnNoTransfer, "error-on-no-transfer", "", ci.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
-	flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer.")
-	flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
-	flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar.")
-	flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs).")
+	flags.BoolVarP(flagSet, &ci.Progress, "progress", "P", ci.Progress, "Show progress during transfer")
+	flags.BoolVarP(flagSet, &ci.ProgressTerminalTitle, "progress-terminal-title", "", ci.ProgressTerminalTitle, "Show progress on the terminal title (requires -P/--progress)")
+	flags.BoolVarP(flagSet, &ci.Cookie, "use-cookies", "", ci.Cookie, "Enable session cookiejar")
+	flags.BoolVarP(flagSet, &ci.UseMmap, "use-mmap", "", ci.UseMmap, "Use mmap allocator (see docs)")
 	flags.StringVarP(flagSet, &ci.CaCert, "ca-cert", "", ci.CaCert, "CA certificate used to verify servers")
 	flags.StringVarP(flagSet, &ci.ClientCert, "client-cert", "", ci.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
 	flags.StringVarP(flagSet, &ci.ClientKey, "client-key", "", ci.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
-	flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.")
-	flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.")
-	flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format.")
+	flags.FVarP(flagSet, &ci.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size")
+	flags.IntVarP(flagSet, &ci.MultiThreadStreams, "multi-thread-streams", "", ci.MultiThreadStreams, "Max number of streams to use for multi-thread downloads")
+	flags.BoolVarP(flagSet, &ci.UseJSONLog, "use-json-log", "", ci.UseJSONLog, "Use json log format")
 	flags.StringVarP(flagSet, &ci.OrderBy, "order-by", "", ci.OrderBy, "Instructions on how to order the transfers, e.g. 'size,descending'")
 	flags.StringArrayVarP(flagSet, &uploadHeaders, "header-upload", "", nil, "Set HTTP header for upload transactions")
 	flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
 	flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
-	flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
-	flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
-	flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
-	flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)")
-	flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes")
-	flags.BoolVarP(flagSet, &ci.DisableHTTP2, "disable-http2", "", ci.DisableHTTP2, "Disable HTTP/2 in the global transport.")
-	flags.BoolVarP(flagSet, &ci.HumanReadable, "human-readable", "", ci.HumanReadable, "Print numbers in a human-readable format. Sizes with suffix Ki|Mi|Gi|Ti|Pi.")
+	flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files")
+	flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window (supported on Windows only)")
+	flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21")
+	flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "Cache remotes for this long (0 to disable caching)")
+	flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "Interval to check for expired remotes")
+	flags.BoolVarP(flagSet, &ci.DisableHTTP2, "disable-http2", "", ci.DisableHTTP2, "Disable HTTP/2 in the global transport")
+	flags.BoolVarP(flagSet, &ci.HumanReadable, "human-readable", "", ci.HumanReadable, "Print numbers in a human-readable format, sizes with suffix Ki|Mi|Gi|Ti|Pi")
 	flags.DurationVarP(flagSet, &ci.KvLockTime, "kv-lock-time", "", ci.KvLockTime, "Maximum time to keep key-value database locked by process")
 }
 
diff --git a/fs/config/rc.go b/fs/config/rc.go
index 59dbb9170..b5d4d0b91 100644
--- a/fs/config/rc.go
+++ b/fs/config/rc.go
@@ -130,7 +130,7 @@ func init() {
 				return rcConfig(ctx, in, name)
 			},
 			Title: name + " the config for a remote.",
-			Help: `This takes the following parameters
+			Help: `This takes the following parameters:
 
 - name - name of remote
 - parameters - a map of \{ "key": "value" \} pairs
diff --git a/fs/log/logflags/logflags.go b/fs/log/logflags/logflags.go
index 70496b8a3..818aaac51 100644
--- a/fs/log/logflags/logflags.go
+++ b/fs/log/logflags/logflags.go
@@ -16,5 +16,5 @@ func AddFlags(flagSet *pflag.FlagSet) {
 	flags.StringVarP(flagSet, &log.Opt.Format, "log-format", "", log.Opt.Format, "Comma separated list of log format options")
 	flags.BoolVarP(flagSet, &log.Opt.UseSyslog, "syslog", "", log.Opt.UseSyslog, "Use Syslog for logging")
 	flags.StringVarP(flagSet, &log.Opt.SyslogFacility, "syslog-facility", "", log.Opt.SyslogFacility, "Facility for syslog, e.g. KERN,USER,...")
-	flags.BoolVarP(flagSet, &log.Opt.LogSystemdSupport, "log-systemd", "", log.Opt.LogSystemdSupport, "Activate systemd integration for the logger.")
+	flags.BoolVarP(flagSet, &log.Opt.LogSystemdSupport, "log-systemd", "", log.Opt.LogSystemdSupport, "Activate systemd integration for the logger")
 }
diff --git a/fs/operations/rc.go b/fs/operations/rc.go
index 434c1dc69..9b5723c38 100644
--- a/fs/operations/rc.go
+++ b/fs/operations/rc.go
@@ -21,7 +21,7 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcList,
 		Title:        "List the given remote and path in JSON format",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:"
 - remote - a path within that remote e.g. "dir"
@@ -36,7 +36,7 @@ func init() {
     - filesOnly - If set only show files
     - hashTypes - array of strings of hash types to show if showHash set
 
-The result is
+Returns:
 
 - list
     - This is an array of objects as described in the lsjson command
@@ -121,7 +121,7 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcAbout,
 		Title:        "Return the space used on the remote",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:"
 
@@ -167,7 +167,7 @@ func init() {
 				return rcMoveOrCopyFile(ctx, in, copy)
 			},
 			Title: name + " a file from source remote to destination remote",
-			Help: `This takes the following parameters
+			Help: `This takes the following parameters:
 
 - srcFs - a remote name string e.g. "drive:" for the source
 - srcRemote - a path within that remote e.g. "file.txt" for the source
@@ -202,7 +202,7 @@ func init() {
 		{name: "mkdir", title: "Make a destination directory or container"},
 		{name: "rmdir", title: "Remove an empty directory or container"},
 		{name: "purge", title: "Remove a directory or container and all of its contents"},
-		{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"},
+		{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root"},
 		{name: "delete", title: "Remove files in the path", noRemote: true},
 		{name: "deletefile", title: "Remove the single file pointed to"},
 		{name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url"},
@@ -222,7 +222,7 @@ func init() {
 				return rcSingleCommand(ctx, in, op.name, op.noRemote)
 			},
 			Title: op.title,
-			Help: `This takes the following parameters
+			Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:"
 ` + remote + op.help + `
@@ -324,11 +324,11 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcSize,
 		Title:        "Count the number of bytes and files in remote",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:path/to/dir"
 
-Returns
+Returns:
 
 - count - number of files
 - bytes - number of bytes in those files
@@ -360,14 +360,14 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcPublicLink,
 		Title:        "Create or retrieve a public link to the given file or folder.",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:"
 - remote - a path within that remote e.g. "dir"
 - unlink - boolean - if set removes the link rather than adding it (optional)
 - expire - string - the expiry time of the link e.g. "1d" (optional)
 
-Returns
+Returns:
 
 - url - URL of the resource
 
@@ -401,7 +401,7 @@ func init() {
 		Path:  "operations/fsinfo",
 		Fn:    rcFsInfo,
 		Title: "Return information about the remote",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - fs - a remote name string e.g. "drive:"
 
@@ -480,18 +480,18 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcBackend,
 		Title:        "Runs a backend command.",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
 - command - a string with the command name
 - fs - a remote name string e.g. "drive:"
 - arg - a list of arguments for the backend command
 - opt - a map of string to string of options
 
-Returns
+Returns:
 
 - result - result from the backend command
 
-For example
+Example:
 
     rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2
 
diff --git a/fs/rc/config.go b/fs/rc/config.go
index 6e9d97dac..c996b3447 100644
--- a/fs/rc/config.go
+++ b/fs/rc/config.go
@@ -34,7 +34,7 @@ func init() {
 		Path:  "options/blocks",
 		Fn:    rcOptionsBlocks,
 		Title: "List all the option blocks",
-		Help: `Returns
+		Help: `Returns:
 - options - a list of the options block names`,
 	})
 }
@@ -112,7 +112,7 @@ func init() {
 		Path:  "options/set",
 		Fn:    rcOptionsSet,
 		Title: "Set an option",
-		Help: `Parameters
+		Help: `Parameters:
 
 - option block name containing an object with
   - key: value
diff --git a/fs/rc/internal.go b/fs/rc/internal.go
index 4c8d8617f..50e2765d3 100644
--- a/fs/rc/internal.go
+++ b/fs/rc/internal.go
@@ -110,10 +110,10 @@ are explained in the go docs: https://golang.org/pkg/runtime/#MemStats
 
 The most interesting values for most people are:
 
-* HeapAlloc: This is the amount of memory rclone is actually using
-* HeapSys: This is the amount of memory rclone has obtained from the OS
-* Sys: this is the total amount of memory requested from the OS
-  * It is virtual memory so may include unused memory
+- HeapAlloc - this is the amount of memory rclone is actually using
+- HeapSys - this is the amount of memory rclone has obtained from the OS
+- Sys - this is the total amount of memory requested from the OS
+   - It is virtual memory so may include unused memory
 `,
 	})
 }
@@ -171,7 +171,7 @@ func init() {
 		Fn:    rcVersion,
 		Title: "Shows the current version of rclone and the go runtime.",
 		Help: `
-This shows the current version of go and the go runtime
+This shows the current version of go and the go runtime:
 
 - version - rclone version, e.g. "v1.53.0"
 - decomposed - version number as [major, minor, patch]
@@ -217,7 +217,7 @@ func init() {
 Pass a clear string and rclone will obscure it for the config file:
 - clear - string
 
-Returns
+Returns:
 - obscured - string
 `,
 	})
@@ -245,7 +245,7 @@ func init() {
 		Fn:    rcQuit,
 		Title: "Terminates the app.",
 		Help: `
-(optional) Pass an exit code to be used for terminating the app:
+(Optional) Pass an exit code to be used for terminating the app:
 - exitCode - int
 `,
 	})
@@ -289,11 +289,11 @@ Once this is set you can look use this to profile the mutex contention:
 
     go tool pprof http://localhost:5572/debug/pprof/mutex
 
-Parameters
+Parameters:
 
 - rate - int
 
-Results
+Results:
 
 - previousRate - int
 `,
@@ -329,7 +329,7 @@ After calling this you can use this to see the blocking profile:
 
     go tool pprof http://localhost:5572/debug/pprof/block
 
-Parameters
+Parameters:
 
 - rate - int
 `,
@@ -354,29 +354,29 @@ func init() {
 		NeedsRequest:  true,
 		NeedsResponse: true,
 		Title:         "Run a rclone terminal command over rc.",
-		Help: `This takes the following parameters
+		Help: `This takes the following parameters:
 
-- command - a string with the command name
-- arg - a list of arguments for the backend command
-- opt - a map of string to string of options
-- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")
-    - defaults to "COMBINED_OUTPUT" if not set
-    - the STREAM returnTypes will write the output to the body of the HTTP message
-    - the COMBINED_OUTPUT will write the output to the "result" parameter
+- command - a string with the command name.
+- arg - a list of arguments for the backend command.
+- opt - a map of string to string of options.
+- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
+    - Defaults to "COMBINED_OUTPUT" if not set.
+    - The STREAM returnTypes will write the output to the body of the HTTP message.
+    - The COMBINED_OUTPUT will write the output to the "result" parameter.
 
-Returns
+Returns:
 
-- result - result from the backend command
-    - only set when using returnType "COMBINED_OUTPUT"
-- error	 - set if rclone exits with an error code
-- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")
+- result - result from the backend command.
+    - Only set when using returnType "COMBINED_OUTPUT".
+- error - set if rclone exits with an error code.
+- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR").
 
-For example
+Example:
 
     rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
     rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
 
-Returns
+Returns:
 
 ` + "```" + `
 {
diff --git a/fs/rc/jobs/job.go b/fs/rc/jobs/job.go
index d1d5ae4cc..40a098400 100644
--- a/fs/rc/jobs/job.go
+++ b/fs/rc/jobs/job.go
@@ -317,11 +317,11 @@ func init() {
 		Path:  "job/status",
 		Fn:    rcJobStatus,
 		Title: "Reads the status of the job ID",
-		Help: `Parameters
+		Help: `Parameters:
 
-- jobid - id of the job (integer)
+- jobid - id of the job (integer).
 
-Results
+Results:
 
 - finished - boolean
 - duration - time in seconds that the job ran for
@@ -362,11 +362,11 @@ func init() {
 		Path:  "job/list",
 		Fn:    rcJobList,
 		Title: "Lists the IDs of the running jobs",
-		Help: `Parameters - None
+		Help: `Parameters: None.
 
-Results
+Results:
 
-- jobids - array of integer job ids
+- jobids - array of integer job ids.
 `,
 	})
 }
@@ -383,9 +383,9 @@ func init() {
 		Path:  "job/stop",
 		Fn:    rcJobStop,
 		Title: "Stop the running job",
-		Help: `Parameters
+		Help: `Parameters:
 
-- jobid - id of the job (integer)
+- jobid - id of the job (integer).
 `,
 	})
 }
diff --git a/fs/rc/rcflags/rcflags.go b/fs/rc/rcflags/rcflags.go
index acae13535..372f2a330 100644
--- a/fs/rc/rcflags/rcflags.go
+++ b/fs/rc/rcflags/rcflags.go
@@ -16,16 +16,16 @@ var (
 // AddFlags adds the remote control flags to the flagSet
 func AddFlags(flagSet *pflag.FlagSet) {
 	rc.AddOption("rc", &Opt)
-	flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server.")
-	flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server.")
-	flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects.")
-	flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods.")
+	flags.BoolVarP(flagSet, &Opt.Enabled, "rc", "", false, "Enable the remote control server")
+	flags.StringVarP(flagSet, &Opt.Files, "rc-files", "", "", "Path to local files to serve on the HTTP server")
+	flags.BoolVarP(flagSet, &Opt.Serve, "rc-serve", "", false, "Enable the serving of remote objects")
+	flags.BoolVarP(flagSet, &Opt.NoAuth, "rc-no-auth", "", false, "Don't require auth for certain methods")
 	flags.BoolVarP(flagSet, &Opt.WebUI, "rc-web-gui", "", false, "Launch WebGUI on localhost")
 	flags.BoolVarP(flagSet, &Opt.WebGUIUpdate, "rc-web-gui-update", "", false, "Check and update to latest version of web gui")
 	flags.BoolVarP(flagSet, &Opt.WebGUIForceUpdate, "rc-web-gui-force-update", "", false, "Force update to latest version of web gui")
 	flags.BoolVarP(flagSet, &Opt.WebGUINoOpenBrowser, "rc-web-gui-no-open-browser", "", false, "Don't open the browser automatically")
-	flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui.")
-	flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS.")
+	flags.StringVarP(flagSet, &Opt.WebGUIFetchURL, "rc-web-fetch-url", "", "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest", "URL to fetch the releases for webgui")
+	flags.StringVarP(flagSet, &Opt.AccessControlAllowOrigin, "rc-allow-origin", "", "", "Set the allowed origin for CORS")
 	flags.BoolVarP(flagSet, &Opt.EnableMetrics, "rc-enable-metrics", "", false, "Enable prometheus metrics on /metrics")
 	flags.DurationVarP(flagSet, &Opt.JobExpireDuration, "rc-job-expire-duration", "", Opt.JobExpireDuration, "expire finished async jobs older than this value")
 	flags.DurationVarP(flagSet, &Opt.JobExpireInterval, "rc-job-expire-interval", "", Opt.JobExpireInterval, "interval to check for expired async jobs")
diff --git a/fs/rc/webgui/rc.go b/fs/rc/webgui/rc.go
index 0d6a4596b..8c54040e3 100644
--- a/fs/rc/webgui/rc.go
+++ b/fs/rc/webgui/rc.go
@@ -16,13 +16,13 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcListTestPlugins,
 		Title:        "Show currently loaded test plugins",
-		Help: `allows listing of test plugins with the rclone.test set to true in package.json of the plugin
+		Help: `Allows listing of test plugins with the rclone.test set to true in package.json of the plugin.
 
-This takes no parameters and returns
+This takes no parameters and returns:
 
-- loadedTestPlugins: list of currently available test plugins
+- loadedTestPlugins - list of currently available test plugins.
 
-Eg
+E.g.
 
     rclone rc pluginsctl/listTestPlugins
 `,
@@ -45,13 +45,13 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcRemoveTestPlugin,
 		Title:        "Remove  a test plugin",
-		Help: `This allows you to remove a plugin using it's name
+		Help: `This allows you to remove a plugin using its name.
 
-This takes the following parameters
+This takes the following parameters:
 
-- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
+- name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
 
-Eg
+Example:
 
     rclone rc pluginsctl/removeTestPlugin name=rclone/rclone-webui-react
 `,
@@ -79,13 +79,13 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcAddPlugin,
 		Title:        "Add a plugin using url",
-		Help: `used for adding a plugin to the webgui
+		Help: `Used for adding a plugin to the webgui.
 
-This takes the following parameters
+This takes the following parameters:
 
-- url: http url of the github repo where the plugin is hosted (http://github.com/rclone/rclone-webui-react)
+- url - HTTP URL of the GitHub repo where the plugin is hosted (http://github.com/rclone/rclone-webui-react).
 
-Eg
+Example:
 
    rclone rc pluginsctl/addPlugin
 `,
@@ -191,12 +191,12 @@ func init() {
 		Title:        "Get the list of currently loaded plugins",
 		Help: `This allows you to get the currently enabled plugins and their details.
 
-This takes no parameters and returns
+This takes no parameters and returns:
 
-- loadedPlugins: list of current production plugins
-- testPlugins: list of temporarily loaded development plugins, usually running on a different server.
+- loadedPlugins - list of current production plugins.
+- testPlugins - list of temporarily loaded development plugins, usually running on a different server.
 
-Eg
+E.g.
 
    rclone rc pluginsctl/listPlugins
 `,
@@ -224,13 +224,13 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcRemovePlugin,
 		Title:        "Remove a loaded plugin",
-		Help: `This allows you to remove a plugin using it's name
+		Help: `This allows you to remove a plugin using its name.
 
-This takes parameters
+This takes parameters:
 
-- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
+- name - name of the plugin in the format ` + "`author`/`plugin_name`" + `.
 
-Eg
+E.g.
 
    rclone rc pluginsctl/removePlugin name=rclone/video-plugin
 `,
@@ -260,19 +260,19 @@ func init() {
 		AuthRequired: true,
 		Fn:           rcGetPluginsForType,
 		Title:        "Get plugins with type criteria",
-		Help: `This shows all possible plugins by a mime type
+		Help: `This shows all possible plugins by a mime type.
 
-This takes the following parameters
+This takes the following parameters:
 
-- type: supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3)
-- pluginType: filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL) 
+- type - supported mime type by a loaded plugin e.g. (video/mp4, audio/mp3).
+- pluginType - filter plugins based on their type e.g. (DASHBOARD, FILE_HANDLER, TERMINAL).
 
-and returns
+Returns:
 
-- loadedPlugins: list of current production plugins
-- testPlugins: list of temporarily loaded development plugins, usually running on a different server.
+- loadedPlugins - list of current production plugins.
+- testPlugins - list of temporarily loaded development plugins, usually running on a different server.
 
-Eg
+Example:
 
    rclone rc pluginsctl/getPluginsForType type=video/mp4
 `,
diff --git a/fs/sync/rc.go b/fs/sync/rc.go
index 5f1de86aa..7dee9dea2 100644
--- a/fs/sync/rc.go
+++ b/fs/sync/rc.go
@@ -20,7 +20,7 @@ func init() {
 				return rcSyncCopyMove(ctx, in, name)
 			},
 			Title: name + " a directory from source remote to destination remote",
-			Help: `This takes the following parameters
+			Help: `This takes the following parameters:
 
 - srcFs - a remote name string e.g. "drive:src" for the source
 - dstFs - a remote name string e.g. "drive:dst" for the destination
diff --git a/lib/env/env.go b/lib/env/env.go
index 8e503aeed..b8feb2f73 100644
--- a/lib/env/env.go
+++ b/lib/env/env.go
@@ -9,7 +9,7 @@ import (
 )
 
 // ShellExpandHelp describes what ShellExpand does for inclusion into help
-const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.\n"
+const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`."
 
 // ShellExpand replaces a leading "~" with the home directory" and
 // expands all environment variables afterwards.
diff --git a/lib/http/auth/auth.go b/lib/http/auth/auth.go
index 59883bfdd..222d09d2e 100644
--- a/lib/http/auth/auth.go
+++ b/lib/http/auth/auth.go
@@ -70,10 +70,10 @@ var (
 
 // AddFlagsPrefix adds flags for http/auth
 func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
-	flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
-	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
-	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
-	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication.")
+	flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "A htpasswd file - if not provided no authentication is done")
+	flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
+	flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
+	flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
 	flags.StringVarP(flagSet, &Opt.Salt, prefix+"salt", "", Opt.Salt, "Password hashing salt")
 }
 
diff --git a/lib/http/http.go b/lib/http/http.go
index 413a42526..7bfd5922a 100644
--- a/lib/http/http.go
+++ b/lib/http/http.go
@@ -378,14 +378,14 @@ func URL() string {
 
 // AddFlagsPrefix adds flags for the httplib
 func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
-	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
+	flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IP address:Port or :Port to bind server to")
 	flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
 	flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")
 	flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header")
 	flags.StringVarP(flagSet, &Opt.SslCert, prefix+"cert", "", Opt.SslCert, "SSL PEM key (concatenation of certificate and CA certificate)")
 	flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
 	flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
-	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root.")
+	flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
 
 }
 
diff --git a/lib/oauthutil/oauthutil.go b/lib/oauthutil/oauthutil.go
index 90cf7b02b..fba29a937 100644
--- a/lib/oauthutil/oauthutil.go
+++ b/lib/oauthutil/oauthutil.go
@@ -76,21 +76,21 @@ All done. Please go back to rclone.
 // SharedOptions are shared between backends the utilize an OAuth flow
 var SharedOptions = []fs.Option{{
 	Name: config.ConfigClientID,
-	Help: "OAuth Client Id\nLeave blank normally.",
+	Help: "OAuth Client ID.\n\nLeave blank normally.",
 }, {
 	Name: config.ConfigClientSecret,
-	Help: "OAuth Client Secret\nLeave blank normally.",
+	Help: "OAuth Client Secret.\n\nLeave blank normally.",
 }, {
 	Name:     config.ConfigToken,
 	Help:     "OAuth Access Token as a JSON blob.",
 	Advanced: true,
 }, {
 	Name:     config.ConfigAuthURL,
-	Help:     "Auth server URL.\nLeave blank to use the provider defaults.",
+	Help:     "Auth server URL.\n\nLeave blank to use the provider defaults.",
 	Advanced: true,
 }, {
 	Name:     config.ConfigTokenURL,
-	Help:     "Token server url.\nLeave blank to use the provider defaults.",
+	Help:     "Token server URL.\n\nLeave blank to use the provider defaults.",
 	Advanced: true,
 }}
 
diff --git a/vfs/help.go b/vfs/help.go
index 1cff104fb..444f400fc 100644
--- a/vfs/help.go
+++ b/vfs/help.go
@@ -30,8 +30,8 @@ directory should be considered up to date and not refreshed from the
 backend. Changes made through the mount will appear immediately or
 invalidate the cache.
 
-    --dir-cache-time duration   Time to cache directory entries for. (default 5m0s)
-    --poll-interval duration    Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
+    --dir-cache-time duration   Time to cache directory entries for (default 5m0s)
+    --poll-interval duration    Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
 
 However, changes made directly on the cloud storage by the web
 interface or a different copy of rclone will only be picked up once
@@ -85,10 +85,10 @@ find that you need one or the other or both.
 
     --cache-dir string                   Directory rclone will use for caching.
     --vfs-cache-mode CacheMode           Cache mode off|minimal|writes|full (default off)
-    --vfs-cache-max-age duration         Max age of objects in the cache. (default 1h0m0s)
-    --vfs-cache-max-size SizeSuffix      Max total size of objects in the cache. (default off)
-    --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects. (default 1m0s)
-    --vfs-write-back duration            Time to writeback files after last use when using cache. (default 5s)
+    --vfs-cache-max-age duration         Max age of objects in the cache (default 1h0m0s)
+    --vfs-cache-max-size SizeSuffix      Max total size of objects in the cache (default off)
+    --vfs-cache-poll-interval duration   Interval to poll the cache for stale objects (default 1m0s)
+    --vfs-write-back duration            Time to writeback files after last use when using cache (default 5s)
 
 If run with !-vv! rclone will print the location of the file cache.  The
 files are stored in the user cache file area which is OS dependent but
@@ -231,14 +231,14 @@ than seeking rclone will wait a short time for the in sequence read or
 write to come in. These flags only come into effect when not using an
 on disk cache file.
 
-    --vfs-read-wait duration   Time to wait for in-sequence read before seeking. (default 20ms)
-    --vfs-write-wait duration  Time to wait for in-sequence write before giving error. (default 1s)
+    --vfs-read-wait duration   Time to wait for in-sequence read before seeking (default 20ms)
+    --vfs-write-wait duration  Time to wait for in-sequence write before giving error (default 1s)
 
 When using VFS write caching (!--vfs-cache-mode! with value writes or full),
 the global flag !--transfers! can be set to adjust the number of parallel uploads of
 modified files from cache (the related global flag !--checkers! have no effect on mount).
 
-    --transfers int  Number of file transfers to run in parallel. (default 4)
+    --transfers int  Number of file transfers to run in parallel (default 4)
 
 ### VFS Case Sensitivity
 
diff --git a/vfs/vfsflags/vfsflags.go b/vfs/vfsflags/vfsflags.go
index d08370399..d1341a239 100644
--- a/vfs/vfsflags/vfsflags.go
+++ b/vfs/vfsflags/vfsflags.go
@@ -18,25 +18,25 @@ var (
 // AddFlags adds the non filing system specific flags to the command
 func AddFlags(flagSet *pflag.FlagSet) {
 	rc.AddOption("vfs", &Opt)
-	flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up).")
-	flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download.")
-	flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files.")
-	flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for.")
-	flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable.")
-	flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only.")
+	flags.BoolVarP(flagSet, &Opt.NoModTime, "no-modtime", "", Opt.NoModTime, "Don't read/write the modification time (can speed things up)")
+	flags.BoolVarP(flagSet, &Opt.NoChecksum, "no-checksum", "", Opt.NoChecksum, "Don't compare checksums on up/download")
+	flags.BoolVarP(flagSet, &Opt.NoSeek, "no-seek", "", Opt.NoSeek, "Don't allow seeking in files")
+	flags.DurationVarP(flagSet, &Opt.DirCacheTime, "dir-cache-time", "", Opt.DirCacheTime, "Time to cache directory entries for")
+	flags.DurationVarP(flagSet, &Opt.PollInterval, "poll-interval", "", Opt.PollInterval, "Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable)")
+	flags.BoolVarP(flagSet, &Opt.ReadOnly, "read-only", "", Opt.ReadOnly, "Mount read-only")
 	flags.FVarP(flagSet, &Opt.CacheMode, "vfs-cache-mode", "", "Cache mode off|minimal|writes|full")
-	flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects.")
-	flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache.")
-	flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache.")
-	flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks.")
-	flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited.")
+	flags.DurationVarP(flagSet, &Opt.CachePollInterval, "vfs-cache-poll-interval", "", Opt.CachePollInterval, "Interval to poll the cache for stale objects")
+	flags.DurationVarP(flagSet, &Opt.CacheMaxAge, "vfs-cache-max-age", "", Opt.CacheMaxAge, "Max age of objects in the cache")
+	flags.FVarP(flagSet, &Opt.CacheMaxSize, "vfs-cache-max-size", "", "Max total size of objects in the cache")
+	flags.FVarP(flagSet, &Opt.ChunkSize, "vfs-read-chunk-size", "", "Read the source objects in chunks")
+	flags.FVarP(flagSet, &Opt.ChunkSizeLimit, "vfs-read-chunk-size-limit", "", "If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited)")
 	flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions")
 	flags.FVarP(flagSet, FilePerms, "file-perms", "", "File permissions")
-	flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.")
-	flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.")
-	flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.")
-	flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", Opt.WriteBack, "Time to writeback files after last use when using cache.")
-	flags.FVarP(flagSet, &Opt.ReadAhead, "vfs-read-ahead", "", "Extra read ahead over --buffer-size when using cache-mode full.")
-	flags.BoolVarP(flagSet, &Opt.UsedIsSize, "vfs-used-is-size", "", Opt.UsedIsSize, "Use the `rclone size` algorithm for Used size.")
+	flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match")
+	flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error")
+	flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking")
+	flags.DurationVarP(flagSet, &Opt.WriteBack, "vfs-write-back", "", Opt.WriteBack, "Time to writeback files after last use when using cache")
+	flags.FVarP(flagSet, &Opt.ReadAhead, "vfs-read-ahead", "", "Extra read ahead over --buffer-size when using cache-mode full")
+	flags.BoolVarP(flagSet, &Opt.UsedIsSize, "vfs-used-is-size", "", Opt.UsedIsSize, "Use the `rclone size` algorithm for Used size")
 	platformFlags(flagSet)
 }
diff --git a/vfs/vfsflags/vfsflags_unix.go b/vfs/vfsflags/vfsflags_unix.go
index 08d8159eb..0ec37991d 100644
--- a/vfs/vfsflags/vfsflags_unix.go
+++ b/vfs/vfsflags/vfsflags_unix.go
@@ -13,9 +13,9 @@ import (
 func platformFlags(flagSet *pflag.FlagSet) {
 	Opt.Umask = unix.Umask(0) // read the umask
 	unix.Umask(Opt.Umask)     // set it back to what it was
-	flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem. Not supported on Windows.")
+	flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem (not supported on Windows)")
 	Opt.UID = uint32(unix.Geteuid())
 	Opt.GID = uint32(unix.Getegid())
-	flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem. Not supported on Windows.")
-	flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem. Not supported on Windows.")
+	flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem (not supported on Windows)")
+	flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem (not supported on Windows)")
 }