docs: cleanup spelling of size and rate units

This commit is contained in:
albertony 2021-04-07 12:23:42 +02:00 committed by Ivan Andreev
parent a1a41aa0c1
commit 98579608ec
12 changed files with 37 additions and 37 deletions

View file

@ -49,7 +49,7 @@ var (
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file") cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file") memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)") statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s") dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
version bool version bool
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail") retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)") retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)")

View file

@ -422,8 +422,8 @@ fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid
time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Options which use SIZE use kByte by default. However, a suffix of `b` Options which use SIZE use kByte by default. However, a suffix of `b`
for bytes, `k` for kBytes, `M` for MBytes, `G` for GBytes, `T` for for bytes, `k` for kByte, `M` for MByte, `G` for GByte, `T` for
TBytes and `P` for PBytes may be used. These are the binary units, e.g. TByte and `P` for PByte may be used. These are the binary units, e.g.
1, 2\*\*10, 2\*\*20, 2\*\*30 respectively. 1, 2\*\*10, 2\*\*20, 2\*\*30 respectively.
### --backup-dir=DIR ### ### --backup-dir=DIR ###
@ -469,7 +469,7 @@ This option controls the bandwidth limit. For example
would mean limit the upload and download bandwidth to 10 MByte/s. would mean limit the upload and download bandwidth to 10 MByte/s.
**NB** this is **bytes** per second not **bits** per second. To use a **NB** this is **bytes** per second not **bits** per second. To use a
single limit, specify the desired bandwidth in kBytes/s, or use a single limit, specify the desired bandwidth in kByte/s, or use a
suffix b|k|M|G. The default is `0` which means to not limit bandwidth. suffix b|k|M|G. The default is `0` which means to not limit bandwidth.
The upload and download bandwidth can be specified separately, as The upload and download bandwidth can be specified separately, as
@ -505,9 +505,9 @@ working hours could be:
`--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"` `--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`
In this example, the transfer bandwidth will be set to 512kBytes/sec In this example, the transfer bandwidth will be set to 512 kByte/s
at 8am every day. At noon, it will rise to 10 MByte/s, and drop back at 8am every day. At noon, it will rise to 10 MByte/s, and drop back
to 512kBytes/sec at 1pm. At 6pm, the bandwidth limit will be set to to 512 kByte/s at 1pm. At 6pm, the bandwidth limit will be set to
30 MByte/s, and at 11pm it will be completely disabled (full speed). 30 MByte/s, and at 11pm it will be completely disabled (full speed).
Anything between 11pm and 8am will remain unlimited. Anything between 11pm and 8am will remain unlimited.
@ -515,7 +515,7 @@ An example of timetable with `WEEKDAY` could be:
`--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"` `--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`
It means that, the transfer bandwidth will be set to 512kBytes/sec on It means that, the transfer bandwidth will be set to 512 kByte/s on
Monday. It will rise to 10 MByte/s before the end of Friday. At 10:00 Monday. It will rise to 10 MByte/s before the end of Friday. At 10:00
on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will on Saturday it will be set to 1 MByte/s. From 20:00 on Sunday it will
be unlimited. be unlimited.
@ -533,8 +533,8 @@ Bandwidth limit apply to the data transfer for all backends. For most
backends the directory listing bandwidth is also included (exceptions backends the directory listing bandwidth is also included (exceptions
being the non HTTP backends, `ftp`, `sftp` and `tardigrade`). being the non HTTP backends, `ftp`, `sftp` and `tardigrade`).
Note that the units are **Bytes/s**, not **Bits/s**. Typically Note that the units are **Byte/s**, not **bit/s**. Typically
connections are measured in Bits/s - to convert divide by 8. For connections are measured in bit/s - to convert divide by 8. For
example, let's say you have a 10 Mbit/s connection and you wish rclone example, let's say you have a 10 Mbit/s connection and you wish rclone
to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would to use half of it - 5 Mbit/s. This is 5/8 = 0.625 MByte/s so you would
use a `--bwlimit 0.625M` parameter for rclone. use a `--bwlimit 0.625M` parameter for rclone.
@ -1473,14 +1473,14 @@ date formatting syntax.
### --stats-unit=bits|bytes ### ### --stats-unit=bits|bytes ###
By default, data transfer rates will be printed in bytes/second. By default, data transfer rates will be printed in bytes per second.
This option allows the data rate to be printed in bits/second. This option allows the data rate to be printed in bits per second.
Data transfer volume will still be reported in bytes. Data transfer volume will still be reported in bytes.
The rate is reported as a binary unit, not SI unit. So 1 Mbit/s The rate is reported as a binary unit, not SI unit. So 1 Mbit/s
equals 1,048,576 bits/s and not 1,000,000 bits/s. equals 1,048,576 bit/s and not 1,000,000 bit/s.
The default is `bytes`. The default is `bytes`.

View file

@ -1226,7 +1226,7 @@ Use the -i flag to see what would be copied before copying.
Drive has quite a lot of rate limiting. This causes rclone to be Drive has quite a lot of rate limiting. This causes rclone to be
limited to transferring about 2 files per second only. Individual limited to transferring about 2 files per second only. Individual
files may be transferred much faster at 100s of MBytes/s but lots of files may be transferred much faster at 100s of MByte/s but lots of
small files can take a long time. small files can take a long time.
Server side copies are also subject to a separate rate limit. If you Server side copies are also subject to a separate rate limit. If you

View file

@ -18,8 +18,8 @@ These flags are available for every command.
--backup-dir string Make backups into hierarchy based in DIR. --backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name. --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M) --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable. --bwlimit BwTimetable Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.
--bwlimit-file BwTimetable Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable. --bwlimit-file BwTimetable Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.
--ca-cert string CA certificate used to verify servers --ca-cert string CA certificate used to verify servers
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone") --cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--check-first Do all the checks before starting transfers. --check-first Do all the checks before starting transfers.

View file

@ -788,8 +788,8 @@ Returns the following values:
"eta": estimated time in seconds until file transfer completion "eta": estimated time in seconds until file transfer completion
"name": name of the file, "name": name of the file,
"percentage": progress of the file transfer in percent, "percentage": progress of the file transfer in percent,
"speed": average speed over the whole transfer in bytes/sec, "speed": average speed over the whole transfer in bytes per second,
"speedAvg": current speed in bytes/sec as an exponentially weighted moving average, "speedAvg": current speed in bytes per second as an exponentially weighted moving average,
"size": size of the file in bytes "size": size of the file in bytes
} }
], ],

View file

@ -74,7 +74,7 @@ type accountValues struct {
start time.Time // Start time of first read start time.Time // Start time of first read
lpTime time.Time // Time of last average measurement lpTime time.Time // Time of last average measurement
lpBytes int // Number of bytes read since last measurement lpBytes int // Number of bytes read since last measurement
avg float64 // Moving average of last few measurements in bytes/s avg float64 // Moving average of last few measurements in Byte/s
} }
const averagePeriod = 16 // period to do exponentially weighted averages over const averagePeriod = 16 // period to do exponentially weighted averages over

View file

@ -32,7 +32,7 @@ func NewRcloneCollector(ctx context.Context) *RcloneCollector {
nil, nil, nil, nil,
), ),
transferSpeed: prometheus.NewDesc(namespace+"speed", transferSpeed: prometheus.NewDesc(namespace+"speed",
"Average speed in bytes/sec since the start of the Rclone process", "Average speed in bytes per second since the start of the Rclone process",
nil, nil, nil, nil,
), ),
numOfErrors: prometheus.NewDesc(namespace+"errors_total", numOfErrors: prometheus.NewDesc(namespace+"errors_total",

View file

@ -96,7 +96,7 @@ Returns the following values:
"lastError": last error string, "lastError": last error string,
"renames" : number of files renamed, "renames" : number of files renamed,
"retryError": boolean showing whether there has been at least one non-NoRetryError, "retryError": boolean showing whether there has been at least one non-NoRetryError,
"speed": average speed in bytes/sec since start of the group, "speed": average speed in bytes per second since start of the group,
"totalBytes": total number of bytes in the group, "totalBytes": total number of bytes in the group,
"totalChecks": total number of checks in the group, "totalChecks": total number of checks in the group,
"totalTransfers": total number of transfers in the group, "totalTransfers": total number of transfers in the group,
@ -109,8 +109,8 @@ Returns the following values:
"eta": estimated time in seconds until file transfer completion "eta": estimated time in seconds until file transfer completion
"name": name of the file, "name": name of the file,
"percentage": progress of the file transfer in percent, "percentage": progress of the file transfer in percent,
"speed": average speed over the whole transfer in bytes/sec, "speed": average speed over the whole transfer in bytes per second,
"speedAvg": current speed in bytes/sec as an exponentially weighted moving average, "speedAvg": current speed in bytes per second as an exponentially weighted moving average,
"size": size of the file in bytes "size": size of the file in bytes
} }
], ],

View file

@ -92,7 +92,7 @@ func (tb *tokenBucket) StartTokenBucket(ctx context.Context) {
tb.currLimit = ci.BwLimit.LimitAt(time.Now()) tb.currLimit = ci.BwLimit.LimitAt(time.Now())
if tb.currLimit.Bandwidth.IsSet() { if tb.currLimit.Bandwidth.IsSet() {
tb.curr = newTokenBucket(tb.currLimit.Bandwidth) tb.curr = newTokenBucket(tb.currLimit.Bandwidth)
fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &tb.currLimit.Bandwidth) fs.Infof(nil, "Starting bandwidth limiter at %v Byte/s", &tb.currLimit.Bandwidth)
// Start the SIGUSR2 signal handler to toggle bandwidth. // Start the SIGUSR2 signal handler to toggle bandwidth.
// This function does nothing in windows systems. // This function does nothing in windows systems.
@ -133,9 +133,9 @@ func (tb *tokenBucket) StartTokenTicker(ctx context.Context) {
*targetBucket = newTokenBucket(limitNow.Bandwidth) *targetBucket = newTokenBucket(limitNow.Bandwidth)
if tb.toggledOff { if tb.toggledOff {
fs.Logf(nil, "Scheduled bandwidth change. "+ fs.Logf(nil, "Scheduled bandwidth change. "+
"Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth) "Limit will be set to %v Byte/s when toggled on again.", &limitNow.Bandwidth)
} else { } else {
fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth) fs.Logf(nil, "Scheduled bandwidth change. Limit set to %v Byte/s", &limitNow.Bandwidth)
} }
} else { } else {
targetBucket._setOff() targetBucket._setOff()

View file

@ -97,8 +97,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit") flags.IntVarP(flagSet, &ci.StatsFileNameLength, "stats-file-name-length", "", ci.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR") flags.FVarP(flagSet, &ci.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &ci.BwLimit, "bwlimit", "", "Bandwidth limit in kByte/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.") flags.FVarP(flagSet, &ci.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kByte/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.") flags.FVarP(flagSet, &ci.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.") flags.FVarP(flagSet, &ci.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList) flags.FVarP(flagSet, &ci.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)

View file

@ -1048,7 +1048,7 @@ func TestSyncWithMaxDuration(t *testing.T) {
ci.Transfers = 1 ci.Transfers = 1
defer accounting.TokenBucket.SetBwLimit(fs.BwPair{Tx: -1, Rx: -1}) defer accounting.TokenBucket.SetBwLimit(fs.BwPair{Tx: -1, Rx: -1})
// 5 files of 60 bytes at 60 bytes/s 5 seconds // 5 files of 60 bytes at 60 Byte/s 5 seconds
testFiles := make([]fstest.Item, 5) testFiles := make([]fstest.Item, 5)
for i := 0; i < len(testFiles); i++ { for i := 0; i < len(testFiles); i++ {
testFiles[i] = r.WriteFile(fmt.Sprintf("file%d", i), "------------------------------------------------------------", t1) testFiles[i] = r.WriteFile(fmt.Sprintf("file%d", i), "------------------------------------------------------------", t1)