// Package accounting provides an accounting and limiting reader
package accounting

import (
	"context"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/asyncreader"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/rc"
)

// ErrorMaxTransferLimitReached defines error when transfer limit is reached.
// Used for checking on exit and matching to correct exit code.
var ErrorMaxTransferLimitReached = errors.New("max transfer limit reached as set by --max-transfer")

// ErrorMaxTransferLimitReachedFatal is returned from Read when the max
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)

// Start sets up the accounting, in particular the bandwidth limiting
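//
// A typical caller runs this once near startup, before any transfers
// begin, for example (illustrative only):
//
//	accounting.Start(context.Background())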
func Start(ctx context.Context) {
	// Start the token bucket limiter
	TokenBucket.StartTokenBucket(ctx)

	// Start the bandwidth update ticker
	TokenBucket.StartTokenTicker(ctx)

	// Start the transactions per second limiter
	StartLimitTPS(ctx)
}

// Account limits and accounts for one transfer
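//
// Note there are two levels of locking: acc.mu guards the reader
// plumbing (in, origIn, close and friends) while acc.values.mu guards
// the statistics in accountValues.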
type Account struct {
	stats *StatsInfo
	// The mutex is to make sure Read() and Close() aren't called
	// concurrently. Unfortunately the persistent connection loop
	// in http transport calls Read() after Do() returns on
	// CancelRequest so this race can happen when it apparently
	// shouldn't.
	mu       sync.Mutex // mutex protects these values
	in       io.Reader
	ctx      context.Context // current context for transfer - may change
	ci       *fs.ConfigInfo
	origIn   io.ReadCloser
	close    io.Closer
	size     int64
	name     string
	closed   bool          // set if the file is closed
	exit     chan struct{} // channel that will be closed when transfer is finished
	withBuf  bool          // is using a buffered in
	checking bool          // set if attached transfer is checking

	tokenBucket buckets // per file bandwidth limiter (may be nil)

	values accountValues
}

// accountValues holds statistics for this Account
type accountValues struct {
	mu      sync.Mutex // Mutex for stat values.
	bytes   int64      // Total number of bytes read
	max     int64      // if >=0 the max number of bytes to transfer
	start   time.Time  // Start time of first read
	lpTime  time.Time  // Time of last average measurement
	lpBytes int        // Number of bytes read since last measurement
	avg     float64    // Moving average of last few measurements in Byte/s
}

const averagePeriod = 16 // period to do exponentially weighted averages over

// newAccountSizeName makes an Account reader for an io.ReadCloser of
// the given size and name
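//
// The returned Account starts with no transfer limit (max = -1); if
// the cutoff mode is hard then max is set from --max-transfer. It
// registers itself with stats.inProgress and starts the background
// averaging goroutine.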
func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser, size int64, name string) *Account {
	acc := &Account{
		stats:  stats,
		in:     in,
		ctx:    ctx,
		ci:     fs.GetConfig(ctx),
		close:  in,
		origIn: in,
		size:   size,
		name:   name,
		exit:   make(chan struct{}),
		values: accountValues{
			avg:    0,
			lpTime: time.Now(),
			max:    -1,
		},
	}
	if acc.ci.CutoffMode == fs.CutoffModeHard {
		acc.values.max = int64(acc.ci.MaxTransfer)
	}
	currLimit := acc.ci.BwLimitFile.LimitAt(time.Now())
	if currLimit.Bandwidth.IsSet() {
		fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
		acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)
	}

	go acc.averageLoop()
	stats.inProgress.set(acc.name, acc)
	return acc
}

// WithBuffer - If the file is above a certain size it adds an Async reader
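//
// The number of asyncreader buffers is --buffer-size divided by
// asyncreader.BufferSize for files of unknown or large size, and
// proportionally fewer for smaller files, so files smaller than one
// asyncreader buffer get no buffer at all.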
func (acc *Account) WithBuffer() *Account {
	// if already have a buffer then just return
	if acc.withBuf {
		return acc
	}
	acc.withBuf = true
	var buffers int
	if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 {
		buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize)
	} else {
		buffers = int(acc.size / asyncreader.BufferSize)
	}
	// On big files add a buffer
	if buffers > 0 {
		rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers)
		if err != nil {
			fs.Errorf(acc.name, "Failed to make buffer: %v", err)
		} else {
			acc.in = rc
			acc.close = rc
		}
	}
	return acc
}

// HasBuffer - returns true if this Account has an AsyncReader with a buffer
func (acc *Account) HasBuffer() bool {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	_, ok := acc.in.(*asyncreader.AsyncReader)
	return ok
}

// GetReader returns the underlying io.ReadCloser under any Buffer
func (acc *Account) GetReader() io.ReadCloser {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.origIn
}

// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		return asyncIn
	}
	return nil
}

// StopBuffering stops the async buffer doing any more buffering
func (acc *Account) StopBuffering() {
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		asyncIn.StopBuffering()
	}
}

// Abandon stops the async buffer doing any more buffering and
// discards any buffered data
func (acc *Account) Abandon() {
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		asyncIn.Abandon()
	}
}

// UpdateReader updates the underlying io.ReadCloser stopping the
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser) {
	acc.mu.Lock()
	withBuf := acc.withBuf
	if withBuf {
		acc.Abandon()
		acc.withBuf = false
	}
	acc.in = in
	acc.ctx = ctx
	acc.close = in
	acc.origIn = in
	acc.closed = false
	if withBuf {
		acc.WithBuffer()
	}
	acc.mu.Unlock()

	// Reset counter to stop percentage going over 100%
	acc.values.mu.Lock()
	acc.values.lpBytes = 0
	acc.values.bytes = 0
	acc.values.mu.Unlock()
}

// averageLoop calculates averages for the stats in the background
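//
// Each second it folds the bytes read in the last second into an
// exponentially weighted moving average:
//
//	avg = (lastSecond + (period-1)*avg) / period
//
// where period ramps up to averagePeriod (16), so the average
// responds over a window of roughly averagePeriod seconds.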
func (acc *Account) averageLoop() {
	tick := time.NewTicker(time.Second)
	var period float64
	defer tick.Stop()
	for {
		select {
		case now := <-tick.C:
			acc.values.mu.Lock()
			// Add average of last second.
			elapsed := now.Sub(acc.values.lpTime).Seconds()
			avg := 0.0
			if elapsed > 0 {
				avg = float64(acc.values.lpBytes) / elapsed
			}
			// Soft start the moving average
			if period < averagePeriod {
				period++
			}
			acc.values.avg = (avg + (period-1)*acc.values.avg) / period
			acc.values.lpBytes = 0
			acc.values.lpTime = now
			// Unlock stats
			acc.values.mu.Unlock()
		case <-acc.exit:
			return
		}
	}
}

// Check the read is valid before it has happened, returning the number
// of bytes remaining to read before the transfer limit is reached.
func (acc *Account) checkReadBefore() (bytesUntilLimit int64, err error) {
	// Check to see if context is cancelled
	if err = acc.ctx.Err(); err != nil {
		return 0, err
	}
	acc.values.mu.Lock()
	if acc.values.max >= 0 {
		bytesUntilLimit = acc.values.max - acc.stats.GetBytes()
		if bytesUntilLimit < 0 {
			acc.values.mu.Unlock()
			return bytesUntilLimit, ErrorMaxTransferLimitReachedFatal
		}
	} else {
		bytesUntilLimit = 1 << 62
	}
	// Set start time.
	if acc.values.start.IsZero() {
		acc.values.start = time.Now()
	}
	acc.values.mu.Unlock()
	return bytesUntilLimit, nil
}

// Check the read call after the read has happened
func (acc *Account) checkReadAfter(bytesUntilLimit int64, n int, err error) (outN int, outErr error) {
	bytesUntilLimit -= int64(n)
	if bytesUntilLimit < 0 {
		// chop the overage off
		n += int(bytesUntilLimit)
		if n < 0 {
			n = 0
		}
		err = ErrorMaxTransferLimitReachedFatal
	}
	return n, err
}

// ServerSideTransferStart should be called at the start of a server-side transfer
//
// This pretends a transfer has started
func (acc *Account) ServerSideTransferStart() {
	acc.values.mu.Lock()
	// Set start time.
	if acc.values.start.IsZero() {
		acc.values.start = time.Now()
	}
	acc.values.mu.Unlock()
}

// ServerSideTransferEnd accounts for a read of n bytes in a server
// side transfer to be treated as a normal transfer.
func (acc *Account) ServerSideTransferEnd(n int64) {
	// Update Stats
	acc.values.mu.Lock()
	acc.values.bytes += n
	acc.values.mu.Unlock()

	acc.stats.Bytes(n)
}

// serverSideEnd accounts for non-specific server-side data
func (acc *Account) serverSideEnd(n int64) {
	// Account for bytes unless we are checking
	if !acc.checking {
		acc.stats.BytesNoNetwork(n)
	}
}

// ServerSideCopyEnd accounts for a read of n bytes in a server side copy
func (acc *Account) ServerSideCopyEnd(n int64) {
	acc.stats.AddServerSideCopy(n)
	acc.serverSideEnd(n)
}

// ServerSideMoveEnd accounts for a read of n bytes in a server side move
func (acc *Account) ServerSideMoveEnd(n int64) {
	acc.stats.AddServerSideMove(n)
	acc.serverSideEnd(n)
}

// DryRun accounts for statistics without running the operation
func (acc *Account) DryRun(n int64) {
	acc.ServerSideTransferStart()
	acc.ServerSideTransferEnd(n)
}

// Account for n bytes from the current file bandwidth limit (if any)
func (acc *Account) limitPerFileBandwidth(n int) {
	acc.values.mu.Lock()
	tokenBucket := acc.tokenBucket[TokenBucketSlotAccounting]
	acc.values.mu.Unlock()

	if tokenBucket != nil {
		err := tokenBucket.WaitN(context.Background(), n)
		if err != nil {
			fs.Errorf(nil, "Token bucket error: %v", err)
		}
	}
}

// Account the read and limit bandwidth
func (acc *Account) accountRead(n int) {
	// Update Stats
	acc.values.mu.Lock()
	acc.values.lpBytes += n
	acc.values.bytes += int64(n)
	acc.values.mu.Unlock()

	acc.stats.Bytes(int64(n))

	TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
	acc.limitPerFileBandwidth(n)
}

// read bytes from the io.Reader passed in and account them
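//
// The sequence is: checkReadBefore (check the context and the
// transfer limit), read from in, accountRead (update stats and apply
// bandwidth limits), then checkReadAfter (truncate any bytes read
// over the limit and return the limit error).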
func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
	bytesUntilLimit, err := acc.checkReadBefore()
	if err == nil {
		n, err = in.Read(p)
		acc.accountRead(n)
		n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
	}
	return n, err
}

// Read bytes from the object - see io.Reader
func (acc *Account) Read(p []byte) (n int, err error) {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.read(acc.in, p)
}

// accountWriteTo is a thin wrapper for w which accounts the writes
type accountWriteTo struct {
	w   io.Writer
	acc *Account
}

// Write writes len(p) bytes from p to the underlying data stream. It
// returns the number of bytes written from p (0 <= n <= len(p)) and
// any error encountered that caused the write to stop early. Write
// must return a non-nil error if it returns n < len(p). Write must
// not modify the slice data, even temporarily.
//
// Implementations must not retain p.
func (awt *accountWriteTo) Write(p []byte) (n int, err error) {
	bytesUntilLimit, err := awt.acc.checkReadBefore()
	if err == nil {
		n, err = awt.w.Write(p)
		n, err = awt.acc.checkReadAfter(bytesUntilLimit, n, err)
		awt.acc.accountRead(n)
	}
	return n, err
}

// WriteTo writes data to w until there's no more data to write or
// when an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
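//
// If the underlying reader implements io.WriterTo it is used to write
// directly into an accounting wrapper around w, otherwise the data is
// copied into that wrapper with io.Copy.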
func (acc *Account) WriteTo(w io.Writer) (n int64, err error) {
	acc.mu.Lock()
	in := acc.in
	acc.mu.Unlock()
	wrappedWriter := accountWriteTo{w: w, acc: acc}
	if do, ok := in.(io.WriterTo); ok {
		n, err = do.WriteTo(&wrappedWriter)
	} else {
		n, err = io.Copy(&wrappedWriter, in)
	}
	return
}

// AccountRead accounts for having read n bytes
func (acc *Account) AccountRead(n int) (err error) {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	bytesUntilLimit, err := acc.checkReadBefore()
	if err == nil {
		n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
		acc.accountRead(n)
	}
	return err
}

// Close the object
func (acc *Account) Close() error {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	if acc.closed {
		return nil
	}
	acc.closed = true
	if acc.close == nil {
		return nil
	}
	return acc.close.Close()
}

// Done with accounting - must be called to free accounting goroutine
func (acc *Account) Done() {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	close(acc.exit)
	acc.stats.inProgress.clear(acc.name)
}

// progress returns bytes read as well as the size.
// Size can be <= 0 if the size is unknown.
func (acc *Account) progress() (bytes, size int64) {
	if acc == nil {
		return 0, 0
	}
	acc.values.mu.Lock()
	bytes, size = acc.values.bytes, acc.size
	acc.values.mu.Unlock()
	return bytes, size
}

// speed returns the speed of the current file transfer
// in bytes per second, as well as an exponentially weighted moving average
// If no read has completed yet, 0 is returned for both values.
func (acc *Account) speed() (bps, current float64) {
	if acc == nil {
		return 0, 0
	}
	acc.values.mu.Lock()
	defer acc.values.mu.Unlock()
	if acc.values.bytes == 0 {
		return 0, 0
	}
	// Calculate speed from first read.
	total := float64(time.Since(acc.values.start)) / float64(time.Second)
	if total > 0 {
		bps = float64(acc.values.bytes) / total
	} else {
		bps = 0.0
	}
	current = acc.values.avg
	return
}

// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
	if acc == nil {
		return 0, false
	}
	acc.values.mu.Lock()
	defer acc.values.mu.Unlock()
	return eta(acc.values.bytes, acc.size, acc.values.avg)
}

// shortenName shortens in to size runes long
// If size <= 0 then in is left untouched
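//
// The middle of the name is replaced with an ellipsis, keeping the
// start and the end, for example:
//
//	shortenName("abcdefghijklmnop", 10) // "abcde…mnop"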
func shortenName(in string, size int) string {
	if size <= 0 {
		return in
	}
	if utf8.RuneCountInString(in) <= size {
		return in
	}
	name := []rune(in)
	size-- // don't count ellipsis rune
	suffixLength := size / 2
	prefixLength := size - suffixLength
	suffixStart := len(name) - suffixLength
	name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
	return string(name)
}

// String produces stats for this file
func (acc *Account) String() string {
	a, b := acc.progress()
	_, cur := acc.speed()
	eta, etaok := acc.eta()
	etas := "-"
	if etaok {
		if eta > 0 {
			etas = fmt.Sprintf("%v", eta)
		} else {
			etas = "0s"
		}
	}

	if acc.ci.DataRateUnit == "bits" {
		cur = cur * 8
	}

	percentageDone := 0
	if b > 0 {
		percentageDone = int(100 * float64(a) / float64(b))
	}

	return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
		acc.ci.StatsFileNameLength,
		shortenName(acc.name, acc.ci.StatsFileNameLength),
		percentageDone,
		fs.SizeSuffix(b),
		fs.SizeSuffix(cur),
		etas,
	)
}

// rcStats produces remote control stats for this file
func (acc *Account) rcStats() (out rc.Params) {
	out = make(rc.Params)
	a, b := acc.progress()
	out["bytes"] = a
	out["size"] = b
	spd, cur := acc.speed()
	out["speed"] = spd
	out["speedAvg"] = cur

	eta, etaOK := acc.eta()
	if etaOK {
		out["eta"] = eta.Seconds()
	} else {
		out["eta"] = nil
	}
	out["name"] = acc.name

	percentageDone := 0
	if b > 0 {
		percentageDone = int(100 * float64(a) / float64(b))
	}
	out["percentage"] = percentageDone
	out["group"] = acc.stats.group

	return out
}

// OldStream returns the top io.Reader
func (acc *Account) OldStream() io.Reader {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.in
}

// SetStream updates the top io.Reader
func (acc *Account) SetStream(in io.Reader) {
	acc.mu.Lock()
	acc.in = in
	acc.mu.Unlock()
}

// WrapStream wraps an io.Reader so it will be accounted in the same
// way as this Account
func (acc *Account) WrapStream(in io.Reader) io.Reader {
	return &accountStream{
		acc: acc,
		in:  in,
	}
}

// accountStream accounts a single io.Reader into a parent *Account
type accountStream struct {
	acc *Account
	in  io.Reader
}

// OldStream returns the underlying stream
func (a *accountStream) OldStream() io.Reader {
	return a.in
}

// SetStream sets the underlying stream
func (a *accountStream) SetStream(in io.Reader) {
	a.in = in
}

// WrapStream wraps the given io.Reader in an accounter
func (a *accountStream) WrapStream(in io.Reader) io.Reader {
	return a.acc.WrapStream(in)
}

// Read bytes from the object - see io.Reader
func (a *accountStream) Read(p []byte) (n int, err error) {
	return a.acc.read(a.in, p)
}

// Accounter accounts a stream allowing the accounting to be removed and re-added
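//
// Both *Account and *accountStream implement this interface.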
type Accounter interface {
	io.Reader
	OldStream() io.Reader
	SetStream(io.Reader)
	WrapStream(io.Reader) io.Reader
}

// WrapFn wraps an io.Reader (for accounting purposes usually)
type WrapFn func(io.Reader) io.Reader

// UnWrap unwraps a reader returning unwrapped and wrap, a function to
// wrap it back up again. If `in` is an Accounter then this function
// will take the accounting off, and wrap will put it back on again
// around the new Reader passed in.
//
// This allows functions which wrap io.Readers to move the accounting
// to the end of the wrapped chain of readers. This is very important
// if buffering is being introduced and if the Reader might be wrapped
// again.
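//
// For example (illustrative only), a wrapper adding buffering might do:
//
//	unwrapped, wrap := accounting.UnWrap(in)
//	in = wrap(bufio.NewReader(unwrapped))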
func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
	acc, ok := in.(Accounter)
	if !ok {
		return in, func(r io.Reader) io.Reader { return r }
	}
	return acc.OldStream(), acc.WrapStream
}

// UnWrapAccounting unwraps a reader returning unwrapped and acc a
// pointer to the accounting.
//
// The caller is expected to manage the accounting at this point.
func UnWrapAccounting(in io.Reader) (unwrapped io.Reader, acc *Account) {
	a, ok := in.(*accountStream)
	if !ok {
		return in, nil
	}
	return a.in, a.acc
}