sftp: add --sftp-chunk-size to control packet sizes for high latency links
See: https://forum.rclone.org/t/increasing-sftp-transfer-speed/29928
parent 1651429041
commit 95e0934755
1 changed file with 47 additions and 25 deletions
@@ -272,6 +272,26 @@ given, rclone will empty the connection pool.
 Set to 0 to keep connections indefinitely.
 `,
 			Advanced: true,
+		}, {
+			Name: "chunk_size",
+			Help: `Upload and download chunk size.
+
+This controls the maximum packet size used in the SFTP protocol. The
+RFC limits this to 32768 bytes (32k), however a lot of servers
+support larger sizes and setting it larger will increase transfer
+speed dramatically on high latency links.
+
+Only use a setting higher than 32k if you always connect to the same
+server or after sufficiently broad testing.
+
+For example using the value of 252k with OpenSSH works well with its
+maximum packet size of 256k.
+
+If you get the error "failed to send packet header: EOF" when copying
+a large file, try lowering this number.
+`,
+			Default:  32 * fs.Kibi,
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
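With the option registered, the size can be raised per run using the flag named in the commit title; for example (the remote name and paths here are placeholders):

    rclone copy --sftp-chunk-size 252k myserver:path /local/path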
@@ -304,6 +324,7 @@ type Options struct {
 	DisableConcurrentReads  bool          `config:"disable_concurrent_reads"`
 	DisableConcurrentWrites bool          `config:"disable_concurrent_writes"`
 	IdleTimeout             fs.Duration   `config:"idle_timeout"`
+	ChunkSize               fs.SizeSuffix `config:"chunk_size"`
 }

 // Fs stores the interface to the remote SFTP files
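Because of the `config:"chunk_size"` tag, the same value can also be set persistently in the remote's section of rclone.conf instead of on the command line; a sketch, with the remote name and host as placeholders:

    [myserver]
    type = sftp
    host = sftp.example.com
    chunk_size = 252k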
@@ -481,6 +502,7 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
 		sftp.UseFstat(f.opt.UseFstat),
 		sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
 		sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
+		sftp.MaxPacketUnchecked(int(f.opt.ChunkSize)),
 	)
 	return sftp.NewClientPipe(pr, pw, opts...)
 }
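MaxPacketUnchecked comes from the github.com/pkg/sftp library; unlike sftp.MaxPacket, it does not reject sizes above the 32k protocol limit, which is what lets values like 252k through to the server. A minimal standalone sketch of the same option used outside rclone; the package and function names are hypothetical and the *ssh.Client is assumed to be already connected:

package sftptune

import (
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// NewLargePacketClient opens an SFTP session with a raised packet size.
// MaxPacketUnchecked skips the library's 32768-byte validation, so the
// server must genuinely support larger packets before this helps.
func NewLargePacketClient(conn *ssh.Client) (*sftp.Client, error) {
	return sftp.NewClient(conn,
		sftp.MaxPacketUnchecked(252*1024), // 252k, the value suggested in the help text
	)
}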