forked from TrueCloudLab/rclone

Compare commits: fix-8082-v ... tcl/master (37 commits)
Commits (SHA1):

- b93e134b5b
- 0255757831
- 5385ce33a5
- f2d16ab4c5
- c0fc4fe0ca
- 669b2f2669
- e1ba10a86e
- 022442cf58
- 5cc4488294
- ec9566c5c3
- f6976eb4c4
- c242c00799
- bf954b74ff
- 88f0770d0a
- 41d905c9b0
- 300a063b5e
- 61bf29ed5e
- 3191717572
- 961dfe97b5
- 22612b4b38
- b9927461c3
- 6d04be99f2
- 06ae0dfa54
- 912f29b5b8
- 8d78768aaa
- 6aa924f28d
- 48f2c2db70
- a88066aff3
- 75f5b06ff7
- daeeb7c145
- d6a5fc6ffa
- c0bfedf99c
- 76b76c30bf
- 737fcc804f
- 70f3965354
- d5c100edaf
- dc7458cea0
90 changed files with 40655 additions and 18041 deletions
45  .forgejo/ISSUE_TEMPLATE/bug_report.md (new file)

@@ -0,0 +1,45 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
assignees: ''

---

<!--- Provide a general summary of the issue in the Title above -->

## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->

## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
<!--- Not obligatory -->
<!--- If no reason/fix/additions for the bug can be suggested, -->
<!--- uncomment the following phrase: -->

<!--- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->

1.

## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->

## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):
1  .forgejo/ISSUE_TEMPLATE/config.yml (new file)

@@ -0,0 +1 @@
blank_issues_enabled: false
24  .forgejo/workflows/builds.yaml (new file)

@@ -0,0 +1,24 @@
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make
20  .forgejo/workflows/dco.yml (new file)

@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
67  .forgejo/workflows/tests.yml (new file)

@@ -0,0 +1,67 @@
on:
  pull_request:
  push:
    branches:
      - tcl/master

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true

      - name: Install linters
        run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest

      - name: Run linters
        run: make check
  test:
    name: Test
    runs-on: oci-runner
    strategy:
      matrix:
        go_versions: [ '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Tests for the FrostFS backend
        env:
          RESTIC_TEST_FUSE: false
          AIO_IMAGE: truecloudlab/frostfs-aio
          AIO_VERSION: 1.7.0-nightly.4
          RCLONE_CONFIG: /config/rclone.conf

        # run only tests related to FrostFS backend
        run: |-
          podman-service.sh
          podman info

          mkdir /config
          printf "[TestFrostFS]\ntype = frostfs\nendpoint = localhost:8080\nwallet = /config/wallet.json\nplacement_policy = REP 1\nrequest_timeout = 20s\nconnection_timeout = 21s" > /config/rclone.conf

          echo "Run frostfs aio container"
          docker run -d --net=host --name aio $AIO_IMAGE:$AIO_VERSION --restart always -p 8080:8080

          echo "Wait for frostfs to start"
          until docker exec aio curl --fail http://localhost:8083 > /dev/null 2>&1; do sleep 0.2; done;

          echo "Issue creds"
          docker exec aio /usr/bin/issue-creds.sh native
          echo "Copy wallet"
          docker cp aio:/config/user-wallet.json /config/wallet.json

          echo "Start tests"
          go test -v github.com/rclone/rclone/backend/frostfs
@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
   - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
     - make sure this has the `autogenerated options` comments in (see your reference backend docs)
     - update them in your backend with `bin/make_backend_docs.py remote`
-  - `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
+  - `docs/content/overview.md` - overview docs
   - `docs/content/docs.md` - list of remotes in config section
   - `docs/content/_index.md` - front page of rclone.org
   - `docs/layouts/chrome/navbar.html` - add it to the website navigation
37998  MANUAL.html (generated)

File diff suppressed because it is too large.
687  MANUAL.md (generated)
@@ -1,6 +1,6 @@
 % rclone(1) User Manual
 % Nick Craig-Wood
-% Sep 08, 2024
+% Jan 10, 2025

 # Rclone syncs your files to cloud storage

@@ -120,6 +120,7 @@ WebDAV or S3, that work out of the box.)
 - Enterprise File Fabric
 - Fastmail Files
 - Files.com
+- FrostFS
 - FTP
 - Gofile
 - Google Cloud Storage
@ -5259,7 +5260,9 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -5603,9 +5606,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
|
||||
## Rclone as Unix mount helper
|
||||
|
||||
|
@ -6472,7 +6475,9 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -6816,9 +6821,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
|
||||
## Rclone as Unix mount helper
|
||||
|
||||
|
@ -7734,7 +7739,7 @@ Flags to control the Remote Control API
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -16254,6 +16259,22 @@ so they take exactly the same form.
|
|||
|
||||
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
|
||||
|
||||
Options that can appear multiple times (type `stringArray`) are
|
||||
treated slightly differently, as environment variables can only be
|
||||
defined once. In order to allow a simple mechanism for adding one or
|
||||
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
|
||||
string. For example
|
||||
|
||||
| Environment Variable | Equivalent options |
|
||||
|----------------------|--------------------|
|
||||
| `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
|
||||
| `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
|
||||
| `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
|
||||
| `RCLONE_EXCLUDE='"/directory with comma , in it /**"'` | `--exclude "/directory with comma , in it /**"` |
|
||||
|
||||
If `stringArray` options are defined as environment variables **and**
|
||||
options on the command line then all the values will be used.
|
||||
|
||||
### Config file ###
|
||||
|
||||
You can set defaults for values in the config file on an individual
|
||||
|
@ -16950,6 +16971,8 @@ processed in.
|
|||
Arrange the order of filter rules with the most restrictive first and
|
||||
work down.
|
||||
|
||||
Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
|
||||
|
||||
E.g. for `filter-file.txt`:
|
||||
|
||||
# a sample filter rule file
|
||||
|
@ -16957,6 +16980,7 @@ E.g. for `filter-file.txt`:
|
|||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
|
@ -19767,7 +19791,7 @@ Here is an overview of the major features of each cloud storage system.
|
|||
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
|
||||
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
|
||||
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
|
||||
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
|
||||
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
|
||||
| PikPak | MD5 | R | No | No | R | - |
|
||||
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
|
||||
| premiumize.me | - | - | Yes | No | R | - |
|
||||
|
@ -20474,7 +20498,7 @@ Flags for general networking and HTTP stuff.
|
|||
--tpslimit float Limit HTTP transactions per second to this
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
|
||||
--use-cookies Enable session cookiejar
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2-beta.8331.25cf42493.feature/add-frostfs-support")
|
||||
```
|
||||
|
||||
|
||||
|
@ -20623,7 +20647,7 @@ Flags to control the Remote Control API.
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -20659,7 +20683,7 @@ Flags to control the Remote Control API.
|
|||
Flags to control the Metrics HTTP endpoint.
|
||||
|
||||
```
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
|
||||
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--metrics-baseurl string Prefix for URLs - leave blank for root
|
||||
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -20911,6 +20935,21 @@ Backend-only flags (these can be set in the config file also).
|
|||
--filescom-password string The password used to authenticate with Files.com (obscured)
|
||||
--filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com)
|
||||
--filescom-username string The username used to authenticate with Files.com
|
||||
--frostfs-address string Address of account
|
||||
--frostfs-ape-cache-invalidation-duration Duration APE cache invalidation duration (default 8s)
|
||||
--frostfs-ape-cache-invalidation-timeout Duration APE cache invalidation timeout (default 24s)
|
||||
--frostfs-ape-chain-check-interval Duration The interval for verifying that the APE chain is saved in FrostFS (default 500ms)
|
||||
--frostfs-connection-timeout Duration FrostFS connection timeout (default 4s)
|
||||
--frostfs-container-creation-policy string Container creation policy for new containers (default "private")
|
||||
--frostfs-description string Description of the remote
|
||||
--frostfs-endpoint string Endpoints to connect to FrostFS node
|
||||
--frostfs-password string Password to decrypt wallet
|
||||
--frostfs-placement-policy string Placement policy for new containers (default "REP 3")
|
||||
--frostfs-rebalance-interval Duration FrostFS rebalance connections interval (default 15s)
|
||||
--frostfs-request-timeout Duration FrostFS request timeout (default 12s)
|
||||
--frostfs-rpc-endpoint string Endpoint to connect to Neo rpc node
|
||||
--frostfs-session-expiration int FrostFS session expiration epoch (default 4294967295)
|
||||
--frostfs-wallet string Path to wallet
|
||||
--ftp-ask-password Allow asking for FTP password when needed
|
||||
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
|
||||
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
|
||||
|
@ -21153,21 +21192,18 @@ Backend-only flags (these can be set in the config file also).
|
|||
--pcloud-token string OAuth Access Token as a JSON blob
|
||||
--pcloud-token-url string Token server url
|
||||
--pcloud-username string Your pcloud username
|
||||
--pikpak-auth-url string Auth server URL
|
||||
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
|
||||
--pikpak-client-id string OAuth Client Id
|
||||
--pikpak-client-secret string OAuth Client Secret
|
||||
--pikpak-description string Description of the remote
|
||||
--pikpak-device-id string Device ID used for authorization
|
||||
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
|
||||
--pikpak-pass string Pikpak password (obscured)
|
||||
--pikpak-root-folder-id string ID of the root folder
|
||||
--pikpak-token string OAuth Access Token as a JSON blob
|
||||
--pikpak-token-url string Token server url
|
||||
--pikpak-trashed-only Only show files that are in the trash
|
||||
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
|
||||
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--pikpak-user string Pikpak username
|
||||
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
|
||||
--pixeldrain-api-key string API key for your pixeldrain account
|
||||
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
|
||||
--pixeldrain-description string Description of the remote
|
||||
|
@ -24741,6 +24777,38 @@ there for more details.
|
|||
|
||||
Setting this flag increases the chance for undetected upload failures.
|
||||
|
||||
### Increasing performance
|
||||
|
||||
#### Using server-side copy
|
||||
|
||||
If you are copying objects between S3 buckets in the same region, you should
|
||||
use server-side copy.
|
||||
This is much faster than downloading and re-uploading the objects, as no data is transferred.
|
||||
|
||||
For rclone to use server-side copy, you must use the same remote for the source and destination.
|
||||
|
||||
rclone copy s3:source-bucket s3:destination-bucket
|
||||
|
||||
When using server-side copy, the performance is limited by the rate at which rclone issues
|
||||
API requests to S3.
|
||||
See below for how to increase the number of API requests rclone makes.
|
||||
|
||||
#### Increasing the rate of API requests
|
||||
|
||||
You can increase the rate of API requests to S3 by increasing the parallelism using `--transfers` and `--checkers`
|
||||
options.
|
||||
|
||||
Rclone uses very conservative defaults for these settings, as not all providers support high request rates.
|
||||
Depending on your provider, you can significantly increase the number of transfers and checkers.
|
||||
|
||||
For example, with AWS S3, you can increase the number of checkers to values like 200.
|
||||
If you are doing a server-side copy, you can also increase the number of transfers to 200.
|
||||
|
||||
rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
|
||||
|
||||
You will need to experiment with these values to find the optimal settings for your setup.
|
||||
|
||||
|
||||
### Versions
|
||||
|
||||
When bucket versioning is enabled (this can be done with rclone with
|
||||
|
@ -27784,8 +27852,8 @@ chunk_size = 5M
|
|||
copy_cutoff = 5M
|
||||
```
|
||||
|
||||
[C14 Cold Storage](https://www.online.net/en/storage/c14-cold-storage) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
|
||||
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to C14. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
|
||||
[Scaleway Glacier](https://www.scaleway.com/en/glacier-cold-storage/) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
|
||||
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to Scaleway Glacier. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
|
||||
|
||||
### Seagate Lyve Cloud {#lyve}
|
||||
|
||||
|
@ -34552,6 +34620,467 @@ Properties:
|
|||
|
||||
|
||||
|
||||
# FrostFS
|
||||
|
||||
Rclone FrostFS support is provided using the
|
||||
[git.frostfs.info/TrueCloudLab/frostfs-sdk-go](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go) package
|
||||
|
||||
## Configuration
|
||||
|
||||
To create a FrostFS configuration named `remote`, run
|
||||
|
||||
rclone config
|
||||
|
||||
Rclone config guides you through an interactive setup process. A minimal
|
||||
rclone FrostFS remote definition only requires an endpoint and the path to a FrostFS user wallet.
|
||||
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> remote
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
1 / 1Fichier
|
||||
\ (fichier)
|
||||
2 / Akamai NetStorage
|
||||
\ (netstorage)
|
||||
3 / Alias for an existing remote
|
||||
\ (alias)
|
||||
4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
|
||||
\ (s3)
|
||||
5 / Backblaze B2
|
||||
\ (b2)
|
||||
6 / Better checksums for other remotes
|
||||
\ (hasher)
|
||||
7 / Box
|
||||
\ (box)
|
||||
8 / Cache a remote
|
||||
\ (cache)
|
||||
9 / Citrix Sharefile
|
||||
\ (sharefile)
|
||||
10 / Combine several remotes into one
|
||||
\ (combine)
|
||||
11 / Compress a remote
|
||||
\ (compress)
|
||||
12 / Distributed, decentralized object storage FrostFS
|
||||
\ (frostfs)
|
||||
13 / Dropbox
|
||||
\ (dropbox)
|
||||
14 / Encrypt/Decrypt a remote
|
||||
\ (crypt)
|
||||
15 / Enterprise File Fabric
|
||||
\ (filefabric)
|
||||
16 / FTP
|
||||
\ (ftp)
|
||||
17 / Files.com
|
||||
\ (filescom)
|
||||
18 / Gofile
|
||||
\ (gofile)
|
||||
19 / Google Cloud Storage (this is not Google Drive)
|
||||
\ (google cloud storage)
|
||||
20 / Google Drive
|
||||
\ (drive)
|
||||
21 / Google Photos
|
||||
\ (google photos)
|
||||
22 / HTTP
|
||||
\ (http)
|
||||
23 / Hadoop distributed file system
|
||||
\ (hdfs)
|
||||
24 / HiDrive
|
||||
\ (hidrive)
|
||||
25 / ImageKit.io
|
||||
\ (imagekit)
|
||||
26 / In memory object storage system.
|
||||
\ (memory)
|
||||
27 / Internet Archive
|
||||
\ (internetarchive)
|
||||
28 / Jottacloud
|
||||
\ (jottacloud)
|
||||
29 / Koofr, Digi Storage and other Koofr-compatible storage providers
|
||||
\ (koofr)
|
||||
30 / Linkbox
|
||||
\ (linkbox)
|
||||
31 / Local Disk
|
||||
\ (local)
|
||||
32 / Mail.ru Cloud
|
||||
\ (mailru)
|
||||
33 / Mega
|
||||
\ (mega)
|
||||
34 / Microsoft Azure Blob Storage
|
||||
\ (azureblob)
|
||||
35 / Microsoft Azure Files
|
||||
\ (azurefiles)
|
||||
36 / Microsoft OneDrive
|
||||
\ (onedrive)
|
||||
37 / OpenDrive
|
||||
\ (opendrive)
|
||||
38 / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
|
||||
\ (swift)
|
||||
39 / Oracle Cloud Infrastructure Object Storage
|
||||
\ (oracleobjectstorage)
|
||||
40 / Pcloud
|
||||
\ (pcloud)
|
||||
41 / PikPak
|
||||
\ (pikpak)
|
||||
42 / Pixeldrain Filesystem
|
||||
\ (pixeldrain)
|
||||
43 / Proton Drive
|
||||
\ (protondrive)
|
||||
44 / Put.io
|
||||
\ (putio)
|
||||
45 / QingCloud Object Storage
|
||||
\ (qingstor)
|
||||
46 / Quatrix by Maytech
|
||||
\ (quatrix)
|
||||
47 / SMB / CIFS
|
||||
\ (smb)
|
||||
48 / SSH/SFTP
|
||||
\ (sftp)
|
||||
49 / Sia Decentralized Cloud
|
||||
\ (sia)
|
||||
50 / Storj Decentralized Cloud Storage
|
||||
\ (storj)
|
||||
51 / Sugarsync
|
||||
\ (sugarsync)
|
||||
52 / Transparently chunk/split large files
|
||||
\ (chunker)
|
||||
53 / Uloz.to
|
||||
\ (ulozto)
|
||||
54 / Union merges the contents of several upstream fs
|
||||
\ (union)
|
||||
55 / Uptobox
|
||||
\ (uptobox)
|
||||
56 / WebDAV
|
||||
\ (webdav)
|
||||
57 / Yandex Disk
|
||||
\ (yandex)
|
||||
58 / Zoho
|
||||
\ (zoho)
|
||||
59 / premiumize.me
|
||||
\ (premiumizeme)
|
||||
60 / seafile
|
||||
\ (seafile)
|
||||
Storage> frostfs
|
||||
|
||||
Option endpoint.
|
||||
Endpoints to connect to FrostFS node
|
||||
Choose a number from below, or type in your own value.
|
||||
1 / One endpoint.
|
||||
\ (s01.frostfs.devenv:8080)
|
||||
2 / Multiple endpoints to form pool.
|
||||
\ (s01.frostfs.devenv:8080 s02.frostfs.devenv:8080)
|
||||
3 / Multiple endpoints with priority (lower value means higher priority). Until s01 is healthy, all requests will be sent to it.
|
||||
\ (s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2)
|
||||
4 / Multiple endpoints with priority and weights. After s01 becomes unhealthy, requests will be sent to s02 and s03 in proportions of 10% and 90% respectively.
|
||||
\ (s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9)
|
||||
endpoint> s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2
|
||||
|
||||
Option connection_timeout.
|
||||
FrostFS connection timeout
|
||||
Enter a value of type Duration. Press Enter for the default (4s).
|
||||
connection_timeout>
|
||||
|
||||
Option request_timeout.
|
||||
FrostFS request timeout
|
||||
Enter a value of type Duration. Press Enter for the default (4s).
|
||||
request_timeout>
|
||||
|
||||
Option rebalance_interval.
|
||||
FrostFS rebalance connections interval
|
||||
Enter a value of type Duration. Press Enter for the default (15s).
|
||||
rebalance_interval>
|
||||
|
||||
Option session_expiration.
|
||||
FrostFS session expiration epoch
|
||||
Enter a signed integer. Press Enter for the default (4294967295).
|
||||
session_expiration>
|
||||
|
||||
Option ape_cache_invalidation_duration.
|
||||
APE cache invalidation duration
|
||||
Enter a value of type Duration. Press Enter for the default (8s).
|
||||
ape_cache_invalidation_duration>
|
||||
|
||||
Option ape_cache_invalidation_timeout.
|
||||
APE cache invalidation timeout
|
||||
Enter a value of type Duration. Press Enter for the default (16s).
|
||||
ape_cache_invalidation_timeout>
|
||||
|
||||
Option ape_chain_check_interval.
|
||||
The interval for verifying that the APE chain is saved in FrostFS.
|
||||
Enter a value of type Duration. Press Enter for the default (500ms).
|
||||
ape_chain_check_interval>
|
||||
|
||||
Option rpc_endpoint.
|
||||
Endpoint to connect to Neo rpc node
|
||||
Enter a value. Press Enter to leave empty.
|
||||
rpc_endpoint>
|
||||
|
||||
Option wallet.
|
||||
Path to wallet
|
||||
Enter a value.
|
||||
wallet> /wallets/wallet.conf
|
||||
|
||||
Option address.
|
||||
Address of account
|
||||
Enter a value. Press Enter to leave empty.
|
||||
address>
|
||||
|
||||
Option password.
|
||||
Password to decrypt wallet
|
||||
Enter a value. Press Enter to leave empty.
|
||||
password>
|
||||
|
||||
Option placement_policy.
|
||||
Placement policy for new containers
|
||||
Choose a number from below, or type in your own value of type string.
|
||||
Press Enter for the default (REP 3).
|
||||
1 / Container will have 3 replicas
|
||||
\ (REP 3)
|
||||
placement_policy> REP 1
|
||||
|
||||
Option container_creation_policy.
|
||||
Container creation policy for new containers
|
||||
Choose a number from below, or type in your own value of type string.
|
||||
Press Enter for the default (private).
|
||||
1 / Public container, anyone can read and write
|
||||
\ (public-read-write)
|
||||
2 / Public container, owner can read and write, others can only read
|
||||
\ (public-read)
|
||||
3 / Private container, only owner has access to it
|
||||
\ (private)
|
||||
container_creation_policy>
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: frostfs
|
||||
- endpoint: s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2
|
||||
- password: M@zPGpWDravchenko/develop/go/playground/play_with_ape/cfg/wallet_akrav.js
|
||||
- placement_policy: REP 1
|
||||
Keep this "remote" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
As a remote root directory, you can use either the container identifier or its user-friendly name.
|
||||
If you choose to use a container name, rclone will create a new container with that name when
|
||||
it's used for the first time.
|
||||
|
||||
For example, both of the following commands would be valid if there was a `~/test-copy` directory and a container with
|
||||
the identifier `23fk3Bcw5mPZ4YtYkTLJbQebtM2WXHz4HL8FgsrTJkSf`:
|
||||
|
||||
rclone copy ~/test-copy remote:23fk3Bcw5mPZ4YtYkTLJbQebtM2WXHz4HL8FgsrTJkSf/test-copy
|
||||
rclone copy ~/test-copy remote:container-name/test-copy
|
||||
|
||||
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to frostfs (Distributed, decentralized object storage FrostFS).
|
||||
|
||||
#### --frostfs-endpoint
|
||||
|
||||
Endpoints to connect to FrostFS node
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_FROSTFS_ENDPOINT
|
||||
- Type: string
|
||||
- Required: true
|
||||
- Examples:
|
||||
- "s01.frostfs.devenv:8080"
|
||||
- One endpoint.
|
||||
- "s01.frostfs.devenv:8080 s02.frostfs.devenv:8080"
|
||||
- Multiple endpoints to form pool.
|
||||
- "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2"
|
||||
- Multiple endpoints with priority (lower value has higher priority). Until s01 is healthy, all requests will be sent to it.
|
||||
- "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9"
|
||||
- Multiple endpoints with priority and weights. After the s01 endpoint is unhealthy, requests will be sent to the s02 and s03 endpoints in proportions of 10% and 90%, respectively.
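
As an illustration only (this helper is not part of rclone), the endpoint strings above can be mapped onto FrostFS SDK connection-pool node parameters roughly the way the `parseEndpoints`/`getNodePoolParams` helpers in `backend/frostfs/util.go` do, assuming that a missing priority or weight defaults to 1:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
)

// nodeParams converts "address[,priority[,weight]] ..." into pool node parameters.
func nodeParams(endpointParam string) ([]pool.NodeParam, error) {
	var res []pool.NodeParam
	for _, ep := range strings.Fields(endpointParam) {
		parts := strings.Split(ep, ",")
		address := parts[0]
		priority, weight := 1, 1.0
		var err error
		if len(parts) > 1 {
			if priority, err = strconv.Atoi(parts[1]); err != nil {
				return nil, fmt.Errorf("bad priority in %q: %w", ep, err)
			}
		}
		if len(parts) > 2 {
			if weight, err = strconv.ParseFloat(parts[2], 64); err != nil {
				return nil, fmt.Errorf("bad weight in %q: %w", ep, err)
			}
		}
		res = append(res, pool.NewNodeParam(priority, address, weight))
	}
	return res, nil
}

func main() {
	params, err := nodeParams("s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,9")
	if err != nil {
		panic(err)
	}
	fmt.Println("configured", len(params), "pool nodes")
}
```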
|
||||
|
||||
#### --frostfs-connection-timeout
|
||||
|
||||
FrostFS connection timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: connection_timeout
|
||||
- Env Var: RCLONE_FROSTFS_CONNECTION_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 4s
|
||||
|
||||
#### --frostfs-request-timeout
|
||||
|
||||
FrostFS request timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: request_timeout
|
||||
- Env Var: RCLONE_FROSTFS_REQUEST_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 12s
|
||||
|
||||
#### --frostfs-rebalance-interval
|
||||
|
||||
FrostFS rebalance connections interval
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: rebalance_interval
|
||||
- Env Var: RCLONE_FROSTFS_REBALANCE_INTERVAL
|
||||
- Type: Duration
|
||||
- Default: 15s
|
||||
|
||||
#### --frostfs-session-expiration
|
||||
|
||||
FrostFS session expiration epoch
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: session_expiration
|
||||
- Env Var: RCLONE_FROSTFS_SESSION_EXPIRATION
|
||||
- Type: int
|
||||
- Default: 4294967295
|
||||
|
||||
#### --frostfs-ape-cache-invalidation-duration
|
||||
|
||||
APE cache invalidation duration
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_cache_invalidation_duration
|
||||
- Env Var: RCLONE_FROSTFS_APE_CACHE_INVALIDATION_DURATION
|
||||
- Type: Duration
|
||||
- Default: 8s
|
||||
|
||||
#### --frostfs-ape-cache-invalidation-timeout
|
||||
|
||||
APE cache invalidation timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_cache_invalidation_timeout
|
||||
- Env Var: RCLONE_FROSTFS_APE_CACHE_INVALIDATION_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 24s
|
||||
|
||||
#### --frostfs-ape-chain-check-interval
|
||||
|
||||
The interval for verifying that the APE chain is saved in FrostFS.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_chain_check_interval
|
||||
- Env Var: RCLONE_FROSTFS_APE_CHAIN_CHECK_INTERVAL
|
||||
- Type: Duration
|
||||
- Default: 500ms
|
||||
|
||||
#### --frostfs-rpc-endpoint
|
||||
|
||||
Endpoint to connect to Neo rpc node
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: rpc_endpoint
|
||||
- Env Var: RCLONE_FROSTFS_RPC_ENDPOINT
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --frostfs-wallet
|
||||
|
||||
Path to wallet
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: wallet
|
||||
- Env Var: RCLONE_FROSTFS_WALLET
|
||||
- Type: string
|
||||
- Required: true
|
||||
|
||||
#### --frostfs-address
|
||||
|
||||
Address of account
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: address
|
||||
- Env Var: RCLONE_FROSTFS_ADDRESS
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --frostfs-password
|
||||
|
||||
Password to decrypt wallet
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: password
|
||||
- Env Var: RCLONE_FROSTFS_PASSWORD
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --frostfs-placement-policy
|
||||
|
||||
Placement policy for new containers
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: placement_policy
|
||||
- Env Var: RCLONE_FROSTFS_PLACEMENT_POLICY
|
||||
- Type: string
|
||||
- Default: "REP 3"
|
||||
- Examples:
|
||||
- "REP 3"
|
||||
- Container will have 3 replicas
|
||||
|
||||
#### --frostfs-container-creation-policy
|
||||
|
||||
Container creation policy for new containers
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: container_creation_policy
|
||||
- Env Var: RCLONE_FROSTFS_CONTAINER_CREATION_POLICY
|
||||
- Type: string
|
||||
- Default: "private"
|
||||
- Examples:
|
||||
- "public-read-write"
|
||||
- Public container, anyone can read and write
|
||||
- "public-read"
|
||||
- Public container, owner can read and write, others can only read
|
||||
- "private"
|
||||
- Private container, only owner has access to it
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to frostfs (Distributed, decentralized object storage FrostFS).
|
||||
|
||||
#### --frostfs-description
|
||||
|
||||
Description of the remote.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: description
|
||||
- Env Var: RCLONE_FROSTFS_DESCRIPTION
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
|
||||
|
||||
# FTP
|
||||
|
||||
FTP is the File Transfer Protocol. Rclone FTP support is provided using the
|
||||
|
@ -34675,13 +35204,13 @@ therefore works even in Windows Command Prompt:
|
|||
rclone lsf :ftp: --ftp-host=speedtest.tele2.net --ftp-user=anonymous --ftp-pass=IXs2wc8OJOz7SYLBk47Ji1rHTmxM
|
||||
rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47Ji1rHTmxM:
|
||||
|
||||
### Implicit TLS
|
||||
### Implicit TLS[util.go](../../backend/frostfs/util.go)
|
||||
|
||||
Rclone FTP supports implicit FTP over TLS servers (FTPS). This has to
|
||||
be enabled in the FTP backend config for the remote, or with
|
||||
[`--ftp-tls`](#ftp-tls). The default FTPS port is `990`, not `21` and
|
||||
can be set with [`--ftp-port`](#ftp-port).
|
||||
|
||||
[util.go](../../backend/frostfs/util.go)
|
||||
### Restricted filename characters
|
||||
|
||||
In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
|
||||
|
@ -37849,9 +38378,9 @@ then select "OAuth client ID".
|
|||
|
||||
9. It will show you a client ID and client secret. Make a note of these.
|
||||
|
||||
(If you selected "External" at Step 5 continue to Step 9.
|
||||
(If you selected "External" at Step 5 continue to Step 10.
|
||||
If you chose "Internal" you don't need to publish and can skip straight to
|
||||
Step 10 but your destination drive must be part of the same Google Workspace.)
|
||||
Step 11 but your destination drive must be part of the same Google Workspace.)
|
||||
|
||||
10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
|
||||
You will also want to add yourself as a test user.
|
||||
|
@ -48038,68 +48567,29 @@ Properties:
|
|||
|
||||
Here are the Advanced options specific to pikpak (PikPak).
|
||||
|
||||
#### --pikpak-client-id
|
||||
#### --pikpak-device-id
|
||||
|
||||
OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
Device ID used for authorization.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_ID
|
||||
- Config: device_id
|
||||
- Env Var: RCLONE_PIKPAK_DEVICE_ID
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-client-secret
|
||||
#### --pikpak-user-agent
|
||||
|
||||
OAuth Client Secret.
|
||||
HTTP user agent for pikpak.
|
||||
|
||||
Leave blank normally.
|
||||
Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on command line.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
|
||||
- Config: user_agent
|
||||
- Env Var: RCLONE_PIKPAK_USER_AGENT
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-token
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-auth-url
|
||||
|
||||
Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_PIKPAK_AUTH_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --pikpak-token-url
|
||||
|
||||
Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_PIKPAK_TOKEN_URL
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
|
||||
|
||||
#### --pikpak-root-folder-id
|
||||
|
||||
|
@ -54602,6 +55092,55 @@ Options:
|
|||
|
||||
# Changelog
|
||||
|
||||
## v1.68.2 - 2024-11-15
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.1...v1.68.2)
|
||||
|
||||
* Security fixes
|
||||
* local backend: CVE-2024-52522: fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Only affects users using `--metadata` and `--links` and copying files to the local backend
|
||||
* See https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
* build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (dependabot)
|
||||
* This is an issue in a dependency which is used for JWT certificates
|
||||
* See https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r
|
||||
* Bug Fixes
|
||||
* accounting: Fix wrong message on SIGUSR2 to enable/disable bwlimit (Nick Craig-Wood)
|
||||
* bisync: Fix output capture restoring the wrong output for logrus (Dimitrios Slamaris)
|
||||
* dlna: Fix loggingResponseWriter disregarding log level (Simon Bos)
|
||||
* serve s3: Fix excess locking which was making serve s3 single threaded (Nick Craig-Wood)
|
||||
* doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy Bush)
|
||||
* Local
|
||||
* Fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Fix `--copy-links` on macOS when cloning (nielash)
|
||||
* Onedrive
|
||||
* Fix Retry-After handling to look at 503 errors also (Nick Craig-Wood)
|
||||
* Pikpak
|
||||
* Fix cid/gcid calculations for fs.OverrideRemote (wiserain)
|
||||
* Fix fatal crash on startup with token that can't be refreshed (Nick Craig-Wood)
|
||||
* S3
|
||||
* Fix crash when using `--s3-download-url` after migration to SDKv2 (Nick Craig-Wood)
|
||||
* Storj provider: fix server-side copy of files bigger than 5GB (Kaloyan Raev)
|
||||
* Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)
|
||||
|
||||
## v1.68.1 - 2024-09-24
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
|
||||
|
||||
* Bug Fixes
|
||||
* build: Fix docker release build (ttionya)
|
||||
* doc fixes (Nick Craig-Wood, Pawel Palucha)
|
||||
* fs
|
||||
* Fix `--dump filters` not always appearing (Nick Craig-Wood)
|
||||
* Fix setting `stringArray` config values from environment variables (Nick Craig-Wood)
|
||||
* rc: Fix default value of `--metrics-addr` (Nick Craig-Wood)
|
||||
* serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam)
|
||||
* Onedrive
|
||||
* Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood)
|
||||
* Pikpak
|
||||
* Fix login issue where token retrieval fails (wiserain)
|
||||
* S3
|
||||
* Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood)
|
||||
|
||||
## v1.68.0 - 2024-09-08
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0)
|
||||
|
|
956
MANUAL.txt
generated
956
MANUAL.txt
generated
File diff suppressed because it is too large
Load diff
6  Makefile

@@ -144,14 +144,10 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
-	-@rmdir -p '$$HOME/.config/rclone'
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/

 backenddocs: rclone bin/make_backend_docs.py
-	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
-	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)

 rcdocs: rclone
 	bin/make_rc_docs.sh
2  VERSION

@@ -1 +1 @@
-v1.69.0
+v1.68.2
@@ -18,6 +18,7 @@ import (
 	_ "github.com/rclone/rclone/backend/fichier"
 	_ "github.com/rclone/rclone/backend/filefabric"
 	_ "github.com/rclone/rclone/backend/filescom"
+	_ "github.com/rclone/rclone/backend/frostfs"
 	_ "github.com/rclone/rclone/backend/ftp"
 	_ "github.com/rclone/rclone/backend/gofile"
 	_ "github.com/rclone/rclone/backend/googlecloudstorage"
|
@ -209,22 +209,6 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
|
|||
keys instead of setting ` + "`service_principal_file`" + `.
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_instance_discovery",
|
||||
Help: `Skip requesting Microsoft Entra instance metadata
|
||||
|
||||
This should be set true only by applications authenticating in
|
||||
disconnected clouds, or private clouds such as Azure Stack.
|
||||
|
||||
It determines whether rclone requests Microsoft Entra instance
|
||||
metadata from ` + "`https://login.microsoft.com/`" + ` before
|
||||
authenticating.
|
||||
|
||||
Setting this to true will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_msi",
|
||||
Help: `Use a managed service identity to authenticate (only works in Azure).
|
||||
|
@ -259,20 +243,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
|||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_az",
|
||||
Help: `Use Azure CLI tool az for authentication
|
||||
|
||||
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
|
||||
Setting this can be useful if you wish to use the az CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
|
||||
Don't set env_auth at the same time.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
|
@ -468,12 +438,10 @@ type Options struct {
|
|||
Username string `config:"username"`
|
||||
Password string `config:"password"`
|
||||
ServicePrincipalFile string `config:"service_principal_file"`
|
||||
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
|
||||
UseMSI bool `config:"use_msi"`
|
||||
MSIObjectID string `config:"msi_object_id"`
|
||||
MSIClientID string `config:"msi_client_id"`
|
||||
MSIResourceID string `config:"msi_mi_res_id"`
|
||||
UseAZ bool `config:"use_az"`
|
||||
Endpoint string `config:"endpoint"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
|
@ -758,7 +726,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
// Read credentials from the environment
|
||||
options := azidentity.DefaultAzureCredentialOptions{
|
||||
ClientOptions: policyClientOptions,
|
||||
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
|
||||
}
|
||||
cred, err = azidentity.NewDefaultAzureCredential(&options)
|
||||
if err != nil {
|
||||
|
@ -908,12 +875,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
|
||||
}
|
||||
case opt.UseAZ:
|
||||
var options = azidentity.AzureCLICredentialOptions{}
|
||||
cred, err = azidentity.NewAzureCLICredential(&options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
|
||||
}
|
||||
case opt.Account != "":
|
||||
// Anonymous access
|
||||
anonymous = true
|
||||
|
|
|
@ -43,7 +43,6 @@ import (
|
|||
"github.com/rclone/rclone/lib/jwtutil"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/youmark/pkcs8"
|
||||
"golang.org/x/oauth2"
|
||||
|
@ -257,6 +256,7 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
|||
}
|
||||
|
||||
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
||||
|
||||
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
||||
if len(rest) > 0 {
|
||||
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
|
||||
|
@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
|||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
// fmt.Printf("...Error %v\n", err)
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
// fmt.Printf("...Id %q\n", *info.Id)
|
||||
|
@ -966,26 +966,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// check if dest already exists
|
||||
item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if item != nil { // dest already exists, need to copy to temp name and then move
|
||||
tempSuffix := "-rclone-copy-" + random.String(8)
|
||||
fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
|
||||
tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
|
||||
err = f.deleteObject(ctx, item.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.Move(ctx, tempObj, remote)
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
|
|
1231  backend/frostfs/frostfs.go (new file)

File diff suppressed because it is too large.
16  backend/frostfs/frostfs_test.go (new file)

@@ -0,0 +1,16 @@
package frostfs

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFrostFS:",
		NilObject:       (*Object)(nil),
		SkipInvalidUTF8: true,
	})
}
311  backend/frostfs/util.go (new file)

@@ -0,0 +1,311 @@
package frostfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
resolver "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
|
||||
"git.frostfs.info/TrueCloudLab/policy-engine/schema/native"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
type endpointInfo struct {
|
||||
Address string
|
||||
Priority int
|
||||
Weight float64
|
||||
}
|
||||
|
||||
func publicReadWriteCCPRules() []chain.Rule {
|
||||
return []chain.Rule{
|
||||
{
|
||||
Status: chain.Allow, Actions: chain.Actions{
|
||||
Inverted: false,
|
||||
Names: []string{
|
||||
native.MethodPutObject,
|
||||
native.MethodGetObject,
|
||||
native.MethodHeadObject,
|
||||
native.MethodDeleteObject,
|
||||
native.MethodSearchObject,
|
||||
native.MethodRangeObject,
|
||||
native.MethodHashObject,
|
||||
native.MethodPatchObject,
|
||||
},
|
||||
}, Resources: chain.Resources{
|
||||
Inverted: false,
|
||||
Names: []string{native.ResourceFormatRootObjects},
|
||||
}, Any: false},
|
||||
}
|
||||
}
|
||||
|
||||
func privateCCPRules() []chain.Rule {
|
||||
rule := publicReadWriteCCPRules()
|
||||
// The same as public-read-write, except that only the owner is allowed to perform the listed actions
|
||||
rule[0].Condition = []chain.Condition{
|
||||
{
|
||||
Op: chain.CondStringEquals,
|
||||
Kind: chain.KindRequest,
|
||||
Key: native.PropertyKeyActorRole,
|
||||
Value: native.PropertyValueContainerRoleOwner,
|
||||
},
|
||||
}
|
||||
return rule
|
||||
}
|
||||
|
||||
func publicReadCCPRules() []chain.Rule {
|
||||
rule := privateCCPRules()
|
||||
// Add a rule that allows other users to perform reading actions.
|
||||
rule = append(rule, chain.Rule{
|
||||
Status: chain.Allow, Actions: chain.Actions{
|
||||
Inverted: false,
|
||||
Names: []string{
|
||||
native.MethodGetObject,
|
||||
native.MethodHeadObject,
|
||||
native.MethodRangeObject,
|
||||
native.MethodHashObject,
|
||||
native.MethodSearchObject,
|
||||
},
|
||||
}, Resources: chain.Resources{
|
||||
Inverted: false,
|
||||
Names: []string{native.ResourceFormatRootObjects},
|
||||
}, Condition: []chain.Condition{
|
||||
{
|
||||
Op: chain.CondStringEquals,
|
||||
Kind: chain.KindRequest,
|
||||
Key: native.PropertyKeyActorRole,
|
||||
Value: native.PropertyValueContainerRoleOthers,
|
||||
},
|
||||
}, Any: false})
|
||||
return rule
|
||||
}
|
||||
|
||||
func parseContainerCreationPolicyString(policyString string) ([]chain.Rule, error) {
|
||||
switch policyString {
|
||||
case "private":
|
||||
return privateCCPRules(), nil
|
||||
case "public-read":
|
||||
return publicReadCCPRules(), nil
|
||||
case "public-read-write":
|
||||
		return publicReadWriteCCPRules(), nil
	}

	return nil, fmt.Errorf("invalid container creation policy: %s", policyString)
}

func parseEndpoints(endpointParam string) ([]endpointInfo, error) {
	var err error
	expectedLength := -1 // to make sure all endpoints have the same format

	endpoints := strings.Split(strings.TrimSpace(endpointParam), " ")
	res := make([]endpointInfo, 0, len(endpoints))
	seen := make(map[string]struct{}, len(endpoints))

	for _, endpoint := range endpoints {
		endpointInfoSplit := strings.Split(endpoint, ",")
		address := endpointInfoSplit[0]

		if len(address) == 0 {
			continue
		}
		if _, ok := seen[address]; ok {
			return nil, fmt.Errorf("endpoint '%s' is already defined", address)
		}
		seen[address] = struct{}{}

		epInfo := endpointInfo{
			Address:  address,
			Priority: 1,
			Weight:   1,
		}

		if expectedLength == -1 {
			expectedLength = len(endpointInfoSplit)
		}

		if len(endpointInfoSplit) != expectedLength {
			return nil, fmt.Errorf("all endpoints must have the same format: '%s'", endpointParam)
		}

		switch len(endpointInfoSplit) {
		case 1:
		case 2:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		case 3:
			epInfo.Priority, err = parsePriority(endpointInfoSplit[1])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}

			epInfo.Weight, err = parseWeight(endpointInfoSplit[2])
			if err != nil {
				return nil, fmt.Errorf("invalid endpoint '%s': %w", endpoint, err)
			}
		default:
			return nil, fmt.Errorf("invalid endpoint format '%s'", endpoint)
		}

		res = append(res, epInfo)
	}

	return res, nil
}

func parsePriority(priorityStr string) (int, error) {
	priority, err := strconv.Atoi(priorityStr)
	if err != nil {
		return 0, fmt.Errorf("invalid priority '%s': %w", priorityStr, err)
	}
	if priority <= 0 {
		return 0, fmt.Errorf("priority must be positive '%s'", priorityStr)
	}

	return priority, nil
}

func parseWeight(weightStr string) (float64, error) {
	weight, err := strconv.ParseFloat(weightStr, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid weight '%s': %w", weightStr, err)
	}
	if weight <= 0 {
		return 0, fmt.Errorf("weight must be positive '%s'", weightStr)
	}

	return weight, nil
}

func createPool(ctx context.Context, key *keys.PrivateKey, cfg *Options) (*pool.Pool, error) {
	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	prm.SetNodeDialTimeout(time.Duration(cfg.FrostfsConnectionTimeout))
	prm.SetHealthcheckTimeout(time.Duration(cfg.FrostfsRequestTimeout))
	prm.SetClientRebalanceInterval(time.Duration(cfg.FrostfsRebalanceInterval))
	prm.SetSessionExpirationDuration(cfg.FrostfsSessionExpiration)

	nodes, err := getNodePoolParams(cfg.FrostfsEndpoint)
	if err != nil {
		return nil, err
	}
	for _, node := range nodes {
		prm.AddNode(node)
	}

	p, err := pool.NewPool(prm)
	if err != nil {
		return nil, fmt.Errorf("create pool: %w", err)
	}

	if err = p.Dial(ctx); err != nil {
		return nil, fmt.Errorf("dial pool: %w", err)
	}

	return p, nil
}

func getNodePoolParams(endpointParam string) ([]pool.NodeParam, error) {
	endpointInfos, err := parseEndpoints(endpointParam)
	if err != nil {
		return nil, fmt.Errorf("parse endpoints params: %w", err)
	}

	res := make([]pool.NodeParam, len(endpointInfos))
	for i, info := range endpointInfos {
		res[i] = pool.NewNodeParam(info.Priority, info.Address, info.Weight)
	}

	return res, nil
}

func createNNSResolver(cfg *Options) (*resolver.NNS, error) {
	if cfg.RPCEndpoint == "" {
		return nil, nil
	}

	var nns resolver.NNS
	if err := nns.Dial(cfg.RPCEndpoint); err != nil {
		return nil, fmt.Errorf("dial NNS resolver: %w", err)
	}

	return &nns, nil
}

func getAccount(cfg *Options) (*wallet.Account, error) {
	w, err := wallet.NewWalletFromFile(cfg.Wallet)
	if err != nil {
		return nil, err
	}

	addr := w.GetChangeAddress()
	if cfg.Address != "" {
		addr, err = flags.ParseAddress(cfg.Address)
		if err != nil {
			return nil, fmt.Errorf("invalid address")
		}
	}
	acc := w.GetAccount(addr)
	err = acc.Decrypt(cfg.Password, w.Scrypt)
	if err != nil {
		return nil, err
	}

	return acc, nil
}

func newAddress(cnrID cid.ID, objID oid.ID) oid.Address {
	var addr oid.Address
	addr.SetContainer(cnrID)
	addr.SetObject(objID)
	return addr
}

func formObject(own *user.ID, cnrID cid.ID, name string, header map[string]string) *object.Object {
	attributes := make([]object.Attribute, 0, 1+len(header))
	filename := object.NewAttribute()
	filename.SetKey(object.AttributeFileName)
	filename.SetValue(name)

	attributes = append(attributes, *filename)

	for key, val := range header {
		attr := object.NewAttribute()
		attr.SetKey(key)
		attr.SetValue(val)
		attributes = append(attributes, *attr)
	}

	obj := object.New()
	obj.SetOwnerID(*own)
	obj.SetContainerID(cnrID)
	obj.SetAttributes(attributes...)

	return obj
}

func newDir(cnrID cid.ID, cnr container.Container) *fs.Dir {
	remote := cnrID.EncodeToString()
	timestamp := container.CreatedAt(cnr)

	if domain := container.ReadDomain(cnr); domain.Name() != "" {
		remote = domain.Name()
	}

	dir := fs.NewDir(remote, timestamp)
	dir.SetID(cnrID.String())

	return dir
}
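
A minimal sketch, assuming it lives inside the frostfs package (the addresses are placeholders): every endpoint in one parameter must share the same address[,priority[,weight]] shape, and the parsed entries feed straight into the pool's init parameters.

// Illustrative only - not part of this change set.
func exampleNodePool() error {
	nodes, err := getNodePoolParams("s01.example:8080,1,1 s02.example:8080,2,1 s03.example:8080,2,9")
	if err != nil {
		return err
	}
	var prm pool.InitParameters
	for _, node := range nodes {
		prm.AddNode(node) // priority and weight are taken from the endpoint string
	}
	return nil
}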
154
backend/frostfs/util_test.go
Normal file
@@ -0,0 +1,154 @@
package frostfs

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestParseContainerCreationPolicy(t *testing.T) {
	for i, tc := range []struct {
		ACLString     string
		ExpectedError bool
	}{
		{
			ACLString:     "",
			ExpectedError: true,
		},
		{
			ACLString:     "public-ready",
			ExpectedError: true,
		},
		{
			ACLString:     "public-read",
			ExpectedError: false,
		},
		{
			ACLString:     "public-read-write",
			ExpectedError: false,
		},
		{
			ACLString:     "private",
			ExpectedError: false,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			rules, err := parseContainerCreationPolicyString(tc.ACLString)
			if tc.ExpectedError {
				require.Error(t, err)
				require.Nil(t, rules)
			} else {
				require.NoError(t, err)
				require.NotNil(t, rules)
			}
		})
	}
}

func TestParseEndpoints(t *testing.T) {
	for i, tc := range []struct {
		EndpointsParam string
		ExpectedError  bool
		ExpectedResult []endpointInfo
	}{
		{
			EndpointsParam: "s01.frostfs.devenv:8080",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 1,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   1,
			}},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,2,3",
			ExpectedResult: []endpointInfo{{
				Address:  "s01.frostfs.devenv:8080",
				Priority: 2,
				Weight:   3,
			}},
		},
		{
			EndpointsParam: " s01.frostfs.devenv:8080 s02.frostfs.devenv:8080 ",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9",
			ExpectedResult: []endpointInfo{
				{
					Address:  "s01.frostfs.devenv:8080",
					Priority: 1,
					Weight:   1,
				},
				{
					Address:  "s02.frostfs.devenv:8080",
					Priority: 2,
					Weight:   1,
				},
				{
					Address:  "s03.frostfs.devenv:8080",
					Priority: 2,
					Weight:   9,
				},
			},
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,-1,1",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,,",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,sd,sd",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,0",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080",
			ExpectedError:  true,
		},
		{
			EndpointsParam: "s01.frostfs.devenv:8080,1,2 s02.frostfs.devenv:8080,1",
			ExpectedError:  true,
		},
	} {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			res, err := parseEndpoints(tc.EndpointsParam)
			if tc.ExpectedError {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.ExpectedResult, res)
		})
	}
}
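
One path the table above leaves untested is the duplicate-address check in parseEndpoints; a hedged sketch of such a case (same package assumed) would be:

// Illustrative only - not part of this change set.
func TestParseEndpointsDuplicateExample(t *testing.T) {
	_, err := parseEndpoints("s01.frostfs.devenv:8080 s01.frostfs.devenv:8080")
	require.Error(t, err) // "endpoint ... is already defined"
}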
|
|
@ -180,28 +180,12 @@ If this is set and no password is supplied then rclone will ask for a password
|
|||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_check_upload",
|
||||
Default: false,
|
||||
Help: `Don't check the upload is OK
|
||||
|
||||
Normally rclone will try to check the upload exists after it has
|
||||
uploaded a file to make sure the size and modification time are as
|
||||
expected.
|
||||
|
||||
This flag stops rclone doing these checks. This enables uploading to
|
||||
folders which are write only.
|
||||
|
||||
You will likely need to use the --inplace flag also if uploading to
|
||||
a write only folder.
|
||||
`,
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
|
@ -248,7 +232,6 @@ type Options struct {
|
|||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
NoCheckUpload bool `config:"no_check_upload"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
|
@ -1320,16 +1303,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||
return fmt.Errorf("update stor: %w", err)
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
if o.fs.opt.NoCheckUpload {
|
||||
o.info = &FileInfo{
|
||||
Name: o.remote,
|
||||
Size: uint64(src.Size()),
|
||||
ModTime: src.ModTime(ctx),
|
||||
precise: true,
|
||||
IsDir: false,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
|
||||
return fmt.Errorf("SetModTime: %w", err)
|
||||
}
|
||||
|
|
|
@ -60,14 +60,16 @@ const (
|
|||
minSleep = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
// Description of how to auth for this app
|
||||
var storageConfig = &oauth2.Config{
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
storageConfig = &oauth2.Config{
|
||||
Scopes: []string{storage.DevstorageReadWriteScope},
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
|
@ -104,12 +106,6 @@ func init() {
|
|||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_token",
|
||||
Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Sensitive: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
|
@ -383,7 +379,6 @@ type Options struct {
|
|||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
AccessToken string `config:"access_token"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
|
@ -540,9 +535,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
|
||||
}
|
||||
} else if opt.AccessToken != "" {
|
||||
ts := oauth2.Token{AccessToken: opt.AccessToken}
|
||||
oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
|
||||
if err != nil {
|
||||
|
@ -952,6 +944,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
|||
return e
|
||||
}
|
||||
return f.createDirectoryMarker(ctx, bucket, dir)
|
||||
|
||||
}
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
|
@ -159,34 +160,6 @@ listings and transferred.
|
|||
Without this flag, archived media will not be visible in directory
|
||||
listings and won't be transferred.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "proxy",
|
||||
Default: "",
|
||||
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
|
||||
|
||||
The Google API will deliver images and video which aren't full
|
||||
resolution, and/or have EXIF data missing.
|
||||
|
||||
However if you use the gphotosdl proxy then you can download original,
|
||||
unchanged images.
|
||||
|
||||
This runs a headless browser in the background.
|
||||
|
||||
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
|
||||
|
||||
First run with
|
||||
|
||||
gphotosdl -login
|
||||
|
||||
Then once you have logged into google photos close the browser window
|
||||
and run
|
||||
|
||||
gphotosdl
|
||||
|
||||
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
|
||||
rclone use the proxy.
|
||||
`, "|", "`"),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
@ -208,7 +181,6 @@ type Options struct {
|
|||
BatchMode string `config:"batch_mode"`
|
||||
BatchSize int `config:"batch_size"`
|
||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||
Proxy string `config:"proxy"`
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
|
@ -482,7 +454,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
|
|||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
// defer log.Trace(f, "remote=%q", remote)("")
|
||||
defer log.Trace(f, "remote=%q", remote)("")
|
||||
return f.newObjectWithInfo(ctx, remote, nil)
|
||||
}
|
||||
|
||||
|
@ -695,7 +667,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
|
|||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil || pattern.isFile {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
|
@ -712,7 +684,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// defer log.Trace(f, "src=%+v", src)("")
|
||||
defer log.Trace(f, "src=%+v", src)("")
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
|
@ -765,7 +737,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
|
|||
|
||||
// Mkdir creates the album if it doesn't exist
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil {
|
||||
return fs.ErrorDirNotFound
|
||||
|
@ -789,7 +761,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
|||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||
// defer log.Trace(f, "dir=%q")("err=%v", &err)
|
||||
defer log.Trace(f, "dir=%q")("err=%v", &err)
|
||||
match, _, pattern := patterns.match(f.root, dir, false)
|
||||
if pattern == nil {
|
||||
return fs.ErrorDirNotFound
|
||||
|
@ -862,7 +834,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
// defer log.Trace(o, "")("")
|
||||
defer log.Trace(o, "")("")
|
||||
if !o.fs.opt.ReadSize || o.bytes >= 0 {
|
||||
return o.bytes
|
||||
}
|
||||
|
@ -963,7 +935,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
// defer log.Trace(o, "")("")
|
||||
defer log.Trace(o, "")("")
|
||||
err := o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
|
||||
|
@ -993,20 +965,16 @@ func (o *Object) downloadURL() string {
|
|||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// defer log.Trace(o, "")("")
|
||||
defer log.Trace(o, "")("")
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Open: Failed to read metadata: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
url := o.downloadURL()
|
||||
if o.fs.opt.Proxy != "" {
|
||||
url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
|
||||
}
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: url,
|
||||
RootURL: o.downloadURL(),
|
||||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
|
@ -1099,7 +1067,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
|
|||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
|
||||
defer log.Trace(o, "src=%+v", src)("err=%v", &err)
|
||||
match, _, pattern := patterns.match(o.fs.root, o.remote, true)
|
||||
if pattern == nil || !pattern.isFile || !pattern.canUpload {
|
||||
return errCantUpload
|
||||
|
|
16
backend/local/lchmod.go
Normal file
@@ -0,0 +1,16 @@
//go:build windows || plan9 || js || linux

package local

import "os"

const haveLChmod = false

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// Can't do this safely on this OS - chmoding a symlink always
	// changes the destination.
	return nil
}
41
backend/local/lchmod_unix.go
Normal file
@@ -0,0 +1,41 @@
//go:build !windows && !plan9 && !js && !linux

package local

import (
	"os"
	"syscall"

	"golang.org/x/sys/unix"
)

const haveLChmod = true

// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
//
// Borrowed from the syscall source since it isn't public.
func syscallMode(i os.FileMode) (o uint32) {
	o |= uint32(i.Perm())
	if i&os.ModeSetuid != 0 {
		o |= syscall.S_ISUID
	}
	if i&os.ModeSetgid != 0 {
		o |= syscall.S_ISGID
	}
	if i&os.ModeSticky != 0 {
		o |= syscall.S_ISVTX
	}
	return o
}

// lChmod changes the mode of the named file to mode. If the file is a symbolic
// link, it changes the link, not the target. If there is an error,
// it will be of type *PathError.
func lChmod(name string, mode os.FileMode) error {
	// NB linux does not support AT_SYMLINK_NOFOLLOW as a parameter to fchmodat
	// and returns ENOTSUP if you try, so we don't support this on linux
	if e := unix.Fchmodat(unix.AT_FDCWD, name, syscallMode(mode), unix.AT_SYMLINK_NOFOLLOW); e != nil {
		return &os.PathError{Op: "lChmod", Path: name, Err: e}
	}
	return nil
}
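
A hedged usage sketch for the Unix build above (local package assumed, file names are placeholders; on builds where haveLChmod is false the call is a silent no-op):

// Illustrative only: change the permissions of a symlink itself, not its target.
func exampleLChmod() error {
	if err := os.Symlink("target.txt", "link.txt"); err != nil {
		return err
	}
	return lChmod("link.txt", 0o600)
}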
@@ -1,4 +1,4 @@
//go:build windows || plan9 || js
//go:build plan9 || js

package local

19
backend/local/lchtimes_windows.go
Normal file
@@ -0,0 +1,19 @@
//go:build windows

package local

import (
	"time"
)

const haveLChtimes = true

// lChtimes changes the access and modification times of the named
// link, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit.
// If there is an error, it will be of type *PathError.
func lChtimes(name string, atime time.Time, mtime time.Time) error {
	return setTimes(name, atime, mtime, time.Time{}, true)
}
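
And a corresponding sketch for lChtimes (local package assumed, names and times are placeholders):

// Illustrative only: stamp access and modification times onto the symlink itself.
func exampleLChtimes() error {
	when := time.Date(2002, 2, 3, 4, 5, 10, 0, time.UTC)
	return lChtimes("link.txt", when, when)
}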
|
|
@ -268,22 +268,66 @@ func TestMetadata(t *testing.T) {
|
|||
r := fstest.NewRun(t)
|
||||
const filePath = "metafile.txt"
|
||||
when := time.Now()
|
||||
const dayLength = len("2001-01-01")
|
||||
whenRFC := when.Format(time.RFC3339Nano)
|
||||
r.WriteFile(filePath, "metadata file contents", when)
|
||||
f := r.Flocal.(*Fs)
|
||||
|
||||
// Set fs into "-l" / "--links" mode
|
||||
f.opt.TranslateSymlinks = true
|
||||
|
||||
// Write a symlink to the file
|
||||
symlinkPath := "metafile-link.txt"
|
||||
osSymlinkPath := filepath.Join(f.root, symlinkPath)
|
||||
symlinkPath += linkSuffix
|
||||
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
|
||||
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
|
||||
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))
|
||||
|
||||
// Get the object
|
||||
obj, err := f.NewObject(ctx, filePath)
|
||||
require.NoError(t, err)
|
||||
o := obj.(*Object)
|
||||
|
||||
// Get the symlink object
|
||||
symlinkObj, err := f.NewObject(ctx, symlinkPath)
|
||||
require.NoError(t, err)
|
||||
symlinkO := symlinkObj.(*Object)
|
||||
|
||||
// Record metadata for o
|
||||
oMeta, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test symlink first to check it doesn't mess up file
|
||||
t.Run("Symlink", func(t *testing.T) {
|
||||
testMetadata(t, r, symlinkO, symlinkModTime)
|
||||
})
|
||||
|
||||
// Read it again
|
||||
oMetaNew, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check that operating on the symlink didn't change the file it was pointing to
|
||||
// See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
assert.Equal(t, oMeta, oMetaNew, "metadata setting on symlink messed up file")
|
||||
|
||||
// Now run the same tests on the file
|
||||
t.Run("File", func(t *testing.T) {
|
||||
testMetadata(t, r, o, when)
|
||||
})
|
||||
}
|
||||
|
||||
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
|
||||
ctx := context.Background()
|
||||
whenRFC := when.Format(time.RFC3339Nano)
|
||||
const dayLength = len("2001-01-01")
|
||||
|
||||
f := r.Flocal.(*Fs)
|
||||
features := f.Features()
|
||||
|
||||
var hasXID, hasAtime, hasBtime bool
|
||||
var hasXID, hasAtime, hasBtime, canSetXattrOnLinks bool
|
||||
switch runtime.GOOS {
|
||||
case "darwin", "freebsd", "netbsd", "linux":
|
||||
hasXID, hasAtime, hasBtime = true, true, true
|
||||
canSetXattrOnLinks = runtime.GOOS != "linux"
|
||||
case "openbsd", "solaris":
|
||||
hasXID, hasAtime = true, true
|
||||
case "windows":
|
||||
|
@ -306,6 +350,10 @@ func TestMetadata(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
assert.Nil(t, m)
|
||||
|
||||
if !canSetXattrOnLinks && o.translatedLink {
|
||||
t.Skip("Skip remainder of test as can't set xattr on symlinks on this OS")
|
||||
}
|
||||
|
||||
inM := fs.Metadata{
|
||||
"potato": "chips",
|
||||
"cabbage": "soup",
|
||||
|
@ -320,18 +368,21 @@ func TestMetadata(t *testing.T) {
|
|||
})
|
||||
|
||||
checkTime := func(m fs.Metadata, key string, when time.Time) {
|
||||
t.Helper()
|
||||
mt, ok := o.parseMetadataTime(m, key)
|
||||
assert.True(t, ok)
|
||||
dt := mt.Sub(when)
|
||||
precision := time.Second
|
||||
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v", key, dt, precision))
|
||||
assert.True(t, dt >= -precision && dt <= precision, fmt.Sprintf("%s: dt %v outside +/- precision %v want %v got %v", key, dt, precision, mt, when))
|
||||
}
|
||||
|
||||
checkInt := func(m fs.Metadata, key string, base int) int {
|
||||
t.Helper()
|
||||
value, ok := o.parseMetadataInt(m, key, base)
|
||||
assert.True(t, ok)
|
||||
return value
|
||||
}
|
||||
|
||||
t.Run("Read", func(t *testing.T) {
|
||||
m, err := o.Metadata(ctx)
|
||||
require.NoError(t, err)
|
||||
|
@ -341,13 +392,12 @@ func TestMetadata(t *testing.T) {
|
|||
checkInt(m, "mode", 8)
|
||||
checkTime(m, "mtime", when)
|
||||
|
||||
assert.Equal(t, len(whenRFC), len(m["mtime"]))
|
||||
assert.Equal(t, whenRFC[:dayLength], m["mtime"][:dayLength])
|
||||
|
||||
if hasAtime {
|
||||
if hasAtime && !o.translatedLink { // symlinks generally don't record atime
|
||||
checkTime(m, "atime", when)
|
||||
}
|
||||
if hasBtime {
|
||||
if hasBtime && !o.translatedLink { // symlinks generally don't record btime
|
||||
checkTime(m, "btime", when)
|
||||
}
|
||||
if hasXID {
|
||||
|
@ -371,6 +421,10 @@ func TestMetadata(t *testing.T) {
|
|||
"mode": "0767",
|
||||
"potato": "wedges",
|
||||
}
|
||||
if !canSetXattrOnLinks && o.translatedLink {
|
||||
// Don't change xattr if not supported on symlinks
|
||||
delete(newM, "potato")
|
||||
}
|
||||
err := o.writeMetadata(newM)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -380,7 +434,11 @@ func TestMetadata(t *testing.T) {
|
|||
|
||||
mode := checkInt(m, "mode", 8)
|
||||
if runtime.GOOS != "windows" {
|
||||
assert.Equal(t, 0767, mode&0777, fmt.Sprintf("mode wrong - expecting 0767 got 0%o", mode&0777))
|
||||
expectedMode := 0767
|
||||
if o.translatedLink && runtime.GOOS == "linux" {
|
||||
expectedMode = 0777 // perms of symlinks always read as 0777 on linux
|
||||
}
|
||||
assert.Equal(t, expectedMode, mode&0777, fmt.Sprintf("mode wrong - expecting 0%o got 0%o", expectedMode, mode&0777))
|
||||
}
|
||||
|
||||
checkTime(m, "mtime", newMtime)
|
||||
|
@ -390,7 +448,7 @@ func TestMetadata(t *testing.T) {
|
|||
if haveSetBTime {
|
||||
checkTime(m, "btime", newBtime)
|
||||
}
|
||||
if xattrSupported {
|
||||
if xattrSupported && (canSetXattrOnLinks || !o.translatedLink) {
|
||||
assert.Equal(t, "wedges", m["potato"])
|
||||
}
|
||||
})
|
||||
|
|
|
@ -105,7 +105,11 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
|||
}
|
||||
if haveSetBTime {
|
||||
if btimeOK {
|
||||
if o.translatedLink {
|
||||
err = lsetBTime(o.path, btime)
|
||||
} else {
|
||||
err = setBTime(o.path, btime)
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to set birth (creation) time: %w", err)
|
||||
}
|
||||
|
@ -120,8 +124,12 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
|||
}
|
||||
if runtime.GOOS == "windows" || runtime.GOOS == "plan9" {
|
||||
fs.Debugf(o, "Ignoring request to set ownership %o.%o on this OS", gid, uid)
|
||||
} else {
|
||||
if o.translatedLink {
|
||||
err = os.Lchown(o.path, uid, gid)
|
||||
} else {
|
||||
err = os.Chown(o.path, uid, gid)
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change ownership: %w", err)
|
||||
}
|
||||
|
@ -132,7 +140,16 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
|
|||
if mode >= 0 {
|
||||
umode := uint(mode)
|
||||
if umode <= math.MaxUint32 {
|
||||
if o.translatedLink {
|
||||
if haveLChmod {
|
||||
err = lChmod(o.path, os.FileMode(umode))
|
||||
} else {
|
||||
fs.Debugf(o, "Unable to set mode %v on a symlink on this OS", os.FileMode(umode))
|
||||
err = nil
|
||||
}
|
||||
} else {
|
||||
err = os.Chmod(o.path, os.FileMode(umode))
|
||||
}
|
||||
if err != nil {
|
||||
outErr = fmt.Errorf("failed to change permissions: %w", err)
|
||||
}
|
||||
|
|
|
@ -13,3 +13,9 @@ func setBTime(name string, btime time.Time) error {
|
|||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
// lsetBTime changes the birth time of the link passed in
|
||||
func lsetBTime(name string, btime time.Time) error {
|
||||
// Does nothing
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -9,15 +9,20 @@ import (
|
|||
|
||||
const haveSetBTime = true
|
||||
|
||||
// setBTime sets the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
// setTimes sets any of atime, mtime or btime
|
||||
// if link is set it sets a link rather than the target
|
||||
func setTimes(name string, atime, mtime, btime time.Time, link bool) (err error) {
|
||||
pathp, err := syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fileFlag := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
|
||||
if link {
|
||||
fileFlag |= syscall.FILE_FLAG_OPEN_REPARSE_POINT
|
||||
}
|
||||
h, err := syscall.CreateFile(pathp,
|
||||
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
|
||||
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
syscall.OPEN_EXISTING, fileFlag, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -27,6 +32,28 @@ func setBTime(name string, btime time.Time) (err error) {
|
|||
err = closeErr
|
||||
}
|
||||
}()
|
||||
bFileTime := syscall.NsecToFiletime(btime.UnixNano())
|
||||
return syscall.SetFileTime(h, &bFileTime, nil, nil)
|
||||
var patime, pmtime, pbtime *syscall.Filetime
|
||||
if !atime.IsZero() {
|
||||
t := syscall.NsecToFiletime(atime.UnixNano())
|
||||
patime = &t
|
||||
}
|
||||
if !mtime.IsZero() {
|
||||
t := syscall.NsecToFiletime(mtime.UnixNano())
|
||||
pmtime = &t
|
||||
}
|
||||
if !btime.IsZero() {
|
||||
t := syscall.NsecToFiletime(btime.UnixNano())
|
||||
pbtime = &t
|
||||
}
|
||||
return syscall.SetFileTime(h, pbtime, patime, pmtime)
|
||||
}
|
||||
|
||||
// setBTime sets the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
return setTimes(name, time.Time{}, time.Time{}, btime, false)
|
||||
}
|
||||
|
||||
// lsetBTime changes the birth time of the link passed in
|
||||
func lsetBTime(name string, btime time.Time) error {
|
||||
return setTimes(name, time.Time{}, time.Time{}, btime, true)
|
||||
}
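
A hedged sketch of how the two Windows wrappers above differ only in whether the reparse point (the link) or its target is opened (local package assumed, paths are placeholders):

// Illustrative only: set the birth (creation) time of a file and of a symlink.
func exampleBTimes() error {
	btime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC)
	if err := setBTime(`C:\tmp\file.txt`, btime); err != nil { // opens the target
		return err
	}
	return lsetBTime(`C:\tmp\link.txt`, btime) // opens the link itself via FILE_FLAG_OPEN_REPARSE_POINT
}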
|
||||
|
|
|
@ -202,9 +202,14 @@ type SharingLinkType struct {
|
|||
type LinkType string
|
||||
|
||||
const (
|
||||
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
|
||||
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
|
||||
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
|
||||
// ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
|
||||
ViewLinkType LinkType = "view"
|
||||
// EditLinkType (role: write) An edit sharing link, allowing read-write access.
|
||||
EditLinkType LinkType = "edit"
|
||||
// EmbedLinkType (role: read) A view-only sharing link that can be used to embed
|
||||
// content into a host webpage. Embed links are not available for OneDrive for
|
||||
// Business or SharePoint.
|
||||
EmbedLinkType LinkType = "embed"
|
||||
)
|
||||
|
||||
// LinkScope represents the scope of the link represented by this permission.
|
||||
|
@ -212,9 +217,12 @@ const (
|
|||
type LinkScope string
|
||||
|
||||
const (
|
||||
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
|
||||
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
|
||||
|
||||
// AnonymousScope = Anyone with the link has access, without needing to sign in.
|
||||
// This may include people outside of your organization.
|
||||
AnonymousScope LinkScope = "anonymous"
|
||||
// OrganizationScope = Anyone signed into your organization (tenant) can use the
|
||||
// link to get access. Only available in OneDrive for Business and SharePoint.
|
||||
OrganizationScope LinkScope = "organization"
|
||||
)
|
||||
|
||||
// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
|
||||
|
@ -236,10 +244,14 @@ type PermissionsType struct {
|
|||
type Role string
|
||||
|
||||
const (
|
||||
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
|
||||
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
|
||||
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
|
||||
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
|
||||
// ReadRole provides the ability to read the metadata and contents of the item.
|
||||
ReadRole Role = "read"
|
||||
// WriteRole provides the ability to read and modify the metadata and contents of the item.
|
||||
WriteRole Role = "write"
|
||||
// OwnerRole represents the owner role for SharePoint and OneDrive for Business.
|
||||
OwnerRole Role = "owner"
|
||||
// MemberRole represents the member role for SharePoint and OneDrive for Business.
|
||||
MemberRole Role = "member"
|
||||
)
|
||||
|
||||
// PermissionsResponse is the response to the list permissions method
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -15,6 +14,7 @@ import (
|
|||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/errcount"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -827,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||
retry = true
|
||||
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
|
||||
}
|
||||
case 429: // Too Many Requests.
|
||||
case 429, 503: // Too Many Requests, Server Too Busy
|
||||
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
|
||||
if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
|
||||
retryAfter, parseErr := strconv.Atoi(values[0])
|
||||
|
@ -1545,12 +1545,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
// While this is true for some OneDrive personal accounts, it
|
||||
// isn't true for all of them. See #8101 for details
|
||||
//
|
||||
// if f.driveType == driveTypePersonal {
|
||||
// return time.Millisecond
|
||||
// }
|
||||
if f.driveType == driveTypePersonal {
|
||||
return time.Millisecond
|
||||
}
|
||||
return time.Second
|
||||
}
|
||||
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"slices"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -17,6 +16,7 @@ import (
|
|||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
|
||||
|
|
|
@ -404,32 +404,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
|||
return dstObj, nil
|
||||
}
|
||||
|
||||
// About gets quota information
|
||||
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
var uInfo usersInfoResponse
|
||||
var resp *http.Response
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/info.json/" + f.session.SessionID,
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage = &fs.Usage{
|
||||
Used: fs.NewUsageValue(uInfo.StorageUsed),
|
||||
Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
|
||||
Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
|
||||
}
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
|
@ -1173,7 +1147,6 @@ var (
|
|||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
_ fs.ParentIDer = (*Object)(nil)
|
||||
|
|
|
@ -231,10 +231,3 @@ type permissions struct {
|
|||
type uploadFileChunkReply struct {
|
||||
TotalWritten int64 `json:"TotalWritten"`
|
||||
}
|
||||
|
||||
// usersInfoResponse describes OpenDrive users/info.json response
|
||||
type usersInfoResponse struct {
|
||||
// This response contains many other values but these are the only ones currently in use
|
||||
StorageUsed int64 `json:"StorageUsed,string"`
|
||||
MaxStorage int64 `json:"MaxStorage,string"`
|
||||
}
|
||||
|
|
|
@ -561,6 +561,7 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
|||
if strings.Contains(err.Error(), "invalid_grant") {
|
||||
return f, f.reAuthorize(ctx)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f, nil
|
||||
|
|
|
@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
// No root so return old f
|
||||
return f, nil
|
||||
}
|
||||
_, err := tempF.newObject(ctx, remote)
|
||||
_, err := tempF.newObjectWithLink(ctx, remote, nil)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// File doesn't exist so return old f
|
||||
|
@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||
// ErrorIsDir if possible without doing any extra work,
|
||||
// otherwise ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObject(ctx, remote)
|
||||
return f.newObjectWithLink(ctx, remote, nil)
|
||||
}
|
||||
|
||||
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
|
||||
|
@ -516,27 +516,35 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
|
|||
return link, nil
|
||||
}
|
||||
|
||||
// readMetaDataForLink reads the metadata from the remote
|
||||
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
|
||||
// readMetaDataForRemote reads the metadata from the remote
|
||||
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
|
||||
link, err := f.getObjectLink(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
|
||||
var err error
|
||||
if err = f.pacer.Call(func() (bool, error) {
|
||||
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
|
||||
return shouldRetry(ctx, err)
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return fileSystemAttrs, nil
|
||||
return link, fileSystemAttrs, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path and link
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
// it also sets the info
|
||||
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
|
||||
if o.link != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.id = link.LinkID
|
||||
|
@ -546,10 +554,6 @@ func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.
|
|||
o.mimetype = link.MIMEType
|
||||
o.link = link
|
||||
|
||||
fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fileSystemAttrs != nil {
|
||||
o.modTime = fileSystemAttrs.ModificationTime
|
||||
o.originalSize = &fileSystemAttrs.Size
|
||||
|
@ -557,18 +561,23 @@ func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.
|
|||
o.digests = &fileSystemAttrs.Digests
|
||||
}
|
||||
|
||||
return o, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return an Object from a path only
|
||||
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
link, err := f.getObjectLink(ctx, remote)
|
||||
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
err := o.readMetaData(ctx, link)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectWithLink(ctx, remote, link)
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
|
|
|
@ -2606,35 +2606,6 @@ knows about - please make a bug report if not.
|
|||
`,
|
||||
Default: fs.Tristate{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "directory_bucket",
|
||||
Help: strings.ReplaceAll(`Set to use AWS Directory Buckets
|
||||
|
||||
If you are using an AWS Directory Bucket then set this flag.
|
||||
|
||||
This will ensure no |Content-Md5| headers are sent and ensure |ETag|
|
||||
headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
|
||||
be set on all objects whether single or multipart uploaded.
|
||||
|
||||
This also sets |no_check_bucket = true|.
|
||||
|
||||
Note that Directory Buckets do not support:
|
||||
|
||||
- Versioning
|
||||
- |Content-Encoding: gzip|
|
||||
|
||||
Rclone limitations with Directory Buckets:
|
||||
|
||||
- rclone does not support creating Directory Buckets with |rclone mkdir|
|
||||
- ... or removing them with |rclone rmdir| yet
|
||||
- Directory Buckets do not appear when doing |rclone lsf| at the top level.
|
||||
- Rclone can't remove auto created directories yet. In theory this should
|
||||
work with |directory_markers = true| but it doesn't.
|
||||
- Directories don't seem to appear in recursive (ListR) listings.
|
||||
`, "|", "`"),
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Provider: "AWS",
|
||||
}, {
|
||||
Name: "sdk_log_mode",
|
||||
Help: strings.ReplaceAll(`Set to debug the SDK
|
||||
|
@ -2809,7 +2780,6 @@ type Options struct {
|
|||
UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
|
||||
UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"`
|
||||
SDKLogMode sdkLogMode `config:"sdk_log_mode"`
|
||||
DirectoryBucket bool `config:"directory_bucket"`
|
||||
}
|
||||
|
||||
// Fs represents a remote s3 server
|
||||
|
@ -3398,6 +3368,10 @@ func setQuirks(opt *Options) {
|
|||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
useAlreadyExists = false // returns BucketAlreadyExists
|
||||
// Storj doesn't support multi-part server side copy:
|
||||
// https://github.com/storj/roadmap/issues/40
|
||||
// So make cutoff very large which it does support
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
case "Synology":
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false // untested
|
||||
|
@ -3578,14 +3552,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
// MD5 digest of their object data.
|
||||
f.etagIsNotMD5 = true
|
||||
}
|
||||
if opt.DirectoryBucket {
|
||||
// Objects uploaded to directory buckets appear to have random ETags
|
||||
//
|
||||
// This doesn't appear to be documented
|
||||
f.etagIsNotMD5 = true
|
||||
// The normal API doesn't work for creating directory buckets, so don't try
|
||||
f.opt.NoCheckBucket = true
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
|
@ -5784,7 +5750,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
|
|||
ContentEncoding: header("Content-Encoding"),
|
||||
ContentLanguage: header("Content-Language"),
|
||||
ContentType: header("Content-Type"),
|
||||
StorageClass: types.StorageClass(*header("X-Amz-Storage-Class")),
|
||||
StorageClass: types.StorageClass(deref(header("X-Amz-Storage-Class"))),
|
||||
}
|
||||
o.setMetaData(&head)
|
||||
return resp.Body, err
|
||||
|
@ -5978,8 +5944,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
|||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
bucket: mOut.Bucket,
|
||||
key: mOut.Key,
|
||||
bucket: ui.req.Bucket,
|
||||
key: ui.req.Key,
|
||||
uploadID: mOut.UploadId,
|
||||
multiPartUploadInput: &mReq,
|
||||
completedParts: make([]types.CompletedPart, 0),
|
||||
|
@ -6067,10 +6033,6 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
|||
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
|
||||
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
|
||||
}
|
||||
if w.f.opt.DirectoryBucket {
|
||||
// Directory buckets do not support "Content-Md5" header
|
||||
uploadPartReq.ContentMD5 = nil
|
||||
}
|
||||
var uout *s3.UploadPartOutput
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// rewind the reader on retry and after reading md5
|
||||
|
@ -6347,7 +6309,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
|||
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
ui.req.Metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
|
@ -6362,7 +6324,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
|||
if size >= 0 {
|
||||
ui.req.ContentLength = &size
|
||||
}
|
||||
if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
|
||||
if md5sumBase64 != "" {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
|
|
|
@ -14,30 +14,21 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
|
||||
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
||||
// Not found. Can be returned by NextCloud when merging chunks of an upload.
|
||||
if resp != nil && resp.StatusCode == 404 {
|
||||
if *wasLocked {
|
||||
// Assume a 404 error after we've received a 423 error is actually a success
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
// 423 LOCKED
|
||||
if resp != nil && resp.StatusCode == 423 {
|
||||
*wasLocked = true
|
||||
fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
|
||||
time.Sleep(*sleepTime)
|
||||
*sleepTime *= 2
|
||||
return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
|
||||
}
|
||||
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
|
@ -189,11 +180,9 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
|
|||
}
|
||||
opts.ExtraHeaders = o.extraHeaders(ctx, src)
|
||||
opts.ExtraHeaders["Destination"] = destinationURL.String()
|
||||
sleepTime := 5 * time.Second
|
||||
wasLocked := false
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
|
||||
return o.fs.shouldRetryChunkMerge(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
|
||||
|
|
|
@ -27,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// OAuthUser is a Zoho user we are only interested in the ZUID here
|
||||
type OAuthUser struct {
|
||||
// User is a Zoho user we are only interested in the ZUID here
|
||||
type User struct {
|
||||
FirstName string `json:"First_Name"`
|
||||
Email string `json:"Email"`
|
||||
LastName string `json:"Last_Name"`
|
||||
|
@ -36,41 +36,12 @@ type OAuthUser struct {
|
|||
ZUID int64 `json:"ZUID"`
|
||||
}
|
||||
|
||||
// UserInfoResponse is returned by the user info API.
|
||||
type UserInfoResponse struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"users"`
|
||||
Attributes struct {
|
||||
EmailID string `json:"email_id"`
|
||||
Edition string `json:"edition"`
|
||||
} `json:"attributes"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// PrivateSpaceInfo gives basic information about a users private folder.
|
||||
type PrivateSpaceInfo struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"string"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// CurrentTeamInfo gives information about the current user in a team.
|
||||
type CurrentTeamInfo struct {
|
||||
Data struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"string"`
|
||||
}
|
||||
}
|
||||
|
||||
// TeamWorkspace represents a Zoho Team, Workspace or Private Space
|
||||
// TeamWorkspace represents a Zoho Team or workspace
|
||||
// It's actually a VERY large json object that differs between
|
||||
// Team and Workspace and Private Space but we are only interested in some fields
|
||||
// that all of them have so we can use the same struct.
|
||||
// Team and Workspace but we are only interested in some fields
|
||||
// that both of them have so we can use the same struct for both
|
||||
type TeamWorkspace struct {
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
Attributes struct {
|
||||
Name string `json:"name"`
|
||||
Created Time `json:"created_time_in_millisecond"`
|
||||
|
@ -78,8 +49,7 @@ type TeamWorkspace struct {
|
|||
} `json:"attributes"`
|
||||
}
|
||||
|
||||
// TeamWorkspaceResponse is the response by the list teams API, list workspace API
|
||||
// or list team private spaces API.
|
||||
// TeamWorkspaceResponse is the response by the list teams api
|
||||
type TeamWorkspaceResponse struct {
|
||||
TeamWorkspace []TeamWorkspace `json:"data"`
|
||||
}
|
||||
|
@ -210,38 +180,11 @@ func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
|
|||
return &ufi, nil
|
||||
}
|
||||
|
||||
// LargeUploadInfo is once again a slightly different version of UploadInfo
|
||||
// returned as part of an LargeUploadResponse by the large file upload API.
|
||||
type LargeUploadInfo struct {
|
||||
Attributes struct {
|
||||
ParentID string `json:"parent_id"`
|
||||
FileName string `json:"file_name"`
|
||||
RessourceID string `json:"resource_id"`
|
||||
FileInfo string `json:"file_info"`
|
||||
} `json:"attributes"`
|
||||
}
|
||||
|
||||
// GetUploadFileInfo decodes the embedded FileInfo
|
||||
func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
|
||||
var ufi UploadFileInfo
|
||||
err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
|
||||
}
|
||||
return &ufi, nil
|
||||
}
|
||||
|
||||
// UploadResponse is the response to a file Upload
|
||||
type UploadResponse struct {
|
||||
Uploads []UploadInfo `json:"data"`
|
||||
}
|
||||
|
||||
// LargeUploadResponse is the response returned by large file upload API.
|
||||
type LargeUploadResponse struct {
|
||||
Uploads []LargeUploadInfo `json:"data"`
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// WriteMetadataRequest is used to write metadata for a
|
||||
// single item
|
||||
type WriteMetadataRequest struct {
|
||||
|
|
|
@ -14,7 +14,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
|
@ -37,11 +36,9 @@ const (
|
|||
rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
|
||||
rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 60 * time.Second
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
configRootID = "root_folder_id"
|
||||
|
||||
defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
|
||||
)
|
||||
|
||||
// Globals
|
||||
|
@ -53,7 +50,6 @@ var (
|
|||
"WorkDrive.team.READ",
|
||||
"WorkDrive.workspace.READ",
|
||||
"WorkDrive.files.ALL",
|
||||
"ZohoFiles.files.ALL",
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
|
||||
|
@ -65,8 +61,6 @@ var (
|
|||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
rootURL = "https://workdrive.zoho.eu/api/v1"
|
||||
downloadURL = "https://download.zoho.eu/v1/workdrive"
|
||||
uploadURL = "http://upload.zoho.eu/workdrive-api/v1/"
|
||||
accountsURL = "https://accounts.zoho.eu"
|
||||
)
|
||||
|
||||
|
@ -85,7 +79,7 @@ func init() {
|
|||
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
|
||||
}
|
||||
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
|
||||
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
|
@ -94,12 +88,12 @@ func init() {
|
|||
|
||||
switch config.State {
|
||||
case "":
|
||||
return oauthutil.ConfigOut("type", &oauthutil.Options{
|
||||
return oauthutil.ConfigOut("teams", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
// No refresh token unless ApprovalForce is set
|
||||
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
|
||||
})
|
||||
case "type":
|
||||
case "teams":
|
||||
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
|
||||
// its own custom type
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
|
@ -114,43 +108,24 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
_, apiSrv, err := getSrvs()
|
||||
authSrv, apiSrv, err := getSrvs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
userInfo, err := getUserInfo(ctx, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Get the user Info
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/oauth/user/info",
|
||||
}
|
||||
// If personal Edition only one private Space is available. Directly configure that.
|
||||
if userInfo.Data.Attributes.Edition == "PERSONAL" {
|
||||
return fs.ConfigResult("private_space", userInfo.Data.ID)
|
||||
}
|
||||
// Otherwise go to team selection
|
||||
return fs.ConfigResult("team", userInfo.Data.ID)
|
||||
case "private_space":
|
||||
_, apiSrv, err := getSrvs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
|
||||
workspace := workspaces[i]
|
||||
return workspace.ID, workspace.Name
|
||||
})
|
||||
case "team":
|
||||
_, apiSrv, err := getSrvs()
|
||||
var user api.User
|
||||
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the teams
|
||||
teams, err := listTeams(ctx, config.Result, apiSrv)
|
||||
teams, err := listTeams(ctx, user.ZUID, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -168,19 +143,9 @@ func init() {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workspaces = append(workspaces, privateSpaces...)
|
||||
|
||||
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
|
||||
workspace := workspaces[i]
|
||||
return workspace.ID, workspace.Name
|
||||
return workspace.ID, workspace.Attributes.Name
|
||||
})
|
||||
case "workspace_end":
|
||||
workspaceID := config.Result
|
||||
|
@ -214,13 +179,7 @@ browser.`,
|
|||
}, {
|
||||
Value: "com.au",
|
||||
Help: "Australia",
|
||||
}},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to large file upload api (>= 10 MiB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Advanced: true,
|
||||
}, {
|
||||
}}}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
|
@ -234,7 +193,6 @@ browser.`,
|
|||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
RootFolderID string `config:"root_folder_id"`
|
||||
Region string `config:"region"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
|
@ -247,8 +205,6 @@ type Fs struct {
|
|||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
downloadsrv *rest.Client // the connection to the download server
|
||||
uploadsrv *rest.Client // the connection to the upload server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
}
|
||||
|
@ -273,8 +229,6 @@ func setupRegion(m configmap.Mapper) error {
|
|||
return errors.New("no region set")
|
||||
}
|
||||
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
|
||||
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
|
||||
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
|
||||
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
|
||||
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
|
@ -283,63 +237,11 @@ func setupRegion(m configmap.Mapper) error {
|
|||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
type workspaceInfo struct {
|
||||
ID string
|
||||
Name string
|
||||
}
|
||||
|
||||
func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
|
||||
var userInfo api.UserInfoResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/me",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &userInfo, nil
|
||||
}
|
||||
|
||||
func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
|
||||
var currentTeamInfo api.CurrentTeamInfo
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/teams/" + teamID + "/currentuser",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, ¤tTeamInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ¤tTeamInfo, err
|
||||
}
|
||||
|
||||
func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
|
||||
var privateSpaceListResponse api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/" + teamUserID + "/privatespace",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
|
||||
for _, workspace := range privateSpaceListResponse.TeamWorkspace {
|
||||
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
|
||||
}
|
||||
return workspaceList, err
|
||||
}
|
||||
|
||||
func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
var teamList api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/users/" + zuid + "/teams",
|
||||
Path: "/users/" + strconv.FormatInt(uid, 10) + "/teams",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &teamList)
|
||||
|
@ -349,24 +251,18 @@ func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWo
|
|||
return teamList.TeamWorkspace, nil
|
||||
}
|
||||
|
||||
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
|
||||
var workspaceListResponse api.TeamWorkspaceResponse
|
||||
func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
|
||||
var workspaceList api.TeamWorkspaceResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/teams/" + teamID + "/workspaces",
|
||||
ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
|
||||
}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
|
||||
for _, workspace := range workspaceListResponse.TeamWorkspace {
|
||||
workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
|
||||
}
|
||||
|
||||
return workspaceList, nil
|
||||
return workspaceList.TeamWorkspace, nil
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------
|
||||
|
@ -389,20 +285,13 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
|||
}
|
||||
authRetry := false
|
||||
|
||||
// Bail out early if we are missing OAuth Scopes.
|
||||
if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
|
||||
fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
|
||||
return false, err
|
||||
}
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
if resp != nil && resp.StatusCode == 429 {
|
||||
err = pacer.RetryAfterError(err, 60*time.Second)
|
||||
fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
|
||||
return true, err
|
||||
fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
|
||||
time.Sleep(60 * time.Second)
|
||||
}
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
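Both sides of the 429 branch above wait 60 seconds before retrying a rate-limited request; one hands the delay to the pacer via `RetryAfterError`, the other sleeps inline. A self-contained sketch of the underlying idea using only the standard library, not rclone's pacer (the URL is a placeholder):

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// getWithRateLimitRetry issues a GET and, when the server answers 429,
// waits before trying again - honouring a Retry-After header in seconds
// if present, otherwise falling back to the 60s used by the backend above.
func getWithRateLimitRetry(url string, attempts int) (*http.Response, error) {
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusTooManyRequests {
			return resp, nil
		}
		wait := 60 * time.Second
		if ra, err := strconv.Atoi(resp.Header.Get("Retry-After")); err == nil {
			wait = time.Duration(ra) * time.Second
		}
		resp.Body.Close()
		fmt.Printf("429 received, waiting %v before retrying\n", wait)
		time.Sleep(wait)
	}
	return nil, fmt.Errorf("still rate limited after %d attempts", attempts)
}

func main() {
	resp, err := getWithRateLimitRetry("https://example.com/", 3)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```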
|
||||
|
@ -500,11 +389,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
if err := configstruct.Set(m, opt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opt.UploadCutoff < defaultUploadCutoff {
|
||||
return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
|
||||
}
|
||||
|
||||
err := setupRegion(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -521,8 +405,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
|
||||
uploadsrv: rest.NewClient(oAuthClient).SetRoot(uploadURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
|
@ -761,61 +643,9 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim
|
|||
return
|
||||
}
|
||||
|
||||
func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/stream/upload",
|
||||
Body: in,
|
||||
ContentLength: &size,
|
||||
ContentType: "application/octet-stream",
|
||||
Options: options,
|
||||
ExtraHeaders: map[string]string{
|
||||
"x-filename": url.QueryEscape(name),
|
||||
"x-parent_id": parent,
|
||||
"override-name-exist": "true",
|
||||
"upload-id": uuid.New().String(),
|
||||
"x-streammode": "1",
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var uploadResponse *api.LargeUploadResponse
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("upload large error: %v", err)
|
||||
}
|
||||
if len(uploadResponse.Uploads) != 1 {
|
||||
return nil, errors.New("upload: invalid response")
|
||||
}
|
||||
upload := uploadResponse.Uploads[0]
|
||||
uploadInfo, err := upload.GetUploadFileInfo()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("upload error: %w", err)
|
||||
}
|
||||
|
||||
// Fill in the api.Item from the api.UploadFileInfo
|
||||
var info api.Item
|
||||
info.ID = upload.Attributes.RessourceID
|
||||
info.Attributes.Name = upload.Attributes.FileName
|
||||
// info.Attributes.Type = not used
|
||||
info.Attributes.IsFolder = false
|
||||
// info.Attributes.CreatedTime = not used
|
||||
info.Attributes.ModifiedTime = uploadInfo.GetModTime()
|
||||
// info.Attributes.UploadedTime = 0 not used
|
||||
info.Attributes.StorageInfo.Size = uploadInfo.Size
|
||||
info.Attributes.StorageInfo.FileCount = 0
|
||||
info.Attributes.StorageInfo.FolderCount = 0
|
||||
|
||||
return &info, nil
|
||||
}
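`uploadLargeFile` above streams the body in a single POST, carrying the file metadata (`x-filename`, `x-parent_id`, the upload id) in request headers rather than in a multipart form. A minimal standard-library sketch of that streaming pattern, with a placeholder endpoint and only two of the headers kept for illustration:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// streamUpload POSTs the reader as a raw octet-stream, passing file
// metadata in request headers. The endpoint here is a placeholder.
func streamUpload(url, name, parentID string, size int64, in io.Reader) error {
	req, err := http.NewRequest("POST", url, in)
	if err != nil {
		return err
	}
	req.ContentLength = size // a known size lets the server allocate up front
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("x-filename", name)
	req.Header.Set("x-parent_id", parentID)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	f, err := os.Open("example.bin") // placeholder file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := streamUpload("https://upload.example.com/stream/upload", "example.bin", "parent123", fi.Size(), f); err != nil {
		fmt.Println(err)
	}
}
```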
|
||||
|
||||
func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
|
||||
params := url.Values{}
|
||||
params.Set("filename", url.QueryEscape(name))
|
||||
params.Set("filename", name)
|
||||
params.Set("parent_id", parent)
|
||||
params.Set("override-name-exist", strconv.FormatBool(true))
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
|
||||
|
@ -875,11 +705,6 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
|
|||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
existingObj, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return existingObj, existingObj.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
size := src.Size()
|
||||
remote := src.Remote()
|
||||
|
||||
|
@ -889,26 +714,12 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// use normal upload API for small sizes (<10MiB)
|
||||
if size < int64(f.opt.UploadCutoff) {
|
||||
// Upload the file
|
||||
info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
}
|
||||
|
||||
// large file API otherwise
|
||||
info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
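`Put` (and `Update` further down) picks the plain upload call for objects smaller than `upload_cutoff` (10 MiB by default) and the streaming large-file call otherwise. A stripped-down sketch of that dispatch, with hypothetical stand-ins for the two upload paths:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

const defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB, matching the backend constant

// upload and uploadLarge are hypothetical stand-ins for the two API paths.
func upload(name string, size int64, in io.Reader) error {
	fmt.Println("small upload:", name)
	return nil
}

func uploadLarge(name string, size int64, in io.Reader) error {
	fmt.Println("large streaming upload:", name)
	return nil
}

// put dispatches on the object size, in the same spirit as Put/Update above.
func put(name string, size int64, in io.Reader, cutoff int64) error {
	if size < cutoff {
		return upload(name, size, in)
	}
	return uploadLarge(name, size, in)
}

func main() {
	data := strings.NewReader("hello")
	if err := put("small.txt", int64(data.Len()), data, defaultUploadCutoff); err != nil {
		fmt.Println(err)
	}
}
```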
|
||||
|
||||
// Mkdir creates the container if it doesn't exist
|
||||
|
@ -1348,7 +1159,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
|||
Options: options,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.downloadsrv.Call(ctx, &opts)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -1372,22 +1183,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||
return err
|
||||
}
|
||||
|
||||
// use normal upload API for small sizes (<10MiB)
|
||||
if size < int64(o.fs.opt.UploadCutoff) {
|
||||
// Overwrite the old file
|
||||
info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
// large file API otherwise
|
||||
info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestZoho:",
|
||||
SkipInvalidUTF8: true,
|
||||
NilObject: (*zoho.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
|
|
@ -21,12 +21,12 @@ def find_backends():
|
|||
def output_docs(backend, out, cwd):
|
||||
"""Output documentation for backend options to out"""
|
||||
out.flush()
|
||||
subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)
|
||||
subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
|
||||
|
||||
def output_backend_tool_docs(backend, out, cwd):
|
||||
"""Output documentation for backend tool to out"""
|
||||
out.flush()
|
||||
subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
|
||||
subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
|
||||
|
||||
def alter_doc(backend):
|
||||
"""Alter the documentation for backend"""
|
||||
|
|
|
@ -42,6 +42,7 @@ docs = [
|
|||
"dropbox.md",
|
||||
"filefabric.md",
|
||||
"filescom.md",
|
||||
"frostfs.md",
|
||||
"ftp.md",
|
||||
"gofile.md",
|
||||
"googlecloudstorage.md",
|
||||
|
|
|
@ -5,20 +5,13 @@ import (
|
|||
"bytes"
|
||||
"log"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CaptureOutput runs a function capturing its output.
|
||||
func CaptureOutput(fun func()) []byte {
|
||||
logSave := log.Writer()
|
||||
logrusSave := logrus.StandardLogger().Writer()
|
||||
defer func() {
|
||||
err := logrusSave.Close()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "error closing logrusSave: %v", err)
|
||||
}
|
||||
}()
|
||||
logrusSave := logrus.StandardLogger().Out
|
||||
buf := &bytes.Buffer{}
|
||||
log.SetOutput(buf)
|
||||
logrus.SetOutput(buf)
|
||||
|
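The change above saves `logrus.StandardLogger().Out` rather than the value returned by `Writer()`; `Writer()` returns a new pipe that feeds into the logger, so restoring output to it afterwards would not put the original destination back. A minimal capture-and-restore sketch against the public logrus API (illustrative only, not the bisync helper itself):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/sirupsen/logrus"
)

// captureOutput redirects the standard library logger and logrus into a
// buffer while fun runs, then restores the previous destinations.
func captureOutput(fun func()) []byte {
	logSave := log.Writer()                   // current stdlib log destination
	logrusSave := logrus.StandardLogger().Out // current logrus destination

	buf := &bytes.Buffer{}
	log.SetOutput(buf)
	logrus.SetOutput(buf)
	defer func() {
		log.SetOutput(logSave)
		logrus.SetOutput(logrusSave)
	}()

	fun()
	return buf.Bytes()
}

func main() {
	out := captureOutput(func() {
		logrus.Info("hello from logrus")
	})
	fmt.Printf("captured: %q\n", out)
}
```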
|
|
@ -15,7 +15,6 @@ import (
|
|||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -217,7 +216,6 @@ type bisyncTest struct {
|
|||
stopAt int
|
||||
TestFn bisync.TestFunc
|
||||
ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
|
||||
ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank
|
||||
}
|
||||
|
||||
var color = bisync.Color
|
||||
|
@ -948,10 +946,6 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
|
|||
if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") {
|
||||
b.t.Skip("skipping test as remote does not support empty dirs")
|
||||
}
|
||||
ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank
|
||||
if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) {
|
||||
b.ignoreBlankHash = true
|
||||
}
|
||||
if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported {
|
||||
if b.testCase != "nomodtime" {
|
||||
b.t.Skip("skipping test as at least one remote does not support setting modtime")
|
||||
|
@ -1557,12 +1551,6 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
|||
if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) {
|
||||
logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe)
|
||||
}
|
||||
if b.ignoreBlankHash {
|
||||
logReplacements = append(logReplacements,
|
||||
`^.*hash is missing.*$`, dropMe,
|
||||
`^.*not equal on recheck.*$`, dropMe,
|
||||
)
|
||||
}
|
||||
rep := logReplacements
|
||||
if b.testCase == "dry_run" {
|
||||
rep = append(rep, dryrunReplacements...)
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -194,7 +193,7 @@ var commandDefinition = &cobra.Command{
|
|||
cmd.Run(false, true, command, func() error {
|
||||
err := Bisync(ctx, fs1, fs2, &opt)
|
||||
if err == ErrBisyncAborted {
|
||||
return fserrors.FatalError(err)
|
||||
os.Exit(2)
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -22,6 +21,7 @@ import (
|
|||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// ListingHeader defines first line of a listing
|
||||
|
|
|
@ -66,7 +66,8 @@ func quotePath(path string) string {
|
|||
return escapePath(path, true)
|
||||
}
|
||||
|
||||
var Colors bool // Colors controls whether terminal colors are enabled
|
||||
// Colors controls whether terminal colors are enabled
|
||||
var Colors bool
|
||||
|
||||
// Color handles terminal colors for bisync
|
||||
func Color(style string, s string) string {
|
||||
|
|
|
@ -23,7 +23,7 @@ import (
|
|||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code
|
||||
// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
|
||||
var ErrBisyncAborted = errors.New("bisync aborted")
|
||||
|
||||
// bisyncRun keeps bisync runtime state
|
||||
|
|
10
cmd/cmd.go
10
cmd/cmd.go
|
@ -50,6 +50,7 @@ var (
|
|||
version bool
|
||||
// Errors
|
||||
errorCommandNotFound = errors.New("command not found")
|
||||
errorUncategorized = errors.New("uncategorized error")
|
||||
errorNotEnoughArguments = errors.New("not enough arguments")
|
||||
errorTooManyArguments = errors.New("too many arguments")
|
||||
)
|
||||
|
@ -494,6 +495,8 @@ func resolveExitCode(err error) {
|
|||
os.Exit(exitcode.DirNotFound)
|
||||
case errors.Is(err, fs.ErrorObjectNotFound):
|
||||
os.Exit(exitcode.FileNotFound)
|
||||
case errors.Is(err, errorUncategorized):
|
||||
os.Exit(exitcode.UncategorizedError)
|
||||
case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
|
||||
os.Exit(exitcode.TransferExceeded)
|
||||
case errors.Is(err, fssync.ErrorMaxDurationReached):
|
||||
|
@ -504,10 +507,8 @@ func resolveExitCode(err error) {
|
|||
os.Exit(exitcode.NoRetryError)
|
||||
case fserrors.IsFatalError(err):
|
||||
os.Exit(exitcode.FatalError)
|
||||
case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments):
|
||||
os.Exit(exitcode.UsageError)
|
||||
default:
|
||||
os.Exit(exitcode.UncategorizedError)
|
||||
os.Exit(exitcode.UsageError)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -535,7 +536,6 @@ func Main() {
|
|||
if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
|
||||
Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
|
||||
}
|
||||
fs.Logf(nil, "Fatal error: %v", err)
|
||||
os.Exit(exitcode.UsageError)
|
||||
fs.Fatalf(nil, "Fatal error: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -107,7 +107,7 @@ func (lrw *loggingResponseWriter) logRequest(code int, err interface{}) {
|
|||
err = ""
|
||||
}
|
||||
|
||||
fs.LogPrintf(level, lrw.request.URL, "%s %s %d %s %s",
|
||||
fs.LogLevelPrintf(level, lrw.request.URL, "%s %s %d %s %s",
|
||||
lrw.request.RemoteAddr, lrw.request.Method, code,
|
||||
lrw.request.Header.Get("SOAPACTION"), err)
|
||||
}
|
||||
|
|
|
@ -174,7 +174,7 @@ func TestCmdTest(t *testing.T) {
|
|||
// Test error and error output
|
||||
out, err = rclone("version", "--provoke-an-error")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), "exit status 2")
|
||||
assert.Contains(t, err.Error(), "exit status 1")
|
||||
assert.Contains(t, out, "Error: unknown flag")
|
||||
}
|
||||
|
||||
|
|
|
@ -123,6 +123,7 @@ WebDAV or S3, that work out of the box.)
|
|||
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
|
||||
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
|
||||
{{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
|
||||
{{< provider name="FrostFS" home="https://git.frostfs.info/TrueCloudLab/" config="/frostfs/" >}}
|
||||
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
|
||||
{{< provider name="Gofile" home="https://gofile.io/" config="/gofile/" >}}
|
||||
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
|
||||
|
|
|
@ -889,9 +889,3 @@ put them back in again.` >}}
|
|||
* Mathieu Moreau <mrx23dot@users.noreply.github.com>
|
||||
* fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
|
||||
* Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
|
||||
* Divyam <47589864+divyam234@users.noreply.github.com>
|
||||
* ttionya <ttionya@users.noreply.github.com>
|
||||
* quiescens <quiescens@gmail.com>
|
||||
* rishi.sridhar <rishi.sridhar@zohocorp.com>
|
||||
* Lawrence Murray <lawrence@indii.org>
|
||||
* Leandro Piccilli <leandro.piccilli@thalesgroup.com>
|
||||
|
|
|
@ -180,13 +180,6 @@ If the resource has multiple user-assigned identities you will need to
|
|||
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
|
||||
section](#use_msi).
|
||||
|
||||
If you are operating in disconnected clouds, or private clouds such as
|
||||
Azure Stack you may want to set `disable_instance_discovery = true`.
|
||||
This determines whether rclone requests Microsoft Entra instance
|
||||
metadata from `https://login.microsoft.com/` before authenticating.
|
||||
Setting this to `true` will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
|
||||
##### Env Auth: 3. Azure CLI credentials (as used by the az tool)
|
||||
|
||||
Credentials created with the `az` tool can be picked up using `env_auth`.
|
||||
|
@ -297,16 +290,6 @@ be explicitly specified using exactly one of the `msi_object_id`,
|
|||
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
|
||||
set, this is equivalent to using `env_auth`.
|
||||
|
||||
#### Azure CLI tool `az` {#use_az}
|
||||
|
||||
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
|
||||
Setting this can be useful if you wish to use the `az` CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
|
||||
Don't set `env_auth` at the same time.
|
||||
|
||||
#### Anonymous {#anonymous}
|
||||
|
||||
If you want to access resources with public anonymous access then set
|
||||
|
|
|
@ -968,15 +968,12 @@ that while concurrent bisync runs are allowed, _be very cautious_
|
|||
that there is no overlap in the trees being synched between concurrent runs,
|
||||
lest there be replicated files, deleted files and general mayhem.
|
||||
|
||||
### Exit codes
|
||||
### Return codes
|
||||
|
||||
`rclone bisync` returns the following codes to the calling program (see the sketch after this list):
|
||||
- `0` on a successful run,
|
||||
- `1` for a non-critical failing run (a rerun may be successful),
|
||||
- `2` on syntax or usage error,
|
||||
- `7` for a critically aborted run (requires a `--resync` to recover).
|
||||
|
||||
See also the section about [exit codes](/docs/#exit-code) in main docs.
|
||||
- `2` for a critically aborted run (requires a `--resync` to recover).
|
||||
|
||||
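Both versions of the list agree that `0` means success and `1` means a soft failure that a rerun may fix; they differ on whether a critical abort is reported as `2` or `7`. A minimal sketch of a caller that runs `rclone bisync` and reacts to the exit status, treating anything above `1` as needing `--resync` (the remote paths are placeholders):

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Placeholder remotes; replace with your own paths.
	cmd := exec.Command("rclone", "bisync", "remote1:path", "remote2:path")
	err := cmd.Run()

	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("bisync succeeded")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 1:
		fmt.Println("non-critical failure: a rerun may succeed")
	case errors.As(err, &exitErr):
		// 2 here (7 on other versions) signals a critical abort.
		fmt.Printf("critical abort (code %d): rerun with --resync\n", exitErr.ExitCode())
	default:
		fmt.Printf("could not run rclone: %v\n", err)
	}
}
```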
### Graceful Shutdown
|
||||
|
||||
|
|
|
@ -5,6 +5,36 @@ description: "Rclone Changelog"
|
|||
|
||||
# Changelog
|
||||
|
||||
## v1.68.2 - 2024-11-15
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.1...v1.68.2)
|
||||
|
||||
* Security fixes
|
||||
* local backend: CVE-2024-52522: fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Only affects users using `--metadata` and `--links` and copying files to the local backend
|
||||
* See https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
|
||||
* build: bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (dependabot)
|
||||
* This is an issue in a dependency which is used for JWT certificates
|
||||
* See https://github.com/golang-jwt/jwt/security/advisories/GHSA-29wx-vh33-7x7r
|
||||
* Bug Fixes
|
||||
* accounting: Fix wrong message on SIGUSR2 to enable/disable bwlimit (Nick Craig-Wood)
|
||||
* bisync: Fix output capture restoring the wrong output for logrus (Dimitrios Slamaris)
|
||||
* dlna: Fix loggingResponseWriter disregarding log level (Simon Bos)
|
||||
* serve s3: Fix excess locking which was making serve s3 single threaded (Nick Craig-Wood)
|
||||
* doc fixes (Nick Craig-Wood, tgfisher, Alexandre Hamez, Randy Bush)
|
||||
* Local
|
||||
* Fix permission and ownership on symlinks with `--links` and `--metadata` (Nick Craig-Wood)
|
||||
* Fix `--copy-links` on macOS when cloning (nielash)
|
||||
* Onedrive
|
||||
* Fix Retry-After handling to look at 503 errors also (Nick Craig-Wood)
|
||||
* Pikpak
|
||||
* Fix cid/gcid calculations for fs.OverrideRemote (wiserain)
|
||||
* Fix fatal crash on startup with token that can't be refreshed (Nick Craig-Wood)
|
||||
* S3
|
||||
* Fix crash when using `--s3-download-url` after migration to SDKv2 (Nick Craig-Wood)
|
||||
* Storj provider: fix server-side copy of files bigger than 5GB (Kaloyan Raev)
|
||||
* Fix multitenant multipart uploads with CEPH (Nick Craig-Wood)
|
||||
|
||||
## v1.68.1 - 2024-09-24
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1)
|
||||
|
|
|
@ -296,6 +296,21 @@ rclone [flags]
|
|||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--fix-case Force rename of case insensitive dest to match source
|
||||
--frostfs-address string Address of account
|
||||
--frostfs-ape-cache-invalidation-duration Duration APE cache invalidation duration (default 8s)
|
||||
--frostfs-ape-cache-invalidation-timeout Duration APE cache invalidation timeout (default 24s)
|
||||
--frostfs-ape-chain-check-interval Duration The interval for verifying that the APE chain is saved in FrostFS (default 500ms)
|
||||
--frostfs-connection-timeout Duration FrostFS connection timeout (default 4s)
|
||||
--frostfs-container-creation-policy string Container creation policy for new containers (default "private")
|
||||
--frostfs-description string Description of the remote
|
||||
--frostfs-endpoint string Endpoints to connect to FrostFS node
|
||||
--frostfs-password string Password to decrypt wallet
|
||||
--frostfs-placement-policy string Placement policy for new containers (default "REP 3")
|
||||
--frostfs-rebalance-interval Duration FrostFS rebalance connections interval (default 15s)
|
||||
--frostfs-request-timeout Duration FrostFS request timeout (default 12s)
|
||||
--frostfs-rpc-endpoint string Endpoint to connect to Neo rpc node
|
||||
--frostfs-session-expiration int FrostFS session expiration epoch (default 4294967295)
|
||||
--frostfs-wallet string Path to wallet
|
||||
--fs-cache-expire-duration Duration Cache remotes for this long (0 to disable caching) (default 5m0s)
|
||||
--fs-cache-expire-interval Duration Interval to check for expired remotes (default 1m0s)
|
||||
--ftp-ask-password Allow asking for FTP password when needed
|
||||
|
@ -510,7 +525,7 @@ rclone [flags]
|
|||
--metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
|
||||
--metadata-mapper SpaceSepList Program to run to transforming metadata before upload
|
||||
--metadata-set stringArray Add metadata key=value when uploading
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
|
||||
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--metrics-baseurl string Prefix for URLs - leave blank for root
|
||||
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -616,21 +631,18 @@ rclone [flags]
|
|||
--pcloud-token string OAuth Access Token as a JSON blob
|
||||
--pcloud-token-url string Token server url
|
||||
--pcloud-username string Your pcloud username
|
||||
--pikpak-auth-url string Auth server URL
|
||||
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
|
||||
--pikpak-client-id string OAuth Client Id
|
||||
--pikpak-client-secret string OAuth Client Secret
|
||||
--pikpak-description string Description of the remote
|
||||
--pikpak-device-id string Device ID used for authorization
|
||||
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
|
||||
--pikpak-pass string Pikpak password (obscured)
|
||||
--pikpak-root-folder-id string ID of the root folder
|
||||
--pikpak-token string OAuth Access Token as a JSON blob
|
||||
--pikpak-token-url string Token server url
|
||||
--pikpak-trashed-only Only show files that are in the trash
|
||||
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
|
||||
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--pikpak-user string Pikpak username
|
||||
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
|
||||
--pixeldrain-api-key string API key for your pixeldrain account
|
||||
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
|
||||
--pixeldrain-description string Description of the remote
|
||||
|
@ -683,7 +695,7 @@ rclone [flags]
|
|||
--quatrix-skip-project-folders Skip project folders in operations
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -932,7 +944,7 @@ rclone [flags]
|
|||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2-beta.8331.25cf42493.feature/add-frostfs-support")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
|
||||
|
|
|
@ -54,7 +54,9 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -398,9 +400,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
|
||||
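Since systemd strips `PATH`, the lookup effectively becomes: try the normal `PATH` first, then fall back to `/bin` and `/usr/bin`. A small illustrative sketch of that strategy (this is not rclone's actual implementation):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// findFusermount looks for fusermount3, then fusermount, first on the
// normal PATH and then in the /bin:/usr/bin fallback used when PATH is unset.
func findFusermount() (string, error) {
	candidates := []string{"fusermount3", "fusermount"}
	for _, name := range candidates {
		if path, err := exec.LookPath(name); err == nil {
			return path, nil
		}
	}
	// Fallback: probe the fixed directories directly.
	for _, dir := range []string{"/bin", "/usr/bin"} {
		for _, name := range candidates {
			p := dir + "/" + name
			if _, err := os.Stat(p); err == nil {
				return p, nil
			}
		}
	}
	return "", fmt.Errorf("fusermount/fusermount3 not found")
}

func main() {
	p, err := findFusermount()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("using", p)
}
```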
## Rclone as Unix mount helper
|
||||
|
||||
|
|
|
@ -55,7 +55,9 @@ When running in background mode the user will have to stop the mount manually:
|
|||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
# OS X
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
|
@ -399,9 +401,9 @@ Note that systemd runs mount units without any environment variables including
|
|||
`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
|
||||
and you should provide `--config` and `--cache-dir` explicitly as absolute
|
||||
paths via rclone arguments.
|
||||
Since mounting requires the `fusermount` program, rclone will use the fallback
|
||||
PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
|
||||
is present on this PATH.
|
||||
Since mounting requires the `fusermount` or `fusermount3` program,
|
||||
rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
|
||||
Please ensure that `fusermount`/`fusermount3` is present on this PATH.
|
||||
|
||||
## Rclone as Unix mount helper
|
||||
|
||||
|
|
|
@ -166,7 +166,7 @@ Flags to control the Remote Control API
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
|
|
@ -2868,9 +2868,9 @@ messages may not be valid after the retry. If rclone has done a retry
|
|||
it will log a high priority message if the retry was successful.
|
||||
|
||||
### List of exit codes ###
|
||||
* `0` - Success
|
||||
* `1` - Error not otherwise categorised
|
||||
* `2` - Syntax or usage error
|
||||
* `0` - success
|
||||
* `1` - Syntax or usage error
|
||||
* `2` - Error not otherwise categorised
|
||||
* `3` - Directory not found
|
||||
* `4` - File not found
|
||||
* `5` - Temporary error (one that more retries might fix) (Retry errors)
|
||||
|
|
|
@ -1809,9 +1809,9 @@ then select "OAuth client ID".
|
|||
|
||||
9. It will show you a client ID and client secret. Make a note of these.
|
||||
|
||||
(If you selected "External" at Step 5 continue to Step 9.
|
||||
(If you selected "External" at Step 5 continue to Step 10.
|
||||
If you chose "Internal" you don't need to publish and can skip straight to
|
||||
Step 10 but your destination drive must be part of the same Google Workspace.)
|
||||
Step 11 but your destination drive must be part of the same Google Workspace.)
|
||||
|
||||
10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
|
||||
You will also want to add yourself as a test user.
|
||||
|
|
|
@ -505,6 +505,8 @@ processed in.
|
|||
Arrange the order of filter rules with the most restrictive first and
|
||||
work down.
|
||||
|
||||
Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
|
||||
|
||||
E.g. for `filter-file.txt`:
|
||||
|
||||
# a sample filter rule file
|
||||
|
@ -512,6 +514,7 @@ E.g. for `filter-file.txt`:
|
|||
+ *.jpg
|
||||
+ *.png
|
||||
+ file2.avi
|
||||
- /dir/tmp/** # WARNING! This text will be treated as part of the path.
|
||||
- /dir/Trash/**
|
||||
+ /dir/**
|
||||
# exclude everything else
|
||||
|
|
|
@ -115,7 +115,7 @@ Flags for general networking and HTTP stuff.
|
|||
--tpslimit float Limit HTTP transactions per second to this
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
|
||||
--use-cookies Enable session cookiejar
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.68.2-beta.8331.25cf42493.feature/add-frostfs-support")
|
||||
```
|
||||
|
||||
|
||||
|
@ -264,7 +264,7 @@ Flags to control the Remote Control API.
|
|||
|
||||
```
|
||||
--rc Enable the remote control server
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
|
||||
--rc-addr stringArray IPaddress:Port or :Port to bind server to (default localhost:5572)
|
||||
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--rc-baseurl string Prefix for URLs - leave blank for root
|
||||
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -300,7 +300,7 @@ Flags to control the Remote Control API.
|
|||
Flags to control the Metrics HTTP endpoint..
|
||||
|
||||
```
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
|
||||
--metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to
|
||||
--metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--metrics-baseurl string Prefix for URLs - leave blank for root
|
||||
--metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
|
@ -552,6 +552,21 @@ Backend-only flags (these can be set in the config file also).
|
|||
--filescom-password string The password used to authenticate with Files.com (obscured)
|
||||
--filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com)
|
||||
--filescom-username string The username used to authenticate with Files.com
|
||||
--frostfs-address string Address of account
|
||||
--frostfs-ape-cache-invalidation-duration Duration APE cache invalidation duration (default 8s)
|
||||
--frostfs-ape-cache-invalidation-timeout Duration APE cache invalidation timeout (default 24s)
|
||||
--frostfs-ape-chain-check-interval Duration The interval for verifying that the APE chain is saved in FrostFS (default 500ms)
|
||||
--frostfs-connection-timeout Duration FrostFS connection timeout (default 4s)
|
||||
--frostfs-container-creation-policy string Container creation policy for new containers (default "private")
|
||||
--frostfs-description string Description of the remote
|
||||
--frostfs-endpoint string Endpoints to connect to FrostFS node
|
||||
--frostfs-password string Password to decrypt wallet
|
||||
--frostfs-placement-policy string Placement policy for new containers (default "REP 3")
|
||||
--frostfs-rebalance-interval Duration FrostFS rebalance connections interval (default 15s)
|
||||
--frostfs-request-timeout Duration FrostFS request timeout (default 12s)
|
||||
--frostfs-rpc-endpoint string Endpoint to connect to Neo rpc node
|
||||
--frostfs-session-expiration int FrostFS session expiration epoch (default 4294967295)
|
||||
--frostfs-wallet string Path to wallet
|
||||
--ftp-ask-password Allow asking for FTP password when needed
|
||||
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
|
||||
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
|
||||
|
@ -794,21 +809,18 @@ Backend-only flags (these can be set in the config file also).
|
|||
--pcloud-token string OAuth Access Token as a JSON blob
|
||||
--pcloud-token-url string Token server url
|
||||
--pcloud-username string Your pcloud username
|
||||
--pikpak-auth-url string Auth server URL
|
||||
--pikpak-chunk-size SizeSuffix Chunk size for multipart uploads (default 5Mi)
|
||||
--pikpak-client-id string OAuth Client Id
|
||||
--pikpak-client-secret string OAuth Client Secret
|
||||
--pikpak-description string Description of the remote
|
||||
--pikpak-device-id string Device ID used for authorization
|
||||
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
|
||||
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
|
||||
--pikpak-pass string Pikpak password (obscured)
|
||||
--pikpak-root-folder-id string ID of the root folder
|
||||
--pikpak-token string OAuth Access Token as a JSON blob
|
||||
--pikpak-token-url string Token server url
|
||||
--pikpak-trashed-only Only show files that are in the trash
|
||||
--pikpak-upload-concurrency int Concurrency for multipart uploads (default 5)
|
||||
--pikpak-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--pikpak-user string Pikpak username
|
||||
--pikpak-user-agent string HTTP user agent for pikpak (default "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0")
|
||||
--pixeldrain-api-key string API key for your pixeldrain account
|
||||
--pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api")
|
||||
--pixeldrain-description string Description of the remote
|
||||
|
|
466
docs/content/frostfs.md
Normal file
466
docs/content/frostfs.md
Normal file
|
@ -0,0 +1,466 @@
|
|||
---
|
||||
title: "FrostFS"
|
||||
description: "Rclone docs for FrostFS backend"
|
||||
versionIntroduced: "---"
|
||||
---
|
||||
|
||||
# {{< icon "fa fa-file" >}} FrostFS
|
||||
|
||||
Rclone FrostFS support is provided using the
|
||||
[git.frostfs.info/TrueCloudLab/frostfs-sdk-go](https://git.frostfs.info/TrueCloudLab/frostfs-sdk-go) package.
|
||||
|
||||
## Configuration
|
||||
|
||||
To create a FrostFS configuration named `remote`, run
|
||||
|
||||
rclone config
|
||||
|
||||
Rclone config guides you through an interactive setup process. A minimal
|
||||
rclone FrostFS remote definition only requires an endpoint and the path to a FrostFS user wallet.
|
||||
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> remote
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
1 / 1Fichier
|
||||
\ (fichier)
|
||||
2 / Akamai NetStorage
|
||||
\ (netstorage)
|
||||
3 / Alias for an existing remote
|
||||
\ (alias)
|
||||
4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
|
||||
\ (s3)
|
||||
5 / Backblaze B2
|
||||
\ (b2)
|
||||
6 / Better checksums for other remotes
|
||||
\ (hasher)
|
||||
7 / Box
|
||||
\ (box)
|
||||
8 / Cache a remote
|
||||
\ (cache)
|
||||
9 / Citrix Sharefile
|
||||
\ (sharefile)
|
||||
10 / Combine several remotes into one
|
||||
\ (combine)
|
||||
11 / Compress a remote
|
||||
\ (compress)
|
||||
12 / Distributed, decentralized object storage FrostFS
|
||||
\ (frostfs)
|
||||
13 / Dropbox
|
||||
\ (dropbox)
|
||||
14 / Encrypt/Decrypt a remote
|
||||
\ (crypt)
|
||||
15 / Enterprise File Fabric
|
||||
\ (filefabric)
|
||||
16 / FTP
|
||||
\ (ftp)
|
||||
17 / Files.com
|
||||
\ (filescom)
|
||||
18 / Gofile
|
||||
\ (gofile)
|
||||
19 / Google Cloud Storage (this is not Google Drive)
|
||||
\ (google cloud storage)
|
||||
20 / Google Drive
|
||||
\ (drive)
|
||||
21 / Google Photos
|
||||
\ (google photos)
|
||||
22 / HTTP
|
||||
\ (http)
|
||||
23 / Hadoop distributed file system
|
||||
\ (hdfs)
|
||||
24 / HiDrive
|
||||
\ (hidrive)
|
||||
25 / ImageKit.io
|
||||
\ (imagekit)
|
||||
26 / In memory object storage system.
|
||||
\ (memory)
|
||||
27 / Internet Archive
|
||||
\ (internetarchive)
|
||||
28 / Jottacloud
|
||||
\ (jottacloud)
|
||||
29 / Koofr, Digi Storage and other Koofr-compatible storage providers
|
||||
\ (koofr)
|
||||
30 / Linkbox
|
||||
\ (linkbox)
|
||||
31 / Local Disk
|
||||
\ (local)
|
||||
32 / Mail.ru Cloud
|
||||
\ (mailru)
|
||||
33 / Mega
|
||||
\ (mega)
|
||||
34 / Microsoft Azure Blob Storage
|
||||
\ (azureblob)
|
||||
35 / Microsoft Azure Files
|
||||
\ (azurefiles)
|
||||
36 / Microsoft OneDrive
|
||||
\ (onedrive)
|
||||
37 / OpenDrive
|
||||
\ (opendrive)
|
||||
38 / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
|
||||
\ (swift)
|
||||
39 / Oracle Cloud Infrastructure Object Storage
|
||||
\ (oracleobjectstorage)
|
||||
40 / Pcloud
|
||||
\ (pcloud)
|
||||
41 / PikPak
|
||||
\ (pikpak)
|
||||
42 / Pixeldrain Filesystem
|
||||
\ (pixeldrain)
|
||||
43 / Proton Drive
|
||||
\ (protondrive)
|
||||
44 / Put.io
|
||||
\ (putio)
|
||||
45 / QingCloud Object Storage
|
||||
\ (qingstor)
|
||||
46 / Quatrix by Maytech
|
||||
\ (quatrix)
|
||||
47 / SMB / CIFS
|
||||
\ (smb)
|
||||
48 / SSH/SFTP
|
||||
\ (sftp)
|
||||
49 / Sia Decentralized Cloud
|
||||
\ (sia)
|
||||
50 / Storj Decentralized Cloud Storage
|
||||
\ (storj)
|
||||
51 / Sugarsync
|
||||
\ (sugarsync)
|
||||
52 / Transparently chunk/split large files
|
||||
\ (chunker)
|
||||
53 / Uloz.to
|
||||
\ (ulozto)
|
||||
54 / Union merges the contents of several upstream fs
|
||||
\ (union)
|
||||
55 / Uptobox
|
||||
\ (uptobox)
|
||||
56 / WebDAV
|
||||
\ (webdav)
|
||||
57 / Yandex Disk
|
||||
\ (yandex)
|
||||
58 / Zoho
|
||||
\ (zoho)
|
||||
59 / premiumize.me
|
||||
\ (premiumizeme)
|
||||
60 / seafile
|
||||
\ (seafile)
|
||||
Storage> frostfs
|
||||
|
||||
Option endpoint.
|
||||
Endpoints to connect to FrostFS node
|
||||
Choose a number from below, or type in your own value.
|
||||
1 / One endpoint.
|
||||
\ (s01.frostfs.devenv:8080)
|
||||
2 / Multiple endpoints to form pool.
|
||||
\ (s01.frostfs.devenv:8080 s02.frostfs.devenv:8080)
|
||||
3 / Multiple endpoints with priority (lower value is higher priority). Until s01 is healthy all requests will be sent to it.
|
||||
\ (s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2)
|
||||
4 / Multiple endpoints with priority and weights. After s01 is unhealthy requests will be sent to s02 and s03 in proportions 10% and 90% respectively.
|
||||
\ (s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9)
|
||||
endpoint> s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2
|
||||
|
||||
Option connection_timeout.
|
||||
FrostFS connection timeout
|
||||
Enter a value of type Duration. Press Enter for the default (4s).
|
||||
connection_timeout>
|
||||
|
||||
Option request_timeout.
|
||||
FrostFS request timeout
|
||||
Enter a value of type Duration. Press Enter for the default (12s).
|
||||
request_timeout>
|
||||
|
||||
Option rebalance_interval.
|
||||
FrostFS rebalance connections interval
|
||||
Enter a value of type Duration. Press Enter for the default (15s).
|
||||
rebalance_interval>
|
||||
|
||||
Option session_expiration.
|
||||
FrostFS session expiration epoch
|
||||
Enter a signed integer. Press Enter for the default (4294967295).
|
||||
session_expiration>
|
||||
|
||||
Option ape_cache_invalidation_duration.
|
||||
APE cache invalidation duration
|
||||
Enter a value of type Duration. Press Enter for the default (8s).
|
||||
ape_cache_invalidation_duration>
|
||||
|
||||
Option ape_cache_invalidation_timeout.
|
||||
APE cache invalidation timeout
|
||||
Enter a value of type Duration. Press Enter for the default (24s).
|
||||
ape_cache_invalidation_timeout>
|
||||
|
||||
Option ape_chain_check_interval.
|
||||
The interval for verifying that the APE chain is saved in FrostFS.
|
||||
Enter a value of type Duration. Press Enter for the default (500ms).
|
||||
ape_chain_check_interval>
|
||||
|
||||
Option rpc_endpoint.
|
||||
Endpoint to connect to Neo rpc node
|
||||
Enter a value. Press Enter to leave empty.
|
||||
rpc_endpoint>
|
||||
|
||||
Option wallet.
|
||||
Path to wallet
|
||||
Enter a value.
|
||||
wallet> /wallets/wallet.conf
|
||||
|
||||
Option address.
|
||||
Address of account
|
||||
Enter a value. Press Enter to leave empty.
|
||||
address>
|
||||
|
||||
Option password.
|
||||
Password to decrypt wallet
|
||||
Enter a value. Press Enter to leave empty.
|
||||
password>
|
||||
|
||||
Option placement_policy.
|
||||
Placement policy for new containers
|
||||
Choose a number from below, or type in your own value of type string.
|
||||
Press Enter for the default (REP 3).
|
||||
1 / Container will have 3 replicas
|
||||
\ (REP 3)
|
||||
placement_policy> REP 1
|
||||
|
||||
Option container_creation_policy.
|
||||
Container creation policy for new containers
|
||||
Choose a number from below, or type in your own value of type string.
|
||||
Press Enter for the default (private).
|
||||
1 / Public container, anyone can read and write
|
||||
\ (public-read-write)
|
||||
2 / Public container, owner can read and write, others can only read
|
||||
\ (public-read)
|
||||
3 / Private container, only owner has access to it
|
||||
\ (private)
|
||||
container_creation_policy>
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: frostfs
|
||||
- endpoint: s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2
|
||||
- password: M@zPGpWDravchenko/develop/go/playground/play_with_ape/cfg/wallet_akrav.js
|
||||
- placement_policy: REP 1
|
||||
Keep this "remote" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
As a remote root directory, you can use either the container identifier or its user-friendly name.
|
||||
If you choose to use a container name, rclone will create a new container with that name when
|
||||
it's used for the first time.
|
||||
|
||||
For example, both of the following commands would be valid if there was a `~/test-copy` directory and a container with
|
||||
the identifier `23fk3Bcw5mPZ4YtYkTLJbQebtM2WXHz4HL8FgsrTJkSf`:
|
||||
|
||||
rclone copy ~/test-copy remote:23fk3Bcw5mPZ4YtYkTLJbQebtM2WXHz4HL8FgsrTJkSf/test-copy
|
||||
rclone copy ~/test-copy remote:container-name/test-copy
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/frostfs/frostfs.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to frostfs (Distributed, decentralized object storage FrostFS).
|
||||
|
||||
#### --frostfs-endpoint
|
||||
|
||||
Endpoints to connect to FrostFS node
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_FROSTFS_ENDPOINT
|
||||
- Type: string
|
||||
- Required: true
|
||||
- Examples:
|
||||
- "s01.frostfs.devenv:8080"
|
||||
- One endpoint.
|
||||
- "s01.frostfs.devenv:8080 s02.frostfs.devenv:8080"
|
||||
- Multiple endpoints to form pool.
|
||||
- "s01.frostfs.devenv:8080,1 s02.frostfs.devenv:8080,2"
|
||||
- Multiple endpoints with priority (lower value has higher priority). Until s01 is healthy, all requests will be sent to it.
|
||||
- "s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,1 s03.frostfs.devenv:8080,2,9"
|
||||
- Multiple endpoints with priority and weights. After the s01 endpoint is unhealthy, requests will be sent to the s02 and s03 endpoints in proportions of 10% and 90%, respectively.
|
||||
|
||||
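The examples show that the endpoint value is a space-separated list of `address[,priority[,weight]]` entries. A hypothetical Go helper that parses this format, purely for illustration (it is not part of the backend):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// endpoint mirrors the documented "address[,priority[,weight]]" form.
type endpoint struct {
	Address  string
	Priority int // lower value is higher priority
	Weight   int
}

func parseEndpoints(s string) ([]endpoint, error) {
	var out []endpoint
	for _, field := range strings.Fields(s) {
		parts := strings.Split(field, ",")
		ep := endpoint{Address: parts[0], Priority: 1, Weight: 1}
		if len(parts) > 1 {
			p, err := strconv.Atoi(parts[1])
			if err != nil {
				return nil, fmt.Errorf("bad priority %q: %w", parts[1], err)
			}
			ep.Priority = p
		}
		if len(parts) > 2 {
			w, err := strconv.Atoi(parts[2])
			if err != nil {
				return nil, fmt.Errorf("bad weight %q: %w", parts[2], err)
			}
			ep.Weight = w
		}
		out = append(out, ep)
	}
	return out, nil
}

func main() {
	eps, err := parseEndpoints("s01.frostfs.devenv:8080,1,1 s02.frostfs.devenv:8080,2,9")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", eps)
}
```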
#### --frostfs-connection-timeout
|
||||
|
||||
FrostFS connection timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: connection_timeout
|
||||
- Env Var: RCLONE_FROSTFS_CONNECTION_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 4s
|
||||
|
||||
#### --frostfs-request-timeout
|
||||
|
||||
FrostFS request timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: request_timeout
|
||||
- Env Var: RCLONE_FROSTFS_REQUEST_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 12s
|
||||
|
||||
#### --frostfs-rebalance-interval
|
||||
|
||||
FrostFS rebalance connections interval
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: rebalance_interval
|
||||
- Env Var: RCLONE_FROSTFS_REBALANCE_INTERVAL
|
||||
- Type: Duration
|
||||
- Default: 15s
|
||||
|
||||
#### --frostfs-session-expiration
|
||||
|
||||
FrostFS session expiration epoch
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: session_expiration
|
||||
- Env Var: RCLONE_FROSTFS_SESSION_EXPIRATION
|
||||
- Type: int
|
||||
- Default: 4294967295
|
||||
|
||||
#### --frostfs-ape-cache-invalidation-duration
|
||||
|
||||
APE cache invalidation duration
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_cache_invalidation_duration
|
||||
- Env Var: RCLONE_FROSTFS_APE_CACHE_INVALIDATION_DURATION
|
||||
- Type: Duration
|
||||
- Default: 8s
|
||||
|
||||
#### --frostfs-ape-cache-invalidation-timeout
|
||||
|
||||
APE cache invalidation timeout
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_cache_invalidation_timeout
|
||||
- Env Var: RCLONE_FROSTFS_APE_CACHE_INVALIDATION_TIMEOUT
|
||||
- Type: Duration
|
||||
- Default: 24s
|
||||
|
||||
#### --frostfs-ape-chain-check-interval
|
||||
|
||||
The interval for verifying that the APE chain is saved in FrostFS.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: ape_chain_check_interval
|
||||
- Env Var: RCLONE_FROSTFS_APE_CHAIN_CHECK_INTERVAL
|
||||
- Type: Duration
|
||||
- Default: 500ms
|
||||
|
||||
#### --frostfs-rpc-endpoint

Endpoint to connect to Neo rpc node

Properties:

- Config: rpc_endpoint
- Env Var: RCLONE_FROSTFS_RPC_ENDPOINT
- Type: string
- Required: false

#### --frostfs-wallet

Path to wallet

Properties:

- Config: wallet
- Env Var: RCLONE_FROSTFS_WALLET
- Type: string
- Required: true

#### --frostfs-address

Address of account

Properties:

- Config: address
- Env Var: RCLONE_FROSTFS_ADDRESS
- Type: string
- Required: false

#### --frostfs-password

Password to decrypt wallet

Properties:

- Config: password
- Env Var: RCLONE_FROSTFS_PASSWORD
- Type: string
- Required: false
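A minimal sketch of pointing rclone at a wallet per invocation using the environment variables listed above (the wallet path and account address are placeholders, not values taken from this document):

```
# Replace the placeholders with your NEP-6 wallet path and account address
RCLONE_FROSTFS_WALLET=/path/to/wallet.json \
RCLONE_FROSTFS_ADDRESS=<account-address> \
rclone lsd remote:
```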
#### --frostfs-placement-policy

Placement policy for new containers

Properties:

- Config: placement_policy
- Env Var: RCLONE_FROSTFS_PLACEMENT_POLICY
- Type: string
- Default: "REP 3"
- Examples:
    - "REP 3"
        - Container will have 3 replicas

#### --frostfs-container-creation-policy

Container creation policy for new containers

Properties:

- Config: container_creation_policy
- Env Var: RCLONE_FROSTFS_CONTAINER_CREATION_POLICY
- Type: string
- Default: "private"
- Examples:
    - "public-read-write"
        - Public container, anyone can read and write
    - "public-read"
        - Public container, owner can read and write, others can only read
    - "private"
        - Private container, only owner has access to it
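Both options only apply to containers that rclone creates. As a sketch (assuming a remote named `remote` and a container name of your choosing), a container with a custom policy could be created like this:

```
# Create a container with a single replica that anyone can read
rclone mkdir remote:shared-data \
  --frostfs-placement-policy "REP 1" \
  --frostfs-container-creation-policy public-read
```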
### Advanced options

Here are the Advanced options specific to frostfs (Distributed, decentralized object storage FrostFS).

#### --frostfs-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_FROSTFS_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}
@ -363,20 +363,6 @@ Properties:

- Type: string
- Required: false

#### --gcs-access-token

Short-lived access token.

Leave blank normally.
Needed only if you want to use short-lived access tokens instead of interactive login.

Properties:

- Config: access_token
- Env Var: RCLONE_GCS_ACCESS_TOKEN
- Type: string
- Required: false

#### --gcs-anonymous

Access public buckets and objects without credentials.
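For builds of rclone that include the `--gcs-access-token` option shown in this hunk, a common pattern is to feed rclone a token minted by the gcloud CLI. This is a sketch rather than an official recipe; it assumes a remote named `gcs:` and an already authenticated gcloud session:

```
# Use a short-lived token instead of rclone's interactive OAuth flow
rclone lsd gcs: --gcs-access-token "$(gcloud auth print-access-token)"
```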
@ -502,18 +502,12 @@ is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115)

**The current google API does not allow photos to be downloaded at original resolution. This is very important if you are, for example, relying on "Google Photos" as a backup of your photos. You will not be able to use rclone to redownload original images. You could use 'google takeout' to recover the original photos as a last resort**

**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.

### Downloading Videos

When videos are downloaded they are downloaded in a really compressed
version of the video compared to downloading it via the Google Photos
web interface. This is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).

**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
headless browser to download images in full resolution.

### Duplicates

If a file name is duplicated in a directory then rclone will add the
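As a sketch of the `--gphotos-proxy` workaround mentioned above (assuming a Google Photos remote named `gphotos:` and a gphotosdl headless-browser proxy already running locally; the proxy address is an assumption, use whatever your proxy actually listens on):

```
# Route downloads through the local headless-browser proxy to get full-resolution media
rclone copy gphotos:media/all /backup/photos --gphotos-proxy "http://localhost:8282"
```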
@ -46,7 +46,7 @@ Here is an overview of the major features of each cloud storage system.

| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - |
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
| premiumize.me | - | - | Yes | No | R | - |

@ -521,7 +521,7 @@ upon backend-specific capabilities.

| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
| Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
@ -111,68 +111,29 @@ Properties:

Here are the Advanced options specific to pikpak (PikPak).

#### --pikpak-client-id
#### --pikpak-device-id

OAuth Client Id.

Leave blank normally.
Device ID used for authorization.

Properties:

- Config: client_id
- Env Var: RCLONE_PIKPAK_CLIENT_ID
- Config: device_id
- Env Var: RCLONE_PIKPAK_DEVICE_ID
- Type: string
- Required: false

#### --pikpak-client-secret
#### --pikpak-user-agent

OAuth Client Secret.
HTTP user agent for pikpak.

Leave blank normally.
Defaults to "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" or "--pikpak-user-agent" provided on command line.

Properties:

- Config: client_secret
- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
- Config: user_agent
- Env Var: RCLONE_PIKPAK_USER_AGENT
- Type: string
- Required: false

#### --pikpak-token

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_PIKPAK_TOKEN
- Type: string
- Required: false

#### --pikpak-auth-url

Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_PIKPAK_AUTH_URL
- Type: string
- Required: false

#### --pikpak-token-url

Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_PIKPAK_TOKEN_URL
- Type: string
- Required: false
- Default: "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
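The hunk above swaps the old OAuth options for `--pikpak-device-id` and `--pikpak-user-agent`. A sketch of using the new options (the device ID here is a made-up placeholder):

```
# Pin the device ID and present an explicit user agent for this run
rclone lsd pikpak: \
  --pikpak-device-id 0123456789abcdef0123456789abcdef \
  --pikpak-user-agent "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
```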
#### --pikpak-root-folder-id
@ -2294,21 +2294,6 @@ You can also do this entirely on the command line

This is the provider used as main example and described in the [configuration](#configuration) section above.

### AWS Directory Buckets

From rclone v1.69 [Directory Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
are supported.

You will need to set the `directory_buckets = true` config parameter
or use `--s3-directory-buckets`.

Note that rclone cannot yet:

- Create directory buckets
- List directory buckets

See [the --s3-directory-buckets flag](#s3-directory-buckets) for more info
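For reference, on rclone builds that still include this section the usage looks roughly like the following sketch (assuming an S3 remote named `remote` and an existing directory bucket; the bucket name is a placeholder):

```
# Address an existing directory bucket; rclone cannot create or list directory buckets yet
rclone copy /data remote:my-directory-bucket/data --s3-directory-buckets
```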
### AWS Snowball Edge

[AWS Snowball](https://aws.amazon.com/snowball/) is a hardware
@ -3491,8 +3476,8 @@ chunk_size = 5M
copy_cutoff = 5M
```

[C14 Cold Storage](https://www.online.net/en/storage/c14-cold-storage) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to C14. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
[Scaleway Glacier](https://www.scaleway.com/en/glacier-cold-storage/) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
So you can configure your remote with the `storage_class = GLACIER` option to upload directly to Scaleway Glacier. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
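A sketch of the Glacier upload described above (assuming a Scaleway remote named `scaleway:` and a bucket called `archive`, both placeholders), using the storage class flag instead of editing the config file:

```
# Upload straight to Glacier-class storage; restore to STANDARD before reading back
rclone copy /backup scaleway:archive/backup --s3-storage-class GLACIER
```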
### Seagate Lyve Cloud {#lyve}
@ -224,17 +224,6 @@ Properties:

- Type: string
- Required: false

#### --zoho-upload-cutoff

Cutoff for switching to large file upload api (>= 10 MiB).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_ZOHO_UPLOAD_CUTOFF
- Type: SizeSuffix
- Default: 10Mi

#### --zoho-encoding

The encoding for the backend.

@ -1 +1 @@

v1.69.0
v1.68.2
@ -35,13 +35,12 @@ func (tb *tokenBucket) startSignalHandler() {

			tb.toggledOff = !tb.toggledOff
			tb.curr, tb.prev = tb.prev, tb.curr
			s, limit := "disabled", "off"
			s := "disabled"
			if !tb.curr._isOff() {
				s = "enabled"
				limit = tb.currLimit.Bandwidth.String()
			}

			fs.Logf(nil, "Bandwidth limit %s by user (now %s)", s, limit)
			fs.Logf(nil, "Bandwidth limit %s by user", s)
		}()
	}
}()
105 fs/cache/cache.go vendored
@ -4,7 +4,6 @@ package cache
|
|||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
|
@ -16,8 +15,7 @@ var (
|
|||
once sync.Once // creation
|
||||
c *cache.Cache
|
||||
mu sync.Mutex // mutex to protect remap
|
||||
remap = map[string]string{} // map user supplied names to canonical names - [fsString]canonicalName
|
||||
childParentMap = map[string]string{} // tracks a one-to-many relationship between parent dirs and their direct children files - [child]parent
|
||||
remap = map[string]string{} // map user supplied names to canonical names
|
||||
)
|
||||
|
||||
// Create the cache just once
|
||||
|
@ -59,39 +57,6 @@ func addMapping(fsString, canonicalName string) {
|
|||
mu.Unlock()
|
||||
}
|
||||
|
||||
// addChild tracks known file (child) to directory (parent) relationships.
|
||||
// Note that the canonicalName of a child will always equal that of its parent,
|
||||
// but not everything with an equal canonicalName is a child.
|
||||
// It could be an alias or overridden version of a directory.
|
||||
func addChild(child, parent string) {
|
||||
if child == parent {
|
||||
return
|
||||
}
|
||||
mu.Lock()
|
||||
childParentMap[child] = parent
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// returns true if name is definitely known to be a child (i.e. a file, not a dir).
|
||||
// returns false if name is a dir or if we don't know.
|
||||
func isChild(child string) bool {
|
||||
mu.Lock()
|
||||
_, found := childParentMap[child]
|
||||
mu.Unlock()
|
||||
return found
|
||||
}
|
||||
|
||||
// ensures that we return fs.ErrorIsFile when necessary
|
||||
func getError(fsString string, err error) error {
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return err
|
||||
}
|
||||
if isChild(fsString) {
|
||||
return fs.ErrorIsFile
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFn gets an fs.Fs named fsString either from the cache or creates
|
||||
// it afresh with the create function
|
||||
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
|
||||
|
@ -104,39 +69,31 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
|
|||
created = ok
|
||||
return f, ok, err
|
||||
})
|
||||
f, ok := value.(fs.Fs)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
if ok {
|
||||
return f, err // for possible future uses of PutErr
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
f = value.(fs.Fs)
|
||||
// Check we stored the Fs at the canonical name
|
||||
if created {
|
||||
canonicalName := fs.ConfigString(f)
|
||||
if canonicalName != canonicalFsString {
|
||||
if err == nil { // it's a dir
|
||||
// Note that if err == fs.ErrorIsFile at this moment
|
||||
// then we can't rename the remote as it will have the
|
||||
// wrong error status, we need to add a new one.
|
||||
if err == nil {
|
||||
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
|
||||
value, found := c.Rename(canonicalFsString, canonicalName)
|
||||
if found {
|
||||
f = value.(fs.Fs)
|
||||
}
|
||||
addMapping(canonicalFsString, canonicalName)
|
||||
} else { // it's a file
|
||||
// the fs we cache is always the file's parent, never the file,
|
||||
// but we use the childParentMap to return the correct error status based on the fsString passed in.
|
||||
fs.Debugf(nil, "fs cache: renaming child cache item %q to be canonical for parent %q", canonicalFsString, canonicalName)
|
||||
value, found := c.Rename(canonicalFsString, canonicalName) // rename the file entry to parent
|
||||
if found {
|
||||
f = value.(fs.Fs) // if parent already exists, use it
|
||||
}
|
||||
Put(canonicalName, f) // force err == nil for the cache
|
||||
addMapping(canonicalFsString, canonicalName) // note the fsString-canonicalName connection for future lookups
|
||||
addChild(fsString, canonicalName) // note the file-directory connection for future lookups
|
||||
} else {
|
||||
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
|
||||
Put(canonicalName, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
return f, getError(fsString, err) // ensure fs.ErrorIsFile is returned when necessary
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Pin f into the cache until Unpin is called
|
||||
|
@ -154,6 +111,7 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
|
|||
runtime.SetFinalizer(x, func(_ interface{}) {
|
||||
Unpin(f)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
// Unpin f from the cache
|
||||
|
@ -216,9 +174,6 @@ func PutErr(fsString string, f fs.Fs, err error) {
|
|||
canonicalName := fs.ConfigString(f)
|
||||
c.PutErr(canonicalName, f, err)
|
||||
addMapping(fsString, canonicalName)
|
||||
if err == fs.ErrorIsFile {
|
||||
addChild(fsString, canonicalName)
|
||||
}
|
||||
}
|
||||
|
||||
// Put puts an fs.Fs named fsString into the cache
|
||||
|
@ -231,7 +186,6 @@ func Put(fsString string, f fs.Fs) {
|
|||
// Returns number of entries deleted
|
||||
func ClearConfig(name string) (deleted int) {
|
||||
createOnFirstUse()
|
||||
ClearMappingsPrefix(name)
|
||||
return c.DeletePrefix(name + ":")
|
||||
}
|
||||
|
||||
|
@ -239,7 +193,6 @@ func ClearConfig(name string) (deleted int) {
|
|||
func Clear() {
|
||||
createOnFirstUse()
|
||||
c.Clear()
|
||||
ClearMappings()
|
||||
}
|
||||
|
||||
// Entries returns the number of entries in the cache
|
||||
|
@ -247,39 +200,3 @@ func Entries() int {
|
|||
createOnFirstUse()
|
||||
return c.Entries()
|
||||
}
|
||||
|
||||
// ClearMappings removes everything from remap and childParentMap
|
||||
func ClearMappings() {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
remap = map[string]string{}
|
||||
childParentMap = map[string]string{}
|
||||
}
|
||||
|
||||
// ClearMappingsPrefix deletes all mappings to parents with given prefix
|
||||
//
|
||||
// Returns number of entries deleted
|
||||
func ClearMappingsPrefix(prefix string) (deleted int) {
|
||||
mu.Lock()
|
||||
do := func(mapping map[string]string) {
|
||||
for key, val := range mapping {
|
||||
if !strings.HasPrefix(val, prefix) {
|
||||
continue
|
||||
}
|
||||
delete(mapping, key)
|
||||
deleted++
|
||||
}
|
||||
}
|
||||
do(remap)
|
||||
do(childParentMap)
|
||||
mu.Unlock()
|
||||
return deleted
|
||||
}
|
||||
|
||||
// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
|
||||
//
|
||||
// Each entry is counted only once, regardless of entry.pinCount
|
||||
func EntriesWithPinCount() (pinned, unpinned int) {
|
||||
createOnFirstUse()
|
||||
return c.EntriesWithPinCount()
|
||||
}
|
||||
|
|
95 fs/cache/cache_test.go vendored
@ -24,7 +24,7 @@ func mockNewFs(t *testing.T) func(ctx context.Context, path string) (fs.Fs, erro
|
|||
switch path {
|
||||
case "mock:/":
|
||||
return mockfs.NewFs(ctx, "mock", "/", nil)
|
||||
case "mock:/file.txt", "mock:file.txt", "mock:/file2.txt", "mock:file2.txt":
|
||||
case "mock:/file.txt", "mock:file.txt":
|
||||
fMock, err := mockfs.NewFs(ctx, "mock", "/", nil)
|
||||
require.NoError(t, err)
|
||||
return fMock, fs.ErrorIsFile
|
||||
|
@ -55,7 +55,6 @@ func TestGet(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetFile(t *testing.T) {
|
||||
defer ClearMappings()
|
||||
create := mockNewFs(t)
|
||||
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
@ -64,7 +63,7 @@ func TestGetFile(t *testing.T) {
|
|||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.NotNil(t, f)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
assert.Equal(t, 2, Entries())
|
||||
|
||||
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
@ -72,7 +71,7 @@ func TestGetFile(t *testing.T) {
|
|||
|
||||
assert.Equal(t, f, f2)
|
||||
|
||||
// check it is also found when referred to by parent name
|
||||
// check parent is there too
|
||||
f2, err = GetFn(context.Background(), "mock:/", create)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, f2)
|
||||
|
@ -81,7 +80,6 @@ func TestGetFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetFile2(t *testing.T) {
|
||||
defer ClearMappings()
|
||||
create := mockNewFs(t)
|
||||
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
@ -90,7 +88,7 @@ func TestGetFile2(t *testing.T) {
|
|||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.NotNil(t, f)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
assert.Equal(t, 2, Entries())
|
||||
|
||||
f2, err := GetFn(context.Background(), "mock:file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
@ -98,7 +96,7 @@ func TestGetFile2(t *testing.T) {
|
|||
|
||||
assert.Equal(t, f, f2)
|
||||
|
||||
// check it is also found when referred to by parent name
|
||||
// check parent is there too
|
||||
f2, err = GetFn(context.Background(), "mock:/", create)
|
||||
require.Nil(t, err)
|
||||
require.NotNil(t, f2)
|
||||
|
@ -126,22 +124,22 @@ func TestPutErr(t *testing.T) {
|
|||
|
||||
assert.Equal(t, 0, Entries())
|
||||
|
||||
PutErr("mock:/", f, fs.ErrorNotFoundInConfigFile)
|
||||
PutErr("mock:file.txt", f, fs.ErrorIsFile)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
fNew, err := GetFn(context.Background(), "mock:/", create)
|
||||
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
|
||||
fNew, err := GetFn(context.Background(), "mock:file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
// Check canonicalisation
|
||||
|
||||
PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)
|
||||
PutErr("mock:/file.txt", f, fs.ErrorIsFile)
|
||||
|
||||
fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
@ -192,75 +190,6 @@ func TestPin(t *testing.T) {
|
|||
Unpin(f2)
|
||||
}
|
||||
|
||||
func TestPinFile(t *testing.T) {
|
||||
defer ClearMappings()
|
||||
create := mockNewFs(t)
|
||||
|
||||
// Test pinning and unpinning nonexistent
|
||||
f, err := mockfs.NewFs(context.Background(), "mock", "/file.txt", nil)
|
||||
require.NoError(t, err)
|
||||
Pin(f)
|
||||
Unpin(f)
|
||||
|
||||
// Now test pinning an existing
|
||||
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
assert.Equal(t, 1, len(childParentMap))
|
||||
|
||||
Pin(f2)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned := EntriesWithPinCount()
|
||||
assert.Equal(t, 1, pinned)
|
||||
assert.Equal(t, 0, unpinned)
|
||||
|
||||
Unpin(f2)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned = EntriesWithPinCount()
|
||||
assert.Equal(t, 0, pinned)
|
||||
assert.Equal(t, 1, unpinned)
|
||||
|
||||
// try a different child of the same parent, and parent
|
||||
// should not add additional cache items
|
||||
called = 0 // this one does create() because we haven't seen it before and don't yet know it's a file
|
||||
f3, err := GetFn(context.Background(), "mock:/file2.txt", create)
|
||||
assert.Equal(t, fs.ErrorIsFile, err)
|
||||
assert.Equal(t, 1, Entries())
|
||||
assert.Equal(t, 2, len(childParentMap))
|
||||
|
||||
parent, err := GetFn(context.Background(), "mock:/", create)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, Entries())
|
||||
assert.Equal(t, 2, len(childParentMap))
|
||||
|
||||
Pin(f3)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned = EntriesWithPinCount()
|
||||
assert.Equal(t, 1, pinned)
|
||||
assert.Equal(t, 0, unpinned)
|
||||
|
||||
Unpin(f3)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned = EntriesWithPinCount()
|
||||
assert.Equal(t, 0, pinned)
|
||||
assert.Equal(t, 1, unpinned)
|
||||
|
||||
Pin(parent)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned = EntriesWithPinCount()
|
||||
assert.Equal(t, 1, pinned)
|
||||
assert.Equal(t, 0, unpinned)
|
||||
|
||||
Unpin(parent)
|
||||
assert.Equal(t, 1, Entries())
|
||||
pinned, unpinned = EntriesWithPinCount()
|
||||
assert.Equal(t, 0, pinned)
|
||||
assert.Equal(t, 1, unpinned)
|
||||
|
||||
// all 3 should have equal configstrings
|
||||
assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(f3))
|
||||
assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(parent))
|
||||
}
|
||||
|
||||
func TestClearConfig(t *testing.T) {
|
||||
create := mockNewFs(t)
|
||||
|
||||
|
@ -269,9 +198,9 @@ func TestClearConfig(t *testing.T) {
|
|||
_, err := GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorIsFile, err)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
assert.Equal(t, 2, Entries()) // file + parent
|
||||
|
||||
assert.Equal(t, 1, ClearConfig("mock"))
|
||||
assert.Equal(t, 2, ClearConfig("mock"))
|
||||
|
||||
assert.Equal(t, 0, Entries())
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ func mockNewFs(t *testing.T) func() {
|
|||
require.NoError(t, err)
|
||||
cache.Put("mock:/", f)
|
||||
cache.Put(":mock:/", f)
|
||||
f, err = mockfs.NewFs(ctx, "mock", "dir/", nil)
|
||||
f, err = mockfs.NewFs(ctx, "mock", "dir/file.txt", nil)
|
||||
require.NoError(t, err)
|
||||
cache.PutErr("mock:dir/file.txt", f, fs.ErrorIsFile)
|
||||
return func() {
|
||||
|
|
|
@ -597,108 +597,6 @@ func TestServerSideCopy(t *testing.T) {
|
|||
fstest.CheckItems(t, FremoteCopy, file1)
|
||||
}
|
||||
|
||||
// Test copying a file over itself
|
||||
func TestCopyOverSelf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
|
||||
r.CheckRemoteItems(t, file1)
|
||||
file2 := r.WriteFile("sub dir/hello world", "hello world again", t2)
|
||||
r.CheckLocalItems(t, file2)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err := CopyDir(ctx, r.Fremote, r.Flocal, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
r.CheckRemoteItems(t, file2)
|
||||
}
|
||||
|
||||
// Test server-side copying a file over itself
|
||||
func TestServerSideCopyOverSelf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
|
||||
r.CheckRemoteItems(t, file1)
|
||||
|
||||
FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote()
|
||||
require.NoError(t, err)
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
fstest.CheckItems(t, FremoteCopy, file1)
|
||||
|
||||
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
|
||||
r.CheckRemoteItems(t, file2)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
fstest.CheckItems(t, FremoteCopy, file2)
|
||||
}
|
||||
|
||||
// Test moving a file over itself
|
||||
func TestMoveOverSelf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
|
||||
r.CheckRemoteItems(t, file1)
|
||||
file2 := r.WriteFile("sub dir/hello world", "hello world again", t2)
|
||||
r.CheckLocalItems(t, file2)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err := MoveDir(ctx, r.Fremote, r.Flocal, false, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
r.CheckLocalItems(t)
|
||||
r.CheckRemoteItems(t, file2)
|
||||
}
|
||||
|
||||
// Test server-side moving a file over itself
|
||||
func TestServerSideMoveOverSelf(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1)
|
||||
r.CheckRemoteItems(t, file1)
|
||||
|
||||
FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote()
|
||||
require.NoError(t, err)
|
||||
defer finaliseCopy()
|
||||
t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = CopyDir(ctx, FremoteCopy, r.Fremote, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
fstest.CheckItems(t, FremoteCopy, file1)
|
||||
|
||||
file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2)
|
||||
r.CheckRemoteItems(t, file2)
|
||||
|
||||
// ctx = predictDstFromLogger(ctx)
|
||||
err = MoveDir(ctx, FremoteCopy, r.Fremote, false, false)
|
||||
require.NoError(t, err)
|
||||
// testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // not currently supported
|
||||
r.CheckRemoteItems(t)
|
||||
fstest.CheckItems(t, FremoteCopy, file2)
|
||||
|
||||
// check that individual file moves also work without MoveDir
|
||||
file3 := r.WriteObject(ctx, "sub dir/hello world", "hello world a third time", t3)
|
||||
r.CheckRemoteItems(t, file3)
|
||||
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
fs.Debugf(nil, "testing file moves")
|
||||
err = moveDir(ctx, FremoteCopy, r.Fremote, false, false)
|
||||
require.NoError(t, err)
|
||||
testLoggerVsLsf(ctx, FremoteCopy, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
r.CheckRemoteItems(t)
|
||||
fstest.CheckItems(t, FremoteCopy, file3)
|
||||
}
|
||||
|
||||
// Check that if the local file doesn't exist when we copy it up,
|
||||
// nothing happens to the remote file
|
||||
func TestCopyAfterDelete(t *testing.T) {
|
||||
|
@ -2422,19 +2320,15 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
|
|||
|
||||
r.CheckRemoteItems(t, file1b, file2, file3a, file1a)
|
||||
}
|
||||
|
||||
func TestSyncBackupDir(t *testing.T) {
|
||||
testSyncBackupDir(t, "backup", "", false)
|
||||
}
|
||||
|
||||
func TestSyncBackupDirWithSuffix(t *testing.T) {
|
||||
testSyncBackupDir(t, "backup", ".bak", false)
|
||||
}
|
||||
|
||||
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
|
||||
testSyncBackupDir(t, "backup", "-2019-01-01", true)
|
||||
}
|
||||
|
||||
func TestSyncBackupDirSuffixOnly(t *testing.T) {
|
||||
testSyncBackupDir(t, "", ".bak", false)
|
||||
}
|
||||
|
@ -2912,7 +2806,7 @@ func predictDstFromLogger(ctx context.Context) context.Context {
|
|||
}
|
||||
|
||||
func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer {
|
||||
opt := operations.ListJSONOpt{
|
||||
var opt = operations.ListJSONOpt{
|
||||
NoModTime: false,
|
||||
NoMimeType: true,
|
||||
DirsOnly: false,
|
||||
|
|
|
@ -1,4 +1,4 @@

package fs

// VersionTag of rclone
var VersionTag = "v1.69.0"
var VersionTag = "v1.68.2"
@ -6,7 +6,6 @@ import (

	"fmt"
	"os"
	"path"
	"slices"

	"github.com/rclone/rclone/fs"
	yaml "gopkg.in/yaml.v2"

@ -36,7 +35,6 @@ type Backend struct {

	CleanUp     bool     // when running clean, run cleanup first
	Ignore      []string // test names to ignore the failure of
	Tests       []string // paths of tests to run, blank for all
	IgnoreTests []string // paths of tests not to run, blank for none
	ListRetries int      // -list-retries if > 0
	ExtraTime   float64  // factor to multiply the timeout by
}

@ -44,15 +42,15 @@

// includeTest returns true if this backend should be included in this
// test
func (b *Backend) includeTest(t *Test) bool {
	// Is this test ignored
	if slices.Contains(b.IgnoreTests, t.Path) {
		return false
	}
	// Empty b.Tests imples do all of them except the ignored
	if len(b.Tests) == 0 {
		return true
	}
	return slices.Contains(b.Tests, t.Path)
	for _, testPath := range b.Tests {
		if testPath == t.Path {
			return true
		}
	}
	return false
}

// MakeRuns creates Run objects the Backend and Test
@ -395,10 +395,6 @@ backends:

  - backend: "cache"
    remote: "TestCache:"
    fastlist: false
    ignoretests:
      - TestBisyncLocalRemote
      - TestBisyncRemoteLocal
      - TestBisyncRemoteRemote
  - backend: "mega"
    remote: "TestMega:"
    fastlist: false
114 go.mod
@ -1,9 +1,11 @@
|
|||
module github.com/rclone/rclone
|
||||
|
||||
go 1.21
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241218062344-42a0fc8c13ae
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
|
||||
|
@ -11,6 +13,7 @@ require (
|
|||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.34
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e
|
||||
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
|
||||
github.com/abbot/go-http-auth v0.4.0
|
||||
|
@ -35,6 +38,7 @@ require (
|
|||
github.com/go-chi/chi/v5 v5.1.0
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
|
||||
github.com/go-git/go-billy/v5 v5.5.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hanwen/go-fuse/v2 v2.5.1
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0
|
||||
|
@ -52,14 +56,16 @@ require (
|
|||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/ncw/swift/v2 v2.0.3
|
||||
github.com/nspcc-dev/neo-go v0.107.2
|
||||
github.com/oracle/oci-go-sdk/v65 v65.69.2
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/sftp v1.13.6
|
||||
github.com/pkg/xattr v0.4.10
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||
github.com/prometheus/client_golang v1.19.1
|
||||
github.com/prometheus/client_golang v1.20.2
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22
|
||||
github.com/rclone/gofakes3 v0.0.3-0.20240807151802-e80146f8de87
|
||||
github.com/rclone/gofakes3 v0.0.3
|
||||
github.com/rfjakob/eme v1.1.2
|
||||
github.com/rivo/uniseg v0.4.7
|
||||
github.com/rogpeppe/go-internal v1.12.0
|
||||
|
@ -76,14 +82,17 @@ require (
|
|||
github.com/xanzy/ssh-agent v0.3.3
|
||||
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0
|
||||
go.etcd.io/bbolt v1.3.10
|
||||
go.etcd.io/bbolt v1.3.11
|
||||
goftp.io/server/v2 v2.0.1
|
||||
golang.org/x/crypto v0.25.0
|
||||
golang.org/x/net v0.27.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028
|
||||
golang.org/x/net v0.28.0
|
||||
golang.org/x/oauth2 v0.21.0
|
||||
golang.org/x/sync v0.8.0
|
||||
golang.org/x/sys v0.22.0
|
||||
golang.org/x/text v0.17.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/term v0.27.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/time v0.5.0
|
||||
google.golang.org/api v0.188.0
|
||||
gopkg.in/validator.v2 v2.0.1
|
||||
|
@ -95,17 +104,25 @@ require (
|
|||
cloud.google.com/go/auth v0.7.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.4.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||
github.com/ProtonMail/go-srp v0.0.7 // indirect
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4 // indirect
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.3 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.8.1 // indirect
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
|
||||
github.com/akavel/rsrc v0.10.2 // indirect
|
||||
github.com/anacrolix/generics v0.0.1 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
|
||||
|
@ -121,81 +138,101 @@ require (
|
|||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bradenaw/juniper v0.15.2 // indirect
|
||||
github.com/bradenaw/juniper v0.13.1 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
github.com/calebcase/tmpfile v1.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/cloudflare/circl v1.3.3 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/cronokirby/saferith v0.33.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emersion/go-message v0.18.0 // indirect
|
||||
github.com/emersion/go-message v0.17.0 // indirect
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/flynn/noise v1.0.1 // indirect
|
||||
github.com/flynn/noise v1.0.0 // indirect
|
||||
github.com/gdamore/encoding v1.0.0 // indirect
|
||||
github.com/geoffgarside/ber v1.1.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-resty/resty/v2 v2.11.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-resty/resty/v2 v2.7.0 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.1 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.3 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/holiman/uint256 v1.3.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
|
||||
github.com/jcmturner/gofork v1.7.6 // indirect
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lpar/date v1.0.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240830112754-291b000d1f3b // indirect
|
||||
github.com/nspcc-dev/hrw/v2 v2.0.2 // indirect
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20241212130705-ea0a6114d2d6 // indirect
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.14.1-0.20240827150555-5ce597aa14ea // indirect
|
||||
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.12.0.20241205083504-335d9fe90f24 // indirect
|
||||
github.com/nspcc-dev/rfc6979 v0.2.3 // indirect
|
||||
github.com/nspcc-dev/tzhash v1.8.2 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/panjf2000/ants/v2 v2.9.1 // indirect
|
||||
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/prometheus/common v0.48.0 // indirect
|
||||
github.com/prometheus/procfs v0.12.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect
|
||||
github.com/relvacode/iso8601 v1.3.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
|
||||
github.com/samber/lo v1.47.0 // indirect
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
|
||||
github.com/samber/lo v1.39.0 // indirect
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sony/gobreaker v0.5.0 // indirect
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.13 // indirect
|
||||
github.com/tklauser/numcpus v0.7.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twmb/murmur3 v1.1.8 // indirect
|
||||
github.com/urfave/cli/v2 v2.27.4 // indirect
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/blake3 v0.2.3 // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
|
@ -204,12 +241,11 @@ require (
|
|||
go.opentelemetry.io/otel v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/tools v0.24.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
|
||||
google.golang.org/grpc v1.64.1 // indirect
|
||||
google.golang.org/grpc v1.66.2 // indirect
|
||||
google.golang.org/protobuf v1.34.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
moul.io/http2curl/v2 v2.3.0 // indirect
|
||||
|
@ -219,13 +255,3 @@ require (
|
|||
storj.io/infectious v0.0.2 // indirect
|
||||
storj.io/picobuf v0.0.3 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.0.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/pkg/xattr v0.4.9
|
||||
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6
|
||||
golang.org/x/term v0.22.0
|
||||
)
|
||||
|
|
300 go.sum
@ -38,7 +38,23 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
|
|||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241218062344-42a0fc8c13ae h1:7gvuOTmS3oaOM79JkHWWlsvGqIRqsum5KnOI1TYqfn0=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241218062344-42a0fc8c13ae/go.mod h1:dbWUc5jOBTXVvssCLCYxkkSTL9jgLr1KruGP2FMAfiM=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
|
||||
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
|
||||
|
@ -51,6 +67,8 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt
|
|||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.2.2 h1:PmDhkIT8S5U4nkY/s78Xmf7CXT8qCliNEBhbrkBp3Q0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.2.2/go.mod h1:Kj2pCkQ47klX1aAlDnlN/BUvwBiARqIJkc9iw1Up7q8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
|
||||
|
@ -64,8 +82,8 @@ github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYr
|
|||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20210511135022-227b4adcab57/go.mod h1:HecWFHognK8GfRDGnFQbW/LiV7A3MX3gZVs45vk5h8I=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs69zUkSzubzjBbL+cmOXgnmt9Fyd9ug=
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
||||
|
@ -73,16 +91,18 @@ github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcM
|
|||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
|
||||
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
|
||||
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4 h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF21O1mRlo=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.4/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.3 h1:AJu1OI/1UWVYZl6QcCLKGu9OTngS2r52618uGlje84I=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.3/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g=
|
||||
github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
|
||||
github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ=
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e h1:KMVieI1/Ub++GYfnhyFPoGE3g5TUiG4srE3TMGr5nM4=
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e/go.mod h1:j5astEcUkZQX8lK+KKlQ3NRQ50f4EE8ZjyZpCz3mrH4=
|
||||
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs=
|
||||
|
@ -100,6 +120,8 @@ github.com/anacrolix/log v0.15.2/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5L
|
|||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
|
@ -144,8 +166,10 @@ github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
|
|||
github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradenaw/juniper v0.15.2 h1:0JdjBGEF2jP1pOxmlNIrPhAoQN7Ng5IMAY5D0PHMW4U=
|
||||
github.com/bradenaw/juniper v0.15.2/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
|
||||
github.com/bits-and-blooms/bitset v1.14.2 h1:YXVoyPndbdvcEVcseEovVfp0qjJp7S+i5+xgp/Nfbdc=
|
||||
github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bradenaw/juniper v0.13.1 h1:9P7/xeaYuEyqPuJHSHCJoisWyPvZH4FAi59BxJLh7F8=
|
||||
github.com/bradenaw/juniper v0.13.1/go.mod h1:Z2B7aJlQ7xbfWsnMLROj5t/5FQ94/MkIdKC30J4WvzI=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og=
|
||||
github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU=
|
||||
|
@ -158,9 +182,11 @@ github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3z
|
|||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo=
|
||||
github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 h1:z0uK8UQqjMVYzvk4tiiu3obv2B44+XBsvgEJREQfnO8=
|
||||
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9/go.mod h1:Jl2neWsQaDanWORdqZ4emBl50J4/aRBBS4FyyG9/PFo=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
|
@ -168,9 +194,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
|||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
|
||||
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20231124195312-f3ec8ae2c891 h1:nPP4suUiNage0vvyEBgfAnhTPwwXhNqtHmSuiCIQwKU=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20231124195312-f3ec8ae2c891/go.mod h1:xFxVVe3plxwhM+6BgTTPByEgG8hggo8+gtRUkbc5W8Q=
|
||||
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
@@ -180,10 +205,22 @@ github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQ
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9W0=
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0/go.mod h1:0NAO+/3knbMx6+5pCv+Hcbaz4xn/Zzbn9+WIib2rKVI=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
|
||||
github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
|
||||
github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
|
||||
github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cronokirby/saferith v0.33.0 h1:TgoQlfsD4LIwx71+ChfRcIpjkw+RPOapDEVxa+LhwLo=
@@ -192,8 +229,18 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
|
||||
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 h1:xJBhC00smQpSZw3Kr0ErMUBXhUSjYoLRm2szxdbRBL0=
|
||||
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00/go.mod h1:nNICngOdmNImBb/vuL+dSc0aIg3ryNATpjxThNoPw4g=
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU=
@@ -203,8 +250,8 @@ github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq4
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
|
||||
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
|
||||
github.com/emersion/go-message v0.17.0 h1:NIdSKHiVUx4qKqdd0HyJFD41cW8iFguM2XJnRZWQH04=
|
||||
github.com/emersion/go-message v0.17.0/go.mod h1:/9Bazlb1jwUNB0npYYBsdJ2EMOiiyN3m5UVHbY7GoNw=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594/go.mod h1:aqO8z8wPrjkscevZJFVE1wXJrLpC5LtJG7fqLOsPb2U=
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 h1:ATgqloALX6cHCranzkLb8/zjivwQ9DWWDCQRnxTPfaA=
@@ -217,10 +264,10 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/flynn/noise v1.0.1 h1:vPp/jdQLXC6ppsXSj/pM3W1BIJ5FEHE2TulSJBpb43Y=
|
||||
github.com/flynn/noise v1.0.1/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
||||
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
@@ -253,17 +300,16 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
|
||||
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
|
||||
github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8=
|
||||
github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -275,8 +321,8 @@ github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -308,6 +354,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -318,6 +366,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -329,8 +378,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7 h1:velgFPYr1X9TDwLIfkV7fWqsFlf7TeP11M/7kPd/dVI=
|
||||
github.com/google/pprof v0.0.0-20240509144519-723abb6459b7/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
|
||||
github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
||||
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@@ -343,18 +392,18 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
|
||||
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hanwen/go-fuse/v2 v2.5.1 h1:OQBE8zVemSocRxA4OaFJbjJ5hlpCmIWbGr7r0M4uoQQ=
|
||||
github.com/hanwen/go-fuse/v2 v2.5.1/go.mod h1:xKwi1cF7nXAOBCXujD5ie0ZKsxc8GGSA1rlMJc+8IJs=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
@@ -374,6 +423,8 @@ github.com/henrybear327/Proton-API-Bridge v1.0.0 h1:gjKAaWfKu++77WsZTHg6FUyPC5W0
github.com/henrybear327/Proton-API-Bridge v1.0.0/go.mod h1:gunH16hf6U74W2b9CGDaWRadiLICsoJ6KRkSt53zLts=
|
||||
github.com/henrybear327/go-proton-api v1.0.0 h1:zYi/IbjLwFAW7ltCeqXneUGJey0TN//Xo851a/BgLXw=
|
||||
github.com/henrybear327/go-proton-api v1.0.0/go.mod h1:w63MZuzufKcIZ93pwRgiOtxMXYafI8H74D77AxytOBc=
|
||||
github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=
|
||||
github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -395,14 +446,16 @@ github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg=
github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI=
|
||||
github.com/josephspurrier/goversioninfo v1.4.0 h1:Puhl12NSHUSALHSuzYwPYQkqa2E1+7SrtAPJorKK0C8=
|
||||
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 h1:JcltaO1HXM5S2KYOYcKgAV7slU0xPy1OcvrVgn98sRQ=
|
||||
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7/go.mod h1:MEkhEPFwP3yudWO0lj6vfYpLIB+3eIcuIW+e0AZzUQk=
|
||||
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8 h1:+A1uT26XjTsxiUUZjAAuveILWWy+Sy2TPX8OIgGvPQE=
|
||||
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8/go.mod h1:f0ijQHcvHYAuxX6JA/JUr/Z0FVn12D9REaT/HAWVgP4=
|
||||
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
|
||||
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -437,8 +490,12 @@ github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I=
github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0=
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -454,20 +511,57 @@ github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
|
||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
|
||||
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
|
||||
github.com/nspcc-dev/dbft v0.3.1 h1:3qoc65CVJMtdL/627JZH+1Jz839LmdsVN52L4mlP5z8=
|
||||
github.com/nspcc-dev/dbft v0.3.1/go.mod h1:BNvJkPKTE28r+qRaAk2C3VoL2J9qzox3fvEeJbh7EWE=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240830112754-291b000d1f3b h1:DRG4cRqIOmI/nUPggMgR92Jxt63Lxsuz40m5QpdvYXI=
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240830112754-291b000d1f3b/go.mod h1:d3cUseu4Asxfo9/QA/w4TtGjM0AbC9ynyab+PfH+Bso=
|
||||
github.com/nspcc-dev/hrw/v2 v2.0.2 h1:Vuc2Yu96MCv1YDUjErMuCt5tq+g/43/Y89u/XfyLkRI=
|
||||
github.com/nspcc-dev/hrw/v2 v2.0.2/go.mod h1:XRsG20axGJfr0Ytcau/UcZ/9NF54RmUIqmoYKuuliSo=
|
||||
github.com/nspcc-dev/neo-go v0.107.2 h1:BKKa+5qOrSPVYcLFyO0uUcAHh5lh9hhBIlqWdtMwbWQ=
|
||||
github.com/nspcc-dev/neo-go v0.107.2/go.mod h1:Lzh/ZA2Goco2bmsoC9RtrrbZIA6xC943wmand20RMkY=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20241212130705-ea0a6114d2d6 h1:rTnsU+Y/bP1bLN/SNWmOKEexmSeniMQe5bOJxXNbXgg=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20241212130705-ea0a6114d2d6/go.mod h1:kVLzmbeJJdbIPF2bUYhD8YppIiLXnRQj5yqNZvzbOL0=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.14.1-0.20240827150555-5ce597aa14ea h1:mK0EMGLvunXcFyq7fBURS/CsN4MH+4nlYiqn6pTwWAU=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.14.1-0.20240827150555-5ce597aa14ea/go.mod h1:YzhD4EZmC9Z/PNyd7ysC7WXgIgURc9uCG1UWDeV027Y=
|
||||
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.12.0.20241205083504-335d9fe90f24 h1:+6KYoXnhs6LfGnn5f+4puuOj3M3MeofBw9iQn7LFG04=
|
||||
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.12.0.20241205083504-335d9fe90f24/go.mod h1:INZZXiTr9L7gWFeg3RBuB1laH2h9+vnomvg1XE42zQU=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.3 h1:QNVykGZ3XjFwM/88rGfV3oj4rKNBy+nYI6jM7q19hDI=
|
||||
github.com/nspcc-dev/rfc6979 v0.2.3/go.mod h1:q3sCL1Ed7homjqYK8KmFSzEmm+7Ngyo7PePbZanhaDE=
|
||||
github.com/nspcc-dev/tzhash v1.8.2 h1:ebRCbPoEuoqrhC6sSZmrT/jI3h1SzCWakxxV6gp5QAg=
|
||||
github.com/nspcc-dev/tzhash v1.8.2/go.mod h1:SFwvvB1KyKm45vdWpcOCFpklkUEsXtddnHsk+zq298g=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU=
@@ -476,6 +570,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.69.2 h1:lROMJ8/VakGOGObAWUxTVY2AX1wQCUIzVqfL4Fb2Ay8=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.69.2/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
|
||||
github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8Kw=
@@ -486,6 +584,8 @@ github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
|
||||
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
|
||||
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28=
@@ -493,22 +593,22 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
|
||||
github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
|
||||
github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE=
|
||||
github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
|
||||
github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA=
|
||||
github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
|
||||
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
|
||||
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
@@ -519,8 +619,8 @@ github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1
github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
|
||||
github.com/rclone/gofakes3 v0.0.3-0.20240807151802-e80146f8de87 h1:0YRo2aYhE+SCZsjWYMFe8zLD18xieXy7wQ8M9Ywcr/g=
|
||||
github.com/rclone/gofakes3 v0.0.3-0.20240807151802-e80146f8de87/go.mod h1:z7+o2VUwitO0WuVHReQlOW9jZ03LpeJ0PUFSULyTIds=
|
||||
github.com/rclone/gofakes3 v0.0.3 h1:0sKCxJ8TUUAG5KXGuc/fcDKGnzB/j6IjNQui9ntIZPo=
|
||||
github.com/rclone/gofakes3 v0.0.3/go.mod h1:z7+o2VUwitO0WuVHReQlOW9jZ03LpeJ0PUFSULyTIds=
|
||||
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
|
||||
github.com/relvacode/iso8601 v1.3.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
@@ -540,11 +640,11 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI=
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
|
||||
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
|
||||
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
|
||||
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
|
||||
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI=
|
||||
github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk=
|
||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
@@ -591,21 +691,29 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
|
||||
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
|
||||
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
|
||||
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
|
||||
github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
|
||||
github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
|
||||
github.com/testcontainers/testcontainers-go v0.33.0 h1:zJS9PfXYT5O0ZFXM2xxXfk4J5UMw/kRiISng037Gxdw=
|
||||
github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
|
||||
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
|
||||
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/unknwon/goconfig v1.0.0 h1:rS7O+CmUdli1T+oDm7fYj1MwqNWtEJfNj+FqcUHML8U=
|
||||
github.com/unknwon/goconfig v1.0.0/go.mod h1:qu2ZQ/wcC/if2u32263HTVC39PeOQRSmidQk3DuDFQ8=
|
||||
github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
|
||||
github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
|
||||
github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9 h1:IGSoH2aBagQ9VI8ZwbjHYIslta5vXfczegV1B4y9KqY=
|
||||
github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9/go.mod h1:Ql2ebUpEFm/a1CAY884di2XZkdcddfHZ6ONrAlhFev0=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o=
@@ -614,6 +722,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0 h1:j3un8DqYvvAOqK
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
|
||||
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
|
||||
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -634,8 +744,8 @@ github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -651,10 +761,14 @@ go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGX
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
|
||||
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
|
||||
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo=
|
||||
go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
goftp.io/server/v2 v2.0.1 h1:H+9UbCX2N206ePDSVNCjBftOKOgil6kQ5RAQNx5hJwE=
|
||||
goftp.io/server/v2 v2.0.1/go.mod h1:7+H/EIq7tXdfo1Muu5p+l3oQ6rYkDZ8lY7IM5d5kVdQ=
|
||||
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
@@ -674,9 +788,8 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
|
||||
golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -687,8 +800,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
|
||||
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -702,9 +815,8 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6 h1:/VlmIrkuLf2wzPjkZ8imSpckHoW7Y71h66dxbLHSpi8=
|
||||
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6/go.mod h1:TCsc78+c4cqb8IKEosz2LwJ6YRNkIjMuAYeHYjchGDE=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -714,8 +826,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0=
|
||||
golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -744,11 +856,13 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
@@ -757,10 +871,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -782,8 +894,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -814,8 +926,10 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -825,6 +939,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -837,10 +952,10 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -848,11 +963,9 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -864,14 +977,13 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -922,8 +1034,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24=
|
||||
golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -981,8 +1093,9 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
|
||||
google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
|
@ -998,8 +1111,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
|
|||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
|
||||
google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
|
||||
google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo=
|
||||
google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1029,6 +1142,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
@ -1043,6 +1157,8 @@ moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHc
|
|||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
storj.io/common v0.0.0-20240812101423-26b53789c348 h1:Urs3fX+1Fyb+CFKGw0mCJV3MPR499WM+Vs6osw4Rqtk=
|
||||
storj.io/common v0.0.0-20240812101423-26b53789c348/go.mod h1:XMpwKxc04HCBl4H5IFCGv1ca5Dm0tvH4NL7Jx+JhxuA=
|
||||
storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 h1:hLvEV2RMTscX3JHPd+LSQCeTt8i1Q0Yt7U2EdfyMnaQ=
|
||||
|
16
lib/cache/cache.go
vendored
@@ -260,19 +260,3 @@ func (c *Cache) SetFinalizer(finalize func(interface{})) {
    c.finalize = finalize
    c.mu.Unlock()
}

// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache
//
// Each entry is counted only once, regardless of entry.pinCount
func (c *Cache) EntriesWithPinCount() (pinned, unpinned int) {
    c.mu.Lock()
    for _, entry := range c.cache {
        if entry.pinCount <= 0 {
            unpinned++
        } else {
            pinned++
        }
    }
    c.mu.Unlock()
    return pinned, unpinned
}
@@ -4,10 +4,10 @@ package exitcode
const (
    // Success is returned when rclone finished without error.
    Success = iota
    // UncategorizedError is returned for any error not categorised otherwise.
    UncategorizedError
    // UsageError is returned when there was a syntax or usage error in the arguments.
    UsageError
    // UncategorizedError is returned for any error not categorised otherwise.
    UncategorizedError
    // DirNotFound is returned when a source or destination directory is not found.
    DirNotFound
    // FileNotFound is returned when a source or destination file is not found.
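Note on the exitcode hunk above: these values come from iota, so an error's numeric exit code is purely positional, and swapping UsageError and UncategorizedError changes which number each of them reports to the shell. A minimal standalone sketch of that mechanism (illustrative only, not part of this diff; the ordering shown here is an assumption for demonstration):

package main

import "fmt"

// With iota, the position in the list is the exit code handed back to the
// shell, so reordering entries reorders the codes. This ordering is assumed
// purely for illustration.
const (
    Success            = iota // 0 - finished without error
    UsageError                // 1 - syntax or usage error in the arguments
    UncategorizedError        // 2 - any error not categorised otherwise
    DirNotFound               // 3 - source or destination directory not found
    FileNotFound              // 4 - source or destination file not found
)

func main() {
    fmt.Println(Success, UsageError, UncategorizedError, DirNotFound, FileNotFound) // prints: 0 1 2 3 4
}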
@@ -17,7 +17,8 @@ import (
)

const (
    BufferSize = 1024 * 1024 // BufferSize is the default size of the pages used in the reader
    // BufferSize is the default size of the pages used in the reader
    BufferSize           = 1024 * 1024
    bufferCacheSize      = 64              // max number of buffers to keep in cache
    bufferCacheFlushTime = 5 * time.Second // flush the cached buffers after this long
)
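The constants in the hunk above describe a small page-buffer cache: 1 MiB pages, at most 64 buffers kept for reuse, and a flush of idle buffers after 5 seconds. A rough, hypothetical sketch of what such a bounded, idle-flushed pool can look like (names and structure invented for illustration; this is not rclone's implementation):

package bufcache

import (
    "sync"
    "time"
)

const (
    bufferSize     = 1024 * 1024     // size of each page
    cacheSize      = 64              // max buffers kept for reuse
    cacheFlushTime = 5 * time.Second // drop cached buffers after this long idle
)

// Pool is a hypothetical bounded buffer cache with idle flushing.
type Pool struct {
    mu    sync.Mutex
    free  [][]byte
    timer *time.Timer
}

// Get returns a cached page if one is available, otherwise allocates a new one.
func (p *Pool) Get() []byte {
    p.mu.Lock()
    defer p.mu.Unlock()
    if n := len(p.free); n > 0 {
        buf := p.free[n-1]
        p.free = p.free[:n-1]
        return buf
    }
    return make([]byte, bufferSize)
}

// Put returns a page for reuse (up to cacheSize) and (re)arms the idle flush timer.
func (p *Pool) Put(buf []byte) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if len(p.free) < cacheSize {
        p.free = append(p.free, buf)
    }
    if p.timer == nil {
        p.timer = time.AfterFunc(cacheFlushTime, p.flush)
    } else {
        p.timer.Reset(cacheFlushTime)
    }
}

// flush drops all cached pages once the pool has been idle for cacheFlushTime.
func (p *Pool) flush() {
    p.mu.Lock()
    p.free = nil
    p.mu.Unlock()
}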
61
vfs/dir.go
@@ -40,7 +40,7 @@ type Dir struct {
    modTimeMu sync.Mutex // protects the following
    modTime   time.Time

    _virtuals   atomic.Int32 // number of virtual directory entries in this directory and children
    _hasVirtual atomic.Bool  // shows if the directory has virtual entries
}

//go:generate stringer -type=vState
@@ -67,6 +67,7 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
        items: make(map[string]Node),
    }
    d.cleanupTimer = time.AfterFunc(time.Duration(vfs.Opt.DirCacheTime*2), d.cacheCleanup)
    d.setHasVirtual(false)
    return d
}

@@ -197,60 +198,58 @@ func (d *Dir) Node() Node {
    return d
}

// hasVirtual returns whether the directory or children has virtual entries
// hasVirtual returns whether the directory has virtual entries
func (d *Dir) hasVirtual() bool {
    return d._virtuals.Load() != 0
    return d._hasVirtual.Load()
}

// addVirtual adds n virtual items to this directory and all of its parents
func (d *Dir) addVirtual(n int32) {
    for d != nil {
        d._virtuals.Add(n)
        d = d.parent
    }
// setHasVirtual sets the hasVirtual flag for the directory
func (d *Dir) setHasVirtual(hasVirtual bool) {
    d._hasVirtual.Store(hasVirtual)
}

// ForgetAll forgets directory entries for this directory and any children.
//
// It does not invalidate or clear the cache of the parent directory.
//
// It returns true if the directory or any of its children had virtual
// entries so could not be forgotten. Children which didn't have
// virtual entries will be forgotten even if true is returned.
// It returns true if the directory or any of its children had virtual entries
// so could not be forgotten. Children which didn't have virtual entries and
// children with virtual entries will be forgotten even if true is returned.
func (d *Dir) ForgetAll() (hasVirtual bool) {
    // We run this part with RLock only to avoid deadlocks in the recursion
    d.mu.RLock()
    fs.Debugf(d.path, "forgetting directory cache")
    for _, node := range d.items {
        if dir, ok := node.(*Dir); ok {
            dir.ForgetAll()
            if dir.ForgetAll() {
                d.setHasVirtual(true)
            }
        }
    }
    d.mu.RUnlock()

    // We run this part with Lock so we can modify the Dir
    d.mu.Lock()
    defer d.mu.Unlock()

    // Purge any unnecessary virtual entries
    d._purgeVirtual()

    // Don't clear directory entries if there are virtual entries in this
    // directory or any children
    hasVirtual = d.hasVirtual()
    if !hasVirtual {
        d.read = time.Time{}
        d.items = make(map[string]Node)
        d.cleanupTimer.Stop()
    } else {
        d.cleanupTimer.Reset(time.Duration(d.vfs.Opt.DirCacheTime * 2))

    // Check if this dir has virtual entries
    if len(d.virtual) != 0 {
        d.setHasVirtual(true)
    }

    return hasVirtual
    // Don't clear directory entries if there are virtual entries in this
    // directory or any children
    if !d.hasVirtual() {
        d.items = make(map[string]Node)
        d.cleanupTimer.Stop()
    }

    return d.hasVirtual()
}

// forgetDirPath clears the cache for itself and all subdirectories if
@@ -449,10 +448,8 @@ func (d *Dir) addObject(node Node) {
    if node.IsDir() {
        vAdd = vAddDir
    }
    if _, found := d.virtual[leaf]; !found {
        d.addVirtual(1)
    }
    d.virtual[leaf] = vAdd
    d.setHasVirtual(true)
    fs.Debugf(d.path, "Added virtual directory entry %v: %q", vAdd, leaf)
    d.mu.Unlock()
}
@@ -495,10 +492,8 @@ func (d *Dir) delObject(leaf string) {
    if d.virtual == nil {
        d.virtual = make(map[string]vState)
    }
    if _, found := d.virtual[leaf]; !found {
        d.addVirtual(1)
    }
    d.virtual[leaf] = vDel
    d.setHasVirtual(true)
    fs.Debugf(d.path, "Added virtual directory entry %v: %q", vDel, leaf)
    d.mu.Unlock()
}
@@ -585,9 +580,9 @@ func (d *Dir) _deleteVirtual(name string) {
        return
    }
    delete(d.virtual, name)
    d.addVirtual(-1)
    if len(d.virtual) == 0 {
        d.virtual = nil
        d.setHasVirtual(false)
    }
    fs.Debugf(d.path, "Removed virtual directory entry %v: %q", virtualState, name)
}
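The vfs/dir.go hunks above trade one bookkeeping scheme for another: an atomic counter (_virtuals) that addVirtual propagates to every ancestor on each change, versus a per-directory atomic flag (_hasVirtual) that ForgetAll aggregates from its children as it recurses. A stripped-down, hypothetical sketch of the two shapes (simplified types and names, not rclone's actual code):

package main

import (
    "fmt"
    "sync/atomic"
)

// counterDir propagates a count of virtual entries to every ancestor eagerly.
type counterDir struct {
    parent   *counterDir
    virtuals atomic.Int32
}

// addVirtual walks up the parent chain, adjusting each ancestor's count.
func (d *counterDir) addVirtual(n int32) {
    for p := d; p != nil; p = p.parent {
        p.virtuals.Add(n)
    }
}

func (d *counterDir) hasVirtual() bool { return d.virtuals.Load() != 0 }

// flagDir keeps a local flag; parents learn about children only during a recursive walk.
type flagDir struct {
    children   []*flagDir
    hasVirtual atomic.Bool
}

// forgetAll reports whether this directory or any child still has virtual entries.
func (d *flagDir) forgetAll() bool {
    for _, c := range d.children {
        if c.forgetAll() {
            d.hasVirtual.Store(true)
        }
    }
    return d.hasVirtual.Load()
}

func main() {
    root := &counterDir{}
    child := &counterDir{parent: root}
    child.addVirtual(1)
    fmt.Println(root.hasVirtual(), child.hasVirtual()) // true true

    fr := &flagDir{}
    fc := &flagDir{}
    fr.children = []*flagDir{fc}
    fc.hasVirtual.Store(true)
    fmt.Println(fr.forgetAll()) // true
}

The counter answers the question cheaply at any time but costs a walk up the tree on every change; the flag is cheap to update locally but only becomes accurate for a whole subtree after a recursive pass such as ForgetAll.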
@@ -607,51 +607,3 @@ func TestDirRename(t *testing.T) {
func TestDirStructSize(t *testing.T) {
    t.Logf("Dir struct has size %d bytes", unsafe.Sizeof(Dir{}))
}

// Check that open files appear in the directory listing properly after a forget
func TestDirFileOpen(t *testing.T) {
    _, vfs, dir, _ := dirCreate(t)

    assert.False(t, dir.hasVirtual())
    assert.False(t, dir.parent.hasVirtual())

    _, err := dir.Mkdir("sub")
    require.NoError(t, err)

    assert.True(t, dir.hasVirtual())
    assert.True(t, dir.parent.hasVirtual())

    fd0, err := vfs.Create("dir/sub/file0")
    require.NoError(t, err)
    _, err = fd0.Write([]byte("hello"))
    require.NoError(t, err)
    defer func() {
        require.NoError(t, fd0.Close())
    }()

    fd2, err := vfs.Create("dir/sub/file2")
    require.NoError(t, err)
    _, err = fd2.Write([]byte("hello world!"))
    require.NoError(t, err)
    require.NoError(t, fd2.Close())
    assert.True(t, dir.hasVirtual())

    assert.True(t, dir.hasVirtual())
    assert.True(t, dir.parent.hasVirtual())

    // Now forget the directory
    hasVirtual := dir.parent.ForgetAll()
    assert.True(t, hasVirtual)

    assert.True(t, dir.hasVirtual())
    assert.True(t, dir.parent.hasVirtual())

    // Check the files can still be found
    fi, err := vfs.Stat("dir/sub/file0")
    require.NoError(t, err)
    assert.Equal(t, int64(5), fi.Size())

    fi, err = vfs.Stat("dir/sub/file2")
    require.NoError(t, err)
    assert.Equal(t, int64(12), fi.Size())
}