forked from TrueCloudLab/frostfs-s3-gw

Compare commits: master...aarifullin (49 commits)

Commit SHA1s:
2b0f645275, 499f4c6495, 2cbe3b9a27, c588d485fa, 3927223bb0, eba85b50b6, 043447600e,
0cd353707a, f74ab12f91, dea7b39805, 9df8695463, 614d703726, 23593eee3d, dfc4476afd,
84358f6742, 7a380fa46c, 0590f84d68, 751d66bde0, 462589fc0c, 8fcaf76f41, 19c89b38e6,
0bcda6ea37, 9dabaf6ecd, 840d457cb9, c2fae4c199, 9e0c39dcc2, e4b1d07185, 3b5fcc3c8a,
c75add64ec, acb6e8cbca, 4e1fd9589b, bd898ad59e, 43bee561cf, 4a6e3a19ce, b445f7bbf9,
868edfdb31, 34bbbcf1ed, fae03c2b50, 81e860481d, e97fea30ea, cfc94e39ef, ce9294685c,
a0f0d792b8, 43e336e155, 9f186d9aba, 11f30a037b, 136a186c14, 24390fdec8, 1fdbfb0dab
106 changed files with 7773 additions and 2751 deletions
20  .forgejo/workflows/builds.yml  (new file)

@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.19', '1.20' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make
20  .forgejo/workflows/dco.yml  (new file)

@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.20'

      - name: Run commit format checker
        uses: https://git.alexvan.in/alexvanin/dco-go@v1
        with:
          from: 3fbad97a
34  .forgejo/workflows/tests.yml  (new file)

@@ -0,0 +1,34 @@
on: [pull_request]

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: golangci-lint
        uses: https://github.com/golangci/golangci-lint-action@v2
        with:
          version: latest

  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.19', '1.20' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test
21  .forgejo/workflows/vulncheck.yml  (new file)

@@ -0,0 +1,21 @@
on: [pull_request]

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.20'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Run govulncheck
        run: govulncheck ./...
2  .github/CODEOWNERS  (vendored)

@@ -1 +1 @@
-* @alexvanin @KirillovDenis
+* @alexvanin @dkirillov
73  .github/workflows/builds.yml  (vendored, deleted)

@@ -1,73 +0,0 @@
name: Builds

on:
  pull_request:
    branches:
      - master
      - 'support/*'
    types: [ opened, synchronize ]
    paths-ignore:
      - '**/*.md'

jobs:
  build_cli:
    name: Build CLI
    runs-on: ubuntu-20.04

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Get tree-service client
        run: make sync-tree

      - name: Update Go modules
        run: make dep

      - name: Build CLI
        run: make

      - name: Check version
        run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi

  build_image:
    needs: build_cli
    name: Build Docker image
    runs-on: ubuntu-20.04

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Get tree-service client
        run: make sync-tree

      - name: Update Go modules
        run: make dep

      - name: Build Docker image
        run: make image
67  .github/workflows/codeql-analysis.yml  (vendored, deleted)

@@ -1,67 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master, 'support/*' ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master, 'support/*' ]
  schedule:
    - cron: '35 8 * * 1'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
22  .github/workflows/dco.yml  (vendored, deleted)

@@ -1,22 +0,0 @@
name: DCO check

on:
  pull_request:
    branches:
      - master
      - 'support/*'

jobs:
  commits_check_job:
    runs-on: ubuntu-latest
    name: Commits Check
    steps:
      - name: Get PR Commits
        id: 'get-pr-commits'
        uses: tim-actions/get-pr-commits@master
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: DCO Check
        uses: tim-actions/dco@master
        with:
          commits: ${{ steps.get-pr-commits.outputs.commits }}
96  .github/workflows/tests.yml  (vendored, deleted)

@@ -1,96 +0,0 @@
name: Tests

on:
  pull_request:
    branches:
      - master
      - 'support/*'
    types: [opened, synchronize]
    paths-ignore:
      - '**/*.md'

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Get tree-service client
        run: make sync-tree

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          version: latest

  cover:
    name: Coverage
    runs-on: ubuntu-20.04

    env:
      CGO_ENABLED: 1
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Get tree-service client
        run: make sync-tree

      - name: Update Go modules
        run: make dep

      - name: Test and write coverage profile
        run: make cover

      - name: Upload coverage results to Codecov
        uses: codecov/codecov-action@v1
        with:
          fail_ci_if_error: false
          path_to_write_report: ./coverage.txt
          verbose: true

  tests:
    name: Tests
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        go_versions: [ '1.18.x', '1.19.x' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Get tree-service client
        run: make sync-tree

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test
3  .gitignore  (vendored)

@@ -2,9 +2,6 @@
.idea
.vscode

-# Tree service
-internal/frostfs/services/tree/
-
# Vendoring
vendor
@@ -4,7 +4,7 @@
# options for analysis running
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  timeout: 5m
+  timeout: 15m

  # include test files or not, default is true
  tests: true
415  CHANGELOG.md

@@ -9,8 +9,13 @@ This document outlines major changes between releases.
- Get empty bucket CORS from frostfs (TrueCloudLab#36)
- Don't count pool error on client abort (#35)
- Don't create unnecessary delete-markers (#83)
- Handle negative `Content-Length` on put (#125)
- Use `DisableURIPathEscaping` to presign urls (#125)
- Use specific s3 errors instead of `InternalError` where possible (#143)

### Added
- Implement chunk uploading (#106)
- Reload default and custom copies numbers on SIGHUP (#104)
- Add `copies_numbers` section to `placement_policy` in config file and support vectors of copies numbers (#70)
- Return `X-Owner-Id` in `head-bucket` response (#79)
- Return container name in `head-bucket` response (TrueCloudLab#18)

@@ -21,14 +26,18 @@ This document outlines major changes between releases.
- Add new `kludge.use_default_xmlns_for_complete_multipart` config param (TrueCloudLab#40)
- Support dump metrics descriptions (#80)
- Support impersonate bearer token (#81)
- Return bearer token in `s3-authmate obtain-secret` result (#132)
- Add `s3-authmate update-secret` command (#131)

### Changed
- Remove object from tree and reset its cache on object deletion when it is already removed from storage (#78)
- Update prometheus to v1.15.0 (#94)
- Update syncTree.sh due to recent renaming (#73)
- Update neo-go to v0.101.0 (#14)
- Update viper to v1.15.0 (#14)
- Using multiple servers require only one healthy (TrueCloudLab#12)
- Update go version to go1.18 (TrueCloudLab#16)
- Update go version to go1.19 (#118)
- Return error on invalid LocationConstraint (TrueCloudLab#23)
- Place billing metrics to separate url path (TrueCloudLab#26)
- Add generated deb builder files to .gitignore, and fix typo (TrueCloudLab#28)

@@ -37,408 +46,16 @@ This document outlines major changes between releases.
- Support new system attributes (#64)
- Changed values for `frostfs_s3_gw_state_health` metric (#91)
- Support multiple tree service endpoints (#74)

## [0.26.0] - 2022-12-28

### Added
- Use client time as `now` in some requests (#726)
- Reload policies on SIGHUP (#747)
- Authmate flags for pool timeouts (#760)
- Multiple server listeners (#742)

### Changed
- Placement policy configuration (#568)
- Improved debug logging of CID and OID values (#754)
- Timeout errors has code 504 now (#103)
- Support multiple version credentials using GSet (#135)
- Use request scope logger (#111)

### Removed
- Deprecated linters (#755)

### Updating from v0.25.1
New config parameters were added. And old one `defaul_policy` were changed.
```yaml
placement_policy:
  default: "REP 3"
  region_mapping: /path/to/container/policies.json
```

Make sure you update the config accordingly:
If you configure application using environment variables change:
* `S3_GW_DEFAULT_POLICY` -> `S3_GW_PLACEMENT_POLICY_DEFAULT_POLICY`
* `S3_GW_LISTEN_ADDRESS` -> `S3_GW_SERVER_0_ADDRESS`
* `S3_GW_TLS_CERT_FILE` -> `S3_GW_SERVER_0_TLS_CERT_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)
* `S3_GW_TLS_KEY_FILE` -> `S3_GW_SERVER_0_TLS_KEY_FILE` (and set `S3_GW_SERVER_0_TLS_ENABLED=true`)

If you configure application using `.yaml` file change:
* `defaul_policy` -> `placement_policy.default`
* `listen_address` -> `server.0.address`
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)

## [0.25.1] - 2022-10-30

### Fixed
- Empty bucket policy (#740)
- Big object removal (#749)
- Checksum panic (#741)

### Added
- Debian packaging (#737)
- Timeout for individual operations in streaming RPC (#750)

## [0.25.0] - 2022-10-31

### Fixed
- Legal hold object lock enabling (#709)
- Errors at object locking (#719)
- Unrestricted access to not owned objects via cache (#713)
- Check tree service health (#699)
- Bucket names in listing (#733)

### Added
- Config reloading on SIGHUP (#702, #715, #716)
- Stop pool dial on SIGINT (#712)

### Changed
- GitHub actions update (#710)
- Makefile help (#725)
- Optimized object tags setting (#669)
- Improved logging (#728)
- Unified unit test names (#617)
- Improved docs (#732)

### Removed
- Unused cache methods (#650)

### Updating from v0.24.0
New config parameters were added. Make sure the default parameters are appropriate for you.

```yaml
cache:
  accesscontrol:
    lifetime: 1m
    size: 100000
```

## [0.24.0] - 2022-09-14

### Added
- Exposure of pool metrics (#615, #680)
- Configuration of `set_copies_number` (#634, #637)
- Configuration of list of allowed `AccessKeyID` prefixes (#674)
- Tagging directive for `CopyObject` (#666, #683)
- Customer encryption (#595)
- `CopiesNumber` configuration (#634, #637)

### Changed
- Improved wallet configuration via `.yaml` config and environment variables (#607)
- Update go version for build to 1.19 (#694, #705)
- Update version calculation (#653, #697)
- Optimized lock creation (#692)
- Update way to configure `listen_domains` (#667)
- Use `FilePath` instead of `FileName` for object keys (#657)
- Optimize listing (#625, #616)

### Removed
- Drop any object search logic (#545)

### Fixed
- Responses to `GetObject` and `HeadObject`: removed redundant `VersionID` (#577, #682)
- Replacement of object tagging in case of overwriting of an object (#645)
- Using tags cache with empty `versionId` (#643)
- Fix panic on go1.19 (#678)
- Fix panic on invalid versioning status (#660)
- Fix panic on missing decrypt reader (#704)
- Using multipart uploads with `/` in name (#671)
- Don't update settings cache when request fails (#661)
- Fix handling `X-Amz-Copy-Source` header (#672)
- ACL related problems (#676, #606)
- Using `ContinuationToken` for "directories" (#684)
- Fix `connection was closed` error (#656)
- Fix listing for nested objects (#624)
- Fix anon requests to tree service (#504, #505)

### Updating from v0.23.0
Make sure your configuration is valid:

If you configure application using environment variables change:
* `S3_GW_WALLET` -> `S3_GW_WALLET_PATH`
* `S3_GW_ADDRESS` -> `S3_GW_WALLET_ADDRESS`
* `S3_GW_LISTEN_DOMAINS_N` -> `S3_GW_LISTEN_DOMAINS` (use it as array variable)

If you configure application using `.yaml` file change:
* `wallet` -> `wallet.path`
* `address` -> `wallet.address`
* `listen_domains.n` -> `listen_domains` (use it as array param)

## [0.23.0] - 2022-08-01

### Fixed
- System metadata are filtered now (#619)
- List objects in corner cases (#612, #627)
- Correct removal of a deleted object (#610)
- Bucket creation could lead to "no healthy client" error (#636)

### Added
- New param to configure pool error threshold (#633)

### Changed
- Pprof and prometheus metrics configuration (#591)
- Don't set sticky bit in authmate container (#540)
- Updated compatibility table (#638)
- Rely on string sanitizing from zap (#498)

### Updating from v0.22.0
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
   To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
   If you are using the command line flags you can skip this step.

## [0.22.0] - 2022-07-25

Tree service support

### Fixed
- Error logging (#450)
- Default bucket location constraint (#463)
- Suspended versioning status (#462)
- CodeQL warnings (#489, #522, #539)
- Bearer token behaviour with non-owned buckets (#459)
- ACL issues (#495, #553, #571, #573, #574, #580)
- Authmate policy parsing (#558)

### Added
- Public key output in authmate issue-secret command (#482)
- Support of conditional headers (#484)
- Cache type cast error logging (#465)
- `docker/*` target in Makefile (#471)
- Pre signed requests (#529)
- Tagging and ACL notifications (#361)
- AWSv4 signer package to improve compatibility with S3 clients (#528)
- Extension mimetype detector (#289)
- Default params documentation (#592)
- Health metric (#600)
- Parallel object listing (#525)
- Tree service (see commit links from #609)

### Changed
- Reduce number of network requests (#439, #441)
- Renamed authmate to s3-authmate (#518)
- Version output (#578)
- Improved error messages (#539)

### Removed
- `layer/neofs` package (#438)

## [0.21.1] - 2022-05-16

### Changed
- Update go version to go1.17 (#427)
- Set homomorphic hashing disable attribute in container if required (#435)

## [0.21.0] - 2022-05-13

### Added
- Support of get-object-attributes (#430)

### Fixed
- Reduced time of bucket creation (#426)
- Bucket removal (#428)
- Obtainment of ETag value (#431)

### Changed
- Authmate doesn't parse session context anymore, now it accepts application defined
  flexible structure with container ID in human-readable format (#428)

## [0.20.0] - 2022-04-29

### Added
- Support of object locking (#195)
- Support of basic notifications (#357, #358, #359)

### Changed
- Logger behavior: now it writes to stderr instead of stdout, app name and
  version are always presented and fixed, all user options except of `level` are
  dropped (#380)
- Improved docs, added config examples (#396, #398)
- Updated NeoFS SDK (#365, #409)

### Fixed
- Added check of `SetEACL` tokens before processing of requests (#347)
- Authmate: returned lost session tokens when a parameter `--session-token` is
  omitted (#387)
- Error when a bucket hasn't a settings file (#389)
- Response to a request to delete not existing object (#392)
- Replaced gate key in ACL Grantee by key of bearer token issuer (#395)
- Missing attach of bearer token to requests to put system object (#399)
- Deletion of system object while CompleteMultipartUpload (#400)
- Improved English in docs and comments (#405)
- Authmate: reconsidered default bearer token rules (#406)

## [0.19.0] - 2022-03-16

### Added
- Authmate: support placement policy overriding (#343, #364)
- Managing bucket notification configuration (#340)
- Unit tests in go1.17 (#265)
- NATS settings in application config (#341)
- Support `Expires` and `Cache-Control` headers (#312)
- Support `%` as delimiter (#313)
- Support `null` version deletion (#319)
- Bucket name resolving order (#285)
- Authmate: added `timeout` flag (#290)
- MinIO results in s3 compatibility tables (#304)
- Support overriding response headers (#310)

### Changed
- Authmate: check parameters before container creation (#372)
- Unify cache invalidation on deletion (#368)
- Updated NeoFS SDK to v1.0.0-rc.3 (#297, #333, #346, #376)
- Authmate: changed session token rules handling (#329, #336, #338, #352)
- Changed status code for some failed requests (#308)
- GetBucketLocation returns policy name used at bucket creation (#301)

### Fixed
- Waiting for bucket to be deleted (#366)
- Authmate: changed error message for session context building (#348)
- Authmate: fixed access key parsing in `obtain-secret` command (#295)
- Distinguishing `BucketAlreadyExists` errors (#354)
- Incorrect panic if handler not found (#305)
- Authmate: use container friendly name as system name (#299, #324)
- Use UTC `Last-Modified` timestamps (#331)
- Don't return object system metadata (#307)
- Handling empty post policy (#306)
- Use `X-Amz-Verion-Id` in `CompleteMulipartUpload` (#318)

### Removed
- Drop MinIO related errors (#316)

## [0.18.0] - 2021-12-16

### Added
- Support for MultipartUpload (#186, #187)
- CORS support (#217)
- Authmate supports setting of tokens lifetime in a more convenient format (duration) (#258)
- Generation of a random key for `--no-sign-request` (#276)

### Changed
- Bucket name resolving mechanism from listing owner's containers to using DNS (#219)

### Removed
- Deprecated golint, replaced by revive (#272)

## 0.17.0 (24 Sep 2021)

With this release we introduce [ceph-based](https://github.com/ceph/s3-tests) S3 compatibility results.

### Added
* Versioning support (#122, #242, #263)
* Ceph S3 compatibility results (#150, #249, #266)
* Handling `X-Amz-Expected-Bucket-Owner` header (#216)
* `X-Container-Id` header for `HeadBucket` response (#220)
* Basic ACL support (#49, #213)
* Caching (#179, #206, #231, #236, #253)
* Metadata directive when copying (#191)
* Bucket name checking (189)
* Continuation token support (#112, #154, #180)
* Mapping `LocationConstraint` to `PlacementPolicy` (#89)
* Tagging support (#196)
* POST uploading support (#190)
* Delete marker support (#248)
* Expiration for access box (#255)
* AWS CLI credential generating by authmate (#241)

### Changed
* Default placement policy is now configurable (#218)
* README is split into different files (#210)
* Unified error handling (#89, #149, #184)
* Authmate issue-secret response contains container id (#163)
* Removed "github.com/nspcc-dev/neofs-node" dependency (#234)
* Removed GitHub workflow of image publishing (#243)
* Changed license to AGPLv3 (#264)

### Fixed
* ListObjects results are now the same for different users (#230)
* Error response for invalid authentication header is now correct (#199)
* Saving object metadata (#198)
* Range header handling (#194)
* Correct status codes (#118, #262)
* HeadObject for "directories" (#160)
* Fetch-owner parameter support (#159)

## 0.16.0 (16 Jul 2021)

With this release we publish S3 gateway source code. It includes various S3
compatibility improvements, support of bucket management, unified secp256r1
cryptography with NEP-6 wallet support.

### Fixed
* Allowed no-sign request (#65)
* Bearer token attached to all requests (#84)
* Time format in responses (#133)
* Max-keys checked in ListObjects (#135)
* Lost metadat in the objects (#131)
* Unique bucket name check (#125)

### Added
* Bucket management operations (#47, #72)
* Node-specific owner IDs in bearer tokens (#83)
* AWS CLI usage section in README (#77)
* List object paging (#97)
* Lifetime for the tokens in auth-mate (#108)
* Support of range in GetObject request (#96)
* Support of NEP-6 wallets instead of binary encoded keys (#92)
* Support of JSON encoded rules in auth-mate (#71)
* Support of delimiters in ListObjects (#98)
* Support of object ETag (#93)
* Support of time-based conditional CopyObject and GetObject (#94)

### Changed
* Accesskey format: now `0` used as a delimiter between container ID and object
  ID instead of `_` (#164)
* Accessbox is encoded in protobuf format (#48)
* Authentication uses secp256r1 instead of ed25519 (#75)
* Improved integration with NeoFS SDK and NeoFS API Go (#78, #88)
* Optimized object put execution (#155)

### Removed
* GRPC keepalive options (#73)

## 0.15.0 (10 Jun 2021)

This release brings S3 gateway to the current state of NeoFS and fixes some
bugs, no new significant features introduced (other than moving here already
existing authmate component).

New features:
* authmate was moved into this repository and is now built along with the
  gateway itself (#46)

Behavior changes:
* neofs-s3-gate was renamed to neofs-s3-gw (#50)

Improvements:
* better Makefile (#43, #45, #55)
* stricter linters (#45)
* removed non-standard errors package from dependencies (#54)
* refactoring, reusing new sdk-go component (#60, #62, #63)
* updated neofs-api-go for compatibility with current NeoFS node 0.21.0 (#60, #68)
* extended README (#67, #76)

Bugs fixed:
* wrong (as per AWS specification) access key ID generated (#64)
- Drop `tree.service` param (now endpoints from `peers` section are used) (#133)

## Older versions

Please refer to [Github
releases](https://github.com/nspcc-dev/neofs-s3-gw/releases/) for older
releases.
This project is a fork of [NeoFS S3 Gateway](https://github.com/nspcc-dev/neofs-s3-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-s3-gw/blob/master/CHANGELOG.md.

[0.18.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.17.0...v0.18.0
[0.19.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.18.0...v0.19.0
[0.20.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.19.0...v0.20.0
[0.21.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.20.0...v0.21.0
[0.21.1]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.0...v0.21.1
[0.22.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.21.1...v0.22.0
[0.23.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.22.0...v0.23.0
[0.24.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.23.0...v0.24.0
[0.25.0]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.24.0...v0.25.0
[Unreleased]: https://github.com/nspcc-dev/neofs-s3-gw/compare/v0.25.0...master
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/b2148cc3...master
@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:

-- Check the open [issues](https://github.com/TrueCloudLab/frostfs-s3-gw/issues) and
-  [pull requests](https://github.com/TrueCloudLab/frostfs-s3-gw/pulls) for existing
+- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues) and
+  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pulls) for existing
  discussions.

- Open an issue first, to discuss a new feature or enhancement.

@@ -27,20 +27,20 @@ Start by forking the `frostfs-s3-gw` repository, make changes in a branch and th
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:

-### Set up your GitHub Repository
+### Set up your git repository
Fork [FrostFS S3 Gateway
-upstream](https://github.com/TrueCloudLab/frostfs-s3-gw/fork) source repository
+upstream](https://git.frostfs.info/repo/fork/15) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).

```sh
-$ git clone https://github.com/TrueCloudLab/frostfs-s3-gw
+$ git clone https://git.frostfs.info/<username>/frostfs-s3-gw.git
```

### Set up git remote as ``upstream``
```sh
$ cd frostfs-s3-gw
-$ git remote add upstream https://github.com/TrueCloudLab/frostfs-s3-gw
+$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw.git
$ git fetch upstream
$ git merge upstream/master
...

@@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
```

### Create a Pull Request
-Pull requests can be created via GitHub. Refer to [this
-document](https://help.github.com/articles/creating-a-pull-request/) for
+Pull requests can be created via Forgejo. Refer to [this
+document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
8  Makefile

@@ -6,7 +6,6 @@ VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
BINDIR = bin
-SYNCDIR = internal/frostfs/services/tree

METRICS_DUMP_OUT ?= ./metrics-dump.json

@@ -31,7 +30,7 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
# Make all binaries
all: $(BINS)

-$(BINS): sync-tree $(BINDIR) dep
+$(BINS): $(BINDIR) dep
	@echo "⇒ Build $@"
	CGO_ENABLED=0 \
	go build -v -trimpath \

@@ -42,10 +41,6 @@ $(BINDIR):
	@echo "⇒ Ensure dir: $@"
	@mkdir -p $@

-# Synchronize tree service
-sync-tree:
-	@./syncTree.sh
-
# Pull go dependencies
dep:
	@printf "⇒ Download requirements: "

@@ -134,7 +129,6 @@ version:
clean:
	rm -rf .cache
	rm -rf $(BINDIR)
-	rm -rf $(SYNCDIR)

# Generate code from .proto files
protoc:
@@ -7,6 +7,8 @@
---
+[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-s3-gw)
+![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-s3-gw/releases&query=$[0].tag_name&color=orange)
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)

# FrostFS S3 Gateway
@@ -40,6 +40,7 @@ type (
    Box struct {
        AccessBox   *accessbox.Box
        ClientTime  time.Time
+       AuthHeaders *AuthHeader
    }

    center struct {

@@ -51,7 +52,8 @@ type (
    prs int

-   authHeader struct {
+   //nolint:revive
+   AuthHeader struct {
        AccessKeyID string
        Service     string
        Region      string

@@ -101,7 +103,7 @@ func New(frostFS tokens.FrostFS, key *keys.PrivateKey, prefixes []string, config
    }
}

-func (c *center) parseAuthHeader(header string) (*authHeader, error) {
+func (c *center) parseAuthHeader(header string) (*AuthHeader, error) {
    submatches := c.reg.GetSubmatches(header)
    if len(submatches) != authHeaderPartsNum {
        return nil, apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed)

@@ -114,7 +116,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
    signedFields := strings.Split(submatches["signed_header_fields"], ";")

-   return &authHeader{
+   return &AuthHeader{
        AccessKeyID: submatches["access_key_id"],
        Service:     submatches["service"],
        Region:      submatches["region"],

@@ -124,7 +126,7 @@ func (c *center) parseAuthHeader(header string) (*authHeader, error) {
    }, nil
}

-func (a *authHeader) getAddress() (oid.Address, error) {
+func (a *AuthHeader) getAddress() (oid.Address, error) {
    var addr oid.Address
    if err := addr.DecodeString(strings.ReplaceAll(a.AccessKeyID, "0", "/")); err != nil {
        return addr, apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID)

@@ -135,7 +137,7 @@ func (a *authHeader) getAddress() (oid.Address, error) {
func (c *center) Authenticate(r *http.Request) (*Box, error) {
    var (
        err                  error
-       authHdr              *authHeader
+       authHdr              *AuthHeader
        signatureDateTimeStr string
        needClientTime       bool
    )

@@ -146,7 +148,7 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
    if len(creds) != 5 || creds[4] != "aws4_request" {
        return nil, fmt.Errorf("bad X-Amz-Credential")
    }
-   authHdr = &authHeader{
+   authHdr = &AuthHeader{
        AccessKeyID: creds[0],
        Service:     creds[3],
        Region:      creds[2],

@@ -200,7 +202,10 @@ func (c *center) Authenticate(r *http.Request) (*Box, error) {
        return nil, err
    }

-   result := &Box{AccessBox: box}
+   result := &Box{
+       AccessBox:   box,
+       AuthHeaders: authHdr,
+   }
    if needClientTime {
        result.ClientTime = signatureDateTime
    }

@@ -267,7 +272,7 @@ func (c *center) checkFormData(r *http.Request) (*Box, error) {
    return &Box{AccessBox: box}, nil
}

-func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
+func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {
    otherRequest := r.Clone(context.TODO())
    otherRequest.Header = make(http.Header)

@@ -288,9 +293,10 @@ func cloneRequest(r *http.Request, authHeader *authHeader) *http.Request {
    return otherRequest
}

-func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
+func (c *center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *http.Request, signatureDateTime time.Time) error {
    awsCreds := credentials.NewStaticCredentials(authHeader.AccessKeyID, box.Gate.AccessKey, "")
    signer := v4.NewSigner(awsCreds)
+   signer.DisableURIPathEscaping = true

    var signature string
    if authHeader.IsPresigned {

@@ -306,7 +312,6 @@ func (c *center) checkSign(authHeader *authHeader, box *accessbox.Box, request *
        }
        signature = request.URL.Query().Get(AmzSignature)
    } else {
-       signer.DisableURIPathEscaping = true
        if _, err := signer.Sign(request, nil, authHeader.Service, authHeader.Region, signatureDateTime); err != nil {
            return fmt.Errorf("failed to sign temporary HTTP request: %w", err)
        }
@@ -19,12 +19,12 @@ func TestAuthHeaderParse(t *testing.T) {
    for _, tc := range []struct {
        header   string
        err      error
-       expected *authHeader
+       expected *AuthHeader
    }{
        {
            header: defaultHeader,
            err:    nil,
-           expected: &authHeader{
+           expected: &AuthHeader{
                AccessKeyID: "oid0cid",
                Service:     "s3",
                Region:      "us-east-1",

@@ -54,29 +54,29 @@ func TestAuthHeaderGetAddress(t *testing.T) {
    defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)

    for _, tc := range []struct {
-       authHeader *authHeader
+       authHeader *AuthHeader
        err        error
    }{
        {
-           authHeader: &authHeader{
+           authHeader: &AuthHeader{
                AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
            },
            err: nil,
        },
        {
-           authHeader: &authHeader{
+           authHeader: &AuthHeader{
                AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
            },
            err: defaulErr,
        },
        {
-           authHeader: &authHeader{
+           authHeader: &AuthHeader{
                AccessKeyID: "oid0cid",
            },
            err: defaulErr,
        },
        {
-           authHeader: &authHeader{
+           authHeader: &AuthHeader{
                AccessKeyID: "oidcid",
            },
            err: defaulErr,
46  api/auth/presign.go  (new file)

@@ -0,0 +1,46 @@
package auth

import (
    "fmt"
    "net/http"
    "strings"
    "time"

    v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/private/protocol/rest"
)

type RequestData struct {
    Method   string
    Endpoint string
    Bucket   string
    Object   string
}

type PresignData struct {
    Service  string
    Region   string
    Lifetime time.Duration
    SignTime time.Time
}

// PresignRequest forms pre-signed request to access objects without aws credentials.
func PresignRequest(creds *credentials.Credentials, reqData RequestData, presignData PresignData) (*http.Request, error) {
    urlStr := fmt.Sprintf("%s/%s/%s", reqData.Endpoint, rest.EscapePath(reqData.Bucket, false), rest.EscapePath(reqData.Object, false))
    req, err := http.NewRequest(strings.ToUpper(reqData.Method), urlStr, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create new request: %w", err)
    }

    req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))

    signer := v4.NewSigner(creds)
    signer.DisableURIPathEscaping = true

    if _, err = signer.Presign(req, nil, presignData.Service, presignData.Region, presignData.Lifetime, presignData.SignTime); err != nil {
        return nil, fmt.Errorf("presign: %w", err)
    }

    return req, nil
}
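The test file that follows exercises this helper through the gateway's authentication center. As a quick illustration of the calling convention only — a sketch, not part of this change — a client-side program could build a pre-signed GET URL like this; the import path, endpoint, bucket/object names and credentials below are assumed placeholders:

package main

import (
    "fmt"
    "log"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" // assumed import path of the package above
    "github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
    // Placeholder credentials, e.g. issued by s3-authmate.
    creds := credentials.NewStaticCredentials("<access-key-id>", "<secret-key>", "")

    req, err := auth.PresignRequest(creds,
        auth.RequestData{Method: "GET", Endpoint: "http://localhost:8084", Bucket: "my-bucket", Object: "obj/name"},
        auth.PresignData{Service: "s3", Region: "us-east-1", Lifetime: 10 * time.Minute, SignTime: time.Now().UTC()},
    )
    if err != nil {
        log.Fatal(err)
    }

    // PresignRequest attaches the signed query parameters to req.URL.
    fmt.Println(req.URL.String())
}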
95  api/auth/presign_test.go  (new file)

@@ -0,0 +1,95 @@
package auth

import (
    "context"
    "strings"
    "testing"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/stretchr/testify/require"
)

var _ tokens.Credentials = (*credentialsMock)(nil)

type credentialsMock struct {
    boxes map[string]*accessbox.Box
}

func newTokensFrostfsMock() *credentialsMock {
    return &credentialsMock{
        boxes: make(map[string]*accessbox.Box),
    }
}

func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
    m.boxes[addr.String()] = box
}

func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, error) {
    box, ok := m.boxes[addr.String()]
    if !ok {
        return nil, apistatus.ObjectNotFound{}
    }

    return box, nil
}

func (m credentialsMock) Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
    return oid.Address{}, nil
}

func (m credentialsMock) Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error) {
    return oid.Address{}, nil
}

func TestCheckSign(t *testing.T) {
    var accessKeyAddr oid.Address
    err := accessKeyAddr.DecodeString("8N7CYBY74kxZXoyvA5UNdmovaXqFpwNfvEPsqaN81es2/3tDwq5tR8fByrJcyJwyiuYX7Dae8tyDT7pd8oaL1MBto")
    require.NoError(t, err)

    accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
    secretKey := "713d0a0b9efc7d22923e17b0402a6a89b4273bc711c8bacb2da1b643d0006aeb"
    awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")

    reqData := RequestData{
        Method:   "GET",
        Endpoint: "http://localhost:8084",
        Bucket:   "my-bucket",
        Object:   "@obj/name",
    }
    presignData := PresignData{
        Service:  "s3",
        Region:   "spb",
        Lifetime: 10 * time.Minute,
        SignTime: time.Now().UTC(),
    }

    req, err := PresignRequest(awsCreds, reqData, presignData)
    require.NoError(t, err)

    expBox := &accessbox.Box{
        Gate: &accessbox.GateData{
            AccessKey: secretKey,
        },
    }

    mock := newTokensFrostfsMock()
    mock.addBox(accessKeyAddr, expBox)

    c := &center{
        cli:     mock,
        reg:     NewRegexpMatcher(authorizationFieldRegexp),
        postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
    }
    box, err := c.Authenticate(req)
    require.NoError(t, err)
    require.EqualValues(t, expBox, box.AccessBox)
}
@@ -790,6 +790,8 @@ const doubleSpace = "  "

// stripExcessSpaces will rewrite the passed in slice's string values to not
// contain multiple side-by-side spaces.
+//
+//nolint:revive
func stripExcessSpaces(vals []string) {
    var j, k, l, m, spaces int
    for i, str := range vals {
@@ -40,9 +40,10 @@ type (
        Bucket        string
        Name          string
-       Size          int64
+       Size          uint64
        ContentType   string
        Created       time.Time
+       CreationEpoch uint64
        HashSum       string
        Owner         user.ID
        Headers       map[string]string

@@ -52,7 +53,7 @@ type (
    NotificationInfo struct {
        Name    string
        Version string
-       Size    int64
+       Size    uint64
        HashSum string
    }
@@ -53,7 +53,7 @@ type BaseNodeVersion struct {
    ParenID   uint64
    OID       oid.ID
    Timestamp uint64
-   Size      int64
+   Size      uint64
    ETag      string
    FilePath  string
}

@@ -83,14 +83,14 @@ type PartInfo struct {
    UploadID string
    Number   int
    OID      oid.ID
-   Size     int64
+   Size     uint64
    ETag     string
    Created  time.Time
}

// ToHeaderString form short part representation to use in S3-Completed-Parts header.
func (p *PartInfo) ToHeaderString() string {
-   return strconv.Itoa(p.Number) + "-" + strconv.FormatInt(p.Size, 10) + "-" + p.ETag
+   return strconv.Itoa(p.Number) + "-" + strconv.FormatUint(p.Size, 10) + "-" + p.ETag
}

// LockInfo is lock information to create appropriate tree node.
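For illustration only (not part of the diff): with the switch to uint64 and strconv.FormatUint, the header value keeps the same "number-size-etag" shape. The package name and import path below are assumptions.

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" // assumed location of PartInfo
)

func main() {
    // A hypothetical 5 MiB part number 3 with ETag "abc".
    p := &data.PartInfo{Number: 3, Size: 5 * 1024 * 1024, ETag: "abc"}
    // Prints "3-5242880-abc" — the value placed in the S3-Completed-Parts header.
    fmt.Println(p.ToHeaderString())
}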
@@ -175,6 +175,7 @@ const (
    // Add new extended error codes here.
    ErrInvalidObjectName
    ErrOperationTimedOut
+   ErrGatewayTimeout
    ErrOperationMaxedOut
    ErrInvalidRequest
    ErrInvalidStorageClass

@@ -1124,6 +1125,12 @@ var errorCodes = errorCodeMap{
        Description:    "A timeout occurred while trying to lock a resource, please reduce your request rate",
        HTTPStatusCode: http.StatusServiceUnavailable,
    },
+   ErrGatewayTimeout: {
+       ErrCode:        ErrGatewayTimeout,
+       Code:           "GatewayTimeout",
+       Description:    "The server is acting as a gateway and cannot get a response in time",
+       HTTPStatusCode: http.StatusGatewayTimeout,
+   },
    ErrOperationMaxedOut: {
        ErrCode: ErrOperationMaxedOut,
        Code:    "SlowDown",
@@ -15,6 +15,7 @@ import (
    "strings"

    v2acl "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
+   aclgrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl/grpc"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"

@@ -243,7 +244,8 @@ func (s *statement) UnmarshalJSON(data []byte) error {
}

func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
-   reqInfo := api.GetReqInfo(r.Context())
+   ctx := r.Context()
+   reqInfo := api.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {

@@ -251,13 +253,13 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

-   bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
+   bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
        return
    }

-   if err = api.EncodeToResponse(w, h.encodeBucketACL(bktInfo.Name, bucketACL)); err != nil {
+   if err = api.EncodeToResponse(w, h.encodeBucketACL(ctx, bktInfo.Name, bucketACL)); err != nil {
        h.logAndSendError(w, "something went wrong", reqInfo, err)
        return
    }

@@ -269,7 +271,7 @@ func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, er
        return nil, err
    }

-   var btoken v2acl.BearerToken
+   var btoken aclgrpc.BearerToken
    box.Gate.BearerToken.WriteToV2(&btoken)

    key, err := keys.NewPublicKeyFromBytes(btoken.GetSignature().GetKey(), elliptic.P256())

@@ -365,7 +367,8 @@ func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.
}

func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-   reqInfo := api.GetReqInfo(r.Context())
+   ctx := r.Context()
+   reqInfo := api.GetReqInfo(ctx)

    bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
    if err != nil {

@@ -373,7 +376,7 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
        return
    }

-   bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
+   bucketACL, err := h.obj.GetBucketACL(ctx, bktInfo)
    if err != nil {
        h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
        return

@@ -385,27 +388,28 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
        VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
    }

-   objInfo, err := h.obj.GetObjectInfo(r.Context(), prm)
+   objInfo, err := h.obj.GetObjectInfo(ctx, prm)
    if err != nil {
        h.logAndSendError(w, "could not object info", reqInfo, err)
        return
    }

-   if err = api.EncodeToResponse(w, h.encodeObjectACL(bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
+   if err = api.EncodeToResponse(w, h.encodeObjectACL(ctx, bucketACL, reqInfo.BucketName, objInfo.VersionID())); err != nil {
        h.logAndSendError(w, "failed to encode response", reqInfo, err)
    }
}

func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
-   reqInfo := api.GetReqInfo(r.Context())
+   ctx := r.Context()
+   reqInfo := api.GetReqInfo(ctx)
    versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
-   key, err := h.bearerTokenIssuerKey(r.Context())
+   key, err := h.bearerTokenIssuerKey(ctx)
    if err != nil {
        h.logAndSendError(w, "couldn't get gate key", reqInfo, err)
        return
    }

-   token, err := getSessionTokenSetEACL(r.Context())
+   token, err := getSessionTokenSetEACL(ctx)
    if err != nil {
        h.logAndSendError(w, "couldn't get eacl token", reqInfo, err)
        return

@@ -423,7 +427,7 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
        VersionID: versionID,
    }

-   objInfo, err := h.obj.GetObjectInfo(r.Context(), p)
+   objInfo, err := h.obj.GetObjectInfo(ctx, p)
    if err != nil {
        h.logAndSendError(w, "could not get object info", reqInfo, err)
        return

@@ -465,8 +469,8 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
            BktInfo: bktInfo,
            ReqInfo: reqInfo,
        }
-       if err = h.sendNotifications(r.Context(), s); err != nil {
-           h.log.Error("couldn't send notification: %w", zap.Error(err))
+       if err = h.sendNotifications(ctx, s); err != nil {
+           h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
        }
    }
    w.WriteHeader(http.StatusOK)

@@ -1410,7 +1414,7 @@ func isWriteOperation(op eacl.Operation) bool {
    return op == eacl.OperationDelete || op == eacl.OperationPut
}

-func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
+func (h *handler) encodeObjectACL(ctx context.Context, bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
    res := &AccessControlPolicy{
        Owner: Owner{
            ID: bucketACL.Info.Owner.String(),

@@ -1456,7 +1460,7 @@ func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, object
        if read {
            permission = aclFullControl
        } else {
-           h.log.Warn("some acl not fully mapped")
+           h.reqLogger(ctx).Warn("some acl not fully mapped")
        }

        var grantee *Grantee

@@ -1478,8 +1482,8 @@ func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, object
    return res
}

-func (h *handler) encodeBucketACL(bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
-   return h.encodeObjectACL(bucketACL, bucketName, "")
+func (h *handler) encodeBucketACL(ctx context.Context, bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
+   return h.encodeObjectACL(ctx, bucketACL, bucketName, "")
}

func contains(list []eacl.Operation, op eacl.Operation) bool {
@@ -1474,7 +1474,7 @@ func createAccessBox(t *testing.T) (*accessbox.Box, *keys.PrivateKey) {
    key, err := keys.NewPrivateKey()
    require.NoError(t, err)

-   var bearerToken bearer.Token
+   bearerToken := bearer.NewToken()
    err = bearerToken.Sign(key.PrivateKey)
    require.NoError(t, err)
@@ -34,16 +34,16 @@ type (
        XMLDecoder         XMLDecoderProvider
        DefaultMaxAge      int
        NotificatorEnabled bool
-       DefaultCopiesNumbers []uint32
-       CopiesNumbers        map[string][]uint32
        ResolveZoneList    []string
        IsResolveListAllow bool // True if ResolveZoneList contains allowed zones
        CompleteMultipartKeepalive time.Duration
    }

    PlacementPolicy interface {
-       Default() netmap.PlacementPolicy
-       Get(string) (netmap.PlacementPolicy, bool)
+       DefaultPlacementPolicy() netmap.PlacementPolicy
+       PlacementPolicy(string) (netmap.PlacementPolicy, bool)
+       CopiesNumbers(string) ([]uint32, bool)
+       DefaultCopiesNumbers() []uint32
    }

    XMLDecoderProvider interface {

@@ -97,12 +97,12 @@ func (h *handler) pickCopiesNumbers(metadata map[string]string, locationConstrai
        return result, nil
    }

-   copiesNumbers, ok := h.cfg.CopiesNumbers[locationConstraint]
+   copiesNumbers, ok := h.cfg.Policy.CopiesNumbers(locationConstraint)
    if ok {
        return copiesNumbers, nil
    }

-   return h.cfg.DefaultCopiesNumbers, nil
+   return h.cfg.Policy.DefaultCopiesNumbers(), nil
}

func parseCopiesNumbers(copiesNumbersStr string) ([]uint32, error) {

@@ -7,14 +7,16 @@ import (
)

func TestCopiesNumberPicker(t *testing.T) {
-   var locConstraints = map[string][]uint32{}
+   var locationConstraints = map[string][]uint32{}
    locationConstraint1 := "one"
    locationConstraint2 := "two"
-   locConstraints[locationConstraint1] = []uint32{2, 3, 4}
+   locationConstraints[locationConstraint1] = []uint32{2, 3, 4}

    config := &Config{
-       DefaultCopiesNumbers: []uint32{1},
-       CopiesNumbers:        locConstraints,
+       Policy: &placementPolicyMock{
+           copiesNumbers:        locationConstraints,
+           defaultCopiesNumbers: []uint32{1},
+       },
    }
    h := handler{
        cfg: config,
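The test above relies on a placementPolicyMock whose definition is not part of the shown hunks. A minimal sketch of what such a mock could look like, assuming it only needs to satisfy the new PlacementPolicy interface from the Config hunk (netmap here is assumed to be the frostfs-sdk-go netmap package already imported by the handler package):

// Sketch only — the real test double in the repository may differ.
type placementPolicyMock struct {
    copiesNumbers        map[string][]uint32
    defaultCopiesNumbers []uint32
}

func (p *placementPolicyMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
    // The copies-number test never inspects the policy itself, so a zero value suffices.
    return netmap.PlacementPolicy{}
}

func (p *placementPolicyMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
    return netmap.PlacementPolicy{}, false
}

func (p *placementPolicyMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
    copiesNumbers, ok := p.copiesNumbers[locationConstraint]
    return copiesNumbers, ok
}

func (p *placementPolicyMock) DefaultCopiesNumbers() []uint32 {
    return p.defaultCopiesNumbers
}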
@@ -17,7 +17,7 @@
    GetObjectAttributesResponse struct {
        ETag         string       `xml:"ETag,omitempty"`
        Checksum     *Checksum    `xml:"Checksum,omitempty"`
-       ObjectSize   int64        `xml:"ObjectSize,omitempty"`
+       ObjectSize   uint64       `xml:"ObjectSize,omitempty"`
        StorageClass string       `xml:"StorageClass,omitempty"`
        ObjectParts  *ObjectParts `xml:"ObjectParts,omitempty"`
    }
@ -46,7 +46,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
tagSet map[string]string
|
||||
sessionTokenEACL *session.Container
|
||||
|
||||
reqInfo = api.GetReqInfo(r.Context())
|
||||
ctx = r.Context()
|
||||
reqInfo = api.GetReqInfo(ctx)
|
||||
|
||||
containsACL = containsACLHeaders(r)
|
||||
)
|
||||
|
@ -84,20 +85,20 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
|
||||
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
|
||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||
return
|
||||
|
@ -135,7 +136,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
NodeVersion: extendedSrcObjInfo.NodeVersion,
|
||||
}
|
||||
|
||||
_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
|
||||
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
||||
return
|
||||
|
@ -183,14 +184,14 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
params.Lock, err = formObjectLock(r.Context(), dstBktInfo, settings.LockConfiguration, r.Header)
|
||||
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not form object lock", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
||||
extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
|
||||
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
|
||||
return
|
||||
|
@ -215,7 +216,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
SessionToken: sessionTokenEACL,
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
||||
if err = h.obj.PutBucketACL(ctx, p); err != nil {
|
||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
@ -231,16 +232,13 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
TagSet: tagSet,
|
||||
NodeVersion: extendedDstObjInfo.NodeVersion,
|
||||
}
|
||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
h.log.Info("object is copied",
|
||||
zap.String("bucket", dstObjInfo.Bucket),
|
||||
zap.String("object", dstObjInfo.Name),
|
||||
zap.Stringer("object_id", dstObjInfo.ID))
|
||||
h.reqLogger(ctx).Info("object is copied", zap.Stringer("object_id", dstObjInfo.ID))
|
||||
|
||||
s := &SendNotificationParams{
|
||||
Event: EventObjectCreatedCopy,
|
||||
|
@ -248,8 +246,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
BktInfo: dstBktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
if encryptionParams.Enabled() {
|
||||
|
|
|
@ -90,19 +90,21 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
|||
if origin == "" {
|
||||
return
|
||||
}
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
if reqInfo.BucketName == "" {
|
||||
return
|
||||
}
|
||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
||||
bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.log.Warn("get bucket info", zap.Error(err))
|
||||
h.reqLogger(ctx).Warn("get bucket info", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
||||
cors, err := h.obj.GetBucketCORS(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.log.Warn("get bucket cors", zap.Error(err))
|
||||
h.reqLogger(ctx).Warn("get bucket cors", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -61,7 +61,8 @@ type DeleteObjectsResponse struct {
|
|||
}
|
||||
|
||||
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
||||
versionedObject := []*layer.VersionedObject{{
|
||||
Name: reqInfo.ObjectName,
|
||||
|
@ -74,7 +75,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
|
@ -85,7 +86,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Objects: versionedObject,
|
||||
Settings: bktSettings,
|
||||
}
|
||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||
deletedObject := deletedObjects[0]
|
||||
if deletedObject.Error != nil {
|
||||
if isErrObjectLocked(deletedObject.Error) {
|
||||
|
@ -112,7 +113,7 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
var objID oid.ID
|
||||
if len(versionID) != 0 {
|
||||
if err = objID.DecodeString(versionID); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,8 +128,8 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}
|
||||
|
||||
if err = h.sendNotifications(r.Context(), m); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, m); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
if deletedObject.VersionID != "" {
|
||||
|
@ -156,7 +157,8 @@ func isErrObjectLocked(err error) bool {
|
|||
|
||||
// DeleteMultipleObjectsHandler handles multiple delete requests.
|
||||
func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
|
||||
// Content-Md5 is required and should be set
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
|
@ -206,7 +208,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
return
|
||||
}
|
||||
|
||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
|
@ -224,7 +226,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
Objects: toRemove,
|
||||
Settings: bktSettings,
|
||||
}
|
||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
||||
deletedObjects := h.obj.DeleteObjects(ctx, p)
|
||||
|
||||
var errs []error
|
||||
for _, obj := range deletedObjects {
|
||||
|
@ -259,7 +261,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
|||
zap.Array("objects", marshaler),
|
||||
zap.Errors("errors", errs),
|
||||
}
|
||||
h.log.Error("couldn't delete objects", fields...)
|
||||
h.reqLogger(ctx).Error("couldn't delete objects", fields...)
|
||||
}
|
||||
|
||||
if err = api.EncodeToResponse(w, response); err != nil {
|
||||
|
|
|
@ -8,6 +8,8 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -15,6 +17,26 @@ const (
|
|||
emptyVersion = ""
|
||||
)
|
||||
|
||||
func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
putObject(t, hc, bktName, objName)
|
||||
|
||||
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
|
||||
require.NoError(t, err)
|
||||
var addr oid.Address
|
||||
addr.SetContainer(bktInfo.CID)
|
||||
addr.SetObject(nodeVersion.OID)
|
||||
hc.tp.SetObjectError(addr, apistatus.ObjectAlreadyRemoved{})
|
||||
|
||||
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
||||
|
||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func TestDeleteBucket(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -31,6 +53,26 @@ func TestDeleteBucket(t *testing.T) {
|
|||
deleteBucket(t, tc, bktName, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func TestDeleteBucketOnNotFoundError(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-removal", "object-to-delete"
|
||||
bktInfo := createTestBucket(hc, bktName)
|
||||
|
||||
putObject(t, hc, bktName, objName)
|
||||
|
||||
nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
|
||||
require.NoError(t, err)
|
||||
var addr oid.Address
|
||||
addr.SetContainer(bktInfo.CID)
|
||||
addr.SetObject(nodeVersion.OID)
|
||||
hc.tp.SetObjectError(addr, apistatus.ObjectNotFound{})
|
||||
|
||||
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
|
||||
|
||||
deleteBucket(t, hc, bktName, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func TestDeleteObject(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
|
@ -358,6 +400,25 @@ func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version st
|
|||
return w.Header().Get(api.AmzVersionID), w.Header().Get(api.AmzDeleteMarker) != ""
|
||||
}
|
||||
|
||||
func deleteObjects(t *testing.T, tc *handlerContext, bktName string, objVersions [][2]string) *DeleteObjectsResponse {
|
||||
req := &DeleteObjectsRequest{}
|
||||
for _, version := range objVersions {
|
||||
req.Objects = append(req.Objects, ObjectIdentifier{
|
||||
ObjectName: version[0],
|
||||
VersionID: version[1],
|
||||
})
|
||||
}
|
||||
|
||||
w, r := prepareTestRequest(tc, bktName, "", req)
|
||||
r.Header.Set(api.ContentMD5, "")
|
||||
tc.Handler().DeleteMultipleObjectsHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
res := &DeleteObjectsResponse{}
|
||||
parseTestResponse(t, w, res)
|
||||
return res
|
||||
}
|
||||
|
||||
func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
|
||||
w, r := prepareTestRequest(tc, bktName, "", nil)
|
||||
tc.Handler().DeleteBucketHandler(w, r)
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -190,6 +191,11 @@ func createMultipartUploadBase(hc *handlerContext, bktName, objName string, encr
|
|||
}
|
||||
|
||||
func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) {
|
||||
w := completeMultipartUploadBase(hc, bktName, objName, uploadID, partsETags)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
}
|
||||
|
||||
func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string, partsETags []string) *httptest.ResponseRecorder {
|
||||
query := make(url.Values)
|
||||
query.Set(uploadIDQuery, uploadID)
|
||||
complete := &CompleteMultipartUpload{
|
||||
|
@ -204,7 +210,8 @@ func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID stri
|
|||
|
||||
w, r := prepareTestFullRequest(hc, bktName, objName, query, complete)
|
||||
hc.Handler().CompleteMultipartUploadHandler(w, r)
|
||||
assertStatus(hc.t, w, http.StatusOK)
|
||||
|
||||
return w
|
||||
}
|
||||
|
||||
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
|
||||
|
|
|
@ -88,7 +88,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
|
|||
h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
|
||||
addSSECHeaders(h, requestHeader)
|
||||
} else {
|
||||
h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
|
||||
h.Set(api.ContentLength, strconv.FormatUint(info.Size, 10))
|
||||
}
|
||||
|
||||
h.Set(api.ETag, info.HashSum)
|
||||
|
@ -163,13 +163,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
fullSize := info.Size
|
||||
if encryptionParams.Enabled() {
|
||||
if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
|
||||
if fullSize, err = strconv.ParseUint(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
|
||||
h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if params, err = fetchRangeHeader(r.Header, uint64(fullSize)); err != nil {
|
||||
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
|
||||
h.logAndSendError(w, "could not parse range header", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
@@ -268,7 +268,7 @@ func parseHTTPTime(data string) (*time.Time, error) {
 	return &result, nil
 }

-func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
+func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size uint64) {
 	w.Header().Set(api.AcceptRanges, "bytes")
 	w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
 	w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))
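A worked example of the headers this produces, using the RangeParams fields shown above (a sketch; writeRangeHeaders is package-private):

w := httptest.NewRecorder()
writeRangeHeaders(w, &layer.RangeParams{Start: 100, End: 199}, 500)
// Accept-Ranges:  bytes
// Content-Range:  bytes 100-199/500
// Content-Length: 100   (End - Start + 1)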
@ -55,16 +55,27 @@ func (hc *handlerContext) Context() context.Context {
|
|||
|
||||
type placementPolicyMock struct {
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
}
|
||||
|
||||
func (p *placementPolicyMock) Default() netmap.PlacementPolicy {
|
||||
func (p *placementPolicyMock) DefaultPlacementPolicy() netmap.PlacementPolicy {
|
||||
return p.defaultPolicy
|
||||
}
|
||||
|
||||
func (p *placementPolicyMock) Get(string) (netmap.PlacementPolicy, bool) {
|
||||
func (p *placementPolicyMock) PlacementPolicy(string) (netmap.PlacementPolicy, bool) {
|
||||
return netmap.PlacementPolicy{}, false
|
||||
}
|
||||
|
||||
func (p *placementPolicyMock) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
|
||||
result, ok := p.copiesNumbers[locationConstraint]
|
||||
return result, ok
|
||||
}
|
||||
|
||||
func (p *placementPolicyMock) DefaultCopiesNumbers() []uint32 {
|
||||
return p.defaultCopiesNumbers
|
||||
}
|
||||
|
||||
type xmlDecoderProviderMock struct{}
|
||||
|
||||
func (p *xmlDecoderProviderMock) NewCompleteMultipartDecoder(r io.Reader) *xml.Decoder {
|
||||
|
@ -179,7 +190,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
|
|||
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: objName,
|
||||
Size: int64(len(content)),
|
||||
Size: uint64(len(content)),
|
||||
Reader: bytes.NewReader(content),
|
||||
Header: header,
|
||||
})
|
||||
|
|
|
@ -13,8 +13,8 @@ import (
|
|||
|
||||
const sizeToDetectType = 512
|
||||
|
||||
func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
|
||||
end := uint64(maxSize)
|
||||
func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
|
||||
end := maxSize
|
||||
if sizeToDetectType < end {
|
||||
end = sizeToDetectType
|
||||
}
|
||||
|
|
|
@ -113,8 +113,8 @@ func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var btoken bearer.Token
|
||||
btoken.SetEACLTable(*eacl.NewTable())
|
||||
btoken := bearer.NewToken()
|
||||
btoken.SetEACLTable(eacl.NewTable())
|
||||
err = btoken.Sign(key.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
|
|
@ -216,6 +216,11 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
var size uint64
|
||||
if r.ContentLength > 0 {
|
||||
size = uint64(r.ContentLength)
|
||||
}
|
||||
|
||||
p := &layer.UploadPartParams{
|
||||
Info: &layer.UploadInfoParams{
|
||||
UploadID: uploadID,
|
||||
|
@ -223,10 +228,31 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
|||
Key: reqInfo.ObjectName,
|
||||
},
|
||||
PartNumber: partNumber,
|
||||
Size: r.ContentLength,
|
||||
Size: size,
|
||||
Reader: r.Body,
|
||||
}
|
||||
|
||||
if api.IsSignedStreamingV4(r) {
|
||||
if decodeContentSize := r.Header.Get(api.AmzDecodedContentLength); len(decodeContentSize) > 0 {
|
||||
_, err := strconv.Atoi(decodeContentSize)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "cannot parse decode content length information", reqInfo,
|
||||
errors.GetAPIError(errors.ErrMissingContentLength))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
h.logAndSendError(w, "expecting decode content length information", reqInfo,
|
||||
errors.GetAPIError(errors.ErrMissingContentLength))
|
||||
return
|
||||
}
|
||||
chunkReader, err := newSignV4ChunkedReader(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "cannot initialize chunk reader", reqInfo, err)
|
||||
return
|
||||
}
|
||||
p.Reader = chunkReader
|
||||
}
|
||||
|
||||
p.Info.Encryption, err = formEncryptionParams(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid sse headers", reqInfo, err)
|
||||
|
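For reference, the wire and decoded sizes in TestPutObjectWithStreamBodyAWSExample further down in this diff reconcile as follows (all figures appear in that test):

decoded payload:   65536 (0x10000) + 1024 (0x400) = 66560 bytes  (x-amz-decoded-content-length)
chunk framing:     size lines, "chunk-signature=" values and CRLFs = 264 bytes
wire body:         66560 + 264 = 66824 bytes  (Content-Length)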
@ -250,7 +276,8 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
|||
func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||
var (
|
||||
versionID string
|
||||
reqInfo = api.GetReqInfo(r.Context())
|
||||
ctx = r.Context()
|
||||
reqInfo = api.GetReqInfo(ctx)
|
||||
queryValues = reqInfo.URL.Query()
|
||||
uploadID = queryValues.Get(uploadIDHeaderName)
|
||||
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
||||
|
@ -292,7 +319,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
srcInfo, err := h.obj.GetObjectInfo(r.Context(), &layer.HeadObjectParams{
|
||||
srcInfo, err := h.obj.GetObjectInfo(ctx, &layer.HeadObjectParams{
|
||||
BktInfo: srcBktInfo,
|
||||
Object: srcObject,
|
||||
VersionID: versionID,
|
||||
|
@ -343,7 +370,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
info, err := h.obj.UploadPartCopy(r.Context(), p)
|
||||
info, err := h.obj.UploadPartCopy(ctx, p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
|
||||
return
|
||||
|
@ -440,7 +467,8 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
|||
}
|
||||
|
||||
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *api.ReqInfo) (*data.ObjectInfo, error) {
|
||||
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(r.Context(), c)
|
||||
ctx := r.Context()
|
||||
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not complete multipart upload: %w", err)
|
||||
}
|
||||
|
@ -456,17 +484,17 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
|
|||
TagSet: uploadData.TagSet,
|
||||
NodeVersion: extendedObjInfo.NodeVersion,
|
||||
}
|
||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||
return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(uploadData.ACLHeaders) != 0 {
|
||||
sessionTokenSetEACL, err := getSessionTokenSetEACL(r.Context())
|
||||
sessionTokenSetEACL, err := getSessionTokenSetEACL(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get eacl token: %w", err)
|
||||
}
|
||||
key, err := h.bearerTokenIssuerKey(r.Context())
|
||||
key, err := h.bearerTokenIssuerKey(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get gate key: %w", err)
|
||||
}
|
||||
|
@ -494,8 +522,8 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
|
|||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
return objInfo, nil
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -46,3 +47,17 @@ func TestPeriodicWriter(t *testing.T) {
|
|||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestMultipartUploadInvalidPart(t *testing.T) {
|
||||
hc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-to-upload-part", "object-multipart"
|
||||
createTestBucket(hc, bktName)
|
||||
partSize := 8 // less than min part size
|
||||
|
||||
multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
|
||||
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
|
||||
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
|
||||
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
|
||||
assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
|
||||
}
|
||||
|
|
|
@ -168,7 +168,7 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
|
|||
|
||||
box, err := layer.GetBoxData(ctx)
|
||||
if err == nil && box.Gate.BearerToken != nil {
|
||||
p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
|
||||
p.User = bearer.ResolveIssuer(box.Gate.BearerToken).EncodeToString()
|
||||
}
|
||||
|
||||
p.Time = layer.TimeNow(ctx)
|
||||
|
@ -202,7 +202,7 @@ func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.Notif
|
|||
return
|
||||
}
|
||||
} else {
|
||||
h.log.Warn("failed to send test event because notifications is disabled")
|
||||
h.reqLogger(ctx).Warn("failed to send test event because notifications is disabled")
|
||||
}
|
||||
|
||||
if q.ID == "" {
|
||||
|
|
|
@@ -43,13 +43,13 @@ func (p *postPolicy) condition(key string) *policyCondition {
 	return nil
 }

-func (p *postPolicy) CheckContentLength(size int64) bool {
+func (p *postPolicy) CheckContentLength(size uint64) bool {
 	if p.empty {
 		return true
 	}
 	for _, condition := range p.Conditions {
 		if condition.Matching == "content-length-range" {
-			length := strconv.FormatInt(size, 10)
+			length := strconv.FormatUint(size, 10)
 			return condition.Key <= length && length <= condition.Value
 		}
 	}
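CheckContentLength evaluates the standard S3 POST policy "content-length-range" condition against the upload size. A sketch of the policy document shape it is matched against (bucket name, key prefix and limits are illustrative, not taken from this diff):

// Sketch only: a POST policy containing a content-length-range condition.
policyJSON := `{
  "expiration": "2015-12-30T12:00:00.000Z",
  "conditions": [
    {"bucket": "examplebucket"},
    ["starts-with", "$key", "user/"],
    ["content-length-range", 1048576, 10485760]
  ]
}`
_ = policyJSON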
@ -179,7 +179,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
newEaclTable *eacl.Table
|
||||
sessionTokenEACL *session.Container
|
||||
containsACL = containsACLHeaders(r)
|
||||
reqInfo = api.GetReqInfo(r.Context())
|
||||
ctx = r.Context()
|
||||
reqInfo = api.GetReqInfo(ctx)
|
||||
)
|
||||
|
||||
if containsACL {
|
||||
|
@ -218,34 +219,60 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
var size uint64
|
||||
if r.ContentLength > 0 {
|
||||
size = uint64(r.ContentLength)
|
||||
}
|
||||
|
||||
params := &layer.PutObjectParams{
|
||||
BktInfo: bktInfo,
|
||||
Object: reqInfo.ObjectName,
|
||||
Reader: r.Body,
|
||||
Size: r.ContentLength,
|
||||
Size: size,
|
||||
Header: metadata,
|
||||
Encryption: encryptionParams,
|
||||
}
|
||||
|
||||
if api.IsSignedStreamingV4(r) {
|
||||
if decodeContentSize := r.Header.Get(api.AmzDecodedContentLength); len(decodeContentSize) > 0 {
|
||||
_, err := strconv.Atoi(decodeContentSize)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "cannot parse decode content length information", reqInfo,
|
||||
errors.GetAPIError(errors.ErrMissingContentLength))
|
||||
return
|
||||
}
|
||||
} else {
|
||||
h.logAndSendError(w, "expecting decode content length information", reqInfo,
|
||||
errors.GetAPIError(errors.ErrMissingContentLength))
|
||||
return
|
||||
}
|
||||
chunkReader, err := newSignV4ChunkedReader(r)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "cannot initialize chunk reader", reqInfo, err)
|
||||
return
|
||||
}
|
||||
params.Reader = chunkReader
|
||||
}
|
||||
|
||||
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, bktInfo.LocationConstraint)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
||||
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
params.Lock, err = formObjectLock(r.Context(), bktInfo, settings.LockConfiguration, r.Header)
|
||||
params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not form object lock", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
|
||||
extendedObjInfo, err := h.obj.PutObject(ctx, params)
|
||||
if err != nil {
|
||||
_, err2 := io.Copy(io.Discard, r.Body)
|
||||
err3 := r.Body.Close()
|
||||
|
@ -260,8 +287,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
|||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
if containsACL {
|
||||
|
@ -360,7 +387,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
newEaclTable *eacl.Table
|
||||
tagSet map[string]string
|
||||
sessionTokenEACL *session.Container
|
||||
reqInfo = api.GetReqInfo(r.Context())
|
||||
ctx = r.Context()
|
||||
reqInfo = api.GetReqInfo(ctx)
|
||||
metadata = make(map[string]string)
|
||||
containsACL = containsACLHeaders(r)
|
||||
)
|
||||
|
@ -381,17 +409,17 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if containsACL {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(r.Context()); err != nil {
|
||||
if sessionTokenEACL, err = getSessionTokenSetEACL(ctx); err != nil {
|
||||
h.logAndSendError(w, "could not get eacl session token from a box", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var contentReader io.Reader
|
||||
var size int64
|
||||
var size uint64
|
||||
if content, ok := r.MultipartForm.Value["file"]; ok {
|
||||
contentReader = bytes.NewBufferString(content[0])
|
||||
size = int64(len(content[0]))
|
||||
size = uint64(len(content[0]))
|
||||
} else {
|
||||
file, head, err := r.FormFile("file")
|
||||
if err != nil {
|
||||
|
@ -399,7 +427,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
contentReader = file
|
||||
size = head.Size
|
||||
size = uint64(head.Size)
|
||||
reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
|
||||
}
|
||||
if !policy.CheckContentLength(size) {
|
||||
|
@ -407,7 +435,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
||||
bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||
return
|
||||
|
@ -421,7 +449,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
Header: metadata,
|
||||
}
|
||||
|
||||
extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
|
||||
extendedObjInfo, err := h.obj.PutObject(ctx, params)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not upload object", reqInfo, err)
|
||||
return
|
||||
|
@ -434,8 +462,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
if acl := auth.MultipartFormValue(r, "acl"); acl != "" {
|
||||
|
@ -460,7 +488,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
NodeVersion: extendedObjInfo.NodeVersion,
|
||||
}
|
||||
|
||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
||||
if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
|
||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
@ -473,14 +501,14 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
|||
SessionToken: sessionTokenEACL,
|
||||
}
|
||||
|
||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
||||
if err = h.obj.PutBucketACL(ctx, p); err != nil {
|
||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo); err != nil {
|
||||
h.log.Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
|
||||
if settings, err := h.obj.GetBucketSettings(ctx, bktInfo); err != nil {
|
||||
h.reqLogger(ctx).Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
|
||||
} else if settings.VersioningEnabled() {
|
||||
w.Header().Set(api.AmzVersionID, objInfo.VersionID())
|
||||
}
|
||||
|
@ -652,7 +680,8 @@ func parseMetadata(r *http.Request) map[string]string {
|
|||
}
|
||||
|
||||
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
p := &layer.CreateBucketParams{
|
||||
Name: reqInfo.BucketName,
|
||||
}
|
||||
|
@ -662,7 +691,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
key, err := h.bearerTokenIssuerKey(r.Context())
|
||||
key, err := h.bearerTokenIssuerKey(ctx)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "couldn't get bearer token signature key", reqInfo, err)
|
||||
return
|
||||
|
@ -688,7 +717,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
var policies []*accessbox.ContainerPolicy
|
||||
boxData, err := layer.GetBoxData(r.Context())
|
||||
boxData, err := layer.GetBoxData(ctx)
|
||||
if err == nil {
|
||||
policies = boxData.Policies
|
||||
p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
|
||||
|
@ -712,21 +741,20 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
p.ObjectLockEnabled = isLockEnabled(r.Header)
|
||||
|
||||
bktInfo, err := h.obj.CreateBucket(r.Context(), p)
|
||||
bktInfo, err := h.obj.CreateBucket(ctx, p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not create bucket", reqInfo, err)
|
||||
return
|
||||
}
|
||||
|
||||
h.log.Info("bucket is created", zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", reqInfo.BucketName), zap.Stringer("container_id", bktInfo.CID))
|
||||
h.reqLogger(ctx).Info("bucket is created", zap.Stringer("container_id", bktInfo.CID))
|
||||
|
||||
if p.ObjectLockEnabled {
|
||||
sp := &layer.PutSettingsParams{
|
||||
BktInfo: bktInfo,
|
||||
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
|
||||
}
|
||||
if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
|
||||
if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
|
||||
h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
|
||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||
return
|
||||
|
@ -737,7 +765,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) error {
|
||||
prm.Policy = h.cfg.Policy.Default()
|
||||
prm.Policy = h.cfg.Policy.DefaultPlacementPolicy()
|
||||
prm.LocationConstraint = locationConstraint
|
||||
|
||||
if locationConstraint == "" {
|
||||
|
@ -751,7 +779,7 @@ func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint str
|
|||
}
|
||||
}
|
||||
|
||||
if policy, ok := h.cfg.Policy.Get(locationConstraint); ok {
|
||||
if policy, ok := h.cfg.Policy.PlacementPolicy(locationConstraint); ok {
|
||||
prm.Policy = policy
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,15 +1,25 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -126,3 +136,106 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber])
|
||||
}
|
||||
|
||||
func TestPutObjectWithNegativeContentLength(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(tc, bktName)
|
||||
|
||||
content := []byte("content")
|
||||
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||
r.ContentLength = -1
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
w, r = prepareTestRequest(tc, bktName, objName, nil)
|
||||
tc.Handler().HeadObjectHandler(w, r)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
|
||||
}
|
||||
|
||||
func TestPutObjectWithStreamBodyError(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "bucket-for-put", "object-for-put"
|
||||
createTestBucket(tc, bktName)
|
||||
|
||||
content := []byte("content")
|
||||
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||
r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
|
||||
r.Header.Set(api.ContentEncoding, api.AwsChunked)
|
||||
tc.Handler().PutObjectHandler(w, r)
|
||||
assertS3Error(t, w, errors.GetAPIError(errors.ErrMissingContentLength))
|
||||
|
||||
checkNotFound(t, tc, bktName, objName, emptyVersion)
|
||||
}
|
||||
|
||||
func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
|
||||
tc := prepareHandlerContext(t)
|
||||
|
||||
bktName, objName := "examplebucket", "chunkObject.txt"
|
||||
createTestBucket(tc, bktName)
|
||||
|
||||
chunk := make([]byte, 65*1024)
|
||||
for i := range chunk {
|
||||
chunk[i] = 'a'
|
||||
}
|
||||
chunk1 := chunk[:64*1024]
|
||||
chunk2 := chunk[64*1024:]
|
||||
|
||||
AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
|
||||
AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
||||
|
||||
awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
|
||||
signer := v4.NewSigner(awsCreds)
|
||||
|
||||
reqBody := bytes.NewBufferString("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
|
||||
_, err := reqBody.Write(chunk1)
|
||||
require.NoError(t, err)
|
||||
_, err = reqBody.WriteString("\r\n400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n")
|
||||
require.NoError(t, err)
|
||||
_, err = reqBody.Write(chunk2)
|
||||
require.NoError(t, err)
|
||||
_, err = reqBody.WriteString("\r\n0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n")
|
||||
require.NoError(t, err)
|
||||
|
||||
req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
|
||||
require.NoError(t, err)
|
||||
req.Header.Set("content-encoding", "aws-chunked")
|
||||
req.Header.Set("content-length", "66824")
|
||||
req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
|
||||
req.Header.Set("x-amz-decoded-content-length", "66560")
|
||||
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
|
||||
|
||||
signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = signer.Sign(req, nil, "s3", "us-east-1", signTime)
|
||||
require.NoError(t, err)
|
||||
|
||||
req.Body = io.NopCloser(reqBody)
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
reqInfo := api.NewReqInfo(w, req, api.ObjectRequest{Bucket: bktName, Object: objName})
|
||||
req = req.WithContext(api.SetReqInfo(tc.Context(), reqInfo))
|
||||
req = req.WithContext(context.WithValue(req.Context(), api.ClientTime, signTime))
|
||||
req = req.WithContext(context.WithValue(req.Context(), api.AuthHeaders, &auth.AuthHeader{
|
||||
AccessKeyID: AWSAccessKeyID,
|
||||
SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
|
||||
Service: "s3",
|
||||
Region: "us-east-1",
|
||||
}))
|
||||
req = req.WithContext(context.WithValue(req.Context(), api.BoxData, &accessbox.Box{
|
||||
Gate: &accessbox.GateData{
|
||||
AccessKey: AWSSecretAccessKey,
|
||||
},
|
||||
}))
|
||||
tc.Handler().PutObjectHandler(w, req)
|
||||
assertStatus(t, w, http.StatusOK)
|
||||
|
||||
data := getObjectRange(t, tc, bktName, objName, 0, 66824)
|
||||
for i := range chunk {
|
||||
require.Equal(t, chunk[i], data[i])
|
||||
}
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ type Object struct {
|
|||
Key string
|
||||
LastModified string // time string of format "2006-01-02T15:04:05.000Z"
|
||||
ETag string `xml:"ETag,omitempty"`
|
||||
Size int64
|
||||
Size uint64
|
||||
|
||||
// Owner of the object.
|
||||
Owner *Owner `xml:"Owner,omitempty"`
|
||||
|
@ -120,7 +120,7 @@ type ObjectVersionResponse struct {
|
|||
Key string `xml:"Key"`
|
||||
LastModified string `xml:"LastModified"`
|
||||
Owner Owner `xml:"Owner"`
|
||||
Size int64 `xml:"Size"`
|
||||
Size uint64 `xml:"Size"`
|
||||
StorageClass string `xml:"StorageClass,omitempty"` // is empty!!
|
||||
VersionID string `xml:"VersionId"`
|
||||
}
|
||||
|
|
224
api/handler/s3reader.go
Normal file
|
@ -0,0 +1,224 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
|
||||
errs "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
chunkSignatureHeader = "chunk-signature="
|
||||
maxChunkSize = 16 << 20
|
||||
)
|
||||
|
||||
type (
|
||||
s3ChunkReader struct {
|
||||
reader *bufio.Reader
|
||||
streamSigner *v4.StreamSigner
|
||||
|
||||
requestTime time.Time
|
||||
buffer []byte
|
||||
offset int
|
||||
err error
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
errGiantChunk = errors.New("chunk too big: choose chunk size <= 16MiB")
|
||||
errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
|
||||
)
|
||||
|
||||
func (c *s3ChunkReader) Close() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
|
||||
if c.offset > 0 {
|
||||
num = copy(buf, c.buffer[c.offset:])
|
||||
if num == len(buf) {
|
||||
c.offset += num
|
||||
return num, nil
|
||||
}
|
||||
c.offset = 0
|
||||
buf = buf[num:]
|
||||
}
|
||||
|
||||
var size int
|
||||
for {
|
||||
b, err := c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b == ';' { // separating character
|
||||
break
|
||||
}
|
||||
|
||||
// Manually deserialize the size since AWS specified
|
||||
// the chunk size to be of variable width. In particular,
|
||||
// a size of 16 is encoded as `10` while a size of 64 KB
|
||||
// is `10000`.
|
||||
switch {
|
||||
case b >= '0' && b <= '9':
|
||||
size = size<<4 | int(b-'0')
|
||||
case b >= 'a' && b <= 'f':
|
||||
size = size<<4 | int(b-('a'-10))
|
||||
case b >= 'A' && b <= 'F':
|
||||
size = size<<4 | int(b-('A'-10))
|
||||
default:
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
if size > maxChunkSize {
|
||||
c.err = errGiantChunk
|
||||
return num, c.err
|
||||
}
|
||||
}
|
||||
|
||||
// Now, we read the signature of the following payload and expect:
|
||||
// chunk-signature=" + <signature-as-hex> + "\r\n"
|
||||
//
|
||||
// The signature is 64 bytes long (hex-encoded SHA256 hash) and
|
||||
// starts with a 16 byte header: len("chunk-signature=") + 64 == 80.
|
||||
var signature [80]byte
|
||||
_, err = io.ReadFull(c.reader, signature[:])
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if !bytes.HasPrefix(signature[:], []byte(chunkSignatureHeader)) {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
b, err := c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b != '\r' {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b != '\n' {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
if cap(c.buffer) < size {
|
||||
c.buffer = make([]byte, size)
|
||||
} else {
|
||||
c.buffer = c.buffer[:size]
|
||||
}
|
||||
|
||||
// Now, we read the payload and compute its SHA-256 hash.
|
||||
_, err = io.ReadFull(c.reader, c.buffer)
|
||||
if err == io.EOF && size != 0 {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if b != '\r' || err != nil {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
b, err = c.reader.ReadByte()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if b != '\n' {
|
||||
c.err = errMalformedChunkedEncoding
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
// Once we have read the entire chunk successfully, we verify
|
||||
// that the received signature matches our computed signature.
|
||||
|
||||
calculatedSignature, err := c.streamSigner.GetSignature(nil, c.buffer, c.requestTime)
|
||||
if err != nil {
|
||||
c.err = err
|
||||
return num, c.err
|
||||
}
|
||||
if string(signature[16:]) != hex.EncodeToString(calculatedSignature) {
|
||||
c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
// If the chunk size is zero we return io.EOF. As specified by AWS,
|
||||
// only the last chunk is zero-sized.
|
||||
if size == 0 {
|
||||
c.err = io.EOF
|
||||
return num, c.err
|
||||
}
|
||||
|
||||
c.offset = copy(buf, c.buffer)
|
||||
num += c.offset
|
||||
return num, err
|
||||
}
|
||||
|
||||
func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
|
||||
// Expecting to refactor this in future:
|
||||
// https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/issues/137
|
||||
box, ok := req.Context().Value(api.BoxData).(*accessbox.Box)
|
||||
if !ok {
|
||||
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
|
||||
}
|
||||
|
||||
authHeaders, ok := req.Context().Value(api.AuthHeaders).(*auth.AuthHeader)
|
||||
if !ok {
|
||||
return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)
|
||||
}
|
||||
|
||||
currentCredentials := credentials.NewStaticCredentials(authHeaders.AccessKeyID, box.Gate.AccessKey, "")
|
||||
seed, err := hex.DecodeString(authHeaders.SignatureV4)
|
||||
if err != nil {
|
||||
return nil, errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
|
||||
}
|
||||
|
||||
reqTime, ok := req.Context().Value(api.ClientTime).(time.Time)
|
||||
if !ok {
|
||||
return nil, errs.GetAPIError(errs.ErrMalformedDate)
|
||||
}
|
||||
newStreamSigner := v4.NewStreamSigner(authHeaders.Region, "s3", seed, currentCredentials)
|
||||
|
||||
return &s3ChunkReader{
|
||||
reader: bufio.NewReader(req.Body),
|
||||
streamSigner: newStreamSigner,
|
||||
requestTime: reqTime,
|
||||
buffer: make([]byte, 64*1024),
|
||||
}, nil
|
||||
}
|
|
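For context, the body format this reader consumes is the aws-chunked framing shown in TestPutObjectWithStreamBodyAWSExample earlier in this diff. A sketch of such a body, with the signatures truncated for readability:

// Sketch of an aws-chunked request body; chunk1 carries 64 KiB and chunk2
// carries 1 KiB of payload, signatures shortened here.
body := "10000;chunk-signature=ad80c730...\r\n" + // 0x10000 = 65536 payload bytes follow
	string(chunk1) + "\r\n" +
	"400;chunk-signature=0055627c...\r\n" + // 0x400 = 1024 payload bytes follow
	string(chunk2) + "\r\n" +
	"0;chunk-signature=b6c6ea8a...\r\n\r\n" // zero-sized final chunk terminates the stream
_ = body

Each chunk signature is verified against v4.StreamSigner.GetSignature, seeded with the request's header signature, before the payload bytes are handed to the caller.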
@ -24,7 +24,8 @@ const (
|
|||
)
|
||||
|
||||
func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
|
||||
tagSet, err := readTagSet(r.Body)
|
||||
if err != nil {
|
||||
|
@ -46,7 +47,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
|||
},
|
||||
TagSet: tagSet,
|
||||
}
|
||||
nodeVersion, err := h.obj.PutObjectTagging(r.Context(), tagPrm)
|
||||
nodeVersion, err := h.obj.PutObjectTagging(ctx, tagPrm)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not put object tagging", reqInfo, err)
|
||||
return
|
||||
|
@ -63,8 +64,8 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
|||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
@ -108,7 +109,8 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
|||
}
|
||||
|
||||
func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
reqInfo := api.GetReqInfo(r.Context())
|
||||
ctx := r.Context()
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
|
||||
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
|
@ -122,7 +124,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
|
|||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||
}
|
||||
|
||||
nodeVersion, err := h.obj.DeleteObjectTagging(r.Context(), p)
|
||||
nodeVersion, err := h.obj.DeleteObjectTagging(ctx, p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
|
||||
return
|
||||
|
@ -139,8 +141,8 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
|
|||
BktInfo: bktInfo,
|
||||
ReqInfo: reqInfo,
|
||||
}
|
||||
if err = h.sendNotifications(r.Context(), s); err != nil {
|
||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||
if err = h.sendNotifications(ctx, s); err != nil {
|
||||
h.reqLogger(ctx).Error("couldn't send notification: %w", zap.Error(err))
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
|
|
@@ -2,19 +2,28 @@ package handler

 import (
 	"context"
-	errorsStd "errors"
+	"errors"
 	"net/http"
 	"strconv"
 	"strings"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"go.uber.org/zap"
 )

+func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
+	reqLogger := api.GetReqLog(ctx)
+	if reqLogger != nil {
+		return reqLogger
+	}
+	return h.log
+}
+
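The reqLogger helper prefers the request-scoped logger stored in the context (read back via api.GetReqLog, presumably set by the request-info middleware) and falls back to the handler-wide logger, so request-scoped fields ride along without being re-added at each call site. A typical call site from this diff:

h.reqLogger(ctx).Warn("get bucket cors", zap.Error(err))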
func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
|
||||
code := api.WriteErrorResponse(w, reqInfo, transformToS3Error(err))
|
||||
fields := []zap.Field{
|
||||
|
@ -26,7 +35,7 @@ func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo
|
|||
zap.String("description", logText),
|
||||
zap.Error(err)}
|
||||
fields = append(fields, additional...)
|
||||
h.log.Error("call method", fields...)
|
||||
h.log.Error("reqeust failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
|
||||
}
|
||||
|
||||
func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string, reqInfo *api.ReqInfo, err error, additional ...zap.Field) {
|
||||
|
@@ -39,20 +48,25 @@ func (h *handler) logAndSendErrorNoHeader(w http.ResponseWriter, logText string,
 		zap.String("description", logText),
 		zap.Error(err)}
 	fields = append(fields, additional...)
-	h.log.Error("call method", fields...)
+	h.log.Error("reqeust failed", fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
 }

 func transformToS3Error(err error) error {
-	if _, ok := err.(errors.Error); ok {
+	err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
+	if _, ok := err.(s3errors.Error); ok {
 		return err
 	}

-	if errorsStd.Is(err, layer.ErrAccessDenied) ||
-		errorsStd.Is(err, layer.ErrNodeAccessDenied) {
-		return errors.GetAPIError(errors.ErrAccessDenied)
+	if errors.Is(err, layer.ErrAccessDenied) ||
+		errors.Is(err, layer.ErrNodeAccessDenied) {
+		return s3errors.GetAPIError(s3errors.ErrAccessDenied)
 	}

-	return errors.GetAPIError(errors.ErrInternalError)
+	if errors.Is(err, layer.ErrGatewayTimeout) {
+		return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
+	}
+
+	return s3errors.GetAPIError(s3errors.ErrInternalError)
 }

 func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
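frosterrors.UnwrapErr (imported above as internal/frostfs/errors) is presumably an unwrap-to-the-root helper, so a wrapped s3errors.Error is still recognized by the type assertion; as the inline comment notes, a single-chain unwrap does not cover multi-errors produced by errors.Join. A minimal sketch of such a helper, under that assumption (the real implementation is not part of this diff):

// Sketch only: unwrap a wrapped error chain down to its innermost cause.
func unwrapErr(err error) error {
	for {
		inner := errors.Unwrap(err)
		if inner == nil {
			return err
		}
		err = inner
	}
}

The TestTransformS3Errors cases added in util_test.go below exercise exactly the wrapped and unwrapped variants of these mappings.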
@ -87,26 +101,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
|
|||
prefix := "bytes="
|
||||
|
||||
if !strings.HasPrefix(s, prefix) {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
|
||||
}
|
||||
|
||||
s = strings.TrimPrefix(s, prefix)
|
||||
|
||||
valuesStr := strings.Split(s, "-")
|
||||
if len(valuesStr) != 2 {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
|
||||
}
|
||||
|
||||
values := make([]uint64, 0, len(valuesStr))
|
||||
for _, v := range valuesStr {
|
||||
num, err := strconv.ParseUint(v, 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
|
||||
}
|
||||
values = append(values, num)
|
||||
}
|
||||
if values[0] > values[1] {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
|
||||
}
|
||||
|
||||
return &layer.RangeParams{
|
||||
|
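Worked examples for parseRange, derived from the code above:

// "bytes=0-499"    -> &layer.RangeParams{Start: 0, End: 499}
// "bytes=500-999"  -> &layer.RangeParams{Start: 500, End: 999}
// "bytes=500-100"  -> ErrInvalidRange (start greater than end)
// "0-499"          -> ErrInvalidRange (missing "bytes=" prefix)
// "bytes=-500"     -> ErrInvalidRange (ParseUint fails on the empty first value)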
@ -122,7 +136,7 @@ func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
|
|||
}
|
||||
sessionToken := boxData.Gate.SessionTokenForSetEACL()
|
||||
if sessionToken == nil {
|
||||
return nil, errors.GetAPIError(errors.ErrAccessDenied)
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrAccessDenied)
|
||||
}
|
||||
|
||||
return sessionToken, nil
|
||||
|
|
64
api/handler/util_test.go
Normal file
|
@ -0,0 +1,64 @@
|
|||
package handler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTransformS3Errors(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
err error
|
||||
expected s3errors.ErrorCode
|
||||
}{
|
||||
{
|
||||
name: "simple std error to internal error",
|
||||
err: errors.New("some error"),
|
||||
expected: s3errors.ErrInternalError,
|
||||
},
|
||||
{
|
||||
name: "layer access denied error to s3 access denied error",
|
||||
err: layer.ErrAccessDenied,
|
||||
expected: s3errors.ErrAccessDenied,
|
||||
},
|
||||
{
|
||||
name: "wrapped layer access denied error to s3 access denied error",
|
||||
err: fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
|
||||
expected: s3errors.ErrAccessDenied,
|
||||
},
|
||||
{
|
||||
name: "layer node access denied error to s3 access denied error",
|
||||
err: layer.ErrNodeAccessDenied,
|
||||
expected: s3errors.ErrAccessDenied,
|
||||
},
|
||||
{
|
||||
name: "layer gateway timeout error to s3 gateway timeout error",
|
||||
err: layer.ErrGatewayTimeout,
|
||||
expected: s3errors.ErrGatewayTimeout,
|
||||
},
|
||||
{
|
||||
name: "s3 error to s3 error",
|
||||
err: s3errors.GetAPIError(s3errors.ErrInvalidPart),
|
||||
expected: s3errors.ErrInvalidPart,
|
||||
},
|
||||
{
|
||||
name: "wrapped s3 error to s3 error",
|
||||
err: fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
|
||||
expected: s3errors.ErrInvalidPart,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
err := transformToS3Error(tc.err)
|
||||
s3err, ok := err.(s3errors.Error)
|
||||
require.True(t, ok, "error must be s3 error")
|
||||
require.Equalf(t, tc.expected, s3err.ErrCode,
|
||||
"expected: '%s', got: '%s'",
|
||||
s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,5 +1,7 @@
package api

import "net/http"

// Standard S3 HTTP request/response constants.
const (
	MetadataPrefix = "X-Amz-Meta-"

@@ -39,11 +41,13 @@ const (
	IfMatch     = "If-Match"
	IfNoneMatch = "If-None-Match"

	AmzContentSha256         = "X-Amz-Content-Sha256"
	AmzCopyIfModifiedSince   = "X-Amz-Copy-Source-If-Modified-Since"
	AmzCopyIfUnmodifiedSince = "X-Amz-Copy-Source-If-Unmodified-Since"
	AmzCopyIfMatch           = "X-Amz-Copy-Source-If-Match"
	AmzCopyIfNoneMatch       = "X-Amz-Copy-Source-If-None-Match"
	AmzACL                   = "X-Amz-Acl"
	AmzDecodedContentLength  = "X-Amz-Decoded-Content-Length"
	AmzGrantFullControl      = "X-Amz-Grant-Full-Control"
	AmzGrantRead             = "X-Amz-Grant-Read"
	AmzGrantWrite            = "X-Amz-Grant-Write"

@@ -78,9 +82,13 @@ const (
	AccessControlRequestMethod  = "Access-Control-Request-Method"
	AccessControlRequestHeaders = "Access-Control-Request-Headers"

	AwsChunked = "aws-chunked"

	Vary = "Vary"

	DefaultLocationConstraint = "default"

	StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
)

// S3 request query params.

@@ -107,3 +115,12 @@ var SystemMetadata = map[string]struct{}{
	LastModified: {},
	ETag:         {},
}

func IsSignedStreamingV4(r *http.Request) bool {
	// The Content-Encoding must have "aws-chunked" as part of its value.
	// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
	// Minio does not set this value, thus for compatibility reasons
	// we do not check it.
	return r.Header.Get(AmzContentSha256) == StreamingContentSHA256 &&
		r.Method == http.MethodPut
}
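A small hedged usage sketch of the new IsSignedStreamingV4 helper and the X-Amz-Decoded-Content-Length constant added above; the helper name below is hypothetical and not part of the gateway.

package api

import "net/http"

// detectPayloadKind is a hypothetical helper showing when a PutObject-style
// handler would switch to the aws-chunked (SigV4 streaming) body reader.
func detectPayloadKind(r *http.Request) string {
	if IsSignedStreamingV4(r) {
		// The body arrives as SigV4-signed chunks; the declared plaintext
		// length is carried in X-Amz-Decoded-Content-Length.
		return "streaming-sigv4: decoded length " + r.Header.Get(AmzDecodedContentLength)
	}
	return "plain payload"
}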
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
"strconv"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
containergrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
|
@ -34,8 +34,7 @@ func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketIn
|
|||
var (
|
||||
err error
|
||||
res *container.Container
|
||||
rid = api.GetRequestID(ctx)
|
||||
log = n.log.With(zap.Stringer("cid", idCnr), zap.String("request_id", rid))
|
||||
log = n.reqLogger(ctx).With(zap.Stringer("cid", idCnr))
|
||||
|
||||
info = &data.BucketInfo{
|
||||
CID: idCnr,
|
||||
|
@ -83,13 +82,10 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
|
|||
err error
|
||||
own = n.Owner(ctx)
|
||||
res []cid.ID
|
||||
rid = api.GetRequestID(ctx)
|
||||
)
|
||||
res, err = n.frostFS.UserContainers(ctx, own)
|
||||
if err != nil {
|
||||
n.log.Error("could not list user containers",
|
||||
zap.String("request_id", rid),
|
||||
zap.Error(err))
|
||||
n.reqLogger(ctx).Error("could not list user containers", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@ -97,9 +93,7 @@ func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
|
|||
for i := range res {
|
||||
info, err := n.containerInfo(ctx, res[i])
|
||||
if err != nil {
|
||||
n.log.Error("could not fetch container info",
|
||||
zap.String("request_id", rid),
|
||||
zap.Error(err))
|
||||
n.reqLogger(ctx).Error("could not fetch container info", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -116,7 +110,7 @@ func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
|
|||
}
|
||||
bktInfo := &data.BucketInfo{
|
||||
Name: p.Name,
|
||||
Zone: v2container.SysAttributeZoneDefault,
|
||||
Zone: containergrpc.SysAttributeZoneDefault,
|
||||
Owner: ownerID,
|
||||
Created: TimeNow(ctx),
|
||||
LocationConstraint: p.LocationConstraint,
|
||||
|
|
|
@ -45,7 +45,7 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
|
|||
CopiesNumber: p.CopiesNumbers,
|
||||
}
|
||||
|
||||
objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("put system object: %w", err)
|
||||
}
|
||||
|
@ -58,9 +58,8 @@ func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
|
|||
|
||||
if !objIDToDeleteNotFound {
|
||||
if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
|
||||
n.log.Error("couldn't delete cors object", zap.Error(err),
|
||||
n.reqLogger(ctx).Error("couldn't delete cors object", zap.Error(err),
|
||||
zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
|
||||
zap.String("bucket name", p.BktInfo.Name),
|
||||
zap.String("objID", objIDToDelete.EncodeToString()))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -128,8 +128,29 @@ type PrmObjectDelete struct {
|
|||
Object oid.ID
|
||||
}
|
||||
|
||||
// ErrAccessDenied is returned from FrostFS in case of access violation.
|
||||
var ErrAccessDenied = errors.New("access denied")
|
||||
// PrmObjectSearch groups parameters of FrostFS.SearchObjects operation.
|
||||
type PrmObjectSearch struct {
|
||||
// Authentication parameters.
|
||||
PrmAuth
|
||||
|
||||
// Container to select the objects from.
|
||||
Container cid.ID
|
||||
|
||||
// Key-value object attribute which should be
|
||||
// presented in selected objects. Optional, empty key means any.
|
||||
ExactAttribute [2]string
|
||||
|
||||
// File prefix of the selected objects. Optional, empty value means any.
|
||||
FilePrefix string
|
||||
}
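SearchObjects (declared further down in this interface) consumes these parameters. A hedged caller sketch; the helper name and variables are hypothetical, only the struct fields and the SearchObjects signature come from this diff.

package layer

import (
	"context"

	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// listByPrefixSketch is illustrative only: it fills PrmObjectSearch the way a
// layer method could before calling FrostFS.SearchObjects.
func listByPrefixSketch(ctx context.Context, frostFS FrostFS, cnr cid.ID, prefix string) ([]oid.ID, error) {
	prm := PrmObjectSearch{
		Container:  cnr,    // container to select the objects from
		FilePrefix: prefix, // empty value means "any object"
	}
	// PrmAuth (embedded) is expected to be filled by the caller, e.g. via
	// prepareAuthParameters, before the request is sent.
	return frostFS.SearchObjects(ctx, prm)
}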
|
||||
|
||||
var (
|
||||
// ErrAccessDenied is returned from FrostFS in case of access violation.
|
||||
ErrAccessDenied = errors.New("access denied")
|
||||
|
||||
// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
|
||||
ErrGatewayTimeout = errors.New("gateway timeout")
|
||||
)
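ErrGatewayTimeout is matched with errors.Is by transformToS3Error in the handler layer. One plausible way a FrostFS client wrapper could surface it, stated as an assumption: the actual wrapper is not part of this fragment.

package layer

import (
	"context"
	"errors"
	"fmt"
)

// wrapFrostFSError is a hypothetical helper: it tags deadline/cancellation
// failures so callers can match them with errors.Is(err, ErrGatewayTimeout).
func wrapFrostFSError(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
		return fmt.Errorf("%w: %v", ErrGatewayTimeout, err)
	}
	return err
}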
|
||||
|
||||
// FrostFS represents virtual connection to FrostFS network.
|
||||
type FrostFS interface {
|
||||
|
@ -210,6 +231,15 @@ type FrostFS interface {
|
|||
// It returns any error encountered which prevented the removal request from being sent.
|
||||
DeleteObject(context.Context, PrmObjectDelete) error
|
||||
|
||||
// SearchObjects performs object search from the NeoFS container according
|
||||
// to the specified parameters. It searches user's objects only.
|
||||
//
|
||||
// It returns ErrAccessDenied on selection access violation.
|
||||
//
|
||||
// It returns exactly one non-nil value. It returns any error encountered which
|
||||
// prevented the objects from being selected.
|
||||
SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)
|
||||
|
||||
// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
|
||||
// Note:
|
||||
// * future time must be after the now
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
"io"
|
||||
"time"
|
||||
|
||||
objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
|
||||
lockv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/lock/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
|
@ -29,6 +29,7 @@ type TestFrostFS struct {
|
|||
FrostFS
|
||||
|
||||
objects map[string]*object.Object
|
||||
objectErrors map[string]error
|
||||
containers map[string]*container.Container
|
||||
eaclTables map[string]*eacl.Table
|
||||
currentEpoch uint64
|
||||
|
@ -37,6 +38,7 @@ type TestFrostFS struct {
|
|||
func NewTestFrostFS() *TestFrostFS {
|
||||
return &TestFrostFS{
|
||||
objects: make(map[string]*object.Object),
|
||||
objectErrors: make(map[string]error),
|
||||
containers: make(map[string]*container.Container),
|
||||
eaclTables: make(map[string]*eacl.Table),
|
||||
}
|
||||
|
@ -46,6 +48,14 @@ func (t *TestFrostFS) CurrentEpoch() uint64 {
|
|||
return t.currentEpoch
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) SetObjectError(addr oid.Address, err error) {
|
||||
if err == nil {
|
||||
delete(t.objectErrors, addr.EncodeToString())
|
||||
} else {
|
||||
t.objectErrors[addr.EncodeToString()] = err
|
||||
}
|
||||
}
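SetObjectError above lets tests inject per-address failures that ReadObject and DeleteObject will return. A hedged test-helper sketch; the helper itself is illustrative and not part of the diff.

package layer

import (
	"testing"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// withInjectedObjectError makes every ReadObject/DeleteObject for addr fail with
// err while fn runs, then clears the injection (SetObjectError(addr, nil)).
func withInjectedObjectError(t *testing.T, mock *TestFrostFS, addr oid.Address, err error, fn func()) {
	t.Helper()
	mock.SetObjectError(addr, err)
	defer mock.SetObjectError(addr, nil)
	fn()
}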
|
||||
|
||||
func (t *TestFrostFS) Objects() []*object.Object {
|
||||
res := make([]*object.Object, 0, len(t.objects))
|
||||
|
||||
|
@ -81,7 +91,7 @@ func (t *TestFrostFS) ContainerID(name string) (cid.ID, error) {
|
|||
}
|
||||
|
||||
func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (cid.ID, error) {
|
||||
var cnr container.Container
|
||||
cnr.Init()
|
||||
cnr.SetOwner(prm.Creator)
|
||||
cnr.SetPlacementPolicy(prm.Policy)
|
||||
|
@ -91,14 +101,14 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
|
|||
if creationTime.IsZero() {
|
||||
creationTime = time.Now()
|
||||
}
|
||||
container.SetCreationTime(&cnr, creationTime)
|
||||
|
||||
if prm.Name != "" {
|
||||
var d container.Domain
|
||||
d.SetName(prm.Name)
|
||||
|
||||
container.WriteDomain(&cnr, d)
container.SetName(&cnr, prm.Name)
|
||||
}
|
||||
|
||||
for i := range prm.AdditionalAttributes {
|
||||
|
@ -153,6 +163,10 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
|
|||
|
||||
sAddr := addr.EncodeToString()
|
||||
|
||||
if err := t.objectErrors[sAddr]; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if obj, ok := t.objects[sAddr]; ok {
|
||||
owner := getOwner(ctx)
|
||||
if !obj.OwnerID().Equals(owner) && !t.isPublicRead(prm.Container) {
|
||||
|
@ -175,7 +189,7 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
|
|||
return nil, fmt.Errorf("%w: %s", apistatus.ObjectNotFound{}, addr)
|
||||
}
|
||||
|
||||
func (t *TestFrostFS) CreateObject(ctx context.Context, prm PrmObjectCreate) (oid.ID, error) {
|
||||
func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
|
||||
b := make([]byte, 32)
|
||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
||||
return oid.ID{}, err
|
||||
|
@ -211,7 +225,7 @@ func (t *TestFrostFS) CreateObject(ctx context.Context, prm PrmObjectCreate) (oi
|
|||
if len(prm.Locks) > 0 {
|
||||
lock := new(object.Lock)
|
||||
lock.WriteMembers(prm.Locks)
|
||||
objectv2.WriteLock(obj.ToV2(), (objectv2.Lock)(*lock))
|
||||
}
|
||||
|
||||
if prm.Payload != nil {
|
||||
|
@ -221,7 +235,7 @@ func (t *TestFrostFS) CreateObject(ctx context.Context, prm PrmObjectCreate) (oi
|
|||
}
|
||||
obj.SetPayload(all)
|
||||
obj.SetPayloadSize(uint64(len(all)))
|
||||
var hash checksum.Checksum
|
||||
checksum.Calculate(&hash, checksum.SHA256, all)
|
||||
obj.SetPayloadChecksum(hash)
|
||||
}
|
||||
|
@ -239,6 +253,10 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
|
|||
addr.SetContainer(prm.Container)
|
||||
addr.SetObject(prm.Object)
|
||||
|
||||
if err := t.objectErrors[addr.EncodeToString()]; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if obj, ok := t.objects[addr.EncodeToString()]; ok {
|
||||
owner := getOwner(ctx)
|
||||
if !obj.OwnerID().Equals(owner) {
|
||||
|
@ -314,7 +332,7 @@ func (t *TestFrostFS) isPublicRead(cnrID cid.ID) bool {
|
|||
|
||||
func getOwner(ctx context.Context) user.ID {
|
||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
||||
return bearer.ResolveIssuer(*bd.Gate.BearerToken)
|
||||
}
|
||||
|
||||
return user.ID{}
|
||||
|
|
|
@ -17,6 +17,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
|
@ -101,7 +102,7 @@ type (
|
|||
PutObjectParams struct {
|
||||
BktInfo *data.BucketInfo
|
||||
Object string
|
||||
Size int64
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
Header map[string]string
|
||||
Lock *data.ObjectLock
|
||||
|
@ -134,7 +135,7 @@ type (
|
|||
ScrBktInfo *data.BucketInfo
|
||||
DstBktInfo *data.BucketInfo
|
||||
DstObject string
|
||||
SrcSize int64
|
||||
SrcSize uint64
|
||||
Header map[string]string
|
||||
Range *RangeParams
|
||||
Lock *data.ObjectLock
|
||||
|
@ -318,7 +319,7 @@ func TimeNow(ctx context.Context) time.Time {
|
|||
// Owner returns owner id from BearerToken (context) or from client owner.
|
||||
func (n *layer) Owner(ctx context.Context) user.ID {
|
||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
||||
return bearer.ResolveIssuer(*bd.Gate.BearerToken)
|
||||
}
|
||||
|
||||
var ownerID user.ID
|
||||
|
@ -327,9 +328,17 @@ func (n *layer) Owner(ctx context.Context) user.ID {
|
|||
return ownerID
|
||||
}
|
||||
|
||||
func (n *layer) reqLogger(ctx context.Context) *zap.Logger {
|
||||
reqLogger := api.GetReqLog(ctx)
|
||||
if reqLogger != nil {
|
||||
return reqLogger
|
||||
}
|
||||
return n.log
|
||||
}
|
||||
|
||||
func (n *layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
|
||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
||||
if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
|
||||
prm.BearerToken = bd.Gate.BearerToken
|
||||
return
|
||||
}
|
||||
|
@ -351,9 +360,11 @@ func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
|
|||
|
||||
containerID, err := n.ResolveBucket(ctx, name)
|
||||
if err != nil {
|
||||
n.log.Debug("bucket not found", zap.Error(err))
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return n.containerInfo(ctx, containerID)
|
||||
}
|
||||
|
@ -494,12 +505,8 @@ func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams)
|
|||
return nil, err
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("get object",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", p.BktInfo.Name),
|
||||
n.reqLogger(ctx).Debug("get object",
|
||||
zap.Stringer("cid", p.BktInfo.CID),
|
||||
zap.String("object", objInfo.ObjectInfo.Name),
|
||||
zap.Stringer("oid", objInfo.ObjectInfo.ID))
|
||||
|
||||
return objInfo, nil
|
||||
|
@ -519,7 +526,7 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
|
|||
})
|
||||
|
||||
if err = pw.CloseWithError(err); err != nil {
|
||||
n.log.Error("could not get object", zap.Error(err))
|
||||
n.reqLogger(ctx).Error("could not get object", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -553,7 +560,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
}
|
||||
|
||||
if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
|
||||
return obj
|
||||
return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion.ID)
|
||||
}
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)
|
||||
|
@ -583,7 +590,7 @@ func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
|
|||
|
||||
if nullVersionToDelete != nil {
|
||||
if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nullVersionToDelete, obj); obj.Error != nil {
|
||||
return obj
|
||||
return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -632,6 +639,31 @@ func (n *layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
|
|||
return obj
|
||||
}
|
||||
|
||||
func (n *layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
|
||||
if client.IsErrObjectAlreadyRemoved(obj.Error) {
|
||||
n.reqLogger(ctx).Debug("object already removed",
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
|
||||
if obj.Error != nil {
|
||||
return obj
|
||||
}
|
||||
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
if client.IsErrObjectNotFound(obj.Error) {
|
||||
n.reqLogger(ctx).Debug("object not found",
|
||||
zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID))
|
||||
|
||||
obj.Error = nil
|
||||
|
||||
n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
|
||||
}
|
||||
|
||||
return obj
|
||||
}
|
||||
|
||||
func isNotFoundError(err error) bool {
|
||||
return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
|
||||
errors.IsS3Error(err, errors.ErrNoSuchVersion)
|
||||
|
@ -699,15 +731,14 @@ func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error)
|
|||
return cid.ID{}, err
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Info("resolve bucket", zap.String("reqId", reqInfo.RequestID), zap.String("bucket", name), zap.Stringer("cid", cnrID))
|
||||
n.reqLogger(ctx).Info("resolve bucket", zap.Stringer("cid", cnrID))
|
||||
}
|
||||
|
||||
return cnrID, nil
|
||||
}
|
||||
|
||||
func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
|
||||
nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, "")
|
||||
nodeVersions, err := n.getAllObjectsVersions(ctx, p.BktInfo, "", "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ func TestObjectLockAttributes(t *testing.T) {
|
|||
|
||||
expEpoch := false
|
||||
for _, attr := range lockObj.Attributes() {
|
||||
if attr.Key() == object.SysAttributeExpEpoch {
|
||||
if attr.GetKey() == object.SysAttributeExpEpoch {
|
||||
expEpoch = true
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
|
@ -60,7 +59,7 @@ type (
|
|||
UploadPartParams struct {
|
||||
Info *UploadInfoParams
|
||||
PartNumber int
|
||||
Size int64
|
||||
Size uint64
|
||||
Reader io.Reader
|
||||
}
|
||||
|
||||
|
@ -91,7 +90,7 @@ type (
|
|||
ETag string
|
||||
LastModified string
|
||||
PartNumber int
|
||||
Size int64
|
||||
Size uint64
|
||||
}
|
||||
|
||||
ListMultipartUploadsParams struct {
|
||||
|
@ -196,7 +195,7 @@ func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, er
|
|||
func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
|
||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
|
@ -212,36 +211,36 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
|
||||
decSize := p.Size
|
||||
if p.Info.Encryption.Enabled() {
|
||||
r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
|
||||
r, encSize, err := encryptionReader(p.Reader, p.Size, p.Info.Encryption.Key())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
|
||||
}
|
||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
|
||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatUint(p.Size, 10)})
|
||||
prm.Payload = r
|
||||
p.Size = int64(encSize)
|
||||
p.Size = encSize
|
||||
}
|
||||
|
||||
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
|
||||
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
|
||||
|
||||
id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
size, id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if p.Info.Encryption.Enabled() {
|
||||
size = decSize
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("upload part",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", bktInfo.Name), zap.Stringer("cid", bktInfo.CID),
|
||||
zap.String("multipart upload", p.Info.UploadID),
|
||||
zap.Int("part number", p.PartNumber), zap.String("object", p.Info.Key), zap.Stringer("oid", id))
|
||||
n.reqLogger(ctx).Debug("upload part",
|
||||
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
|
||||
partInfo := &data.PartInfo{
|
||||
Key: p.Info.Key,
|
||||
UploadID: p.Info.UploadID,
|
||||
Number: p.PartNumber,
|
||||
OID: id,
|
||||
Size: decSize,
|
||||
Size: size,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
Created: prm.CreationTime,
|
||||
}
|
||||
|
@ -253,10 +252,9 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
}
|
||||
if !oldPartIDNotFound {
|
||||
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
|
||||
n.log.Error("couldn't delete old part object", zap.Error(err),
|
||||
zap.String("cnrID", bktInfo.CID.EncodeToString()),
|
||||
zap.String("bucket name", bktInfo.Name),
|
||||
zap.String("objID", oldPartID.EncodeToString()))
|
||||
n.reqLogger(ctx).Error("couldn't delete old part object", zap.Error(err),
|
||||
zap.String("cid", bktInfo.CID.EncodeToString()),
|
||||
zap.String("oid", oldPartID.EncodeToString()))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -285,8 +283,8 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
|
||||
size := p.SrcObjInfo.Size
|
||||
if p.Range != nil {
|
||||
size = int64(p.Range.End - p.Range.Start + 1)
|
||||
if p.Range.End > uint64(p.SrcObjInfo.Size) {
|
||||
size = p.Range.End - p.Range.Start + 1
|
||||
if p.Range.End > p.SrcObjInfo.Size {
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
|
||||
}
|
||||
}
|
||||
|
@ -305,7 +303,7 @@ func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
})
|
||||
|
||||
if err = pw.CloseWithError(err); err != nil {
|
||||
n.log.Error("could not get object", zap.Error(err))
|
||||
n.reqLogger(ctx).Error("could not get object", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
|
@ -375,7 +373,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
|
||||
}
|
||||
|
||||
var multipartObjetSize int64
|
||||
var multipartObjetSize uint64
|
||||
var encMultipartObjectSize uint64
|
||||
parts := make([]*data.PartInfo, 0, len(p.Parts))
|
||||
|
||||
|
@ -393,7 +391,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
|
||||
|
||||
if encInfo.Enabled {
|
||||
encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
|
||||
encPartSize, err := sio.EncryptedSize(partInfo.Size)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
|
||||
}
|
||||
|
@ -430,8 +428,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
|
||||
initMetadata[AttributeHMACKey] = encInfo.HMACKey
|
||||
initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
|
||||
initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
|
||||
multipartObjetSize = int64(encMultipartObjectSize)
|
||||
initMetadata[AttributeDecryptedSize] = strconv.FormatUint(multipartObjetSize, 10)
|
||||
multipartObjetSize = encMultipartObjectSize
|
||||
}
|
||||
|
||||
r := &multiObjectReader{
|
||||
|
@ -452,7 +450,7 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
CopiesNumbers: multipartInfo.CopiesNumbers,
|
||||
})
|
||||
if err != nil {
|
||||
n.log.Error("could not put a completed object (multipart upload)",
|
||||
n.reqLogger(ctx).Error("could not put a completed object (multipart upload)",
|
||||
zap.String("uploadID", p.Info.UploadID),
|
||||
zap.String("uploadKey", p.Info.Key),
|
||||
zap.Error(err))
|
||||
|
@ -464,9 +462,8 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
addr.SetContainer(p.Info.Bkt.CID)
|
||||
for _, partInfo := range partsInfo {
|
||||
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
|
||||
n.log.Warn("could not delete upload part",
|
||||
zap.Stringer("object id", &partInfo.OID),
|
||||
zap.Stringer("bucket id", p.Info.Bkt.CID),
|
||||
n.reqLogger(ctx).Warn("could not delete upload part",
|
||||
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
|
||||
zap.Error(err))
|
||||
}
|
||||
addr.SetObject(partInfo.OID)
|
||||
|
@ -544,7 +541,7 @@ func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
|
|||
|
||||
for _, info := range parts {
|
||||
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
|
||||
n.log.Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
|
||||
n.reqLogger(ctx).Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
|
||||
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
@ -561,7 +558,7 @@ func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
|
|||
|
||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
n.reqLogger(ctx).Warn("mismatched obj encryptionInfo", zap.Error(err))
|
||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
|
@ -625,12 +622,8 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
|
|||
oids[i] = part.OID.EncodeToString()
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("part details",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", p.Bkt.Name),
|
||||
n.reqLogger(ctx).Debug("part details",
|
||||
zap.Stringer("cid", p.Bkt.CID),
|
||||
zap.String("object", p.Key),
|
||||
zap.String("upload id", p.UploadID),
|
||||
zap.Ints("part numbers", partsNumbers),
|
||||
zap.Strings("oids", oids))
|
||||
|
|
|
@ -34,7 +34,7 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
|
|||
CopiesNumber: p.CopiesNumbers,
|
||||
}
|
||||
|
||||
objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
_, objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -47,10 +47,9 @@ func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBu
|
|||
|
||||
if !objIDToDeleteNotFound {
|
||||
if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
|
||||
n.log.Error("couldn't delete notification configuration object", zap.Error(err),
|
||||
zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
|
||||
zap.String("bucket name", p.BktInfo.Name),
|
||||
zap.String("objID", objIDToDelete.EncodeToString()))
|
||||
n.reqLogger(ctx).Error("couldn't delete notification configuration object", zap.Error(err),
|
||||
zap.String("cid", p.BktInfo.CID.EncodeToString()),
|
||||
zap.String("oid", objIDToDelete.EncodeToString()))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -170,7 +170,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
|
||||
}
|
||||
size, err := strconv.Atoi(partInfo[1])
|
||||
size, err := strconv.ParseUint(partInfo[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|||
return &Part{
|
||||
ETag: partInfo[2],
|
||||
PartNumber: num,
|
||||
Size: int64(size),
|
||||
Size: size,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@ -191,26 +191,18 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
|
||||
}
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
FilePath: p.Object,
|
||||
Size: p.Size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
}
|
||||
|
||||
r := p.Reader
|
||||
if p.Encryption.Enabled() {
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
|
||||
p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
|
||||
if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
|
||||
return nil, fmt.Errorf("add encryption header: %w", err)
|
||||
}
|
||||
|
||||
var encSize uint64
|
||||
if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
|
||||
if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
|
||||
return nil, fmt.Errorf("create encrypter: %w", err)
|
||||
}
|
||||
p.Size = int64(encSize)
|
||||
p.Size = encSize
|
||||
}
|
||||
|
||||
if r != nil {
|
||||
|
@ -230,7 +222,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
prm := PrmObjectCreate{
|
||||
Container: p.BktInfo.CID,
|
||||
Creator: owner,
|
||||
PayloadSize: uint64(p.Size),
|
||||
PayloadSize: p.Size,
|
||||
Filepath: p.Object,
|
||||
Payload: r,
|
||||
CreationTime: TimeNow(ctx),
|
||||
|
@ -243,19 +235,23 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
prm.Attributes = append(prm.Attributes, [2]string{k, v})
|
||||
}
|
||||
|
||||
id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
size, id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("put object",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", p.BktInfo.Name), zap.Stringer("cid", p.BktInfo.CID),
|
||||
zap.String("object", p.Object), zap.Stringer("oid", id))
|
||||
n.reqLogger(ctx).Debug("put object", zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
|
||||
|
||||
newVersion := &data.NodeVersion{
|
||||
BaseNodeVersion: data.BaseNodeVersion{
|
||||
OID: id,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
FilePath: p.Object,
|
||||
Size: size,
|
||||
},
|
||||
IsUnversioned: !bktSettings.VersioningEnabled(),
|
||||
}
|
||||
|
||||
newVersion.OID = id
|
||||
newVersion.ETag = hex.EncodeToString(hash)
|
||||
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
|
||||
return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
|
||||
}
|
||||
|
@ -286,7 +282,7 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
|
|||
Owner: owner,
|
||||
Bucket: p.BktInfo.Name,
|
||||
Name: p.Object,
|
||||
Size: p.Size,
|
||||
Size: size,
|
||||
Created: prm.CreationTime,
|
||||
Headers: p.Header,
|
||||
ContentType: p.Header[api.ContentType],
|
||||
|
@ -405,17 +401,19 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb
|
|||
|
||||
// objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
|
||||
// Returns object ID and payload sha256 hash.
|
||||
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) {
|
||||
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
var size uint64
|
||||
hash := sha256.New()
|
||||
prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
|
||||
size += uint64(len(buf))
|
||||
hash.Write(buf)
|
||||
})
|
||||
id, err := n.frostFS.CreateObject(ctx, prm)
|
||||
if err != nil {
|
||||
return oid.ID{}, nil, err
|
||||
return 0, oid.ID{}, nil, err
|
||||
}
|
||||
return id, hash.Sum(nil), nil
|
||||
return size, id, hash.Sum(nil), nil
|
||||
}
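objectPutAndHash now counts and hashes the payload through wrapReader, which is defined elsewhere in the package and not shown in this fragment. A plausible shape for such a wrapper, inferred only from the call above; an assumption, not the gateway's actual code.

package layer

import "io"

// wrapReaderSketch hands every chunk read from input to cb, which is how
// objectPutAndHash accumulates the payload size and sha256 above. The bufSize
// argument is kept only for signature compatibility; a real implementation
// could use it for internal buffering.
func wrapReaderSketch(input io.Reader, bufSize int, cb func(buf []byte)) io.Reader {
	if input == nil {
		return nil
	}
	_ = bufSize
	return io.TeeReader(input, writerFunc(cb))
}

// writerFunc adapts a byte-slice callback to io.Writer for io.TeeReader.
type writerFunc func([]byte)

func (f writerFunc) Write(p []byte) (int, error) {
	f(p)
	return len(p), nil
}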
|
||||
|
||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
|
||||
|
@ -560,7 +558,8 @@ func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data
|
|||
}
|
||||
|
||||
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{n.log}))
|
||||
reqLog := n.reqLogger(ctx)
|
||||
pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
|
||||
}
|
||||
|
@ -587,8 +586,8 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
|
|||
if oi == nil {
|
||||
// try to get object again
|
||||
if oi = n.objectInfoFromObjectsCacheOrFrostFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
|
||||
// form object info with data that the tree node contains
|
||||
oi = getPartialObjectInfo(p.Bucket, node)
|
||||
// do not process object which are definitely missing in object service
|
||||
return
|
||||
}
|
||||
}
|
||||
select {
|
||||
|
@ -598,7 +597,7 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
|
|||
})
|
||||
if err != nil {
|
||||
wg.Done()
|
||||
n.log.Warn("failed to submit task to pool", zap.Error(err))
|
||||
reqLog.Warn("failed to submit task to pool", zap.Error(err))
|
||||
}
|
||||
}(node)
|
||||
}
|
||||
|
@ -610,18 +609,6 @@ func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams,
|
|||
return objCh, nil
|
||||
}
|
||||
|
||||
// getPartialObjectInfo form data.ObjectInfo using data available in data.NodeVersion.
|
||||
func getPartialObjectInfo(bktInfo *data.BucketInfo, node *data.NodeVersion) *data.ObjectInfo {
|
||||
return &data.ObjectInfo{
|
||||
ID: node.OID,
|
||||
CID: bktInfo.CID,
|
||||
Bucket: bktInfo.Name,
|
||||
Name: node.FilePath,
|
||||
Size: node.Size,
|
||||
HashSum: node.ETag,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
var err error
|
||||
|
||||
|
@ -752,7 +739,7 @@ func (n *layer) objectInfoFromObjectsCacheOrFrostFS(ctx context.Context, bktInfo
|
|||
|
||||
meta, err := n.objectHead(ctx, bktInfo, node.OID)
|
||||
if err != nil {
|
||||
n.log.Warn("could not fetch object meta", zap.Error(err))
|
||||
n.reqLogger(ctx).Warn("could not fetch object meta", zap.Error(err))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -126,7 +126,7 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
|
|||
return oid.ID{}, err
|
||||
}
|
||||
|
||||
id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
_, id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
return id, err
|
||||
}
|
||||
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
errorsStd "errors"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
|
@ -178,11 +177,8 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
|
|||
}
|
||||
|
||||
if err == nil && version != nil && !version.IsDeleteMarker() {
|
||||
reqInfo := api.GetReqInfo(ctx)
|
||||
n.log.Debug("target details",
|
||||
zap.String("reqId", reqInfo.RequestID),
|
||||
zap.String("bucket", objVersion.BktInfo.Name), zap.Stringer("cid", objVersion.BktInfo.CID),
|
||||
zap.String("object", objVersion.ObjectName), zap.Stringer("oid", version.OID))
|
||||
n.reqLogger(ctx).Debug("get tree node",
|
||||
zap.Stringer("cid", objVersion.BktInfo.CID), zap.Stringer("oid", version.OID))
|
||||
}
|
||||
|
||||
return version, err
|
||||
|
|
|
@ -59,17 +59,17 @@ func (t *TreeServiceMock) DeleteObjectTagging(_ context.Context, bktInfo *data.B
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
|
||||
func (t *TreeServiceMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
|
||||
func (t *TreeServiceMock) PutBucketTagging(context.Context, *data.BucketInfo, map[string]string) error {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
|
||||
func (t *TreeServiceMock) DeleteBucketTagging(context.Context, *data.BucketInfo) error {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
@ -100,15 +100,15 @@ func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.Bucke
|
|||
return settings, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
||||
func (t *TreeServiceMock) GetNotificationConfigurationNode(context.Context, *data.BucketInfo) (oid.ID, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
||||
func (t *TreeServiceMock) PutNotificationConfigurationNode(context.Context, *data.BucketInfo, oid.ID) (oid.ID, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
||||
func (t *TreeServiceMock) GetBucketCORS(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
||||
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return oid.ID{}, nil
|
||||
|
@ -122,7 +122,7 @@ func (t *TreeServiceMock) GetBucketCORS(ctx context.Context, bktInfo *data.Bucke
|
|||
return node.OID, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
||||
func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
||||
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
systemMap = make(map[string]*data.BaseNodeVersion)
|
||||
|
@ -137,7 +137,7 @@ func (t *TreeServiceMock) PutBucketCORS(ctx context.Context, bktInfo *data.Bucke
|
|||
return oid.ID{}, ErrNoNodeToRemove
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
||||
func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) (oid.ID, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
@ -314,7 +314,7 @@ func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error) {
|
||||
func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.BucketInfo, string) ([]*data.MultipartInfo, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
@ -407,7 +407,7 @@ LOOP:
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) PutLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
|
||||
func (t *TreeServiceMock) PutLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error {
|
||||
cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
t.locks[bktInfo.CID.EncodeToString()] = map[uint64]*data.LockInfo{
|
||||
|
@ -421,7 +421,7 @@ func (t *TreeServiceMock) PutLock(ctx context.Context, bktInfo *data.BucketInfo,
|
|||
return nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
|
||||
func (t *TreeServiceMock) GetLock(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error) {
|
||||
cnrLockMap, ok := t.locks[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
|
|
|
@ -56,7 +56,7 @@ func userHeaders(attrs []object.Attribute) map[string]string {
|
|||
result := make(map[string]string, len(attrs))
|
||||
|
||||
for _, attr := range attrs {
|
||||
result[attr.Key()] = attr.Value()
|
||||
result[attr.GetKey()] = attr.GetValue()
|
||||
}
|
||||
|
||||
return result
|
||||
|
@ -74,7 +74,7 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
|
|||
mimeType = contentType
|
||||
delete(headers, object.AttributeContentType)
|
||||
}
|
||||
if val, ok := headers[object.AttributeTimestamp]; !ok {
|
||||
if val, ok := headers[object.AttributeTimestamp]; !ok { //nolint:revive
|
||||
// ignore empty value
|
||||
} else if dt, err := strconv.ParseInt(val, 10, 64); err == nil {
|
||||
creation = time.Unix(dt, 0)
|
||||
|
@ -94,7 +94,8 @@ func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectI
|
|||
ContentType: mimeType,
|
||||
Headers: headers,
|
||||
Owner: *meta.OwnerID(),
|
||||
Size: int64(meta.PayloadSize()),
|
||||
Size: meta.PayloadSize(),
|
||||
CreationEpoch: meta.CreationEpoch(),
|
||||
HashSum: hex.EncodeToString(payloadChecksum.Value()),
|
||||
}
|
||||
}
|
||||
|
@ -123,8 +124,8 @@ func addEncryptionHeaders(meta map[string]string, enc encryption.Params) error {
|
|||
|
||||
func filepathFromObject(o *object.Object) string {
|
||||
for _, attr := range o.Attributes() {
|
||||
if attr.Key() == object.AttributeFilePath {
|
||||
return attr.Value()
|
||||
if attr.GetKey() == object.AttributeFilePath {
|
||||
return attr.GetValue()
|
||||
}
|
||||
}
|
||||
objID, _ := o.ID()
|
||||
|
|
|
@ -17,12 +17,12 @@ import (
|
|||
var (
|
||||
defaultTestCreated = time.Now()
|
||||
defaultTestPayload = []byte("test object payload")
|
||||
defaultTestPayloadLength = int64(len(defaultTestPayload))
|
||||
defaultTestPayloadLength = uint64(len(defaultTestPayload))
|
||||
defaultTestContentType = http.DetectContentType(defaultTestPayload)
|
||||
)
|
||||
|
||||
func newTestInfo(obj oid.ID, bkt *data.BucketInfo, name string, isDir bool) *data.ObjectInfo {
|
||||
var hashSum checksum.Checksum
|
||||
info := &data.ObjectInfo{
|
||||
ID: obj,
|
||||
Name: name,
|
||||
|
|
|
@ -21,7 +21,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
|
|||
extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
|
||||
BktInfo: tc.bktInfo,
|
||||
Object: tc.obj,
|
||||
Size: int64(len(content)),
|
||||
Size: uint64(len(content)),
|
||||
Reader: bytes.NewReader(content),
|
||||
Header: make(map[string]string),
|
||||
})
|
||||
|
|
|
@ -94,7 +94,7 @@ type (
|
|||
|
||||
Object struct {
|
||||
Key string `json:"key"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
Size uint64 `json:"size,omitempty"`
|
||||
VersionID string `json:"versionId,omitempty"`
|
||||
ETag string `json:"eTag,omitempty"`
|
||||
Sequencer string `json:"sequencer,omitempty"`
|
||||
|
@ -131,7 +131,7 @@ func NewController(p *Options, l *zap.Logger) (*Controller, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (c *Controller) Subscribe(ctx context.Context, topic string, handler layer.MsgHandler) error {
|
||||
func (c *Controller) Subscribe(_ context.Context, topic string, handler layer.MsgHandler) error {
|
||||
ch := make(chan *nats.Msg, 1)
|
||||
|
||||
c.mu.RLock()
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/gorilla/mux"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
|
@ -42,10 +43,13 @@ type (
|
|||
}
|
||||
)
|
||||
|
||||
// Key used for Get/SetReqInfo.
|
||||
// Key used for custom key/value in context.
|
||||
type contextKeyType string
|
||||
|
||||
const ctxRequestInfo = contextKeyType("FrostFS-S3-GW")
|
||||
const (
|
||||
ctxRequestInfo = contextKeyType("FrostFS-S3-GW")
|
||||
ctxRequestLogger = contextKeyType("FrostFS-S3-GW-Logger")
|
||||
)
|
||||
|
||||
var (
|
||||
// De-facto standard header keys.
|
||||
|
@ -104,7 +108,7 @@ func GetSourceIP(r *http.Request) string {
|
|||
return addr
|
||||
}
|
||||
|
||||
func prepareContext(w http.ResponseWriter, r *http.Request) context.Context {
|
||||
func prepareReqInfo(w http.ResponseWriter, r *http.Request) *ReqInfo {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object, err := url.PathUnescape(vars["object"])
|
||||
|
@ -118,13 +122,11 @@ func prepareContext(w http.ResponseWriter, r *http.Request) context.Context {
|
|||
if prefix != "" {
|
||||
object = prefix
|
||||
}
|
||||
return SetReqInfo(r.Context(),
|
||||
// prepare request info
|
||||
NewReqInfo(w, r, ObjectRequest{
|
||||
return NewReqInfo(w, r, ObjectRequest{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
Method: mux.CurrentRoute(r).GetName(),
|
||||
}))
|
||||
})
|
||||
}
|
||||
|
||||
// NewReqInfo returns new ReqInfo based on parameters.
|
||||
|
@ -194,6 +196,7 @@ func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
|
|||
}
|
||||
|
||||
// GetReqInfo returns ReqInfo if set.
|
||||
// If ReqInfo isn't set returns new empty ReqInfo.
|
||||
func GetReqInfo(ctx context.Context) *ReqInfo {
|
||||
if ctx == nil {
|
||||
return &ReqInfo{}
|
||||
|
@ -202,3 +205,22 @@ func GetReqInfo(ctx context.Context) *ReqInfo {
|
|||
}
|
||||
return &ReqInfo{}
|
||||
}
// SetReqLogger sets child zap.Logger in the context.
func SetReqLogger(ctx context.Context, log *zap.Logger) context.Context {
	if ctx == nil {
		return nil
	}
	return context.WithValue(ctx, ctxRequestLogger, log)
}

// GetReqLog returns log if set.
// If zap.Logger isn't set returns nil.
func GetReqLog(ctx context.Context) *zap.Logger {
	if ctx == nil {
		return nil
	} else if r, ok := ctx.Value(ctxRequestLogger).(*zap.Logger); ok {
		return r
	}
	return nil
}
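The pair above follows the usual typed-context-key pattern. A small round-trip sketch, illustrative only and assuming the api package as shown:

package api

import (
	"context"

	"go.uber.org/zap"
)

// exampleScopedLogger shows the intended flow: middleware stores a request-scoped
// logger, downstream code retrieves it and falls back to a default when absent.
func exampleScopedLogger(base *zap.Logger) {
	ctx := SetReqLogger(context.Background(), base.With(zap.String("request_id", "example")))

	log := GetReqLog(ctx)
	if log == nil { // e.g. background work without a request context
		log = base
	}
	log.Info("handling request")
}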
@ -124,25 +124,63 @@ func (lrw *logResponseWriter) Flush() {
|
|||
}
|
||||
}
|
||||
|
||||
func setRequestID(h http.Handler) http.Handler {
|
||||
func prepareRequest(log *zap.Logger) mux.MiddlewareFunc {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// generate random UUIDv4
|
||||
id, _ := uuid.NewRandom()
|
||||
|
||||
// set request id into response header
|
||||
// also we have to set request id here
|
||||
// to be able to get it in prepareReqInfo
|
||||
w.Header().Set(hdrAmzRequestID, id.String())
|
||||
|
||||
// set request info into context
|
||||
reqInfo := prepareReqInfo(w, r)
|
||||
r = r.WithContext(SetReqInfo(r.Context(), reqInfo))
|
||||
|
||||
// set request id into gRPC meta header
|
||||
r = r.WithContext(metadata.AppendToOutgoingContext(
|
||||
r.Context(), hdrAmzRequestID, id.String(),
|
||||
r.Context(), hdrAmzRequestID, reqInfo.RequestID,
|
||||
))
|
||||
|
||||
// set request info into context
|
||||
r = r.WithContext(prepareContext(w, r))
|
||||
// set request scoped child logger into context
|
||||
additionalFields := []zap.Field{zap.String("request_id", reqInfo.RequestID),
|
||||
zap.String("method", reqInfo.API), zap.String("bucket", reqInfo.BucketName)}
|
||||
|
||||
if isObjectRequest(reqInfo) {
|
||||
additionalFields = append(additionalFields, zap.String("object", reqInfo.ObjectName))
|
||||
}
|
||||
reqLogger := log.With(additionalFields...)
|
||||
|
||||
r = r.WithContext(SetReqLogger(r.Context(), reqLogger))
|
||||
|
||||
reqLogger.Info("request start", zap.String("host", r.Host),
|
||||
zap.String("remote_host", reqInfo.RemoteHost))
|
||||
|
||||
// continue execution
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var objectMethods = []string{
|
||||
"HeadObject", "GetObject", "DeleteObject", "PutObject", "PostObject", "CopyObject",
|
||||
"CreateMultipartUpload", "UploadPartCopy", "UploadPart", "ListObjectParts",
|
||||
"CompleteMultipartUpload", "AbortMultipartUpload",
|
||||
"PutObjectACL", "GetObjectACL",
|
||||
"PutObjectTagging", "GetObjectTagging", "DeleteObjectTagging",
|
||||
"PutObjectRetention", "GetObjectRetention", "PutObjectLegalHold", "getobjectlegalhold",
|
||||
"SelectObjectContent", "GetObjectAttributes",
|
||||
}
|
||||
|
||||
func isObjectRequest(info *ReqInfo) bool {
|
||||
for _, method := range objectMethods {
|
||||
if info.API == method {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func appendCORS(handler Handler) mux.MiddlewareFunc {
|
||||
|
@ -173,10 +211,7 @@ func resolveCID(log *zap.Logger, resolveBucket BucketResolveFunc) CIDResolveFunc
|
|||
|
||||
bktInfo, err := resolveBucket(ctx, reqInfo.BucketName)
|
||||
if err != nil {
|
||||
log.Debug("failed to resolve CID",
|
||||
zap.String("request_id", reqInfo.RequestID), zap.String("method", reqInfo.API),
|
||||
zap.String("bucket", reqInfo.BucketName), zap.String("object", reqInfo.ObjectName),
|
||||
zap.Error(err))
|
||||
reqLogOrDefault(ctx, log).Debug("failed to resolve CID", zap.Error(err))
|
||||
return ""
|
||||
}
|
||||
|
||||
|
@ -188,7 +223,8 @@ func logSuccessResponse(l *zap.Logger) mux.MiddlewareFunc {
|
|||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
lw := &logResponseWriter{ResponseWriter: w}
|
||||
reqInfo := GetReqInfo(r.Context())
|
||||
|
||||
reqLogger := reqLogOrDefault(r.Context(), l)
|
||||
|
||||
// pass execution:
|
||||
h.ServeHTTP(lw, r)
|
||||
|
@ -198,13 +234,8 @@ func logSuccessResponse(l *zap.Logger) mux.MiddlewareFunc {
|
|||
return
|
||||
}
|
||||
|
||||
l.Info("call method",
|
||||
reqLogger.Info("request end",
|
||||
zap.Int("status", lw.statusCode),
|
||||
zap.String("host", r.Host),
|
||||
zap.String("request_id", GetRequestID(r.Context())),
|
||||
zap.String("method", mux.CurrentRoute(r).GetName()),
|
||||
zap.String("bucket", reqInfo.BucketName),
|
||||
zap.String("object", reqInfo.ObjectName),
|
||||
zap.String("description", http.StatusText(lw.statusCode)))
|
||||
})
|
||||
}
|
||||
|
@ -253,11 +284,13 @@ func Attach(r *mux.Router, domains []string, m MaxClients, h Handler, center aut
|
|||
|
||||
api.Use(
|
||||
// -- prepare request
|
||||
setRequestID,
|
||||
prepareRequest(log),
|
||||
|
||||
// Attach user authentication for all S3 routes.
|
||||
AuthMiddleware(log, center),
|
||||
|
||||
TracingMiddleware(),
|
||||
|
||||
metricsMiddleware(log, h.ResolveBucket, appMetrics),
|
||||
|
||||
// -- logging error requests
|
||||
|
|
|
@ -126,7 +126,7 @@ func Stats(f http.HandlerFunc, resolveCID CIDResolveFunc, appMetrics *metrics.Ap
|
|||
func resolveUser(ctx context.Context) string {
|
||||
user := "anon"
|
||||
if bd, ok := ctx.Value(BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
||||
user = bearer.ResolveIssuer(*bd.Gate.BearerToken).String()
|
||||
}
|
||||
return user
|
||||
}
|
||||
|
|
121
api/tracing.go
Normal file
121
api/tracing.go
Normal file
|
@ -0,0 +1,121 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
"github.com/gorilla/mux"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// TracingMiddleware adds tracing support for requests.
|
||||
// Must be placed after prepareRequest middleware.
|
||||
func TracingMiddleware() mux.MiddlewareFunc {
|
||||
return func(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
appCtx, span := StartHTTPServerSpan(r, "REQUEST S3")
|
||||
lw := &traceResponseWriter{ResponseWriter: w, ctx: appCtx, span: span}
|
||||
h.ServeHTTP(lw, r.WithContext(appCtx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type traceResponseWriter struct {
|
||||
sync.Once
|
||||
http.ResponseWriter
|
||||
|
||||
ctx context.Context
|
||||
span trace.Span
|
||||
}
|
||||
|
||||
func (lrw *traceResponseWriter) WriteHeader(code int) {
|
||||
lrw.Do(func() {
|
||||
lrw.span.SetAttributes(
|
||||
semconv.HTTPStatusCode(code),
|
||||
)
|
||||
|
||||
carrier := &httpResponseCarrier{resp: lrw.ResponseWriter}
|
||||
tracing.Propagator.Inject(lrw.ctx, carrier)
|
||||
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
lrw.span.End()
|
||||
})
|
||||
}
|
||||
|
||||
func (lrw *traceResponseWriter) Flush() {
|
||||
if f, ok := lrw.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
type httpResponseCarrier struct {
|
||||
resp http.ResponseWriter
|
||||
}
|
||||
|
||||
func (h httpResponseCarrier) Get(key string) string {
|
||||
return h.resp.Header().Get(key)
|
||||
}
|
||||
|
||||
func (h httpResponseCarrier) Set(key string, value string) {
|
||||
h.resp.Header().Set(key, value)
|
||||
}
|
||||
|
||||
func (h httpResponseCarrier) Keys() []string {
|
||||
result := make([]string, 0, len(h.resp.Header()))
|
||||
for key := range h.resp.Header() {
|
||||
result = append(result, key)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
type httpRequestCarrier struct {
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (c *httpRequestCarrier) Get(key string) string {
|
||||
bytes := c.req.Header.Get(key)
|
||||
if len(bytes) == 0 {
|
||||
return ""
|
||||
}
|
||||
return bytes
|
||||
}
|
||||
|
||||
func (c *httpRequestCarrier) Set(key string, value string) {
|
||||
c.req.Response.Header.Set(key, value)
|
||||
}
|
||||
|
||||
func (c *httpRequestCarrier) Keys() []string {
|
||||
result := make([]string, 0, len(c.req.Header))
|
||||
for key := range c.req.Header {
|
||||
result = append(result, key)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func extractHTTPTraceInfo(ctx context.Context, req *http.Request) context.Context {
|
||||
if req == nil {
|
||||
return ctx
|
||||
}
|
||||
carrier := &httpRequestCarrier{req: req}
|
||||
return tracing.Propagator.Extract(ctx, carrier)
|
||||
}
|
||||
|
||||
// StartHTTPServerSpan starts root HTTP server span.
|
||||
func StartHTTPServerSpan(r *http.Request, operationName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
ctx := extractHTTPTraceInfo(r.Context(), r)
|
||||
opts = append(opts, trace.WithAttributes(
|
||||
attribute.String("s3.client_address", r.RemoteAddr),
|
||||
attribute.String("s3.path", r.Host),
|
||||
attribute.String("s3.request_id", GetRequestID(r.Context())),
|
||||
semconv.HTTPMethod(r.Method),
|
||||
semconv.RPCService("frostfs-s3-gw"),
|
||||
attribute.String("s3.query", r.RequestURI),
|
||||
), trace.WithSpanKind(trace.SpanKindServer))
|
||||
return tracing.StartSpanFromContext(ctx, operationName, opts...)
|
||||
}
|
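For orientation, here is a minimal, hypothetical sketch of how this middleware might be wired into a gorilla/mux router. The standalone main, handler and listen address below are illustrative only and not part of the gateway code shown above.

package main

import (
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// TracingMiddleware must come after the middleware that prepares the
	// request context (request ID, logger), so the span attributes can
	// pick up the request ID.
	r.Use(api.TracingMiddleware())
	r.HandleFunc("/{bucket}/{object}", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK) // WriteHeader also injects trace headers and ends the span
	})
	_ = http.ListenAndServe("localhost:8084", r)
}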
|
@ -13,6 +13,9 @@ import (
|
|||
// KeyWrapper is wrapper for context keys.
|
||||
type KeyWrapper string
|
||||
|
||||
// AuthHeaders is a wrapper for authentication headers of a request.
|
||||
var AuthHeaders = KeyWrapper("__context_auth_headers_key")
|
||||
|
||||
// BoxData is an ID used to store accessbox.Box in a context.
|
||||
var BoxData = KeyWrapper("__context_box_key")
|
||||
|
||||
|
@ -27,10 +30,10 @@ func AuthMiddleware(log *zap.Logger, center auth.Center) mux.MiddlewareFunc {
|
|||
box, err := center.Authenticate(r)
|
||||
if err != nil {
|
||||
if err == auth.ErrNoAuthorizationHeader {
|
||||
log.Debug("couldn't receive access box for gate key, random key will be used")
|
||||
reqLogOrDefault(ctx, log).Debug("couldn't receive access box for gate key, random key will be used")
|
||||
ctx = r.Context()
|
||||
} else {
|
||||
log.Error("failed to pass authentication", zap.Error(err))
|
||||
reqLogOrDefault(ctx, log).Error("failed to pass authentication", zap.Error(err))
|
||||
if _, ok := err.(errors.Error); !ok {
|
||||
err = errors.GetAPIError(errors.ErrAccessDenied)
|
||||
}
|
||||
|
@ -42,9 +45,18 @@ func AuthMiddleware(log *zap.Logger, center auth.Center) mux.MiddlewareFunc {
|
|||
if !box.ClientTime.IsZero() {
|
||||
ctx = context.WithValue(ctx, ClientTime, box.ClientTime)
|
||||
}
|
||||
ctx = context.WithValue(ctx, AuthHeaders, box.AuthHeaders)
|
||||
}
|
||||
|
||||
h.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func reqLogOrDefault(ctx context.Context, log *zap.Logger) *zap.Logger {
|
||||
reqLog := GetReqLog(ctx)
|
||||
if reqLog != nil {
|
||||
return reqLog
|
||||
}
|
||||
return log
|
||||
}
|
||||
|
|
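A minimal sketch of the context-key pattern used above, assuming BoxData is exported from the api package as shown in the hunk and accessbox.Box is the stored value type; the standalone main is illustrative only.

package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
)

func main() {
	// Values are stored under typed KeyWrapper keys, so they cannot collide
	// with plain string keys set by other packages.
	ctx := context.WithValue(context.Background(), api.BoxData, &accessbox.Box{})

	// Retrieval mirrors what the handlers do: type-assert the stored value.
	if box, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok {
		fmt.Printf("box found: %T\n", box)
	}
}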
|
@ -11,6 +11,8 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
aclgrpc "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl/grpc"
|
||||
sessionv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
|
||||
|
@ -107,6 +109,21 @@ type (
|
|||
ContainerPolicies ContainerPolicies
|
||||
}
|
||||
|
||||
// UpdateSecretOptions contains options for passing to Agent.UpdateSecret method.
|
||||
UpdateSecretOptions struct {
|
||||
FrostFSKey *keys.PrivateKey
|
||||
GatesPublicKeys []*keys.PublicKey
|
||||
Address oid.Address
|
||||
GatePrivateKey *keys.PrivateKey
|
||||
}
|
||||
|
||||
tokenUpdateOptions struct {
|
||||
frostFSKey *keys.PrivateKey
|
||||
gatesPublicKeys []*keys.PublicKey
|
||||
lifetime lifetimeOptions
|
||||
box *accessbox.Box
|
||||
}
|
||||
|
||||
// ContainerOptions groups parameters of auth container to put the secret into.
|
||||
ContainerOptions struct {
|
||||
ID cid.ID
|
||||
|
@ -114,6 +131,12 @@ type (
|
|||
PlacementPolicy string
|
||||
}
|
||||
|
||||
// UpdateOptions groups parameters used to update the existing secret.
|
||||
UpdateOptions struct {
|
||||
Address oid.Address
|
||||
SecretAccessKey []byte
|
||||
}
|
||||
|
||||
// ObtainSecretOptions contains options for passing to Agent.ObtainSecret method.
|
||||
ObtainSecretOptions struct {
|
||||
SecretAddress string
|
||||
|
@ -129,6 +152,7 @@ type lifetimeOptions struct {
|
|||
|
||||
type (
|
||||
issuingResult struct {
|
||||
InitialAccessKeyID string `json:"initial_access_key_id"`
|
||||
AccessKeyID string `json:"access_key_id"`
|
||||
SecretAccessKey string `json:"secret_access_key"`
|
||||
OwnerPrivateKey string `json:"owner_private_key"`
|
||||
|
@ -137,16 +161,21 @@ type (
|
|||
}
|
||||
|
||||
obtainingResult struct {
|
||||
BearerToken *bearer.Token `json:"-"`
|
||||
BearerToken *bearer.Token `json:"bearer_token"`
|
||||
SecretAccessKey string `json:"secret_access_key"`
|
||||
}
|
||||
)
|
||||
|
||||
func (a *Agent) checkContainer(ctx context.Context, opts ContainerOptions, idOwner user.ID) (cid.ID, error) {
|
||||
if !opts.ID.Equals(cid.ID{}) {
|
||||
a.log.Info("check container", zap.Stringer("cid", opts.ID))
|
||||
return opts.ID, a.frostFS.ContainerExists(ctx, opts.ID)
|
||||
}
|
||||
|
||||
a.log.Info("create container",
|
||||
zap.String("friendly_name", opts.FriendlyName),
|
||||
zap.String("placement_policy", opts.PlacementPolicy))
|
||||
|
||||
var prm PrmContainerCreate
|
||||
|
||||
err := prm.Policy.DecodeString(opts.PlacementPolicy)
|
||||
|
@ -224,7 +253,7 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
return fmt.Errorf("create tokens: %w", err)
|
||||
}
|
||||
|
||||
box, secrets, err := accessbox.PackTokens(gatesData)
|
||||
box, secrets, err := accessbox.PackTokens(gatesData, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pack tokens: %w", err)
|
||||
}
|
||||
|
@ -233,10 +262,6 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
|
||||
var idOwner user.ID
|
||||
user.IDFromKey(&idOwner, options.FrostFSKey.PrivateKey.PublicKey)
|
||||
|
||||
a.log.Info("check container or create", zap.Stringer("cid", options.Container.ID),
|
||||
zap.String("friendly_name", options.Container.FriendlyName),
|
||||
zap.String("placement_policy", options.Container.PlacementPolicy))
|
||||
id, err := a.checkContainer(ctx, options.Container, idOwner)
|
||||
if err != nil {
|
||||
return fmt.Errorf("check container: %w", err)
|
||||
|
@ -245,19 +270,16 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
a.log.Info("store bearer token into FrostFS",
|
||||
zap.Stringer("owner_tkn", idOwner))
|
||||
|
||||
addr, err := tokens.
|
||||
New(a.frostFS, secrets.EphemeralKey, cache.DefaultAccessBoxConfig(a.log)).
|
||||
Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
|
||||
creds := tokens.New(a.frostFS, secrets.EphemeralKey, cache.DefaultAccessBoxConfig(a.log))
|
||||
|
||||
addr, err := creds.Put(ctx, id, idOwner, box, lifetime.Exp, options.GatesPublicKeys...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to put bearer token: %w", err)
|
||||
return fmt.Errorf("failed to put creds: %w", err)
|
||||
}
|
||||
|
||||
objID := addr.Object()
|
||||
strIDObj := objID.EncodeToString()
|
||||
|
||||
accessKeyID := addr.Container().EncodeToString() + "0" + strIDObj
|
||||
|
||||
accessKeyID := accessKeyIDFromAddr(addr)
|
||||
ir := &issuingResult{
|
||||
InitialAccessKeyID: accessKeyID,
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secrets.AccessKey,
|
||||
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
|
||||
|
@ -272,7 +294,7 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
}
|
||||
|
||||
if options.AwsCliCredentialsFile != "" {
|
||||
profileName := "authmate_cred_" + strIDObj
|
||||
profileName := "authmate_cred_" + addr.Object().EncodeToString()
|
||||
if _, err = os.Stat(options.AwsCliCredentialsFile); os.IsNotExist(err) {
|
||||
profileName = "default"
|
||||
}
|
||||
|
@ -289,6 +311,73 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
|
|||
return nil
|
||||
}
|
||||
|
||||
// UpdateSecret updates an auth token (changes the list of gates that can use the credential), puts the new credential version to the FrostFS network and writes the result to io.Writer.
|
||||
func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSecretOptions) error {
|
||||
creds := tokens.New(a.frostFS, options.GatePrivateKey, cache.DefaultAccessBoxConfig(a.log))
|
||||
|
||||
box, err := creds.GetBox(ctx, options.Address)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get accessbox: %w", err)
|
||||
}
|
||||
|
||||
secret, err := hex.DecodeString(box.Gate.AccessKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode secret key access box: %w", err)
|
||||
}
|
||||
|
||||
lifetime := getLifetimeFromGateData(box.Gate)
|
||||
tokenOptions := tokenUpdateOptions{
|
||||
frostFSKey: options.FrostFSKey,
|
||||
gatesPublicKeys: options.GatesPublicKeys,
|
||||
lifetime: lifetime,
|
||||
box: box,
|
||||
}
|
||||
|
||||
gatesData, err := formTokensToUpdate(tokenOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create tokens: %w", err)
|
||||
}
|
||||
|
||||
updatedBox, secrets, err := accessbox.PackTokens(gatesData, secret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pack tokens: %w", err)
|
||||
}
|
||||
|
||||
var idOwner user.ID
|
||||
user.IDFromKey(&idOwner, options.FrostFSKey.PrivateKey.PublicKey)
|
||||
a.log.Info("update access cred object into FrostFS",
|
||||
zap.Stringer("owner_tkn", idOwner))
|
||||
|
||||
oldAddr := options.Address
|
||||
addr, err := creds.Update(ctx, oldAddr, idOwner, updatedBox, lifetime.Exp, options.GatesPublicKeys...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update creds: %w", err)
|
||||
}
|
||||
|
||||
ir := &issuingResult{
|
||||
AccessKeyID: accessKeyIDFromAddr(addr),
|
||||
InitialAccessKeyID: accessKeyIDFromAddr(oldAddr),
|
||||
SecretAccessKey: secrets.AccessKey,
|
||||
OwnerPrivateKey: hex.EncodeToString(secrets.EphemeralKey.Bytes()),
|
||||
WalletPublicKey: hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
|
||||
ContainerID: addr.Container().EncodeToString(),
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(ir)
|
||||
}
|
||||
|
||||
func getLifetimeFromGateData(gateData *accessbox.GateData) lifetimeOptions {
|
||||
var btokenv2 aclgrpc.BearerToken
|
||||
gateData.BearerToken.WriteToV2(&btokenv2)
|
||||
|
||||
return lifetimeOptions{
|
||||
Iat: btokenv2.GetBody().GetLifetime().GetIat(),
|
||||
Exp: btokenv2.GetBody().GetLifetime().GetExp(),
|
||||
}
|
||||
}
|
||||
|
||||
// ObtainSecret receives an existing secret access key from FrostFS and
|
||||
// writes to io.Writer the secret access key.
|
||||
func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSecretOptions) error {
|
||||
|
@ -349,10 +438,10 @@ func buildBearerToken(key *keys.PrivateKey, impersonate bool, table *eacl.Table,
|
|||
var ownerID user.ID
|
||||
user.IDFromKey(&ownerID, (ecdsa.PublicKey)(*gateKey))
|
||||
|
||||
var bearerToken bearer.Token
|
||||
bearerToken := bearer.NewToken()
|
||||
|
||||
if !impersonate {
|
||||
bearerToken.SetEACLTable(*table)
|
||||
bearerToken.SetEACLTable(table)
|
||||
}
|
||||
|
||||
bearerToken.ForUser(ownerID)
|
||||
|
@ -384,7 +473,9 @@ func buildBearerTokens(key *keys.PrivateKey, impersonate bool, table *eacl.Table
|
|||
func buildSessionToken(key *keys.PrivateKey, lifetime lifetimeOptions, ctx sessionTokenContext, gateKey *keys.PublicKey) (*session.Container, error) {
|
||||
tok := new(session.Container)
|
||||
tok.ForVerb(ctx.verb)
|
||||
tok.AppliedTo(ctx.containerID)
|
||||
if !ctx.containerID.Equals(cid.ID{}) {
|
||||
tok.ApplyOnlyTo(ctx.containerID)
|
||||
}
|
||||
|
||||
tok.SetID(uuid.New())
|
||||
tok.SetAuthKey((*frostfsecdsa.PublicKey)(gateKey))
|
||||
|
@ -445,3 +536,55 @@ func createTokens(options *IssueSecretOptions, lifetime lifetimeOptions) ([]*acc
|
|||
|
||||
return gates, nil
|
||||
}
|
||||
|
||||
func formTokensToUpdate(options tokenUpdateOptions) ([]*accessbox.GateData, error) {
|
||||
btoken := options.box.Gate.BearerToken
|
||||
table := btoken.EACLTable()
|
||||
|
||||
bearerTokens, err := buildBearerTokens(options.frostFSKey, btoken.Impersonate(), table, options.lifetime, options.gatesPublicKeys)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build bearer tokens: %w", err)
|
||||
}
|
||||
|
||||
gates := make([]*accessbox.GateData, len(options.gatesPublicKeys))
|
||||
for i, gateKey := range options.gatesPublicKeys {
|
||||
gates[i] = accessbox.NewGateData(gateKey, bearerTokens[i])
|
||||
}
|
||||
|
||||
sessionRules := make([]sessionTokenContext, len(options.box.Gate.SessionTokens))
|
||||
for i, token := range options.box.Gate.SessionTokens {
|
||||
var stoken sessionv2.SessionToken
|
||||
token.WriteToV2(&stoken)
|
||||
|
||||
sessionCtx, ok := stoken.GetBody().GetContext().(*sessionv2.SessionToken_Body_Container)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("get context from session token: %w", err)
|
||||
}
|
||||
|
||||
var cnrID cid.ID
|
||||
if cnrIDv2 := sessionCtx.Container.GetContainerId(); cnrIDv2 != nil {
|
||||
if err = cnrID.ReadFromV2(cnrIDv2); err != nil {
|
||||
return nil, fmt.Errorf("read from v2 container id: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
sessionRules[i] = sessionTokenContext{
|
||||
verb: session.ContainerVerb(sessionCtx.Container.GetVerb()),
|
||||
containerID: cnrID,
|
||||
}
|
||||
}
|
||||
|
||||
sessionTokens, err := buildSessionTokens(options.frostFSKey, options.lifetime, sessionRules, options.gatesPublicKeys)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to biuild session token: %w", err)
|
||||
}
|
||||
for i, sessionTkns := range sessionTokens {
|
||||
gates[i].SessionTokens = sessionTkns
|
||||
}
|
||||
|
||||
return gates, nil
|
||||
}
|
||||
|
||||
func accessKeyIDFromAddr(addr oid.Address) string {
|
||||
return addr.Container().EncodeToString() + "0" + addr.Object().EncodeToString()
|
||||
}
|
||||
|
|
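To make the access-key-id convention above concrete, a small sketch of the round trip between an object address and an access key id. The container and object ids below are arbitrary base58-style placeholders; base58 encoding does not use the character "0", which is what keeps the separator unambiguous.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// accessKeyIDFromAddr joins the container and object ids with a literal "0":
	//   <container-id> + "0" + <object-id>
	cnrID := "HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6"
	objID := "vvMCApR2zz5kBEEqtTn3FCJKdXN2TFwrfxkvQqHdJj1"
	accessKeyID := cnrID + "0" + objID

	// Consumers (e.g. obtain-secret) recover the object address by replacing
	// the first "0" with "/", yielding the "<cid>/<oid>" address string.
	address := strings.Replace(accessKeyID, "0", "/", 1)

	fmt.Println(accessKeyID)
	fmt.Println(address)
}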
|
@ -4,7 +4,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
apisession "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session"
|
||||
apisession "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/session/grpc"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
|
||||
)
|
||||
|
@ -29,11 +29,11 @@ func (c *sessionTokenContext) UnmarshalJSON(data []byte) (err error) {
|
|||
}
|
||||
|
||||
switch m.Verb {
|
||||
case apisession.ContainerVerbPut.String():
|
||||
case apisession.ContainerSessionContext_PUT.String():
|
||||
c.verb = session.VerbContainerPut
|
||||
case apisession.ContainerVerbSetEACL.String():
|
||||
case apisession.ContainerSessionContext_SETEACL.String():
|
||||
c.verb = session.VerbContainerSetEACL
|
||||
case apisession.ContainerVerbDelete.String():
|
||||
case apisession.ContainerSessionContext_DELETE.String():
|
||||
c.verb = session.VerbContainerDelete
|
||||
default:
|
||||
return fmt.Errorf("unknown container token verb %s", m.Verb)
|
||||
|
|
|
@ -2,718 +2,19 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/urfave/cli/v2"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/cmd/s3-authmate/modules"
|
||||
)
|
||||
|
||||
const (
|
||||
poolDialTimeout = 5 * time.Second
|
||||
poolHealthcheckTimeout = 5 * time.Second
|
||||
poolRebalanceInterval = 30 * time.Second
|
||||
poolStreamTimeout = 10 * time.Second
|
||||
|
||||
// a month.
|
||||
defaultLifetime = 30 * 24 * time.Hour
|
||||
defaultPresignedLifetime = 12 * time.Hour
|
||||
)
|
||||
|
||||
type PoolConfig struct {
|
||||
Key *ecdsa.PrivateKey
|
||||
Address string
|
||||
DialTimeout time.Duration
|
||||
HealthcheckTimeout time.Duration
|
||||
StreamTimeout time.Duration
|
||||
RebalanceInterval time.Duration
|
||||
}
|
||||
|
||||
var (
|
||||
walletPathFlag string
|
||||
accountAddressFlag string
|
||||
peerAddressFlag string
|
||||
eaclRulesFlag string
|
||||
disableImpersonateFlag bool
|
||||
gateWalletPathFlag string
|
||||
gateAccountAddressFlag string
|
||||
accessKeyIDFlag string
|
||||
containerIDFlag string
|
||||
containerFriendlyName string
|
||||
containerPlacementPolicy string
|
||||
gatesPublicKeysFlag cli.StringSlice
|
||||
logEnabledFlag bool
|
||||
logDebugEnabledFlag bool
|
||||
sessionTokenFlag string
|
||||
lifetimeFlag time.Duration
|
||||
endpointFlag string
|
||||
bucketFlag string
|
||||
objectFlag string
|
||||
methodFlag string
|
||||
profileFlag string
|
||||
regionFlag string
|
||||
secretAccessKeyFlag string
|
||||
containerPolicies string
|
||||
awcCliCredFile string
|
||||
timeoutFlag time.Duration
|
||||
|
||||
// pool timeouts flag.
|
||||
poolDialTimeoutFlag time.Duration
|
||||
poolHealthcheckTimeoutFlag time.Duration
|
||||
poolRebalanceIntervalFlag time.Duration
|
||||
poolStreamTimeoutFlag time.Duration
|
||||
)
|
||||
|
||||
const (
|
||||
envWalletPassphrase = "wallet.passphrase"
|
||||
envWalletGatePassphrase = "wallet.gate.passphrase"
|
||||
)
|
||||
|
||||
var zapConfig = zap.Config{
|
||||
Development: true,
|
||||
Encoding: "console",
|
||||
Level: zap.NewAtomicLevelAt(zapcore.FatalLevel),
|
||||
OutputPaths: []string{"stdout"},
|
||||
EncoderConfig: zapcore.EncoderConfig{
|
||||
MessageKey: "message",
|
||||
LevelKey: "level",
|
||||
EncodeLevel: zapcore.CapitalLevelEncoder,
|
||||
TimeKey: "time",
|
||||
EncodeTime: zapcore.ISO8601TimeEncoder,
|
||||
CallerKey: "caller",
|
||||
EncodeCaller: zapcore.ShortCallerEncoder,
|
||||
},
|
||||
}
|
||||
|
||||
func prepare() (context.Context, *zap.Logger) {
|
||||
var (
|
||||
err error
|
||||
log = zap.NewNop()
|
||||
ctx, _ = signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||
)
|
||||
|
||||
if !logEnabledFlag {
|
||||
return ctx, log
|
||||
} else if logDebugEnabledFlag {
|
||||
zapConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
|
||||
}
|
||||
|
||||
if log, err = zapConfig.Build(); err != nil {
|
||||
panic(fmt.Errorf("create logger: %w", err))
|
||||
}
|
||||
|
||||
return ctx, log
|
||||
}
|
||||
|
||||
func main() {
|
||||
app := &cli.App{
|
||||
Name: "FrostFS S3 Authmate",
|
||||
Usage: "Helps manage delegated access via gates to data stored in FrostFS network",
|
||||
Version: version.Version,
|
||||
Flags: appFlags(),
|
||||
Commands: appCommands(),
|
||||
}
|
||||
cli.VersionPrinter = func(c *cli.Context) {
|
||||
fmt.Printf("%s\nVersion: %s\nGoVersion: %s\n", c.App.Name, c.App.Version, runtime.Version())
|
||||
}
|
||||
ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)
|
||||
|
||||
viper.AutomaticEnv()
|
||||
viper.SetEnvPrefix("AUTHMATE")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AllowEmptyEnv(true)
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "error: %s\n", err)
|
||||
os.Exit(100)
|
||||
if cmd, err := modules.Execute(ctx); err != nil {
|
||||
cmd.PrintErrln("Error:", err.Error())
|
||||
cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func appFlags() []cli.Flag {
|
||||
return []cli.Flag{
|
||||
&cli.BoolFlag{
|
||||
Name: "with-log",
|
||||
Usage: "Enable logger",
|
||||
Destination: &logEnabledFlag,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "debug",
|
||||
Usage: "Enable debug logger level",
|
||||
Destination: &logDebugEnabledFlag,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "timeout",
|
||||
Usage: "timeout of processing of the command, for example 2m " +
|
||||
"(note: max time unit is an hour so to set a day you should use 24h)",
|
||||
Destination: &timeoutFlag,
|
||||
Value: 1 * time.Minute,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func appCommands() []*cli.Command {
|
||||
return []*cli.Command{
|
||||
issueSecret(),
|
||||
obtainSecret(),
|
||||
generatePresignedURL(),
|
||||
}
|
||||
}
|
||||
|
||||
func issueSecret() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "issue-secret",
|
||||
Usage: "Issue a secret in FrostFS network",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "wallet",
|
||||
Value: "",
|
||||
Usage: "path to the wallet",
|
||||
Required: true,
|
||||
Destination: &walletPathFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Value: "",
|
||||
Usage: "address of wallet account",
|
||||
Required: false,
|
||||
Destination: &accountAddressFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "peer",
|
||||
Value: "",
|
||||
Usage: "address of a frostfs peer to connect to",
|
||||
Required: true,
|
||||
Destination: &peerAddressFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "bearer-rules",
|
||||
Usage: "rules for bearer token (filepath or a plain json string are allowed, can be used only with --disable-impersonate)",
|
||||
Required: false,
|
||||
Destination: &eaclRulesFlag,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "disable-impersonate",
|
||||
Usage: "mark token as not impersonate to don't consider token signer as request owner (must be provided to use --bearer-rules flag)",
|
||||
Required: false,
|
||||
Destination: &disableImpersonateFlag,
|
||||
},
|
||||
&cli.StringSliceFlag{
|
||||
Name: "gate-public-key",
|
||||
Usage: "public 256r1 key of a gate (use flags repeatedly for multiple gates)",
|
||||
Required: true,
|
||||
Destination: &gatesPublicKeysFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "container-id",
|
||||
Usage: "auth container id to put the secret into",
|
||||
Required: false,
|
||||
Destination: &containerIDFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "container-friendly-name",
|
||||
Usage: "friendly name of auth container to put the secret into",
|
||||
Required: false,
|
||||
Destination: &containerFriendlyName,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "container-placement-policy",
|
||||
Usage: "placement policy of auth container to put the secret into",
|
||||
Required: false,
|
||||
Destination: &containerPlacementPolicy,
|
||||
Value: "REP 2 IN X CBF 3 SELECT 2 FROM * AS X",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "session-tokens",
|
||||
Usage: "create session tokens with rules, if the rules are set as 'none', no session tokens will be created",
|
||||
Required: false,
|
||||
Destination: &sessionTokenFlag,
|
||||
Value: "",
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "lifetime",
|
||||
Usage: `Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).
|
||||
It will be ceil rounded to the nearest amount of epoch.`,
|
||||
Required: false,
|
||||
Destination: &lifetimeFlag,
|
||||
Value: defaultLifetime,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "container-policy",
|
||||
Usage: "mapping AWS storage class to FrostFS storage policy as plain json string or path to json file",
|
||||
Required: false,
|
||||
Destination: &containerPolicies,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "aws-cli-credentials",
|
||||
Usage: "path to the aws cli credential file",
|
||||
Required: false,
|
||||
Destination: &awcCliCredFile,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-dial-timeout",
|
||||
Usage: `Timeout for connection to the node in pool to be established`,
|
||||
Required: false,
|
||||
Destination: &poolDialTimeoutFlag,
|
||||
Value: poolDialTimeout,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-healthcheck-timeout",
|
||||
Usage: `Timeout for request to node to decide if it is alive`,
|
||||
Required: false,
|
||||
Destination: &poolHealthcheckTimeoutFlag,
|
||||
Value: poolHealthcheckTimeout,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-rebalance-interval",
|
||||
Usage: `Interval for updating nodes health status`,
|
||||
Required: false,
|
||||
Destination: &poolRebalanceIntervalFlag,
|
||||
Value: poolRebalanceInterval,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-stream-timeout",
|
||||
Usage: `Timeout for individual operation in streaming RPC`,
|
||||
Required: false,
|
||||
Destination: &poolStreamTimeoutFlag,
|
||||
Value: poolStreamTimeout,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
ctx, log := prepare()
|
||||
|
||||
password := wallet.GetPassword(viper.GetViper(), envWalletPassphrase)
|
||||
key, err := wallet.GetKeyFromPath(walletPathFlag, accountAddressFlag, password)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to load frostfs private key: %s", err), 1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
poolCfg := PoolConfig{
|
||||
Key: &key.PrivateKey,
|
||||
Address: peerAddressFlag,
|
||||
DialTimeout: poolDialTimeoutFlag,
|
||||
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
|
||||
StreamTimeout: poolStreamTimeoutFlag,
|
||||
RebalanceInterval: poolRebalanceIntervalFlag,
|
||||
}
|
||||
|
||||
frostFS, err := createFrostFS(ctx, log, poolCfg)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
|
||||
}
|
||||
|
||||
agent := authmate.New(log, frostFS)
|
||||
|
||||
var containerID cid.ID
|
||||
if len(containerIDFlag) > 0 {
|
||||
if err = containerID.DecodeString(containerIDFlag); err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to parse auth container id: %s", err), 3)
|
||||
}
|
||||
}
|
||||
|
||||
var gatesPublicKeys []*keys.PublicKey
|
||||
for _, key := range gatesPublicKeysFlag.Value() {
|
||||
gpk, err := keys.NewPublicKeyFromString(key)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to load gate's public key: %s", err), 4)
|
||||
}
|
||||
gatesPublicKeys = append(gatesPublicKeys, gpk)
|
||||
}
|
||||
|
||||
if lifetimeFlag <= 0 {
|
||||
return cli.Exit(fmt.Sprintf("lifetime must be greater 0, current value: %d", lifetimeFlag), 5)
|
||||
}
|
||||
|
||||
policies, err := parsePolicies(containerPolicies)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("couldn't parse container policy: %s", err.Error()), 6)
|
||||
}
|
||||
|
||||
if !disableImpersonateFlag && eaclRulesFlag != "" {
|
||||
return cli.Exit("--bearer-rules flag can be used only with --disable-impersonate", 6)
|
||||
}
|
||||
|
||||
bearerRules, err := getJSONRules(eaclRulesFlag)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("couldn't parse 'bearer-rules' flag: %s", err.Error()), 7)
|
||||
}
|
||||
|
||||
sessionRules, skipSessionRules, err := getSessionRules(sessionTokenFlag)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("couldn't parse 'session-tokens' flag: %s", err.Error()), 8)
|
||||
}
|
||||
|
||||
issueSecretOptions := &authmate.IssueSecretOptions{
|
||||
Container: authmate.ContainerOptions{
|
||||
ID: containerID,
|
||||
FriendlyName: containerFriendlyName,
|
||||
PlacementPolicy: containerPlacementPolicy,
|
||||
},
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
EACLRules: bearerRules,
|
||||
Impersonate: !disableImpersonateFlag,
|
||||
SessionTokenRules: sessionRules,
|
||||
SkipSessionRules: skipSessionRules,
|
||||
ContainerPolicies: policies,
|
||||
Lifetime: lifetimeFlag,
|
||||
AwsCliCredentialsFile: awcCliCredFile,
|
||||
}
|
||||
|
||||
var tcancel context.CancelFunc
|
||||
ctx, tcancel = context.WithTimeout(ctx, timeoutFlag)
|
||||
defer tcancel()
|
||||
|
||||
if err = agent.IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to issue secret: %s", err), 7)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func generatePresignedURL() *cli.Command {
|
||||
return &cli.Command{
|
||||
Name: "generate-presigned-url",
|
||||
Description: `Generate presigned url using AWS credentials. Credentials must be placed in ~/.aws/credentials.
|
||||
You provide profile to load using --profile flag or explicitly provide credentials and region using
|
||||
--aws-access-key-id, --aws-secret-access-key, --region.
|
||||
Note to override credentials you must provide both access key and secret key.`,
|
||||
Usage: "generate-presigned-url --endpoint http://s3.frostfs.devenv:8080 --bucket bucket-name --object object-name --method get --profile aws-profile",
|
||||
Flags: []cli.Flag{
|
||||
&cli.DurationFlag{
|
||||
Name: "lifetime",
|
||||
Usage: `Lifetime of presigned URL. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).
|
||||
It will be ceil rounded to the nearest amount of epoch.`,
|
||||
Required: false,
|
||||
Destination: &lifetimeFlag,
|
||||
Value: defaultPresignedLifetime,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "endpoint",
|
||||
Usage: `Endpoint of s3-gw`,
|
||||
Required: true,
|
||||
Destination: &endpointFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "bucket",
|
||||
Usage: `Bucket name to perform action`,
|
||||
Required: true,
|
||||
Destination: &bucketFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "object",
|
||||
Usage: `Object name to perform action`,
|
||||
Required: true,
|
||||
Destination: &objectFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "method",
|
||||
Usage: `HTTP method to perform action`,
|
||||
Required: true,
|
||||
Destination: &methodFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "profile",
|
||||
Usage: `AWS profile to load`,
|
||||
Required: false,
|
||||
Destination: &profileFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "region",
|
||||
Usage: `AWS region to use in signature (default is taken from ~/.aws/config)`,
|
||||
Required: false,
|
||||
Destination: ®ionFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "aws-access-key-id",
|
||||
Usage: `AWS access key id to sign the URL (default is taken from ~/.aws/credentials)`,
|
||||
Required: false,
|
||||
Destination: &accessKeyIDFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "aws-secret-access-key",
|
||||
Usage: `AWS secret access key to sign the URL (default is taken from ~/.aws/credentials)`,
|
||||
Required: false,
|
||||
Destination: &secretAccessKeyFlag,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
var cfg aws.Config
|
||||
if regionFlag != "" {
|
||||
cfg.Region = ®ionFlag
|
||||
}
|
||||
if accessKeyIDFlag != "" && secretAccessKeyFlag != "" {
|
||||
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(credentials.Value{
|
||||
AccessKeyID: accessKeyIDFlag,
|
||||
SecretAccessKey: secretAccessKeyFlag,
|
||||
})
|
||||
}
|
||||
|
||||
sess, err := session.NewSessionWithOptions(session.Options{
|
||||
Config: cfg,
|
||||
Profile: profileFlag,
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get credentials: %w", err)
|
||||
}
|
||||
|
||||
signer := v4.NewSigner(sess.Config.Credentials)
|
||||
req, err := http.NewRequest(strings.ToUpper(methodFlag), fmt.Sprintf("%s/%s/%s", endpointFlag, bucketFlag, objectFlag), nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new request: %w", err)
|
||||
}
|
||||
|
||||
date := time.Now().UTC()
|
||||
req.Header.Set(api.AmzDate, date.Format("20060102T150405Z"))
|
||||
|
||||
if _, err = signer.Presign(req, nil, "s3", *sess.Config.Region, lifetimeFlag, date); err != nil {
|
||||
return fmt.Errorf("presign: %w", err)
|
||||
}
|
||||
|
||||
res := &struct{ URL string }{
|
||||
URL: req.URL.String(),
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
enc.SetEscapeHTML(false)
|
||||
return enc.Encode(res)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func parsePolicies(val string) (authmate.ContainerPolicies, error) {
|
||||
if val == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var (
|
||||
data = []byte(val)
|
||||
err error
|
||||
)
|
||||
|
||||
if !json.Valid(data) {
|
||||
if data, err = os.ReadFile(val); err != nil {
|
||||
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
var policies authmate.ContainerPolicies
|
||||
if err = json.Unmarshal(data, &policies); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal policies: %w", err)
|
||||
}
|
||||
if _, ok := policies[api.DefaultLocationConstraint]; ok {
|
||||
return nil, fmt.Errorf("config overrides %s location constraint", api.DefaultLocationConstraint)
|
||||
}
|
||||
|
||||
return policies, nil
|
||||
}
|
||||
|
||||
func getJSONRules(val string) ([]byte, error) {
|
||||
if val == "" {
|
||||
return nil, nil
|
||||
}
|
||||
data := []byte(val)
|
||||
if json.Valid(data) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
if data, err := os.ReadFile(val); err == nil {
|
||||
if json.Valid(data) {
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
|
||||
}
|
||||
|
||||
// getSessionRules reads json session rules.
|
||||
// It returns true if rules must be skipped.
|
||||
func getSessionRules(r string) ([]byte, bool, error) {
|
||||
if r == "none" {
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
data, err := getJSONRules(r)
|
||||
return data, false, err
|
||||
}
|
||||
|
||||
func obtainSecret() *cli.Command {
|
||||
command := &cli.Command{
|
||||
Name: "obtain-secret",
|
||||
Usage: "Obtain a secret from FrostFS network",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "wallet",
|
||||
Value: "",
|
||||
Usage: "path to the wallet",
|
||||
Required: true,
|
||||
Destination: &walletPathFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "address",
|
||||
Value: "",
|
||||
Usage: "address of wallet account",
|
||||
Required: false,
|
||||
Destination: &accountAddressFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "peer",
|
||||
Value: "",
|
||||
Usage: "address of frostfs peer to connect to",
|
||||
Required: true,
|
||||
Destination: &peerAddressFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "gate-wallet",
|
||||
Value: "",
|
||||
Usage: "path to the wallet",
|
||||
Required: true,
|
||||
Destination: &gateWalletPathFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "gate-address",
|
||||
Value: "",
|
||||
Usage: "address of wallet account",
|
||||
Required: false,
|
||||
Destination: &gateAccountAddressFlag,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "access-key-id",
|
||||
Usage: "access key id for s3",
|
||||
Required: true,
|
||||
Destination: &accessKeyIDFlag,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-dial-timeout",
|
||||
Usage: `Timeout for connection to the node in pool to be established`,
|
||||
Required: false,
|
||||
Destination: &poolDialTimeoutFlag,
|
||||
Value: poolDialTimeout,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-healthcheck-timeout",
|
||||
Usage: `Timeout for request to node to decide if it is alive`,
|
||||
Required: false,
|
||||
Destination: &poolHealthcheckTimeoutFlag,
|
||||
Value: poolHealthcheckTimeout,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-rebalance-interval",
|
||||
Usage: `Interval for updating nodes health status`,
|
||||
Required: false,
|
||||
Destination: &poolRebalanceIntervalFlag,
|
||||
Value: poolRebalanceInterval,
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "pool-stream-timeout",
|
||||
Usage: `Timeout for individual operation in streaming RPC`,
|
||||
Required: false,
|
||||
Destination: &poolStreamTimeoutFlag,
|
||||
Value: poolStreamTimeout,
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) error {
|
||||
ctx, log := prepare()
|
||||
|
||||
password := wallet.GetPassword(viper.GetViper(), envWalletPassphrase)
|
||||
key, err := wallet.GetKeyFromPath(walletPathFlag, accountAddressFlag, password)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to load frostfs private key: %s", err), 1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
poolCfg := PoolConfig{
|
||||
Key: &key.PrivateKey,
|
||||
Address: peerAddressFlag,
|
||||
DialTimeout: poolDialTimeoutFlag,
|
||||
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
|
||||
StreamTimeout: poolStreamTimeoutFlag,
|
||||
RebalanceInterval: poolRebalanceIntervalFlag,
|
||||
}
|
||||
|
||||
frostFS, err := createFrostFS(ctx, log, poolCfg)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
|
||||
}
|
||||
|
||||
agent := authmate.New(log, frostFS)
|
||||
|
||||
var _ = agent
|
||||
|
||||
password = wallet.GetPassword(viper.GetViper(), envWalletGatePassphrase)
|
||||
gateCreds, err := wallet.GetKeyFromPath(gateWalletPathFlag, gateAccountAddressFlag, password)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to create owner's private key: %s", err), 4)
|
||||
}
|
||||
|
||||
secretAddress := strings.Replace(accessKeyIDFlag, "0", "/", 1)
|
||||
|
||||
obtainSecretOptions := &authmate.ObtainSecretOptions{
|
||||
SecretAddress: secretAddress,
|
||||
GatePrivateKey: gateCreds,
|
||||
}
|
||||
|
||||
var tcancel context.CancelFunc
|
||||
ctx, tcancel = context.WithTimeout(ctx, timeoutFlag)
|
||||
defer tcancel()
|
||||
|
||||
if err = agent.ObtainSecret(ctx, os.Stdout, obtainSecretOptions); err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to obtain secret: %s", err), 5)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authmate.FrostFS, error) {
|
||||
log.Debug("prepare connection pool")
|
||||
|
||||
var prm pool.InitParameters
|
||||
prm.SetKey(cfg.Key)
|
||||
prm.SetNodeDialTimeout(cfg.DialTimeout)
|
||||
prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
|
||||
prm.SetNodeStreamTimeout(cfg.StreamTimeout)
|
||||
prm.SetClientRebalanceInterval(cfg.RebalanceInterval)
|
||||
prm.AddNode(pool.NewNodeParam(1, cfg.Address, 1))
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create pool: %w", err)
|
||||
}
|
||||
|
||||
if err = p.Dial(ctx); err != nil {
|
||||
return nil, fmt.Errorf("dial pool: %w", err)
|
||||
}
|
||||
|
||||
return frostfs.NewAuthmateFrostFS(p), nil
|
||||
}
|
||||
|
|
108
cmd/s3-authmate/modules/generate-presigned-url.go
Normal file
|
@ -0,0 +1,108 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var generatePresignedURLCmd = &cobra.Command{
|
||||
Use: "generate-presigned-url",
|
||||
Short: "Generate presigned url using AWS credentials",
|
||||
Long: `Generate presigned url using AWS credentials. Credentials must be placed in ~/.aws/credentials.
|
||||
You provide profile to load using --profile flag or explicitly provide credentials and region using
|
||||
--aws-access-key-id, --aws-secret-access-key, --region.
|
||||
Note to override credentials you must provide both access key and secret key.`,
|
||||
Example: `frostfs-s3-authmate generate-presigned-url --method put --bucket my-bucket --object my-object --endpoint http://localhost:8084 --lifetime 12h --region ru --aws-access-key-id ETaA2CadPcA7bAkLsML2PbTudXY8uRt2PDjCCwkvRv9s0FDCxWDXYc1SA1vKv8KbyCNsLY2AmAjJ92Vz5rgvsFCy --aws-secret-access-key c2d65ef2980f03f4f495bdebedeeae760496697880d61d106bb9a4e5cd2e0607`,
|
||||
RunE: runGeneratePresignedURLCmd,
|
||||
}
|
||||
|
||||
const defaultPresignedLifetime = 12 * time.Hour
|
||||
|
||||
const (
|
||||
endpointFlag = "endpoint"
|
||||
bucketFlag = "bucket"
|
||||
objectFlag = "object"
|
||||
methodFlag = "method"
|
||||
profileFlag = "profile"
|
||||
regionFlag = "region"
|
||||
awsAccessKeyIDFlag = "aws-access-key-id"
|
||||
awsSecretAccessKeyFlag = "aws-secret-access-key"
|
||||
)
|
||||
|
||||
func initGeneratePresignedURLCmd() {
|
||||
generatePresignedURLCmd.Flags().Duration(lifetimeFlag, defaultPresignedLifetime, "Lifetime of presigned URL. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).\nIt will be rounded up to the nearest number of epochs.")
|
||||
generatePresignedURLCmd.Flags().String(endpointFlag, "", "S3 gateway endpoint")
|
||||
generatePresignedURLCmd.Flags().String(bucketFlag, "", "Bucket name to perform action")
|
||||
generatePresignedURLCmd.Flags().String(objectFlag, "", "Object name to perform action")
|
||||
generatePresignedURLCmd.Flags().String(methodFlag, "", "HTTP method to perform action")
|
||||
generatePresignedURLCmd.Flags().String(profileFlag, "", "AWS profile to load")
|
||||
generatePresignedURLCmd.Flags().String(regionFlag, "", "AWS region to use in signature (default is taken from ~/.aws/config)")
|
||||
generatePresignedURLCmd.Flags().String(awsAccessKeyIDFlag, "", "AWS access key id to sign the URL (default is taken from ~/.aws/credentials)")
|
||||
generatePresignedURLCmd.Flags().String(awsSecretAccessKeyFlag, "", "AWS secret access key to sign the URL (default is taken from ~/.aws/credentials)")
|
||||
|
||||
_ = generatePresignedURLCmd.MarkFlagRequired(endpointFlag)
|
||||
_ = generatePresignedURLCmd.MarkFlagRequired(bucketFlag)
|
||||
_ = generatePresignedURLCmd.MarkFlagRequired(objectFlag)
|
||||
}
|
||||
|
||||
func runGeneratePresignedURLCmd(*cobra.Command, []string) error {
|
||||
var cfg aws.Config
|
||||
|
||||
if region := viper.GetString(regionFlag); region != "" {
|
||||
cfg.Region = ®ion
|
||||
}
|
||||
accessKeyID := viper.GetString(awsAccessKeyIDFlag)
|
||||
secretAccessKey := viper.GetString(awsSecretAccessKeyFlag)
|
||||
|
||||
if accessKeyID != "" && secretAccessKey != "" {
|
||||
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(credentials.Value{
|
||||
AccessKeyID: accessKeyID,
|
||||
SecretAccessKey: secretAccessKey,
|
||||
})
|
||||
}
|
||||
|
||||
sess, err := session.NewSessionWithOptions(session.Options{
|
||||
Config: cfg,
|
||||
Profile: viper.GetString(profileFlag),
|
||||
SharedConfigState: session.SharedConfigEnable,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get aws credentials: %w", err)
|
||||
}
|
||||
|
||||
reqData := auth.RequestData{
|
||||
Method: viper.GetString(methodFlag),
|
||||
Endpoint: viper.GetString(endpointFlag),
|
||||
Bucket: viper.GetString(bucketFlag),
|
||||
Object: viper.GetString(objectFlag),
|
||||
}
|
||||
presignData := auth.PresignData{
|
||||
Service: "s3",
|
||||
Region: *sess.Config.Region,
|
||||
Lifetime: viper.GetDuration(lifetimeFlag),
|
||||
SignTime: time.Now().UTC(),
|
||||
}
|
||||
|
||||
req, err := auth.PresignRequest(sess.Config.Credentials, reqData, presignData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res := &struct{ URL string }{
|
||||
URL: req.URL.String(),
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
enc.SetEscapeHTML(false)
|
||||
return enc.Encode(res)
|
||||
}
|
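As a usage sketch, the URL printed by this command can be sent directly with any HTTP client; no further signing is required. The URL value below is a placeholder, not output from a real run.

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Placeholder: in practice this is the "URL" field printed by
	// generate-presigned-url.
	presignedURL := "http://localhost:8084/my-bucket/my-object?X-Amz-Algorithm=AWS4-HMAC-SHA256"

	// The method must match the --method flag used when presigning.
	req, err := http.NewRequest(http.MethodPut, presignedURL, strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}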
176
cmd/s3-authmate/modules/issue-secret.go
Normal file
|
@ -0,0 +1,176 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var issueSecretCmd = &cobra.Command{
|
||||
Use: "issue-secret",
|
||||
Short: "Issue a secret in FrostFS network",
|
||||
Long: "Creates new s3 credentials to use with frostfs-s3-gw",
|
||||
Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a`,
|
||||
RunE: runIssueSecretCmd,
|
||||
}
|
||||
|
||||
const (
|
||||
walletFlag = "wallet"
|
||||
addressFlag = "address"
|
||||
peerFlag = "peer"
|
||||
bearerRulesFlag = "bearer-rules"
|
||||
disableImpersonateFlag = "disable-impersonate"
|
||||
gatePublicKeyFlag = "gate-public-key"
|
||||
containerIDFlag = "container-id"
|
||||
containerFriendlyNameFlag = "container-friendly-name"
|
||||
containerPlacementPolicyFlag = "container-placement-policy"
|
||||
sessionTokensFlag = "session-tokens"
|
||||
lifetimeFlag = "lifetime"
|
||||
containerPolicyFlag = "container-policy"
|
||||
awsCLICredentialFlag = "aws-cli-credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
walletPassphraseCfg = "wallet.passphrase"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultAccessBoxLifetime = 30 * 24 * time.Hour
|
||||
|
||||
defaultPoolDialTimeout = 5 * time.Second
|
||||
defaultPoolHealthcheckTimeout = 5 * time.Second
|
||||
defaultPoolRebalanceInterval = 30 * time.Second
|
||||
defaultPoolStreamTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
const (
|
||||
poolDialTimeoutFlag = "pool-dial-timeout"
|
||||
poolHealthcheckTimeoutFlag = "pool-healthcheck-timeout"
|
||||
poolRebalanceIntervalFlag = "pool-rebalance-interval"
|
||||
poolStreamTimeoutFlag = "pool-stream-timeout"
|
||||
)
|
||||
|
||||
func initIssueSecretCmd() {
|
||||
issueSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
|
||||
issueSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
|
||||
issueSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
|
||||
issueSecretCmd.Flags().String(bearerRulesFlag, "", "Rules for bearer token (filepath or a plain json string are allowed, can be used only with --disable-impersonate)")
|
||||
issueSecretCmd.Flags().Bool(disableImpersonateFlag, false, "Mark the token as non-impersonating so the token signer is not considered the request owner (must be provided to use --bearer-rules flag)")
|
||||
issueSecretCmd.Flags().StringSlice(gatePublicKeyFlag, nil, "Public 256r1 key of a gate (use flags repeatedly for multiple gates or separate them by comma)")
|
||||
issueSecretCmd.Flags().String(containerIDFlag, "", "Auth container id to put the secret into (if not provided new container will be created)")
|
||||
issueSecretCmd.Flags().String(containerFriendlyNameFlag, "", "Friendly name of auth container to put the secret into (flag value will be used only if --container-id is missed)")
|
||||
issueSecretCmd.Flags().String(containerPlacementPolicyFlag, "REP 2 IN X CBF 3 SELECT 2 FROM * AS X", "Placement policy of auth container to put the secret into (flag value will be used only if --container-id is missed)")
|
||||
issueSecretCmd.Flags().String(sessionTokensFlag, "", "create session tokens with rules, if the rules are set as 'none', no session tokens will be created")
|
||||
issueSecretCmd.Flags().Duration(lifetimeFlag, defaultAccessBoxLifetime, "Lifetime of tokens. For example 50h30m (note: max time unit is an hour so to set a day you should use 24h).\nIt will be rounded up to the nearest number of epochs.")
|
||||
issueSecretCmd.Flags().String(containerPolicyFlag, "", "Mapping AWS storage class to FrostFS storage policy as plain json string or path to json file")
|
||||
issueSecretCmd.Flags().String(awsCLICredentialFlag, "", "Path to the aws cli credential file")
|
||||
issueSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
|
||||
issueSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
issueSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
issueSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
|
||||
_ = issueSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = issueSecretCmd.MarkFlagRequired(peerFlag)
|
||||
_ = issueSecretCmd.MarkFlagRequired(gatePublicKeyFlag)
|
||||
}
|
||||
|
||||
func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
|
||||
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
|
||||
defer cancel()
|
||||
|
||||
log := getLogger()
|
||||
|
||||
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
|
||||
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load frostfs private key: %s", err)
|
||||
}
|
||||
|
||||
var cnrID cid.ID
|
||||
containerID := viper.GetString(containerIDFlag)
|
||||
if len(containerID) > 0 {
|
||||
if err = cnrID.DecodeString(containerID); err != nil {
|
||||
return fmt.Errorf("failed to parse auth container id: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
var gatesPublicKeys []*keys.PublicKey
|
||||
for _, keyStr := range viper.GetStringSlice(gatePublicKeyFlag) {
|
||||
gpk, err := keys.NewPublicKeyFromString(keyStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load gate's public key: %s", err)
|
||||
}
|
||||
gatesPublicKeys = append(gatesPublicKeys, gpk)
|
||||
}
|
||||
|
||||
lifetime := viper.GetDuration(lifetimeFlag)
|
||||
if lifetime <= 0 {
|
||||
return fmt.Errorf("lifetime must be greater 0, current value: %d", lifetime)
|
||||
}
|
||||
|
||||
policies, err := parsePolicies(viper.GetString(containerPolicyFlag))
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't parse container policy: %s", err.Error())
|
||||
}
|
||||
|
||||
disableImpersonate := viper.GetBool(disableImpersonateFlag)
|
||||
eaclRules := viper.GetString(bearerRulesFlag)
|
||||
if !disableImpersonate && eaclRules != "" {
|
||||
return errors.New("--bearer-rules flag can be used only with --disable-impersonate")
|
||||
}
|
||||
|
||||
bearerRules, err := getJSONRules(eaclRules)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't parse 'bearer-rules' flag: %s", err.Error())
|
||||
}
|
||||
|
||||
sessionRules, skipSessionRules, err := getSessionRules(viper.GetString(sessionTokensFlag))
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't parse 'session-tokens' flag: %s", err.Error())
|
||||
}
|
||||
|
||||
poolCfg := PoolConfig{
|
||||
Key: &key.PrivateKey,
|
||||
Address: viper.GetString(peerFlag),
|
||||
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
|
||||
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
|
||||
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
|
||||
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
|
||||
}
|
||||
|
||||
frostFS, err := createFrostFS(ctx, log, poolCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create FrostFS component: %s", err)
|
||||
}
|
||||
|
||||
issueSecretOptions := &authmate.IssueSecretOptions{
|
||||
Container: authmate.ContainerOptions{
|
||||
ID: cnrID,
|
||||
FriendlyName: viper.GetString(containerFriendlyNameFlag),
|
||||
PlacementPolicy: viper.GetString(containerPlacementPolicyFlag),
|
||||
},
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
EACLRules: bearerRules,
|
||||
Impersonate: !disableImpersonate,
|
||||
SessionTokenRules: sessionRules,
|
||||
SkipSessionRules: skipSessionRules,
|
||||
ContainerPolicies: policies,
|
||||
Lifetime: lifetime,
|
||||
AwsCliCredentialsFile: viper.GetString(awsCLICredentialFlag),
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).IssueSecret(ctx, os.Stdout, issueSecretOptions); err != nil {
|
||||
return fmt.Errorf("failed to issue secret: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
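For the --container-policy flag, a hypothetical mapping sketch, assuming authmate.ContainerPolicies is a plain map from a location constraint / storage class name to a FrostFS placement policy string; the key "rep-3" and the policy "REP 3" below are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
)

func main() {
	// Illustrative only: "rep-3" is an arbitrary class name, "REP 3" an
	// arbitrary placement policy.
	policies := authmate.ContainerPolicies{
		"rep-3": "REP 3",
	}

	data, err := json.Marshal(policies)
	if err != nil {
		panic(err)
	}

	// The resulting JSON can be passed verbatim to --container-policy or
	// stored in a file referenced by the flag.
	fmt.Println(string(data))
}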
94
cmd/s3-authmate/modules/obtain-secret.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var obtainSecretCmd = &cobra.Command{
|
||||
Use: "obtain-secret",
|
||||
Short: "Obtain a secret from FrostFS network",
|
||||
Long: "Gets generated secret from credential object (accessbox)",
|
||||
Example: `frostfs-s3-authmate obtain-secret --wallet wallet.json --peer s01.neofs.devenv:8080 --gate-wallet s3-wallet.json --access-key-id EC3tyWpTEKfGNS888PFBpwQzZTrnwDXReGjgAxa8Em1h037VoWktUZCAk1LVA5SvVbVd2NHHb2NQm9jhcd5WFU5VD`,
|
||||
RunE: runObtainSecretCmd,
|
||||
}
|
||||
|
||||
const (
|
||||
gateWalletFlag = "gate-wallet"
|
||||
gateAddressFlag = "gate-address"
|
||||
accessKeyIDFlag = "access-key-id"
|
||||
)
|
||||
|
||||
const (
|
||||
walletGatePassphraseCfg = "wallet.gate.passphrase"
|
||||
)
|
||||
|
||||
func initObtainSecretCmd() {
|
||||
obtainSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
|
||||
obtainSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
|
||||
obtainSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
|
||||
obtainSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
|
||||
obtainSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
|
||||
obtainSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be obtained")
|
||||
obtainSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
|
||||
obtainSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
obtainSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
obtainSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
|
||||
_ = obtainSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = obtainSecretCmd.MarkFlagRequired(peerFlag)
|
||||
_ = obtainSecretCmd.MarkFlagRequired(gateWalletFlag)
|
||||
_ = obtainSecretCmd.MarkFlagRequired(accessKeyIDFlag)
|
||||
}
|
||||
|
||||
func runObtainSecretCmd(cmd *cobra.Command, _ []string) error {
|
||||
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
|
||||
defer cancel()
|
||||
|
||||
log := getLogger()
|
||||
|
||||
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
|
||||
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load frostfs private key: %s", err)
|
||||
}
|
||||
|
||||
gatePassword := wallet.GetPassword(viper.GetViper(), walletGatePassphraseCfg)
|
||||
gateKey, err := wallet.GetKeyFromPath(viper.GetString(gateWalletFlag), viper.GetString(gateAddressFlag), gatePassword)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load s3 gate private key: %s", err)
|
||||
}
|
||||
|
||||
poolCfg := PoolConfig{
|
||||
Key: &key.PrivateKey,
|
||||
Address: viper.GetString(peerFlag),
|
||||
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
|
||||
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
|
||||
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
|
||||
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
|
||||
}
|
||||
|
||||
frostFS, err := createFrostFS(ctx, log, poolCfg)
|
||||
if err != nil {
|
||||
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
|
||||
}
|
||||
|
||||
obtainSecretOptions := &authmate.ObtainSecretOptions{
|
||||
SecretAddress: strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1),
|
||||
GatePrivateKey: gateKey,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).ObtainSecret(ctx, os.Stdout, obtainSecretOptions); err != nil {
|
||||
return fmt.Errorf("failed to obtain secret: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
68
cmd/s3-authmate/modules/root.go
Normal file
|
@ -0,0 +1,68 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// rootCmd represents the base command when called without any subcommands.
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "frostfs-s3-authmate",
|
||||
Version: version.Version,
|
||||
Short: "FrostFS S3 Authmate",
|
||||
Long: "Helps manage delegated access via gates to data stored in FrostFS network",
|
||||
Example: "frostfs-s3-authmate --version",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
|
||||
viper.AutomaticEnv()
|
||||
viper.SetEnvPrefix("AUTHMATE")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AllowEmptyEnv(true)
|
||||
|
||||
return viper.BindPFlags(cmd.Flags())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, _ []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
const (
|
||||
withLogFlag = "with-log"
|
||||
debugFlag = "debug"
|
||||
timeoutFlag = "timeout"
|
||||
)
|
||||
|
||||
func Execute(ctx context.Context) (*cobra.Command, error) {
|
||||
return rootCmd.ExecuteContextC(ctx)
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().Bool(withLogFlag, false, "Enable logger")
|
||||
rootCmd.PersistentFlags().Bool(debugFlag, false, "Enable debug logger level")
|
||||
rootCmd.PersistentFlags().Duration(timeoutFlag, time.Minute, "Timeout for processing the command, for example 2m (note: the max time unit is an hour, so to set a day use 24h)")
|
||||
|
||||
cobra.AddTemplateFunc("runtimeVersion", runtime.Version)
|
||||
rootCmd.SetVersionTemplate(`FrostFS S3 Authmate
|
||||
{{printf "Version: %s" .Version }}
|
||||
GoVersion: {{ runtimeVersion }}
|
||||
`)
|
||||
|
||||
rootCmd.AddCommand(issueSecretCmd)
|
||||
initIssueSecretCmd()
|
||||
|
||||
rootCmd.AddCommand(obtainSecretCmd)
|
||||
initObtainSecretCmd()
|
||||
|
||||
rootCmd.AddCommand(generatePresignedURLCmd)
|
||||
initGeneratePresignedURLCmd()
|
||||
|
||||
rootCmd.AddCommand(updateSecretCmd)
|
||||
initUpdateSecretCmd()
|
||||
}
|
108
cmd/s3-authmate/modules/update-secret.go
Normal file
|
@ -0,0 +1,108 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var updateSecretCmd = &cobra.Command{
|
||||
Use: "update-secret",
|
||||
Short: "Update a secret in FrostFS network",
|
||||
Long: `Creates a new access box that will be available to an extended list of s3 gates, preserving all timeouts from the initial credentials.
After using this command you can use the initial access-key-id to interact with the newly added gates`,
|
||||
Example: `To extend list of s3 gates that can use existing credentials run:
|
||||
frostfs-s3-authmate update-secret --wallet wallet.json --peer s01.neofs.devenv:8080 --gate-wallet s3-wallet.json \
|
||||
--gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a \
|
||||
--gate-public-key 021dc56fc6d81d581ae7605a8e00e0e0bab6cbad566a924a527339475a97a8e38e \
|
||||
--access-key-id EC3tyWpTEKfGNS888PFBpwQzZTrnwDXReGjgAxa8Em1h037VoWktUZCAk1LVA5SvVbVd2NHHb2NQm9jhcd5WFU5VD`,
|
||||
RunE: runUpdateSecretCmd,
|
||||
}
|
||||
|
||||
func initUpdateSecretCmd() {
|
||||
updateSecretCmd.Flags().String(walletFlag, "", "Path to the wallet that will be owner of the credentials")
|
||||
updateSecretCmd.Flags().String(addressFlag, "", "Address of the wallet account")
|
||||
updateSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
|
||||
updateSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
|
||||
updateSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
|
||||
updateSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be obtained")
|
||||
updateSecretCmd.Flags().StringSlice(gatePublicKeyFlag, nil, "Public secp256r1 key of a gate (use the flag repeatedly for multiple gates or separate them by comma)")
|
||||
updateSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
|
||||
updateSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
|
||||
updateSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
|
||||
updateSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
|
||||
|
||||
_ = updateSecretCmd.MarkFlagRequired(walletFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(peerFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(gateWalletFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(accessKeyIDFlag)
|
||||
_ = updateSecretCmd.MarkFlagRequired(gatePublicKeyFlag)
|
||||
}
|
||||
|
||||
func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
|
||||
ctx, cancel := context.WithTimeout(cmd.Context(), viper.GetDuration(timeoutFlag))
|
||||
defer cancel()
|
||||
|
||||
log := getLogger()
|
||||
|
||||
password := wallet.GetPassword(viper.GetViper(), walletPassphraseCfg)
|
||||
key, err := wallet.GetKeyFromPath(viper.GetString(walletFlag), viper.GetString(addressFlag), password)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load frostfs private key: %s", err)
|
||||
}
|
||||
|
||||
gatePassword := wallet.GetPassword(viper.GetViper(), walletGatePassphraseCfg)
|
||||
gateKey, err := wallet.GetKeyFromPath(viper.GetString(gateWalletFlag), viper.GetString(gateAddressFlag), gatePassword)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load s3 gate private key: %s", err)
|
||||
}
|
||||
|
||||
var accessBoxAddress oid.Address
|
||||
credAddr := strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1)
|
||||
if err = accessBoxAddress.DecodeString(credAddr); err != nil {
|
||||
return fmt.Errorf("failed to parse creds address: %w", err)
|
||||
}
|
||||
|
||||
var gatesPublicKeys []*keys.PublicKey
|
||||
for _, keyStr := range viper.GetStringSlice(gatePublicKeyFlag) {
|
||||
gpk, err := keys.NewPublicKeyFromString(keyStr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load gate's public key: %s", err)
|
||||
}
|
||||
gatesPublicKeys = append(gatesPublicKeys, gpk)
|
||||
}
|
||||
|
||||
poolCfg := PoolConfig{
|
||||
Key: &key.PrivateKey,
|
||||
Address: viper.GetString(peerFlag),
|
||||
DialTimeout: viper.GetDuration(poolDialTimeoutFlag),
|
||||
HealthcheckTimeout: viper.GetDuration(poolHealthcheckTimeoutFlag),
|
||||
StreamTimeout: viper.GetDuration(poolStreamTimeoutFlag),
|
||||
RebalanceInterval: viper.GetDuration(poolRebalanceIntervalFlag),
|
||||
}
|
||||
|
||||
frostFS, err := createFrostFS(ctx, log, poolCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create FrostFS component: %s", err)
|
||||
}
|
||||
|
||||
updateSecretOptions := &authmate.UpdateSecretOptions{
|
||||
Address: accessBoxAddress,
|
||||
FrostFSKey: key,
|
||||
GatesPublicKeys: gatesPublicKeys,
|
||||
GatePrivateKey: gateKey,
|
||||
}
|
||||
|
||||
if err = authmate.New(log, frostFS).UpdateSecret(ctx, os.Stdout, updateSecretOptions); err != nil {
|
||||
return fmt.Errorf("failed to update secret: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
142
cmd/s3-authmate/modules/utils.go
Normal file
|
@ -0,0 +1,142 @@
|
|||
package modules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type PoolConfig struct {
|
||||
Key *ecdsa.PrivateKey
|
||||
Address string
|
||||
DialTimeout time.Duration
|
||||
HealthcheckTimeout time.Duration
|
||||
StreamTimeout time.Duration
|
||||
RebalanceInterval time.Duration
|
||||
}
|
||||
|
||||
func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authmate.FrostFS, error) {
|
||||
log.Debug("prepare connection pool")
|
||||
|
||||
var prm pool.InitParameters
|
||||
prm.SetKey(cfg.Key)
|
||||
prm.SetNodeDialTimeout(cfg.DialTimeout)
|
||||
prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
|
||||
prm.SetNodeStreamTimeout(cfg.StreamTimeout)
|
||||
prm.SetClientRebalanceInterval(cfg.RebalanceInterval)
|
||||
prm.SetLogger(log)
|
||||
prm.AddNode(pool.NewNodeParam(1, cfg.Address, 1))
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create pool: %w", err)
|
||||
}
|
||||
|
||||
if err = p.Dial(ctx); err != nil {
|
||||
return nil, fmt.Errorf("dial pool: %w", err)
|
||||
}
|
||||
|
||||
return frostfs.NewAuthmateFrostFS(p), nil
|
||||
}
|
||||
|
||||
func parsePolicies(val string) (authmate.ContainerPolicies, error) {
|
||||
if val == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var (
|
||||
data = []byte(val)
|
||||
err error
|
||||
)
|
||||
|
||||
if !json.Valid(data) {
|
||||
if data, err = os.ReadFile(val); err != nil {
|
||||
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
var policies authmate.ContainerPolicies
|
||||
if err = json.Unmarshal(data, &policies); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal policies: %w", err)
|
||||
}
|
||||
if _, ok := policies[api.DefaultLocationConstraint]; ok {
|
||||
return nil, fmt.Errorf("config overrides %s location constraint", api.DefaultLocationConstraint)
|
||||
}
|
||||
|
||||
return policies, nil
|
||||
}
|
||||
|
||||
func getJSONRules(val string) ([]byte, error) {
|
||||
if val == "" {
|
||||
return nil, nil
|
||||
}
|
||||
data := []byte(val)
|
||||
if json.Valid(data) {
|
||||
return data, nil
|
||||
}
|
||||
|
||||
if data, err := os.ReadFile(val); err == nil {
|
||||
if json.Valid(data) {
|
||||
return data, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("coudln't read json file or provided json is invalid")
|
||||
}
|
||||
|
||||
// getSessionRules reads json session rules.
|
||||
// It returns true if rules must be skipped.
|
||||
func getSessionRules(r string) ([]byte, bool, error) {
|
||||
if r == "none" {
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
data, err := getJSONRules(r)
|
||||
return data, false, err
|
||||
}
|
||||
|
||||
// getLogger returns a new logger configured from the corresponding values in viper.Viper;
// if the logger cannot be built, it panics.
|
||||
func getLogger() *zap.Logger {
|
||||
if !viper.GetBool(withLogFlag) {
|
||||
return zap.NewNop()
|
||||
}
|
||||
|
||||
var zapConfig = zap.Config{
|
||||
Development: true,
|
||||
Encoding: "console",
|
||||
Level: zap.NewAtomicLevelAt(zapcore.FatalLevel),
|
||||
OutputPaths: []string{"stdout"},
|
||||
EncoderConfig: zapcore.EncoderConfig{
|
||||
MessageKey: "message",
|
||||
LevelKey: "level",
|
||||
EncodeLevel: zapcore.CapitalLevelEncoder,
|
||||
TimeKey: "time",
|
||||
EncodeTime: zapcore.ISO8601TimeEncoder,
|
||||
CallerKey: "caller",
|
||||
EncodeCaller: zapcore.ShortCallerEncoder,
|
||||
},
|
||||
}
|
||||
|
||||
if viper.GetBool(debugFlag) {
|
||||
zapConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
|
||||
}
|
||||
|
||||
log, err := zapConfig.Build()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("create logger: %w", err))
|
||||
}
|
||||
|
||||
return log
|
||||
}
|
180
cmd/s3-gw/app.go
|
@ -13,6 +13,8 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
|
||||
|
@ -29,6 +31,7 @@ import (
|
|||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/viper"
|
||||
|
@ -44,6 +47,7 @@ type (
|
|||
log *zap.Logger
|
||||
cfg *viper.Viper
|
||||
pool *pool.Pool
|
||||
treePool *treepool.Pool
|
||||
key *keys.PrivateKey
|
||||
nc *notifications.Controller
|
||||
obj layer.Client
|
||||
|
@ -76,20 +80,23 @@ type (
|
|||
mu sync.RWMutex
|
||||
defaultPolicy netmap.PlacementPolicy
|
||||
regionMap map[string]netmap.PlacementPolicy
|
||||
copiesNumbers map[string][]uint32
|
||||
defaultCopiesNumbers []uint32
|
||||
}
|
||||
)
|
||||
|
||||
func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
|
||||
conns, key := getPool(ctx, log.logger, v)
|
||||
objPool, treePool, key := getPools(ctx, log.logger, v)
|
||||
|
||||
// prepare auth center
|
||||
ctr := auth.New(frostfs.NewAuthmateFrostFS(conns), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
|
||||
ctr := auth.New(frostfs.NewAuthmateFrostFS(objPool), key, v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), getAccessBoxCacheConfig(v, log.logger))
|
||||
|
||||
app := &App{
|
||||
ctr: ctr,
|
||||
log: log.logger,
|
||||
cfg: v,
|
||||
pool: conns,
|
||||
pool: objPool,
|
||||
treePool: treePool,
|
||||
key: key,
|
||||
|
||||
webDone: make(chan struct{}, 1),
|
||||
|
@ -108,20 +115,12 @@ func (a *App) init(ctx context.Context) {
|
|||
a.initAPI(ctx)
|
||||
a.initMetrics()
|
||||
a.initServers(ctx)
|
||||
a.initTracing(ctx)
|
||||
}
|
||||
|
||||
func (a *App) initLayer(ctx context.Context) {
|
||||
a.initResolver()
|
||||
|
||||
treeServiceEndpoint := a.cfg.GetStringSlice(cfgTreeServiceEndpoint)
|
||||
grpcDialOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||
treeGRPCClient, err := services.NewTreeServiceClientGRPC(ctx, treeServiceEndpoint, a.key, a.log, grpcDialOpt)
|
||||
if err != nil {
|
||||
a.log.Fatal("failed to create tree service", zap.Error(err))
|
||||
}
|
||||
treeService := tree.NewTree(treeGRPCClient, a.log)
|
||||
a.log.Info("init tree service", zap.Strings("endpoints", treeGRPCClient.Endpoints()))
|
||||
|
||||
// prepare random key for anonymous requests
|
||||
randomKey, err := keys.NewPrivateKey()
|
||||
if err != nil {
|
||||
|
@ -134,7 +133,7 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
Key: randomKey,
|
||||
},
|
||||
Resolver: a.bucketResolver,
|
||||
TreeService: treeService,
|
||||
TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
|
||||
}
|
||||
|
||||
// prepare object layer
|
||||
|
@ -154,7 +153,7 @@ func (a *App) initLayer(ctx context.Context) {
|
|||
}
|
||||
|
||||
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
||||
policies, err := newPlacementPolicy(getDefaultPolicyValue(v), v.GetString(cfgPolicyRegionMapFile))
|
||||
policies, err := newPlacementPolicy(log.logger, v)
|
||||
if err != nil {
|
||||
log.logger.Fatal("failed to create new policy mapping", zap.Error(err))
|
||||
}
|
||||
|
@ -212,6 +211,38 @@ func (a *App) getResolverConfig() ([]string, *resolver.Config) {
|
|||
return order, resolveCfg
|
||||
}
|
||||
|
||||
func (a *App) initTracing(ctx context.Context) {
|
||||
instanceID := ""
|
||||
if len(a.servers) > 0 {
|
||||
instanceID = a.servers[0].Address()
|
||||
}
|
||||
cfg := tracing.Config{
|
||||
Enabled: a.cfg.GetBool(cfgTracingEnabled),
|
||||
Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
|
||||
Endpoint: a.cfg.GetString(cfgTracingEndpoint),
|
||||
Service: "frostfs-s3-gw",
|
||||
InstanceID: instanceID,
|
||||
Version: version.Version,
|
||||
}
|
||||
updated, err := tracing.Setup(ctx, cfg)
|
||||
if err != nil {
|
||||
a.log.Warn("failed to initialize tracing", zap.Error(err))
|
||||
}
|
||||
if updated {
|
||||
a.log.Info("tracing config updated")
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) shutdownTracing() {
|
||||
const tracingShutdownTimeout = 5 * time.Second
|
||||
shdnCtx, cancel := context.WithTimeout(context.Background(), tracingShutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := tracing.Shutdown(shdnCtx); err != nil {
|
||||
a.log.Warn("failed to shutdown tracing", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func newMaxClients(cfg *viper.Viper) api.MaxClients {
|
||||
maxClientsCount := cfg.GetInt(cfgMaxClientsCount)
|
||||
if maxClientsCount <= 0 {
|
||||
|
@ -226,8 +257,9 @@ func newMaxClients(cfg *viper.Viper) api.MaxClients {
|
|||
return api.NewMaxClientsMiddleware(maxClientsCount, maxClientsDeadline)
|
||||
}
|
||||
|
||||
func getPool(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *keys.PrivateKey) {
|
||||
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
|
||||
var prm pool.InitParameters
|
||||
var prmTree treepool.InitParameters
|
||||
|
||||
password := wallet.GetPassword(cfg, cfgWalletPassphrase)
|
||||
key, err := wallet.GetKeyFromPath(cfg.GetString(cfgWalletPath), cfg.GetString(cfgWalletAddress), password)
|
||||
|
@ -236,10 +268,12 @@ func getPool(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.P
|
|||
}
|
||||
|
||||
prm.SetKey(&key.PrivateKey)
|
||||
prmTree.SetKey(key)
|
||||
logger.Info("using credentials", zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
|
||||
|
||||
for _, peer := range fetchPeers(logger, cfg) {
|
||||
prm.AddNode(peer)
|
||||
prmTree.AddNode(peer)
|
||||
}
|
||||
|
||||
connTimeout := cfg.GetDuration(cfgConnectTimeout)
|
||||
|
@ -247,24 +281,28 @@ func getPool(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.P
|
|||
connTimeout = defaultConnectTimeout
|
||||
}
|
||||
prm.SetNodeDialTimeout(connTimeout)
|
||||
prmTree.SetNodeDialTimeout(connTimeout)
|
||||
|
||||
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
|
||||
if streamTimeout <= 0 {
|
||||
streamTimeout = defaultStreamTimeout
|
||||
}
|
||||
prm.SetNodeStreamTimeout(streamTimeout)
|
||||
prmTree.SetNodeStreamTimeout(streamTimeout)
|
||||
|
||||
healthCheckTimeout := cfg.GetDuration(cfgHealthcheckTimeout)
|
||||
if healthCheckTimeout <= 0 {
|
||||
healthCheckTimeout = defaultHealthcheckTimeout
|
||||
}
|
||||
prm.SetHealthcheckTimeout(healthCheckTimeout)
|
||||
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
|
||||
|
||||
rebalanceInterval := cfg.GetDuration(cfgRebalanceInterval)
|
||||
if rebalanceInterval <= 0 {
|
||||
rebalanceInterval = defaultRebalanceInterval
|
||||
}
|
||||
prm.SetClientRebalanceInterval(rebalanceInterval)
|
||||
prmTree.SetClientRebalanceInterval(rebalanceInterval)
|
||||
|
||||
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
|
||||
if errorThreshold <= 0 {
|
||||
|
@ -272,6 +310,20 @@ func getPool(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.P
|
|||
}
|
||||
prm.SetErrorThreshold(errorThreshold)
|
||||
prm.SetLogger(logger)
|
||||
prmTree.SetLogger(logger)
|
||||
|
||||
var apiGRPCDialOpts []grpc.DialOption
|
||||
var treeGRPCDialOpts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
|
||||
if cfg.GetBool(cfgTracingEnabled) {
|
||||
interceptors := []grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||
}
|
||||
treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
|
||||
apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
|
||||
}
|
||||
prm.SetGRPCDialOptions(apiGRPCDialOpts...)
|
||||
prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
|
@ -282,24 +334,36 @@ func getPool(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.P
|
|||
logger.Fatal("failed to dial connection pool", zap.Error(err))
|
||||
}
|
||||
|
||||
return p, key
|
||||
}
|
||||
|
||||
func newPlacementPolicy(defaultPolicy string, regionPolicyFilepath string) (*placementPolicy, error) {
|
||||
policies := &placementPolicy{
|
||||
regionMap: make(map[string]netmap.PlacementPolicy),
|
||||
treePool, err := treepool.NewPool(prmTree)
|
||||
if err != nil {
|
||||
logger.Fatal("failed to create tree pool", zap.Error(err))
|
||||
}
|
||||
if err = treePool.Dial(ctx); err != nil {
|
||||
logger.Fatal("failed to dial tree pool", zap.Error(err))
|
||||
}
|
||||
|
||||
return policies, policies.update(defaultPolicy, regionPolicyFilepath)
|
||||
return p, treePool, key
|
||||
}
|
||||
|
||||
func (p *placementPolicy) Default() netmap.PlacementPolicy {
|
||||
func newPlacementPolicy(l *zap.Logger, v *viper.Viper) (*placementPolicy, error) {
|
||||
policies := &placementPolicy{
|
||||
regionMap: make(map[string]netmap.PlacementPolicy),
|
||||
defaultCopiesNumbers: []uint32{handler.DefaultCopiesNumber},
|
||||
}
|
||||
|
||||
policies.updateCopiesNumbers(l, v)
|
||||
policies.updateDefaultCopiesNumbers(l, v)
|
||||
|
||||
return policies, policies.updatePolicy(getDefaultPolicyValue(v), v.GetString(cfgPolicyRegionMapFile))
|
||||
}
|
||||
|
||||
func (p *placementPolicy) DefaultPlacementPolicy() netmap.PlacementPolicy {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.defaultPolicy
|
||||
}
|
||||
|
||||
func (p *placementPolicy) Get(name string) (netmap.PlacementPolicy, bool) {
|
||||
func (p *placementPolicy) PlacementPolicy(name string) (netmap.PlacementPolicy, bool) {
|
||||
p.mu.RLock()
|
||||
policy, ok := p.regionMap[name]
|
||||
p.mu.RUnlock()
|
||||
|
@ -307,7 +371,30 @@ func (p *placementPolicy) Get(name string) (netmap.PlacementPolicy, bool) {
|
|||
return policy, ok
|
||||
}
|
||||
|
||||
func (p *placementPolicy) update(defaultPolicy string, regionPolicyFilepath string) error {
|
||||
func (p *placementPolicy) CopiesNumbers(locationConstraint string) ([]uint32, bool) {
|
||||
p.mu.RLock()
|
||||
copiesNumbers, ok := p.copiesNumbers[locationConstraint]
|
||||
p.mu.RUnlock()
|
||||
|
||||
return copiesNumbers, ok
|
||||
}
|
||||
|
||||
func (p *placementPolicy) DefaultCopiesNumbers() []uint32 {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
return p.defaultCopiesNumbers
|
||||
}
|
||||
|
||||
func (p *placementPolicy) update(l *zap.Logger, v *viper.Viper) {
|
||||
if err := p.updatePolicy(getDefaultPolicyValue(v), v.GetString(cfgPolicyRegionMapFile)); err != nil {
|
||||
l.Warn("policies won't be updated", zap.Error(err))
|
||||
}
|
||||
|
||||
p.updateCopiesNumbers(l, v)
|
||||
p.updateDefaultCopiesNumbers(l, v)
|
||||
}
|
||||
|
||||
func (p *placementPolicy) updatePolicy(defaultPolicy string, regionPolicyFilepath string) error {
|
||||
var defaultPlacementPolicy netmap.PlacementPolicy
|
||||
if err := defaultPlacementPolicy.DecodeString(defaultPolicy); err != nil {
|
||||
return fmt.Errorf("parse default policy '%s': %w", defaultPolicy, err)
|
||||
|
@ -342,6 +429,31 @@ func (p *placementPolicy) update(defaultPolicy string, regionPolicyFilepath stri
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *placementPolicy) updateCopiesNumbers(l *zap.Logger, v *viper.Viper) {
|
||||
if newCopiesNumbers, err := fetchCopiesNumbers(l, v); err != nil {
|
||||
l.Warn("copies numbers won't be updated", zap.Error(err))
|
||||
} else {
|
||||
p.mu.Lock()
|
||||
p.copiesNumbers = newCopiesNumbers
|
||||
p.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *placementPolicy) updateDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) {
|
||||
configuredValues, err := fetchDefaultCopiesNumbers(v)
|
||||
|
||||
if err == nil {
|
||||
p.mu.Lock()
|
||||
p.defaultCopiesNumbers = configuredValues
|
||||
p.mu.Unlock()
|
||||
l.Info("default copies numbers", zap.Uint32s("vector", p.defaultCopiesNumbers))
|
||||
return
|
||||
}
|
||||
|
||||
l.Error("cannot parse default copies numbers", zap.Error(err))
|
||||
l.Warn("default copies numbers won't be updated", zap.Uint32s("current value", p.DefaultCopiesNumbers()))
|
||||
}
|
||||
|
||||
func remove(list []string, element string) []string {
|
||||
for i, item := range list {
|
||||
if item == element {
|
||||
|
@ -408,7 +520,7 @@ LOOP:
|
|||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case <-sigs:
|
||||
a.configReload()
|
||||
a.configReload(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -419,6 +531,7 @@ LOOP:
|
|||
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
a.shutdownTracing()
|
||||
|
||||
close(a.webDone)
|
||||
}
|
||||
|
@ -427,7 +540,7 @@ func shutdownContext() (context.Context, context.CancelFunc) {
|
|||
return context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||
}
|
||||
|
||||
func (a *App) configReload() {
|
||||
func (a *App) configReload(ctx context.Context) {
|
||||
a.log.Info("SIGHUP config reload started")
|
||||
|
||||
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
|
||||
|
@ -453,6 +566,7 @@ func (a *App) configReload() {
|
|||
a.updateSettings()
|
||||
|
||||
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.initTracing(ctx)
|
||||
a.setHealthStatus()
|
||||
|
||||
a.log.Info("SIGHUP config reload completed")
|
||||
|
@ -465,9 +579,7 @@ func (a *App) updateSettings() {
|
|||
a.settings.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
if err := a.settings.policies.update(getDefaultPolicyValue(a.cfg), a.cfg.GetString(cfgPolicyRegionMapFile)); err != nil {
|
||||
a.log.Warn("policies won't be updated", zap.Error(err))
|
||||
}
|
||||
a.settings.policies.update(a.log, a.cfg)
|
||||
|
||||
a.settings.xmlDecoder.UseDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
|
||||
}
|
||||
|
@ -637,7 +749,6 @@ func (a *App) initHandler() {
|
|||
Policy: a.settings.policies,
|
||||
DefaultMaxAge: handler.DefaultMaxAge,
|
||||
NotificatorEnabled: a.cfg.GetBool(cfgEnableNATS),
|
||||
DefaultCopiesNumbers: []uint32{handler.DefaultCopiesNumber},
|
||||
XMLDecoder: a.settings.xmlDecoder,
|
||||
}
|
||||
|
||||
|
@ -652,13 +763,6 @@ func (a *App) initHandler() {
|
|||
cfg.DefaultMaxAge = defaultMaxAge
|
||||
}
|
||||
|
||||
cfg.CopiesNumbers = fetchCopiesNumbers(a.log, a.cfg)
|
||||
|
||||
if val := fetchDefaultCopiesNumbers(a.log, a.cfg); len(val) > 0 {
|
||||
cfg.DefaultCopiesNumbers = val
|
||||
}
|
||||
a.log.Info("setting default copies numbers", zap.Uint32s("vector", cfg.DefaultCopiesNumbers))
|
||||
|
||||
cfg.ResolveZoneList = a.cfg.GetStringSlice(cfgResolveBucketAllow)
|
||||
cfg.IsResolveListAllow = len(cfg.ResolveZoneList) > 0
|
||||
if !cfg.IsResolveListAllow {
|
||||
|
|
|
@ -98,13 +98,16 @@ const ( // Settings.
|
|||
cfgPProfEnabled = "pprof.enabled"
|
||||
cfgPProfAddress = "pprof.address"
|
||||
|
||||
// Tracing.
|
||||
cfgTracingEnabled = "tracing.enabled"
|
||||
cfgTracingExporter = "tracing.exporter"
|
||||
cfgTracingEndpoint = "tracing.endpoint"
|
||||
|
||||
cfgListenDomains = "listen_domains"
|
||||
|
||||
// Peers.
|
||||
cfgPeers = "peers"
|
||||
|
||||
cfgTreeServiceEndpoint = "tree.service"
|
||||
|
||||
// NeoGo.
|
||||
cfgRPCEndpoint = "rpc_endpoint"
|
||||
|
||||
|
@ -152,25 +155,23 @@ var ignore = map[string]struct{}{
|
|||
cmdVersion: {},
|
||||
}
|
||||
|
||||
func fetchDefaultCopiesNumbers(l *zap.Logger, v *viper.Viper) []uint32 {
|
||||
func fetchDefaultCopiesNumbers(v *viper.Viper) ([]uint32, error) {
|
||||
unparsed := v.GetStringSlice(cfgSetCopiesNumber)
|
||||
var result []uint32
|
||||
|
||||
for i := range unparsed {
|
||||
parsedValue, err := strconv.ParseUint(unparsed[i], 10, 32)
|
||||
if err != nil {
|
||||
l.Error("cannot parse default copies numbers", zap.Error(err))
|
||||
return make([]uint32, 0)
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, uint32(parsedValue))
|
||||
}
|
||||
|
||||
return result
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) map[string][]uint32 {
|
||||
func fetchCopiesNumbers(l *zap.Logger, v *viper.Viper) (map[string][]uint32, error) {
|
||||
var copiesNums = make(map[string][]uint32)
|
||||
LOOP:
|
||||
for i := 0; ; i++ {
|
||||
key := cfgCopiesNumbers + "." + strconv.Itoa(i) + "."
|
||||
constraint := v.GetString(key + "location_constraint")
|
||||
|
@ -184,16 +185,15 @@ LOOP:
|
|||
for j := range vector {
|
||||
parsedValue, err := strconv.ParseUint(vector[j], 10, 32)
|
||||
if err != nil {
|
||||
l.Error("cannot parse copies numbers", zap.Error(err))
|
||||
break LOOP
|
||||
return nil, err
|
||||
}
|
||||
vector32[j] = uint32(parsedValue)
|
||||
}
|
||||
|
||||
copiesNums[constraint] = vector32
|
||||
l.Debug("added constraint", zap.String("location", constraint), zap.Strings("copies numbers", vector))
|
||||
l.Info("constraint added", zap.String("location", constraint), zap.Strings("copies numbers", vector))
|
||||
}
|
||||
return copiesNums
|
||||
return copiesNums, nil
|
||||
}
|
||||
|
||||
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
||||
|
@ -217,7 +217,8 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
|||
|
||||
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
|
||||
|
||||
l.Info("added connection peer",
|
||||
l.Info("added storage peer",
|
||||
zap.Int("priority", priority),
|
||||
zap.String("address", address),
|
||||
zap.Float64("weight", weight))
|
||||
}
|
||||
|
@ -424,11 +425,7 @@ func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
|
|||
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile))
|
||||
}
|
||||
|
||||
func readInConfig(v *viper.Viper) error {
|
||||
|
@ -492,11 +489,7 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
}
|
||||
}()
|
||||
|
||||
if err = v.MergeConfig(cfgFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return v.MergeConfig(cfgFile)
|
||||
}
|
||||
|
||||
// newLogger constructs a Logger instance for the current application.
|
||||
|
|
|
@ -42,9 +42,6 @@ S3_GW_CONFIG=/path/to/config/yaml
|
|||
# Logger
|
||||
S3_GW_LOGGER_LEVEL=debug
|
||||
|
||||
# Endpoints of the tree service. At least one endpoint must be provided. Node addresses (from the `peers` section) can be used.
|
||||
S3_GW_TREE_SERVICE=grpc://s01.frostfs.devenv:8080 grpc://s02.frostfs.devenv:8080
|
||||
|
||||
# RPC endpoint and order of resolving of bucket names
|
||||
S3_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333/
|
||||
S3_GW_RESOLVE_ORDER="nns dns"
|
||||
|
@ -124,8 +121,9 @@ S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_1_VECTOR=2 3 4
|
|||
S3_GW_CORS_DEFAULT_MAX_AGE=600
|
||||
|
||||
# Parameters of requests to FrostFS
|
||||
# Number of the object copies to consider PUT to FrostFS successful.
|
||||
# If not set, default value 0 will be used -- it means that object will be processed according to the container's placement policy
|
||||
# Numbers of the object copies (for each replica, the syntax is the same as for `S3_GW_PLACEMENT_POLICY_COPIES_NUMBERS_0_VECTOR` above)
|
||||
# to consider PUT to FrostFS successful.
|
||||
# `0` or empty list means that object will be processed according to the container's placement policy
|
||||
S3_GW_FROSTFS_SET_COPIES_NUMBER=0
|
||||
|
||||
# List of allowed AccessKeyID prefixes
|
||||
|
@ -140,3 +138,7 @@ S3_GW_RESOLVE_BUCKET_ALLOW=container
|
|||
S3_GW_KLUDGE_USE_DEFAULT_XMLNS_FOR_COMPLETE_MULTIPART=false
|
||||
# Set timeout between whitespace transmissions during CompleteMultipartUpload processing.
|
||||
S3_GW_KLUDGE_COMPLETE_MULTIPART_KEEPALIVE=10s
|
||||
|
||||
S3_GW_TRACING_ENABLED=false
|
||||
S3_GW_TRACING_ENDPOINT="localhost:4318"
|
||||
S3_GW_TRACING_EXPORTER="otlp_grpc"
|
||||
|
|
|
@ -44,12 +44,6 @@ listen_domains:
|
|||
logger:
|
||||
level: debug
|
||||
|
||||
# Endpoints of the tree service. At least one endpoint must be provided. Node addresses (from the `peers` section) can be used.
|
||||
tree:
|
||||
service:
|
||||
- node1.frostfs:8080
|
||||
- node2.frostfs:8080
|
||||
|
||||
# RPC endpoint and order of resolving of bucket names
|
||||
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
||||
resolve_order:
|
||||
|
@ -64,6 +58,11 @@ prometheus:
|
|||
enabled: false
|
||||
address: localhost:8086
|
||||
|
||||
tracing:
|
||||
enabled: false
|
||||
exporter: "otlp_grpc"
|
||||
endpoint: "localhost:4318"
|
||||
|
||||
# Timeout to connect to a node
|
||||
connect_timeout: 10s
|
||||
# Timeout for individual operations in streaming RPC.
|
||||
|
@ -148,9 +147,9 @@ cors:
|
|||
|
||||
# Parameters of requests to FrostFS
|
||||
frostfs:
|
||||
# Number of the object copies to consider PUT to FrostFS successful.
|
||||
# `0` means that object will be processed according to the container's placement policy
|
||||
set_copies_number: 0
|
||||
# Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
|
||||
# `[0]` or empty list means that object will be processed according to the container's placement policy
|
||||
set_copies_number: [0]
|
||||
|
||||
# List of allowed AccessKeyID prefixes
|
||||
# If the parameter is omitted, S3 GW will accept all AccessKeyIDs
|
||||
|
|
|
@ -95,7 +95,8 @@ func (x *AccessBox) Unmarshal(data []byte) error {
|
|||
|
||||
// PackTokens adds bearer and session tokens to BearerTokens and SessionToken lists respectively.
|
||||
// Session token can be nil.
|
||||
func PackTokens(gatesData []*GateData) (*AccessBox, *Secrets, error) {
|
||||
// Secret can be nil. In such case secret will be generated.
|
||||
func PackTokens(gatesData []*GateData, secret []byte) (*AccessBox, *Secrets, error) {
|
||||
box := &AccessBox{}
|
||||
ephemeralKey, err := keys.NewPrivateKey()
|
||||
if err != nil {
|
||||
|
@ -103,10 +104,12 @@ func PackTokens(gatesData []*GateData) (*AccessBox, *Secrets, error) {
|
|||
}
|
||||
box.OwnerPublicKey = ephemeralKey.PublicKey().Bytes()
|
||||
|
||||
secret, err := generateSecret()
|
||||
if secret == nil {
|
||||
secret, err = generateSecret()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate accessKey as hex: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := box.addTokens(gatesData, ephemeralKey, secret); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to add tokens to accessbox: %w", err)
|
||||
|
@ -221,7 +224,7 @@ func decodeGate(gate *AccessBox_Gate, owner *keys.PrivateKey, sender *keys.Publi
|
|||
return nil, fmt.Errorf("unmarshal tokens: %w", err)
|
||||
}
|
||||
|
||||
var bearerTkn bearer.Token
|
||||
bearerTkn := bearer.NewToken()
|
||||
if err = bearerTkn.Unmarshal(tokens.BearerToken); err != nil {
|
||||
return nil, fmt.Errorf("unmarshal bearer token: %w", err)
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ func TestTokensEncryptDecrypt(t *testing.T) {
|
|||
cred, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
tkn.SetEACLTable(*eacl.NewTable())
|
||||
tkn.SetEACLTable(eacl.NewTable())
|
||||
require.NoError(t, tkn.Sign(sec.PrivateKey))
|
||||
|
||||
data, err := encrypt(cred, cred.PublicKey(), tkn.Marshal())
|
||||
|
@ -56,11 +56,11 @@ func TestBearerTokenInAccessBox(t *testing.T) {
|
|||
cred, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
tkn.SetEACLTable(*eacl.NewTable())
|
||||
tkn.SetEACLTable(eacl.NewTable())
|
||||
require.NoError(t, tkn.Sign(sec.PrivateKey))
|
||||
|
||||
gate := NewGateData(cred.PublicKey(), &tkn)
|
||||
box, _, err = PackTokens([]*GateData{gate})
|
||||
box, _, err = PackTokens([]*GateData{gate}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := box.Marshal()
|
||||
|
@ -79,7 +79,7 @@ func TestSessionTokenInAccessBox(t *testing.T) {
|
|||
var (
|
||||
box *AccessBox
|
||||
box2 AccessBox
|
||||
tkn = new(session.Container)
|
||||
tkn = session.NewContainer()
|
||||
)
|
||||
|
||||
sec, err := keys.NewPrivateKey()
|
||||
|
@ -92,10 +92,10 @@ func TestSessionTokenInAccessBox(t *testing.T) {
|
|||
tkn.SetAuthKey((*frostfsecdsa.PublicKey)(sec.PublicKey()))
|
||||
require.NoError(t, tkn.Sign(sec.PrivateKey))
|
||||
|
||||
var newTkn bearer.Token
|
||||
newTkn := bearer.NewToken()
|
||||
gate := NewGateData(cred.PublicKey(), &newTkn)
|
||||
gate.SessionTokens = []*session.Container{tkn}
|
||||
box, _, err = PackTokens([]*GateData{gate})
|
||||
gate.SessionTokens = []*session.Container{&tkn}
|
||||
box, _, err = PackTokens([]*GateData{gate}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := box.Marshal()
|
||||
|
@ -107,7 +107,7 @@ func TestSessionTokenInAccessBox(t *testing.T) {
|
|||
tkns, err := box2.GetTokens(cred)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, []*session.Container{tkn}, tkns.SessionTokens)
|
||||
require.Equal(t, tkn.Marshal(), tkns.SessionTokens[0].Marshal())
|
||||
}
|
||||
|
||||
func TestAccessboxMultipleKeys(t *testing.T) {
|
||||
|
@ -119,7 +119,7 @@ func TestAccessboxMultipleKeys(t *testing.T) {
|
|||
sec, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
tkn.SetEACLTable(*eacl.NewTable())
|
||||
tkn.SetEACLTable(eacl.NewTable())
|
||||
require.NoError(t, tkn.Sign(sec.PrivateKey))
|
||||
|
||||
count := 10
|
||||
|
@ -135,7 +135,7 @@ func TestAccessboxMultipleKeys(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
box, _, err = PackTokens(gates)
|
||||
box, _, err = PackTokens(gates, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i, k := range privateKeys {
|
||||
|
@ -160,11 +160,11 @@ func TestUnknownKey(t *testing.T) {
|
|||
wrongCred, err := keys.NewPrivateKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
tkn.SetEACLTable(*eacl.NewTable())
|
||||
tkn.SetEACLTable(eacl.NewTable())
|
||||
require.NoError(t, tkn.Sign(sec.PrivateKey))
|
||||
|
||||
gate := NewGateData(cred.PublicKey(), &tkn)
|
||||
box, _, err = PackTokens([]*GateData{gate})
|
||||
box, _, err = PackTokens([]*GateData{gate}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = box.GetTokens(wrongCred)
|
||||
|
|
|
@ -20,6 +20,7 @@ type (
|
|||
Credentials interface {
|
||||
GetBox(context.Context, oid.Address) (*accessbox.Box, error)
|
||||
Put(context.Context, cid.ID, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error)
|
||||
Update(context.Context, oid.Address, user.ID, *accessbox.AccessBox, uint64, ...*keys.PublicKey) (oid.Address, error)
|
||||
}
|
||||
|
||||
cred struct {
|
||||
|
@ -40,6 +41,10 @@ type PrmObjectCreate struct {
|
|||
// File path.
|
||||
Filepath string
|
||||
|
||||
// Optional.
|
||||
// If provided, the cred object will be created using the CRDT approach.
|
||||
NewVersionFor *oid.ID
|
||||
|
||||
// Last FrostFS epoch of the object lifetime.
|
||||
ExpirationEpoch uint64
|
||||
|
||||
|
@ -57,12 +62,13 @@ type FrostFS interface {
|
|||
// prevented the object from being created.
|
||||
CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
|
||||
|
||||
// ReadObjectPayload reads payload of the object from FrostFS network by address
|
||||
// into memory.
|
||||
// GetCredsPayload gets payload of the credential object from FrostFS network.
|
||||
// It searches by system name and selects using a CRDT 2PSet. If the CRDT header is absent,
// it heads the object by address.
|
||||
//
|
||||
// It returns exactly one non-nil value. It returns any error encountered which
|
||||
// prevented the object payload from being read.
|
||||
ReadObjectPayload(context.Context, oid.Address) ([]byte, error)
|
||||
GetCredsPayload(context.Context, oid.Address) ([]byte, error)
|
||||
}
|
||||
|
||||
var (
|
||||
|
@ -103,7 +109,7 @@ func (c *cred) GetBox(ctx context.Context, addr oid.Address) (*accessbox.Box, er
|
|||
}
|
||||
|
||||
func (c *cred) getAccessBox(ctx context.Context, addr oid.Address) (*accessbox.AccessBox, error) {
|
||||
data, err := c.frostFS.ReadObjectPayload(ctx, addr)
|
||||
data, err := c.frostFS.GetCredsPayload(ctx, addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read payload: %w", err)
|
||||
}
|
||||
|
@ -118,6 +124,15 @@ func (c *cred) getAccessBox(ctx context.Context, addr oid.Address) (*accessbox.A
|
|||
}
|
||||
|
||||
func (c *cred) Put(ctx context.Context, idCnr cid.ID, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
return c.createObject(ctx, idCnr, nil, issuer, box, expiration, keys...)
|
||||
}
|
||||
|
||||
func (c *cred) Update(ctx context.Context, addr oid.Address, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
objID := addr.Object()
|
||||
return c.createObject(ctx, addr.Container(), &objID, issuer, box, expiration, keys...)
|
||||
}
|
||||
|
||||
func (c *cred) createObject(ctx context.Context, cnrID cid.ID, newVersionFor *oid.ID, issuer user.ID, box *accessbox.AccessBox, expiration uint64, keys ...*keys.PublicKey) (oid.Address, error) {
|
||||
if len(keys) == 0 {
|
||||
return oid.Address{}, ErrEmptyPublicKeys
|
||||
} else if box == nil {
|
||||
|
@ -130,9 +145,10 @@ func (c *cred) Put(ctx context.Context, idCnr cid.ID, issuer user.ID, box *acces
|
|||
|
||||
idObj, err := c.frostFS.CreateObject(ctx, PrmObjectCreate{
|
||||
Creator: issuer,
|
||||
Container: idCnr,
|
||||
Container: cnrID,
|
||||
Filepath: strconv.FormatInt(time.Now().Unix(), 10) + "_access.box",
|
||||
ExpirationEpoch: expiration,
|
||||
NewVersionFor: newVersionFor,
|
||||
Payload: data,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -141,7 +157,7 @@ func (c *cred) Put(ctx context.Context, idCnr cid.ID, issuer user.ID, box *acces
|
|||
|
||||
var addr oid.Address
|
||||
addr.SetObject(idObj)
|
||||
addr.SetContainer(idCnr)
|
||||
addr.SetContainer(cnrID)
|
||||
|
||||
return addr, nil
|
||||
}
|
||||
|
|
|
@ -24,8 +24,9 @@ potentially).
|
|||
2. [Bearer tokens](#bearer-tokens)
|
||||
3. [Session tokens](#session-tokens)
|
||||
4. [Containers policy](#containers-policy)
|
||||
3. [Obtainment of a secret](#obtainment-of-a-secret-access-key)
|
||||
3. [Obtainment of a secret](#obtaining-credential-secrets)
|
||||
4. [Generate presigned url](#generate-presigned-url)
|
||||
5. [Update secrets](#update-secret)
|
||||
|
||||
## Generation of wallet
|
||||
|
||||
|
@ -114,6 +115,7 @@ $ frostfs-s3-authmate issue-secret --wallet wallet.json \
|
|||
|
||||
{
|
||||
"access_key_id": "5g933dyLEkXbbAspouhPPTiyLZRg4axBW1axSPD87eVT0AiXsH4AjYy1iTJ4C1WExzjBrSobJsQFWEyKLREe5sQYM",
|
||||
"initial_access_key_id": "5g933dyLEkXbbAspouhPPTiyLZRg4axBW1axSPD87eVT0AiXsH4AjYy1iTJ4C1WExzjBrSobJsQFWEyKLREe5sQYM",
|
||||
"secret_access_key": "438bbd8243060e1e1c9dd4821756914a6e872ce29bf203b68f81b140ac91231c",
|
||||
"owner_private_key": "274fdd6e71fc6a6b8fe77bec500254115d66d6d17347d7db0880d2eb80afc72a",
|
||||
"container_id":"5g933dyLEkXbbAspouhPPTiyLZRg4axBW1axSPD87eVT"
|
||||
|
@ -122,6 +124,9 @@ $ frostfs-s3-authmate issue-secret --wallet wallet.json \
|
|||
|
||||
`access_key_id` and `secret_access_key` are AWS credentials that you can use with any S3 client.
|
||||
|
||||
`initial_access_key_id` contains the first credential in the chain of credential versions
(it can be useful when you update your credentials).
|
||||
|
||||
`access_key_id` consists of Base58 encoded containerID(cid) and objectID(oid) stored on the FrostFS network and containing
|
||||
the secret. Format of `access_key_id`: `%cid0%oid`, where 0(zero) is a delimiter.
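
For illustration, here is a minimal Go sketch of how such an `access_key_id` can be turned back into a FrostFS address, mirroring the replacement of the first `0` with `/` used by the authmate commands; the function name and example value are illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// splitAccessKeyID (illustrative name) recovers the FrostFS address of the
// credential object from an access_key_id. Base58 does not use the character
// "0", so the first "0" is guaranteed to be the cid/oid delimiter.
func splitAccessKeyID(accessKeyID string) (oid.Address, error) {
	var addr oid.Address
	// Replace only the first "0" to get the canonical "<cid>/<oid>" address form.
	credAddr := strings.Replace(accessKeyID, "0", "/", 1)
	if err := addr.DecodeString(credAddr); err != nil {
		return oid.Address{}, fmt.Errorf("parse creds address: %w", err)
	}
	return addr, nil
}

func main() {
	addr, err := splitAccessKeyID("5g933dyLEkXbbAspouhPPTiyLZRg4axBW1axSPD87eVT0AiXsH4AjYy1iTJ4C1WExzjBrSobJsQFWEyKLREe5sQYM")
	if err != nil {
		panic(err)
	}
	fmt.Println("container:", addr.Container().EncodeToString())
	fmt.Println("object:", addr.Object().EncodeToString())
}
```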
|
||||
|
||||
|
@ -134,6 +139,9 @@ the secret. Format of `access_key_id`: `%cid0%oid`, where 0(zero) is a delimiter
|
|||
24h). Default value is `720h` (30 days). It will be ceil rounded to the nearest amount of epoch
|
||||
* `--aws-cli-credentials` - path to the aws cli credentials file, where authmate will write `access_key_id` and
|
||||
`secret_access_key` to
|
||||
* `--access-key-id` -- credentials that you want to update (e.g. to add more gates that can use your creds)
without changing the values of `aws_access_key_id` and `aws_secret_access_key`. If you want to update a credential you MUST
also provide the secret key using the `AUTHMATE_SECRET_ACCESS_KEY` env variable.
|
||||
|
||||
### Bearer tokens
|
||||
|
||||
|
@ -252,9 +260,9 @@ can be set via parameter `--container-policy` (json-string and file path allowed
|
|||
}
|
||||
```
|
||||
|
||||
## Obtainment of a secret access key
|
||||
## Obtaining credential secrets
|
||||
|
||||
You can get a secret access key associated with an access key ID by obtaining a
|
||||
You can get a secret access key and bearer token associated with an access key ID by obtaining a
|
||||
secret stored on the FrostFS network. Here is an example of providing one password (for `wallet.json`) via env variable
|
||||
and the other (for `gate-wallet.json`) interactively:
|
||||
|
||||
|
@ -267,6 +275,25 @@ frostfs-s3-authmate obtain-secret --wallet wallet.json \
|
|||
|
||||
Enter password for gate-wallet.json >
|
||||
{
|
||||
"bearer_token": {
|
||||
"body": {
|
||||
"eaclTable": null,
|
||||
"ownerID": {
|
||||
"value": "Naq5pfYuroaGE7h9o5iQsPR/1aRe5gmWrg=="
|
||||
},
|
||||
"lifetime": {
|
||||
"exp": "10813",
|
||||
"nbf": "13",
|
||||
"iat": "13"
|
||||
},
|
||||
"allowImpersonate": true
|
||||
},
|
||||
"signature": {
|
||||
"key": "Axpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89K",
|
||||
"signature": "BMIOqcNEwTughI26ivFw7vnGyzhWip8NsgSYTTf21aVkv0AH7bgE9R91gglYgS6tGNVcWZMTisYCJCT3OEQ9lkw=",
|
||||
"scheme": "ECDSA_SHA512"
|
||||
}
|
||||
},
|
||||
"secret_access_key": "438bbd8243060e1e1c9dd4821756914a6e872ce29bf203b68f81b140ac91231c"
|
||||
}
|
||||
```
|
||||
|
@ -308,3 +335,39 @@ $ aws s3 --endpoint http://localhost:8084 presign s3://pregigned/obj
|
|||
|
||||
http://localhost:8084/presigned/obj?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=6UpmiuYspPLMWfyhEKYmZQSsTGkFLS5MhQVdsda3fhz908Hw9eo9urTmaJtfvHMHUpY8SWAptk61bns2Js8f1M5tZ%2F20220615%2Fus-east-2%2Fs3%2Faws4_request&X-Amz-Date=20220615T072348Z&X-Amz-Expires=3600&X-Amz-SignedHeaders=host&X-Amz-Signature=b82c13952534b1bba699a718f2d42d135c2833a1e64030d4ce0e198af46551d4
|
||||
```
|
||||
|
||||
## Update secret
|
||||
You can extend the list of s3 gates that can accept already issued credentials.
To do this, use the `frostfs-s3-authmate update-secret` command:
|
||||
|
||||
**Required parameters:**
|
||||
* `--wallet` is a path to a user wallet `.json` file. You can provide a passphrase to decrypt
|
||||
a wallet via environment variable `AUTHMATE_WALLET_PASSPHRASE`, or you will be asked to enter a passphrase
|
||||
interactively. You can also specify an account address to use from a wallet using the `--address` parameter.
|
||||
* `--gate-wallet` is a path to a gate wallet `.json` file (needed to decrypt the current access box version). You can provide a passphrase to decrypt
|
||||
a wallet via environment variable `AUTHMATE_WALLET_GATE_PASSPHRASE`, or you will be asked to enter a passphrase
|
||||
interactively. You can also specify an account address to use from a wallet using the `--gate-address` parameter.
|
||||
* `--peer` is an address of a FrostFS peer to connect to
|
||||
* `--gate-public-key` is a public `secp256r1` 33-byte short key of a gate (use the flag repeatedly for multiple gates).
|
||||
* `--access-key-id` is a credential id to update.
|
||||
|
||||
```shell
|
||||
$ frostfs-s3-authmate update-secret --wallet wallet.json --gate-wallet s3-wallet.json \
|
||||
--peer 192.168.130.71:8080 \
|
||||
--gate-public-key 0313b1ac3a8076e155a7e797b24f0b650cccad5941ea59d7cfd51a024a8b2a06bf \
|
||||
--gate-public-key 0317585fa8274f7afdf1fc5f2a2e7bece549d5175c4e5182e37924f30229aef967 \
|
||||
--gate-public-key 0223450b9db6d0c083e9c6de1f7d8fd22858d70829e09afa39828bb2416bf190fc \
|
||||
--access-key-id HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP04QomYDfYsspMhENEDhzTGwGxm86Q6R2Weugf3PG4sJ3M
|
||||
|
||||
Enter password for wallet.json >
|
||||
Enter password for s3-wallet.json >
|
||||
|
||||
{
|
||||
"initial_access_key_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP04QomYDfYsspMhENEDhzTGwGxm86Q6R2Weugf3PG4sJ3M",
|
||||
"access_key_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP0xXf1ahGndNkydG9MrL9WmCebrPwdSHTAysQa9w6yCNJ",
|
||||
"secret_access_key": "f6a65481fd2752e69e4aa80a6fdcad70cfbf8304d2b3b8c2f9c15212aeee3ae7",
|
||||
"owner_private_key": "7f40233893e4f4a54e4f2f52455a0e6d563f7eb0233a985094937ed69faef681",
|
||||
"wallet_public_key": "031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a",
|
||||
"container_id": "HwrdXgetdGcEWAQwi68r1PMvw4iSm1Y5Z1fsFNSD6sQP"
|
||||
}
|
||||
```
|
||||
|
|
|
@ -176,12 +176,12 @@ There are some custom types used for brevity:
|
|||
| `placement_policy` | [Placement policy configuration](#placement_policy-section) |
|
||||
| `server` | [Server configuration](#server-section) |
|
||||
| `logger` | [Logger configuration](#logger-section) |
|
||||
| `tree` | [Tree configuration](#tree-section) |
|
||||
| `cache` | [Cache configuration](#cache-section) |
|
||||
| `nats` | [NATS configuration](#nats-section) |
|
||||
| `cors` | [CORS configuration](#cors-section) |
|
||||
| `pprof` | [Pprof configuration](#pprof-section) |
|
||||
| `prometheus` | [Prometheus configuration](#prometheus-section) |
|
||||
| `tracing` | [Tracing configuration](#tracing-section) |
|
||||
| `frostfs` | [Parameters of requests to FrostFS](#frostfs-section) |
|
||||
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
|
||||
| `kludge` | [Different kludge configuration](#kludge-section) |
|
||||
|
@ -358,26 +358,6 @@ logger:
|
|||
|-----------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
||||
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
||||
|
||||
### `tree` section
|
||||
|
||||
```yaml
|
||||
tree:
|
||||
service:
|
||||
- s01.frostfs.devenv:8080
|
||||
- s02.frostfs.devenv:8080
|
||||
```
|
||||
|
||||
If you use only one endpoint, it can be provided as:
|
||||
|
||||
```yaml
|
||||
tree:
|
||||
service: s01.frostfs.devenv:8080
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
|-----------|------------|---------------|-------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `service` | `[]string` | | Endpoints of the tree service. At least one endpoint must be provided. Node addresses (from the `peers` section) can be used. |
|
||||
|
||||
### `cache` section
|
||||
|
||||
```yaml
|
||||
|
@ -499,19 +479,38 @@ prometheus:
|
|||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
||||
| `address` | `string` | yes | `localhost:8086` | Address that service listener binds to. |
|
||||
|
||||
# `tracing` section
|
||||
|
||||
Contains configuration for the `tracing` service.
|
||||
|
||||
```yaml
|
||||
tracing:
|
||||
enabled: false
|
||||
exporter: "otlp_grpc"
|
||||
endpoint: "localhost:4318"
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-------------|----------|---------------|---------------|-----------------------------------------|
|
||||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
||||
| `exporter` | `string` | yes | `` | Type of tracing exporter. |
|
||||
| `endpoint` | `string` | yes | `` | Address that service listener binds to. |
|
||||
|
||||
|
||||
# `frostfs` section
|
||||
|
||||
Contains parameters of requests to FrostFS.
|
||||
This value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.
|
||||
This value can be overridden with the `X-Amz-Meta-Frostfs-Copies-Number` header (the value is a comma-separated list of numbers, e.g. `1,2,3`)
for `PutObject`, `CopyObject`, `CreateMultipartUpload`.
|
||||
|
||||
```yaml
|
||||
frostfs:
|
||||
set_copies_number: 0
|
||||
set_copies_number: [0]
|
||||
```
|
||||
|
||||
| Parameter | Type | Default value | Description |
|
||||
|---------------------|----------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `set_copies_number` | `uint32` | `0` | Number of the object copies to consider PUT to FrostFS successful. <br/>Default value `0` means that object will be processed according to the container's placement policy |
|
||||
|---------------------|------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `set_copies_number` | `[]uint32` | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
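
As a sketch of the per-request override described above, the snippet below sets the copies numbers through object metadata using the aws-sdk-go client this project already depends on. The endpoint, bucket, key, and values are placeholders, and the assumption is that the SDK sends the `frostfs-copies-number` metadata entry as the `X-Amz-Meta-Frostfs-Copies-Number` header.

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder endpoint and region; credentials are taken from the environment.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8084"),
		Region:           aws.String("us-east-1"),
		S3ForcePathStyle: aws.Bool(true),
	}))

	// Per-request override of the copies numbers: one value per replica, comma separated.
	// User metadata keys are sent with the x-amz-meta- prefix, so this entry is assumed
	// to reach the gateway as X-Amz-Meta-Frostfs-Copies-Number.
	_, err := s3.New(sess).PutObject(&s3.PutObjectInput{
		Bucket: aws.String("bucket"),
		Key:    aws.String("object"),
		Body:   bytes.NewReader([]byte("payload")),
		Metadata: map[string]*string{
			"Frostfs-Copies-Number": aws.String("2,2,3"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```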
|
||||
|
||||
# `resolve_bucket` section
|
||||
|
||||
|
|
73
go.mod
|
@ -1,54 +1,64 @@
|
|||
module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
replace (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac => ./frostfs-api-go
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe => ./frostfs-sdk-go
|
||||
)
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230418080822-bd44a3f47b85
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230505094539-15b4287092bd
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe
|
||||
github.com/aws/aws-sdk-go v1.44.6
|
||||
github.com/bluele/gcache v0.0.2
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/minio/sio v0.3.0
|
||||
github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d
|
||||
github.com/nspcc-dev/neo-go v0.101.0
|
||||
github.com/nspcc-dev/neo-go v0.101.1
|
||||
github.com/panjf2000/ants/v2 v2.5.0
|
||||
github.com/prometheus/client_golang v1.15.0
|
||||
github.com/prometheus/client_golang v1.15.1
|
||||
github.com/prometheus/client_model v0.3.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.15.0
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/urfave/cli/v2 v2.3.0
|
||||
go.opentelemetry.io/otel v1.16.0
|
||||
go.opentelemetry.io/otel/trace v1.16.0
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/crypto v0.4.0
|
||||
google.golang.org/grpc v1.53.0
|
||||
golang.org/x/crypto v0.9.0
|
||||
google.golang.org/grpc v1.55.0
|
||||
google.golang.org/protobuf v1.30.0
|
||||
)
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/benbjohnson/clock v1.1.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.3 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
|
@ -65,29 +75,28 @@ require (
|
|||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
github.com/twmb/murmur3 v1.1.8 // indirect
|
||||
github.com/urfave/cli v1.22.5 // indirect
|
||||
go.opentelemetry.io/otel v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c // indirect
|
||||
golang.org/x/net v0.7.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/term v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/sync v0.2.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
131 go.sum
|
@ -36,16 +36,18 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
|
|||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230418080822-bd44a3f47b85 h1:77lvdk0kMhnUgtnmqEcAPXPQaGlt24goMPu2+E5WRTk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230418080822-bd44a3f47b85/go.mod h1:sPyITTmQT662ZI38ud2aoE1SUCAr1mO5xV8P4nzLkKI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac h1:a6/Zc5BejflmguShwbllgJdEehnM9gshkLrLbKQHCU0=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac/go.mod h1:pKJJRLOChW4zDQsAt1e8k/snWKljJtpkiPfxV53ngjI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230505094539-15b4287092bd h1:HxacVl1Lc2RrfxAE13AGkp1tR/Mf4DDP6TgrgbLP5fQ=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230505094539-15b4287092bd/go.mod h1:TaJJOF3Uhuq8aqv2CrfuY2yhxePUinW35Xd3wfXLV/I=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0 h1:KvAES7xIqmQBGd2q8KanNosD9+4BhU/zqD5Kt5KSflk=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0/go.mod h1:mq2sbvYfO+BB6iFZwYBkgC0yc6mJNx+qZi4jW918m+Y=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe h1:47lrWXcl36ayN7AJ9IW7sDDnTj//RUyHoIZOsjbYAYA=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe/go.mod h1:w+s3ozlbFfTDFHhjX0A3Iif3BRtnTkwiACxFZD+Q0cQ=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
||||
|
@ -71,8 +73,8 @@ github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGn
|
|||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 h1:npHgfD4Tl2WJS3AJaMUi5ynGDPUBfkg3U3fCzDyXZ+4=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/aws/aws-sdk-go v1.44.6 h1:Y+uHxmZfhRTLX2X3khkdxCoTZAyGEX21aOUHe1U6geg=
|
||||
github.com/aws/aws-sdk-go v1.44.6/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
|
@ -95,8 +97,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
|||
github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
|
@ -125,9 +127,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
|
@ -160,8 +161,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
|||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-redis/redis v6.10.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
|
@ -169,8 +170,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
|||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -244,8 +245,9 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
|
|||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
|
@ -254,14 +256,16 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
|||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
|
@ -343,8 +347,8 @@ github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkP
|
|||
github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
|
||||
github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
|
||||
github.com/nspcc-dev/neo-go v0.99.4/go.mod h1:mKTolfRUfKjFso5HPvGSQtUZc70n0VKBMs16eGuC5gA=
|
||||
github.com/nspcc-dev/neo-go v0.101.0 h1:JPT2DpZqVjho34TMR59dm6uxvCFttOp02Nm8qCjpfaU=
|
||||
github.com/nspcc-dev/neo-go v0.101.0/go.mod h1:Q0uWKivGc2mYgdKFmTNP49LeXwMu4x6pUzHm3OIsN2I=
|
||||
github.com/nspcc-dev/neo-go v0.101.1 h1:TVdcIpH/+bxQBTLRwWE3+Pw3j6j/JwguENbBSGAGid0=
|
||||
github.com/nspcc-dev/neo-go v0.101.1/go.mod h1:J4tspxWw7jknX06F+VSMsKvIiNpYGfVTb2IxVC005YU=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220927123257-24c107e3a262/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb h1:GFxfkpXEYAbMIr69JpKOsQWeLOaGrd49HNAor8uDW+A=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
|
||||
|
@ -389,8 +393,8 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
|
|||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
|
||||
github.com/prometheus/client_golang v1.15.0/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
|
||||
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
|
@ -416,8 +420,8 @@ github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJf
|
|||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
|
@ -426,12 +430,13 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
|
|||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
|
||||
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
|
||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
|
@ -450,14 +455,16 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
github.com/syndtr/goleveldb v0.0.0-20180307113352-169b1b37be73/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
|
||||
github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
|
@ -481,20 +488,22 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
|
||||
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0 h1:sEL90JjOO/4yhquXl5zTAkLLsZ5+MycAgX99SDsxGc8=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.14.0/go.mod h1:oCslUcizYdpKYyS9e8srZEqM6BB8fq41VJBjLAE6z1w=
|
||||
go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
|
||||
go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
|
||||
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
|
||||
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
|
||||
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
|
||||
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc=
|
||||
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
|
||||
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
|
||||
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
|
||||
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
|
@ -507,8 +516,8 @@ go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A
|
|||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
|
@ -528,8 +537,8 @@ golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm
|
|||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -540,8 +549,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c h1:Govq2W3bnHJimHT2ium65kXcI7ZzTniZHcFATnLJM0Q=
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -608,8 +617,8 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -634,8 +643,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -699,13 +708,13 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -715,8 +724,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -840,8 +849,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
@ -863,8 +872,8 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
|
||||
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
|
157 internal/frostfs/authmate.go Normal file
@ -0,0 +1,157 @@
package frostfs

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strconv"
	"time"

	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/crdt"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
)

const (
	accessBoxCRDTNameAttr = "S3-Access-Box-CRDT-Name"
)

// AuthmateFrostFS is a mediator which implements authmate.FrostFS through pool.Pool.
type AuthmateFrostFS struct {
	frostFS *FrostFS
}

// NewAuthmateFrostFS creates new AuthmateFrostFS using provided pool.Pool.
func NewAuthmateFrostFS(p *pool.Pool) *AuthmateFrostFS {
	return &AuthmateFrostFS{frostFS: NewFrostFS(p)}
}

// ContainerExists implements authmate.FrostFS interface method.
func (x *AuthmateFrostFS) ContainerExists(ctx context.Context, idCnr cid.ID) error {
	_, err := x.frostFS.Container(ctx, idCnr)
	if err != nil {
		return fmt.Errorf("get container via connection pool: %w", err)
	}

	return nil
}

// TimeToEpoch implements authmate.FrostFS interface method.
func (x *AuthmateFrostFS) TimeToEpoch(ctx context.Context, futureTime time.Time) (uint64, uint64, error) {
	return x.frostFS.TimeToEpoch(ctx, time.Now(), futureTime)
}

// CreateContainer implements authmate.FrostFS interface method.
func (x *AuthmateFrostFS) CreateContainer(ctx context.Context, prm authmate.PrmContainerCreate) (cid.ID, error) {
	basicACL := acl.Private
	// allow reading objects to OTHERS in order to provide read access to S3 gateways
	basicACL.AllowOp(acl.OpObjectGet, acl.RoleOthers)
	basicACL.AllowOp(acl.OpObjectHead, acl.RoleOthers)
	basicACL.AllowOp(acl.OpObjectSearch, acl.RoleOthers)

	return x.frostFS.CreateContainer(ctx, layer.PrmContainerCreate{
		Creator:  prm.Owner,
		Policy:   prm.Policy,
		Name:     prm.FriendlyName,
		BasicACL: basicACL,
	})
}

// GetCredsPayload implements authmate.FrostFS interface method.
func (x *AuthmateFrostFS) GetCredsPayload(ctx context.Context, addr oid.Address) ([]byte, error) {
	versions, err := x.getCredVersions(ctx, addr)
	if err != nil {
		return nil, err
	}

	credObjID := addr.Object()
	if last := versions.GetLast(); last != nil {
		credObjID = last.OjbID
	}

	res, err := x.frostFS.ReadObject(ctx, layer.PrmObjectRead{
		Container:   addr.Container(),
		Object:      credObjID,
		WithPayload: true,
	})
	if err != nil {
		return nil, err
	}

	defer res.Payload.Close()

	return io.ReadAll(res.Payload)
}

// CreateObject implements authmate.FrostFS interface method.
func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObjectCreate) (oid.ID, error) {
	attributes := [][2]string{{objectv2.SysAttributeExpEpoch, strconv.FormatUint(prm.ExpirationEpoch, 10)}}

	if prm.NewVersionFor != nil {
		var addr oid.Address
		addr.SetContainer(prm.Container)
		addr.SetObject(*prm.NewVersionFor)

		versions, err := x.getCredVersions(ctx, addr)
		if err != nil {
			return oid.ID{}, err
		}

		if versions.GetLast() == nil {
			versions.AppendVersion(&crdt.ObjectVersion{OjbID: addr.Object()})
		}

		for key, val := range versions.GetCRDTHeaders() {
			attributes = append(attributes, [2]string{key, val})
		}

		attributes = append(attributes, [2]string{accessBoxCRDTNameAttr, versions.Name()})
	}

	return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
		Creator:    prm.Creator,
		Container:  prm.Container,
		Filepath:   prm.Filepath,
		Attributes: attributes,
		Payload:    bytes.NewReader(prm.Payload),
	})
}

func (x *AuthmateFrostFS) getCredVersions(ctx context.Context, addr oid.Address) (*crdt.ObjectVersions, error) {
	objCredSystemName := credVersionSysName(addr.Container(), addr.Object())
	credVersions, err := x.frostFS.SearchObjects(ctx, layer.PrmObjectSearch{
		Container:      addr.Container(),
		ExactAttribute: [2]string{accessBoxCRDTNameAttr, objCredSystemName},
	})
	if err != nil {
		return nil, fmt.Errorf("search s3 access boxes: %w", err)
	}

	versions := crdt.NewObjectVersions(objCredSystemName)

	for _, id := range credVersions {
		objVersion, err := x.frostFS.ReadObject(ctx, layer.PrmObjectRead{
			Container:  addr.Container(),
			Object:     id,
			WithHeader: true,
		})
		if err != nil {
			return nil, fmt.Errorf("head crdt access box '%s': %w", id.EncodeToString(), err)
		}

		versions.AppendVersion(crdt.NewObjectVersion(objVersion.Head))
	}

	return versions, nil
}

func credVersionSysName(cnrID cid.ID, objID oid.ID) string {
	return cnrID.EncodeToString() + "0" + objID.EncodeToString()
}
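Taken together, `CreateObject` and `GetCredsPayload` give authmate versioned access boxes that are still addressed by the original credential object. The sketch below illustrates that flow; `rotateAccessBox` is a hypothetical helper written for illustration only (it assumes it sits in this `frostfs` package and omits the `Creator` and `Filepath` fields), not part of the change set.

```go
package frostfs

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// rotateAccessBox (hypothetical) appends a new CRDT version for an existing
// credential address and then reads back whichever version is now the latest.
func rotateAccessBox(ctx context.Context, fs *AuthmateFrostFS, addr oid.Address, payload []byte, expEpoch uint64) ([]byte, error) {
	origID := addr.Object()

	// Link the new box to the original credential object; CreateObject copies the
	// accumulated CRDT headers so every earlier version stays discoverable.
	if _, err := fs.CreateObject(ctx, tokens.PrmObjectCreate{
		Container:       addr.Container(),
		Payload:         payload,
		ExpirationEpoch: expEpoch,
		NewVersionFor:   &origID,
	}); err != nil {
		return nil, fmt.Errorf("append access box version: %w", err)
	}

	// GetCredsPayload resolves the same address to the newest version
	// (ordered by creation epoch, then object ID).
	return fs.GetCredsPayload(ctx, addr)
}
```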
124 internal/frostfs/crdt/gset.go Normal file
@ -0,0 +1,124 @@
package crdt

import (
	"sort"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

const (
	versionsAddAttr = "S3-CRDT-Versions-Add"
)

type ObjectVersions struct {
	name     string
	objects  []*ObjectVersion
	addList  []string
	isSorted bool
}

type ObjectVersion struct {
	OjbID         oid.ID
	Headers       map[string]string
	CreationEpoch uint64
}

func (o *ObjectVersion) VersionID() string {
	return o.OjbID.EncodeToString()
}

func NewObjectVersions(name string) *ObjectVersions {
	return &ObjectVersions{name: name}
}

func NewObjectVersion(obj *object.Object) *ObjectVersion {
	objID, _ := obj.ID()
	headers := make(map[string]string)

	for _, attr := range obj.Attributes() {
		headers[attr.GetKey()] = attr.GetValue()
	}

	return &ObjectVersion{
		OjbID:         objID,
		Headers:       headers,
		CreationEpoch: obj.CreationEpoch(),
	}
}

func (v *ObjectVersions) Name() string {
	return v.name
}

func (v *ObjectVersions) AppendVersion(ov *ObjectVersion) {
	addVers := append(splitVersions(ov.Headers[versionsAddAttr]), ov.VersionID())
	v.objects = append(v.objects, ov)
	for _, add := range addVers {
		if !contains(v.addList, add) {
			v.addList = append(v.addList, add)
		}
	}
	v.isSorted = false
}

func (v *ObjectVersions) GetCRDTHeaders() map[string]string {
	if len(v.objects) == 0 {
		return nil
	}

	headers := make(map[string]string, 2)

	if len(v.addList) != 0 {
		headers[versionsAddAttr] = v.getAddHeader()
	}

	return headers
}

func (v *ObjectVersions) GetLast() *ObjectVersion {
	if len(v.objects) == 0 {
		return nil
	}

	v.sort()
	return v.objects[len(v.objects)-1]
}

func splitVersions(header string) []string {
	if len(header) == 0 {
		return nil
	}

	return strings.Split(header, ",")
}

func (v *ObjectVersions) sort() {
	if !v.isSorted {
		sort.Slice(v.objects, func(i, j int) bool {
			return less(v.objects[i], v.objects[j])
		})
		v.isSorted = true
	}
}

func (v *ObjectVersions) getAddHeader() string {
	return strings.Join(v.addList, ",")
}

func less(ov1, ov2 *ObjectVersion) bool {
	if ov1.CreationEpoch == ov2.CreationEpoch {
		return ov1.VersionID() < ov2.VersionID()
	}
	return ov1.CreationEpoch < ov2.CreationEpoch
}

func contains(list []string, elem string) bool {
	for _, item := range list {
		if elem == item {
			return true
		}
	}
	return false
}
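The type above is a small grow-only set (G-Set) over object versions: `AppendVersion` merges the add-list carried in the `S3-CRDT-Versions-Add` header, and `GetLast` orders versions by creation epoch and then object ID. A minimal sketch of that round trip, using only the exported API shown here with made-up values, follows (the test file right after exercises the ordering itself).

```go
package crdt_test

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/crdt"
)

// Example sketch: the add-list accumulated by AppendVersion is what a writer puts
// into the S3-CRDT-Versions-Add attribute of the next version, so a reader that
// only sees the newest object can still enumerate all earlier version IDs.
func Example_addList() {
	versions := crdt.NewObjectVersions("cnr0obj")

	v1 := &crdt.ObjectVersion{CreationEpoch: 1, Headers: map[string]string{}}
	versions.AppendVersion(v1)

	// The second version carries the previous add-list in its headers,
	// the same way AuthmateFrostFS.CreateObject propagates GetCRDTHeaders.
	v2 := &crdt.ObjectVersion{
		CreationEpoch: 2,
		Headers:       versions.GetCRDTHeaders(),
	}
	versions.AppendVersion(v2)

	fmt.Println(versions.GetLast().CreationEpoch)
	// Output: 2
}
```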
38 internal/frostfs/crdt/gset_test.go Normal file
@ -0,0 +1,38 @@
package crdt

import (
	"testing"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"github.com/stretchr/testify/require"
)

func TestObjectVersionsSort(t *testing.T) {
	t.Run("sort by epoch", func(t *testing.T) {
		versions := NewObjectVersions("test")
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 3})
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 1})
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 2})

		last := versions.GetLast()
		require.Equal(t, uint64(3), last.CreationEpoch)
	})

	t.Run("sort by oids", func(t *testing.T) {
		versions := NewObjectVersions("test")
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 3, OjbID: getTestOID(2)})
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 3, OjbID: getTestOID(1)})
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 1, OjbID: getTestOID(3)})
		versions.AppendVersion(&ObjectVersion{CreationEpoch: 2, OjbID: getTestOID(4)})

		last := versions.GetLast()
		require.Equal(t, uint64(3), last.CreationEpoch)
		require.Equal(t, getTestOID(2).String(), last.VersionID())
	})
}

func getTestOID(val byte) oid.ID {
	var res oid.ID
	res.SetSHA256([32]byte{val})
	return res
}
42 internal/frostfs/errors/errors.go Normal file
@ -0,0 +1,42 @@
package errors

import (
	"context"
	"errors"
	"strings"

	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func UnwrapErr(err error) error {
	unwrappedErr := errors.Unwrap(err)
	for unwrappedErr != nil {
		err = unwrappedErr
		unwrappedErr = errors.Unwrap(err)
	}

	return err
}

func IsErrObjectAccessDenied(err error) (string, bool) {
	err = UnwrapErr(err)
	switch err := err.(type) {
	default:
		return "", false
	case apistatus.ObjectAccessDenied:
		return err.Reason(), true
	case *apistatus.ObjectAccessDenied:
		return err.Reason(), true
	}
}

func IsTimeoutError(err error) bool {
	if strings.Contains(err.Error(), "timeout") ||
		errors.Is(err, context.DeadlineExceeded) {
		return true
	}

	return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded
}
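These helpers are what the reworked FrostFS wrapper (the diff that follows) funnels its pool errors through via `handleObjectError`. The body of `handleObjectError` is not part of this compare view, so the sketch below is only a hypothetical stand-in showing how `IsErrObjectAccessDenied` can be combined with the gateway's `layer.ErrAccessDenied` sentinel; the name `handleObjectErrorSketch` and the exact behaviour are assumptions, not the committed code.

```go
package frostfs

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
)

// handleObjectErrorSketch is a hypothetical stand-in for handleObjectError,
// shown only to illustrate how the errors package above is meant to be used.
func handleObjectErrorSketch(msg string, err error) error {
	if err == nil {
		return nil
	}

	// Translate SDK "access denied" statuses into the layer-level sentinel error.
	if reason, ok := errorsFrost.IsErrObjectAccessDenied(err); ok {
		return fmt.Errorf("%s: %w: %s", msg, layer.ErrAccessDenied, reason)
	}

	// Everything else keeps the original error in the chain for callers to inspect.
	return fmt.Errorf("%s: %w", msg, err)
}
```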
|
@ -1,7 +1,6 @@
 package frostfs

 import (
 	"bytes"
 	"context"
-	"errors"
 	"fmt"
@ -10,11 +9,9 @@ import (
 	"strconv"
 	"time"

-	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	lockv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/lock/grpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
@ -61,7 +58,7 @@ func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (u
 	networkInfo, err := x.pool.NetworkInfo(ctx)
 	if err != nil {
-		return 0, 0, fmt.Errorf("get network info via client: %w", err)
+		return 0, 0, handleObjectError("get network info via client", err)
 	}

 	durEpoch := networkInfo.EpochDuration()
@ -94,7 +91,7 @@ func (x *FrostFS) Container(ctx context.Context, idCnr cid.ID) (*container.Conta
 	res, err := x.pool.GetContainer(ctx, prm)
 	if err != nil {
-		return nil, fmt.Errorf("read container via connection pool: %w", err)
+		return nil, handleObjectError("read container via connection pool", err)
 	}

 	return &res, nil
@ -110,7 +107,7 @@ func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCre
 		prm.BasicACL = acl.PublicRWExtended
 	}

-	var cnr container.Container
+	cnr := container.NewContainer()
 	cnr.Init()
 	cnr.SetPlacementPolicy(prm.Policy)
 	cnr.SetOwner(prm.Creator)
@ -120,23 +117,23 @@
 	if creationTime.IsZero() {
 		creationTime = time.Now()
 	}
-	container.SetCreationTime(&cnr, creationTime)
+	container.SetCreationTime(cnr, creationTime)

 	if prm.Name != "" {
 		var d container.Domain
 		d.SetName(prm.Name)

-		container.WriteDomain(&cnr, d)
-		container.SetName(&cnr, prm.Name)
+		container.WriteDomain(cnr, d)
+		container.SetName(cnr, prm.Name)
 	}

 	for i := range prm.AdditionalAttributes {
 		cnr.SetAttribute(prm.AdditionalAttributes[i][0], prm.AdditionalAttributes[i][1])
 	}

-	err := pool.SyncContainerWithNetwork(ctx, &cnr, x.pool)
+	err := pool.SyncContainerWithNetwork(ctx, cnr, x.pool)
 	if err != nil {
-		return cid.ID{}, fmt.Errorf("sync container with the network state: %w", err)
+		return cid.ID{}, handleObjectError("sync container with the network state", err)
 	}

 	var prmPut pool.PrmContainerPut
@ -149,11 +146,7 @@
 	// send request to save the container
 	idCnr, err := x.pool.PutContainer(ctx, prmPut)
-	if err != nil {
-		return cid.ID{}, fmt.Errorf("save container via connection pool: %w", err)
-	}
-
-	return idCnr, nil
+	return idCnr, handleObjectError("save container via connection pool", err)
 }

 // UserContainers implements frostfs.FrostFS interface method.
@ -162,11 +155,7 @@ func (x *FrostFS) UserContainers(ctx context.Context, id user.ID) ([]cid.ID, err
 	prm.SetOwnerID(id)

 	r, err := x.pool.ListContainers(ctx, prm)
-	if err != nil {
-		return nil, fmt.Errorf("list user containers via connection pool: %w", err)
-	}
-
-	return r, nil
+	return r, handleObjectError("list user containers via connection pool", err)
 }

 // SetContainerEACL implements frostfs.FrostFS interface method.
@ -180,11 +169,7 @@ func (x *FrostFS) SetContainerEACL(ctx context.Context, table eacl.Table, sessio
 	}

 	err := x.pool.SetEACL(ctx, prm)
-	if err != nil {
-		return fmt.Errorf("save eACL via connection pool: %w", err)
-	}
-
-	return err
+	return handleObjectError("save eACL via connection pool", err)
 }

 // ContainerEACL implements frostfs.FrostFS interface method.
@ -194,7 +179,7 @@ func (x *FrostFS) ContainerEACL(ctx context.Context, id cid.ID) (*eacl.Table, er
 	res, err := x.pool.GetEACL(ctx, prm)
 	if err != nil {
-		return nil, fmt.Errorf("read eACL via connection pool: %w", err)
+		return nil, handleObjectError("read eACL via connection pool", err)
 	}

 	return &res, nil
@ -211,11 +196,7 @@ func (x *FrostFS) DeleteContainer(ctx context.Context, id cid.ID, token *session
 	}

 	err := x.pool.DeleteContainer(ctx, prm)
-	if err != nil {
-		return fmt.Errorf("delete container via connection pool: %w", err)
-	}
-
-	return nil
+	return handleObjectError("delete container via connection pool", err)
 }

 // CreateObject implements frostfs.FrostFS interface method.
@ -263,10 +244,10 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (
 	if len(prm.Locks) > 0 {
 		lock := new(object.Lock)
 		lock.WriteMembers(prm.Locks)
-		objectv2.WriteLock(obj.ToV2(), (objectv2.Lock)(*lock))
+		lockv2.WriteLock(obj.ToV2(), lock.ToV2())
 	}

-	var prmPut pool.PrmObjectPut
+	prmPut := pool.NewPrmObjectPut()
 	prmPut.SetHeader(*obj)
 	prmPut.SetPayload(prm.Payload)
 	prmPut.SetCopiesNumberVector(prm.CopiesNumber)
@ -278,15 +259,7 @@
 	}

 	idObj, err := x.pool.PutObject(ctx, prmPut)
-	if err != nil {
-		reason, ok := isErrAccessDenied(err)
-		if ok {
-			return oid.ID{}, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-		}
-		return oid.ID{}, fmt.Errorf("save object via connection pool: %w", err)
-	}
-
-	return idObj, nil
+	return idObj, handleObjectError("save object via connection pool", err)
 }

 // wraps io.ReadCloser and transforms Read errors related to access violation
@ -297,13 +270,7 @@ type payloadReader struct {
 func (x payloadReader) Read(p []byte) (int, error) {
 	n, err := x.ReadCloser.Read(p)
-	if err != nil {
-		if reason, ok := isErrAccessDenied(err); ok {
-			return n, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-		}
-	}
-
-	return n, err
+	return n, handleObjectError("read payload", err)
 }

 // ReadObject implements frostfs.FrostFS interface method.
@ -325,18 +292,14 @@ func (x *FrostFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*lay
 	if prm.WithPayload {
 		res, err := x.pool.GetObject(ctx, prmGet)
 		if err != nil {
-			if reason, ok := isErrAccessDenied(err); ok {
-				return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-			}
-
-			return nil, fmt.Errorf("init full object reading via connection pool: %w", err)
+			return nil, handleObjectError("init full object reading via connection pool", err)
 		}

 		defer res.Payload.Close()

 		payload, err := io.ReadAll(res.Payload)
 		if err != nil {
-			return nil, fmt.Errorf("read full object payload: %w", err)
+			return nil, handleObjectError("read full object payload", err)
 		}

 		res.Header.SetPayload(payload)
@ -357,11 +320,7 @@
 		hdr, err := x.pool.HeadObject(ctx, prmHead)
 		if err != nil {
-			if reason, ok := isErrAccessDenied(err); ok {
-				return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-			}
-
-			return nil, fmt.Errorf("read object header via connection pool: %w", err)
+			return nil, handleObjectError("read object header via connection pool", err)
 		}

 		return &layer.ObjectPart{
@ -370,11 +329,7 @@
 	} else if prm.PayloadRange[0]+prm.PayloadRange[1] == 0 {
 		res, err := x.pool.GetObject(ctx, prmGet)
 		if err != nil {
-			if reason, ok := isErrAccessDenied(err); ok {
-				return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-			}
-
-			return nil, fmt.Errorf("init full payload range reading via connection pool: %w", err)
+			return nil, handleObjectError("init full payload range reading via connection pool", err)
 		}

 		return &layer.ObjectPart{
@ -395,11 +350,7 @@
 	res, err := x.pool.ObjectRange(ctx, prmRange)
 	if err != nil {
-		if reason, ok := isErrAccessDenied(err); ok {
-			return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-		}
-
-		return nil, fmt.Errorf("init payload range reading via connection pool: %w", err)
+		return nil, handleObjectError("init payload range reading via connection pool", err)
 	}

 	return &layer.ObjectPart{
@ -423,32 +374,44 @@ func (x *FrostFS) DeleteObject(ctx context.Context, prm layer.PrmObjectDelete) e
 	}

 	err := x.pool.DeleteObject(ctx, prmDelete)
-	if err != nil {
-		if reason, ok := isErrAccessDenied(err); ok {
-			return fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
-		}
-
-		return fmt.Errorf("mark object removal via connection pool: %w", err)
-	}
-
-	return nil
+	return handleObjectError("mark object removal via connection pool", err)
 }

-func isErrAccessDenied(err error) (string, bool) {
-	unwrappedErr := errors.Unwrap(err)
-	for unwrappedErr != nil {
-		err = unwrappedErr
-		unwrappedErr = errors.Unwrap(err)
+// SearchObjects implements frostfs.FrostFS interface method.
+func (x *FrostFS) SearchObjects(ctx context.Context, prm layer.PrmObjectSearch) ([]oid.ID, error) {
+	filters := object.NewSearchFilters()
+	filters.AddRootFilter()
+
+	if prm.ExactAttribute[0] != "" {
+		filters.AddFilter(prm.ExactAttribute[0], prm.ExactAttribute[1], object.MatchStringEqual)
 	}

-	switch err := err.(type) {
-	default:
-		return "", false
-	case apistatus.ObjectAccessDenied:
-		return err.Reason(), true
-	case *apistatus.ObjectAccessDenied:
-		return err.Reason(), true
+	if prm.FilePrefix != "" {
+		filters.AddFilter(object.AttributeFileName, prm.FilePrefix, object.MatchCommonPrefix)
 	}
+
+	var prmSearch pool.PrmObjectSearch
+	prmSearch.SetContainerID(prm.Container)
+	prmSearch.SetFilters(filters)
+
+	if prm.BearerToken != nil {
+		prmSearch.UseBearer(*prm.BearerToken)
+	} else {
+		prmSearch.UseKey(prm.PrivateKey)
+	}
+
+	res, err := x.pool.SearchObjects(ctx, prmSearch)
+	if err != nil {
+		return nil, handleObjectError("init object search via connection pool", err)
|
||||
}
|
||||
defer res.Close()
|
||||
|
||||
var buf []oid.ID
|
||||
err = res.Iterate(func(id oid.ID) bool {
|
||||
buf = append(buf, id)
|
||||
return false
|
||||
})
|
||||
return buf, handleObjectError("read object list", err)
|
||||
}
|
||||
|
||||
// ResolverFrostFS represents virtual connection to the FrostFS network.
|
||||
|
@ -466,7 +429,7 @@ func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
|
|||
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
|
||||
networkInfo, err := x.pool.NetworkInfo(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read network info via client: %w", err)
|
||||
return "", handleObjectError("read network info via client", err)
|
||||
}
|
||||
|
||||
domain := networkInfo.RawNetworkParameter("SystemDNS")
|
||||
|
@ -477,71 +440,20 @@ func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
|
|||
return string(domain), nil
|
||||
}
|
||||
|
||||
// AuthmateFrostFS is a mediator which implements authmate.FrostFS through pool.Pool.
|
||||
type AuthmateFrostFS struct {
|
||||
frostFS *FrostFS
|
||||
}
|
||||
|
||||
// NewAuthmateFrostFS creates new AuthmateFrostFS using provided pool.Pool.
|
||||
func NewAuthmateFrostFS(p *pool.Pool) *AuthmateFrostFS {
|
||||
return &AuthmateFrostFS{frostFS: NewFrostFS(p)}
|
||||
}
|
||||
|
||||
// ContainerExists implements authmate.FrostFS interface method.
|
||||
func (x *AuthmateFrostFS) ContainerExists(ctx context.Context, idCnr cid.ID) error {
|
||||
_, err := x.frostFS.Container(ctx, idCnr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get container via connection pool: %w", err)
|
||||
}
|
||||
|
||||
func handleObjectError(msg string, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TimeToEpoch implements authmate.FrostFS interface method.
|
||||
func (x *AuthmateFrostFS) TimeToEpoch(ctx context.Context, futureTime time.Time) (uint64, uint64, error) {
|
||||
return x.frostFS.TimeToEpoch(ctx, time.Now(), futureTime)
|
||||
}
|
||||
|
||||
// CreateContainer implements authmate.FrostFS interface method.
|
||||
func (x *AuthmateFrostFS) CreateContainer(ctx context.Context, prm authmate.PrmContainerCreate) (cid.ID, error) {
|
||||
basicACL := acl.Private
|
||||
// allow reading objects to OTHERS in order to provide read access to S3 gateways
|
||||
basicACL.AllowOp(acl.OpObjectGet, acl.RoleOthers)
|
||||
|
||||
return x.frostFS.CreateContainer(ctx, layer.PrmContainerCreate{
|
||||
Creator: prm.Owner,
|
||||
Policy: prm.Policy,
|
||||
Name: prm.FriendlyName,
|
||||
BasicACL: basicACL,
|
||||
})
|
||||
}
|
||||
|
||||
// ReadObjectPayload implements authmate.FrostFS interface method.
|
||||
func (x *AuthmateFrostFS) ReadObjectPayload(ctx context.Context, addr oid.Address) ([]byte, error) {
|
||||
res, err := x.frostFS.ReadObject(ctx, layer.PrmObjectRead{
|
||||
Container: addr.Container(),
|
||||
Object: addr.Object(),
|
||||
WithPayload: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer res.Payload.Close()
|
||||
if reason, ok := errorsFrost.IsErrObjectAccessDenied(err); ok {
|
||||
return fmt.Errorf("%s: %w: %s", msg, layer.ErrAccessDenied, reason)
|
||||
}
|
||||
|
||||
return io.ReadAll(res.Payload)
|
||||
}
|
||||
if errorsFrost.IsTimeoutError(err) {
|
||||
return fmt.Errorf("%s: %w: %s", msg, layer.ErrGatewayTimeout, err.Error())
|
||||
}
|
||||
|
||||
// CreateObject implements authmate.FrostFS interface method.
|
||||
func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObjectCreate) (oid.ID, error) {
|
||||
return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
|
||||
Creator: prm.Creator,
|
||||
Container: prm.Container,
|
||||
Filepath: prm.Filepath,
|
||||
Attributes: [][2]string{
|
||||
{objectv2.SysAttributeExpEpoch, strconv.FormatUint(prm.ExpirationEpoch, 10)}},
|
||||
Payload: bytes.NewReader(prm.Payload),
|
||||
})
|
||||
return fmt.Errorf("%s: %w", msg, err)
|
||||
}
|
||||
|
||||
// PoolStatistic is a mediator which implements authmate.FrostFS through pool.Pool.
|
||||
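The refactor above funnels every pool error through handleObjectError, which wraps layer.ErrAccessDenied and layer.ErrGatewayTimeout with %w. A minimal sketch of how a caller could branch on those sentinels with errors.Is follows; the classify helper, its return strings, and the sample message are illustrative only and not part of the gateway.

```go
package main

import (
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
)

// classify maps a wrapped FrostFS error onto a coarse category.
// The category names are placeholders for whatever the caller needs.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, layer.ErrAccessDenied):
		return "access denied"
	case errors.Is(err, layer.ErrGatewayTimeout):
		return "gateway timeout"
	default:
		return "internal error"
	}
}

func main() {
	// Shaped like the errors produced by handleObjectError above.
	err := fmt.Errorf("save object via connection pool: %w: no rights", layer.ErrAccessDenied)
	fmt.Println(classify(err)) // prints: access denied
}
```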

@@ -1,25 +1,50 @@
package frostfs

import (
	"context"
	"errors"
	"fmt"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func TestErrorChecking(t *testing.T) {
	reason := "some reason"
	err := new(apistatus.ObjectAccessDenied)
	err := apistatus.NewObjectAccessDenied()
	err.WriteReason(reason)

	var wrappedError error

	if fetchedReason, ok := isErrAccessDenied(err); ok {
	if fetchedReason, ok := errorsFrost.IsErrObjectAccessDenied(err); ok {
		wrappedError = fmt.Errorf("%w: %s", layer.ErrAccessDenied, fetchedReason)
	}

	require.ErrorIs(t, wrappedError, layer.ErrAccessDenied)
	require.Contains(t, wrappedError.Error(), reason)
}

func TestErrorTimeoutChecking(t *testing.T) {
	t.Run("simple timeout", func(t *testing.T) {
		require.True(t, errorsFrost.IsTimeoutError(errors.New("timeout")))
	})

	t.Run("deadline exceeded", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		defer cancel()
		time.Sleep(50 * time.Millisecond)

		require.True(t, errorsFrost.IsTimeoutError(ctx.Err()))
	})

	t.Run("grpc deadline exceeded", func(t *testing.T) {
		err := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
		require.True(t, errorsFrost.IsTimeoutError(err))
	})
}
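Building on the timeout cases above, a possible extra test would check that handleObjectError maps an expired context onto layer.ErrGatewayTimeout. This is only a sketch: it assumes it sits in the same package and reuses the same imports as the test file shown, and the test name is made up for illustration.

```go
// TestHandleObjectErrorTimeout: an expired context should come back wrapped
// with layer.ErrGatewayTimeout and carry the original operation message.
func TestHandleObjectErrorTimeout(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done() // wait until the deadline has actually passed

	err := handleObjectError("read object header via connection pool", ctx.Err())
	require.ErrorIs(t, err, layer.ErrGatewayTimeout)
	require.Contains(t, err.Error(), "read object header via connection pool")
}
```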
|
197
internal/frostfs/services/pool_wrapper.go
Normal file
197
internal/frostfs/services/pool_wrapper.go
Normal file
|
@ -0,0 +1,197 @@
|
|||
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
errorsFrost "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
|
||||
)
|
||||
|
||||
type GetNodeByPathResponseInfoWrapper struct {
|
||||
response *grpcService.GetNodeByPathResponse_Info
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
|
||||
return n.response.GetNodeId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
|
||||
return n.response.GetParentId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
|
||||
return n.response.GetTimestamp()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
|
||||
res := make([]tree.Meta, len(n.response.Meta))
|
||||
for i, value := range n.response.Meta {
|
||||
res[i] = value
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type GetSubTreeResponseBodyWrapper struct {
|
||||
response *grpcService.GetSubTreeResponse_Body
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
|
||||
return n.response.GetNodeId()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
|
||||
return n.response.GetParentId()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
|
||||
return n.response.GetTimestamp()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
|
||||
res := make([]tree.Meta, len(n.response.Meta))
|
||||
for i, value := range n.response.Meta {
|
||||
res[i] = value
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type PoolWrapper struct {
|
||||
p *treepool.Pool
|
||||
}
|
||||
|
||||
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
|
||||
return &PoolWrapper{p: p}
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
||||
poolPrm := treepool.GetNodesParams{
|
||||
CID: prm.BktInfo.CID,
|
||||
TreeID: prm.TreeID,
|
||||
Path: prm.Path,
|
||||
Meta: prm.Meta,
|
||||
PathAttribute: tree.FileNameKey,
|
||||
LatestOnly: prm.LatestOnly,
|
||||
AllAttrs: prm.AllAttrs,
|
||||
BearerToken: getBearer(ctx, prm.BktInfo),
|
||||
}
|
||||
|
||||
nodes, err := w.p.GetNodes(ctx, poolPrm)
|
||||
if err != nil {
|
||||
return nil, handleError(err)
|
||||
}
|
||||
|
||||
res := make([]tree.NodeResponse, len(nodes))
|
||||
for i, info := range nodes {
|
||||
res[i] = GetNodeByPathResponseInfoWrapper{info}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
|
||||
poolPrm := treepool.GetSubTreeParams{
|
||||
CID: bktInfo.CID,
|
||||
TreeID: treeID,
|
||||
RootID: rootID,
|
||||
Depth: depth,
|
||||
BearerToken: getBearer(ctx, bktInfo),
|
||||
}
|
||||
|
||||
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
|
||||
if err != nil {
|
||||
return nil, handleError(err)
|
||||
}
|
||||
|
||||
var subtree []tree.NodeResponse
|
||||
|
||||
node, err := subTreeReader.Next()
|
||||
for err == nil {
|
||||
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
|
||||
node, err = subTreeReader.Next()
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
return nil, handleError(err)
|
||||
}
|
||||
|
||||
return subtree, nil
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
|
||||
nodeID, err := w.p.AddNode(ctx, treepool.AddNodeParams{
|
||||
CID: bktInfo.CID,
|
||||
TreeID: treeID,
|
||||
Parent: parent,
|
||||
Meta: meta,
|
||||
BearerToken: getBearer(ctx, bktInfo),
|
||||
})
|
||||
return nodeID, handleError(err)
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
|
||||
nodeID, err := w.p.AddNodeByPath(ctx, treepool.AddNodeByPathParams{
|
||||
CID: bktInfo.CID,
|
||||
TreeID: treeID,
|
||||
Path: path,
|
||||
Meta: meta,
|
||||
PathAttribute: tree.FileNameKey,
|
||||
BearerToken: getBearer(ctx, bktInfo),
|
||||
})
|
||||
return nodeID, handleError(err)
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
|
||||
return handleError(w.p.MoveNode(ctx, treepool.MoveNodeParams{
|
||||
CID: bktInfo.CID,
|
||||
TreeID: treeID,
|
||||
NodeID: nodeID,
|
||||
ParentID: parentID,
|
||||
Meta: meta,
|
||||
BearerToken: getBearer(ctx, bktInfo),
|
||||
}))
|
||||
}
|
||||
|
||||
func (w *PoolWrapper) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
|
||||
return handleError(w.p.RemoveNode(ctx, treepool.RemoveNodeParams{
|
||||
CID: bktInfo.CID,
|
||||
TreeID: treeID,
|
||||
NodeID: nodeID,
|
||||
BearerToken: getBearer(ctx, bktInfo),
|
||||
}))
|
||||
}
|
||||
|
||||
func getBearer(ctx context.Context, bktInfo *data.BucketInfo) []byte {
|
||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil {
|
||||
if bd.Gate.BearerToken != nil {
|
||||
if bd.Gate.BearerToken.Impersonate() || bktInfo.Owner.Equals(bearer.ResolveIssuer(bd.Gate.BearerToken)) {
|
||||
return bd.Gate.BearerToken.Marshal()
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if errors.Is(err, treepool.ErrNodeNotFound) {
|
||||
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
|
||||
}
|
||||
if errors.Is(err, treepool.ErrNodeAccessDenied) {
|
||||
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
|
||||
}
|
||||
if errorsFrost.IsTimeoutError(err) {
|
||||
return fmt.Errorf("%w: %s", tree.ErrGatewayTimeout, err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
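Because handleError wraps the tree package sentinels with %w, code built on top of PoolWrapper can branch with errors.Is. The sketch below assumes the wrapper package is importable as internal/frostfs/services; the subtreeOrEmpty helper and its "missing tree means empty" policy are illustrative, not existing gateway code.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
)

// subtreeOrEmpty reads up to two levels of a tree and treats a missing tree
// as an empty listing, while a slow tree service becomes an explicit error.
func subtreeOrEmpty(ctx context.Context, w *services.PoolWrapper, bkt *data.BucketInfo, treeID string) ([]tree.NodeResponse, error) {
	nodes, err := w.GetSubTree(ctx, bkt, treeID, 0, 2)
	switch {
	case err == nil:
		return nodes, nil
	case errors.Is(err, tree.ErrNodeNotFound):
		return nil, nil // no tree yet: nothing to list
	case errors.Is(err, tree.ErrGatewayTimeout):
		return nil, fmt.Errorf("tree service is too slow: %w", err)
	default:
		return nil, err
	}
}
```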
internal/frostfs/services/tree/service.pb.go (new file, 3454 lines): diff suppressed because it is too large.

internal/frostfs/services/tree/service_grpc.pb.go (new file, 520 lines):

@@ -0,0 +1,520 @@
//*
|
||||
// Service for working with CRDT tree.
|
||||
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.3.0
|
||||
// - protoc v3.12.4
|
||||
// source: pkg/services/tree/service.proto
|
||||
|
||||
package tree
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
const (
|
||||
TreeService_Add_FullMethodName = "/tree.TreeService/Add"
|
||||
TreeService_AddByPath_FullMethodName = "/tree.TreeService/AddByPath"
|
||||
TreeService_Remove_FullMethodName = "/tree.TreeService/Remove"
|
||||
TreeService_Move_FullMethodName = "/tree.TreeService/Move"
|
||||
TreeService_GetNodeByPath_FullMethodName = "/tree.TreeService/GetNodeByPath"
|
||||
TreeService_GetSubTree_FullMethodName = "/tree.TreeService/GetSubTree"
|
||||
TreeService_TreeList_FullMethodName = "/tree.TreeService/TreeList"
|
||||
TreeService_Apply_FullMethodName = "/tree.TreeService/Apply"
|
||||
TreeService_GetOpLog_FullMethodName = "/tree.TreeService/GetOpLog"
|
||||
TreeService_Healthcheck_FullMethodName = "/tree.TreeService/Healthcheck"
|
||||
)
|
||||
|
||||
// TreeServiceClient is the client API for TreeService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type TreeServiceClient interface {
|
||||
// Add adds new node to the tree. Invoked by a client.
|
||||
Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error)
|
||||
// AddByPath adds new node to the tree by path. Invoked by a client.
|
||||
AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error)
|
||||
// Remove removes node from the tree. Invoked by a client.
|
||||
Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error)
|
||||
// Move moves node from one parent to another. Invoked by a client.
|
||||
Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error)
|
||||
// GetNodeByPath returns list of IDs corresponding to a specific filepath.
|
||||
GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error)
|
||||
// GetSubTree returns tree corresponding to a specific node.
|
||||
GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error)
|
||||
// TreeList return list of the existing trees in the container.
|
||||
TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error)
|
||||
// Apply pushes log operation from another node to the current.
|
||||
// The request must be signed by a container node.
|
||||
Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error)
|
||||
// GetOpLog returns a stream of logged operations starting from some height.
|
||||
GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error)
|
||||
// Healthcheck is a dummy rpc to check service availability
|
||||
Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error)
|
||||
}
|
||||
|
||||
type treeServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewTreeServiceClient(cc grpc.ClientConnInterface) TreeServiceClient {
|
||||
return &treeServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) {
|
||||
out := new(AddResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_Add_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) AddByPath(ctx context.Context, in *AddByPathRequest, opts ...grpc.CallOption) (*AddByPathResponse, error) {
|
||||
out := new(AddByPathResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_AddByPath_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) {
|
||||
out := new(RemoveResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_Remove_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) Move(ctx context.Context, in *MoveRequest, opts ...grpc.CallOption) (*MoveResponse, error) {
|
||||
out := new(MoveResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_Move_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) GetNodeByPath(ctx context.Context, in *GetNodeByPathRequest, opts ...grpc.CallOption) (*GetNodeByPathResponse, error) {
|
||||
out := new(GetNodeByPathResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_GetNodeByPath_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) GetSubTree(ctx context.Context, in *GetSubTreeRequest, opts ...grpc.CallOption) (TreeService_GetSubTreeClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[0], TreeService_GetSubTree_FullMethodName, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &treeServiceGetSubTreeClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type TreeService_GetSubTreeClient interface {
|
||||
Recv() (*GetSubTreeResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type treeServiceGetSubTreeClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *treeServiceGetSubTreeClient) Recv() (*GetSubTreeResponse, error) {
|
||||
m := new(GetSubTreeResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) TreeList(ctx context.Context, in *TreeListRequest, opts ...grpc.CallOption) (*TreeListResponse, error) {
|
||||
out := new(TreeListResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_TreeList_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
|
||||
out := new(ApplyResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_Apply_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) GetOpLog(ctx context.Context, in *GetOpLogRequest, opts ...grpc.CallOption) (TreeService_GetOpLogClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &TreeService_ServiceDesc.Streams[1], TreeService_GetOpLog_FullMethodName, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &treeServiceGetOpLogClient{stream}
|
||||
if err := x.ClientStream.SendMsg(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := x.ClientStream.CloseSend(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type TreeService_GetOpLogClient interface {
|
||||
Recv() (*GetOpLogResponse, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type treeServiceGetOpLogClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *treeServiceGetOpLogClient) Recv() (*GetOpLogResponse, error) {
|
||||
m := new(GetOpLogResponse)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *treeServiceClient) Healthcheck(ctx context.Context, in *HealthcheckRequest, opts ...grpc.CallOption) (*HealthcheckResponse, error) {
|
||||
out := new(HealthcheckResponse)
|
||||
err := c.cc.Invoke(ctx, TreeService_Healthcheck_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// TreeServiceServer is the server API for TreeService service.
|
||||
// All implementations should embed UnimplementedTreeServiceServer
|
||||
// for forward compatibility
|
||||
type TreeServiceServer interface {
|
||||
// Add adds new node to the tree. Invoked by a client.
|
||||
Add(context.Context, *AddRequest) (*AddResponse, error)
|
||||
// AddByPath adds new node to the tree by path. Invoked by a client.
|
||||
AddByPath(context.Context, *AddByPathRequest) (*AddByPathResponse, error)
|
||||
// Remove removes node from the tree. Invoked by a client.
|
||||
Remove(context.Context, *RemoveRequest) (*RemoveResponse, error)
|
||||
// Move moves node from one parent to another. Invoked by a client.
|
||||
Move(context.Context, *MoveRequest) (*MoveResponse, error)
|
||||
// GetNodeByPath returns list of IDs corresponding to a specific filepath.
|
||||
GetNodeByPath(context.Context, *GetNodeByPathRequest) (*GetNodeByPathResponse, error)
|
||||
// GetSubTree returns tree corresponding to a specific node.
|
||||
GetSubTree(*GetSubTreeRequest, TreeService_GetSubTreeServer) error
|
||||
// TreeList return list of the existing trees in the container.
|
||||
TreeList(context.Context, *TreeListRequest) (*TreeListResponse, error)
|
||||
// Apply pushes log operation from another node to the current.
|
||||
// The request must be signed by a container node.
|
||||
Apply(context.Context, *ApplyRequest) (*ApplyResponse, error)
|
||||
// GetOpLog returns a stream of logged operations starting from some height.
|
||||
GetOpLog(*GetOpLogRequest, TreeService_GetOpLogServer) error
|
||||
// Healthcheck is a dummy rpc to check service availability
|
||||
Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedTreeServiceServer should be embedded to have forward compatible implementations.
|
||||
type UnimplementedTreeServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedTreeServiceServer) Add(context.Context, *AddRequest) (*AddResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Add not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) AddByPath(context.Context, *AddByPathRequest) (*AddByPathResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method AddByPath not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) Remove(context.Context, *RemoveRequest) (*RemoveResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) Move(context.Context, *MoveRequest) (*MoveResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Move not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) GetNodeByPath(context.Context, *GetNodeByPathRequest) (*GetNodeByPathResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNodeByPath not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) GetSubTree(*GetSubTreeRequest, TreeService_GetSubTreeServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method GetSubTree not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) TreeList(context.Context, *TreeListRequest) (*TreeListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method TreeList not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) GetOpLog(*GetOpLogRequest, TreeService_GetOpLogServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method GetOpLog not implemented")
|
||||
}
|
||||
func (UnimplementedTreeServiceServer) Healthcheck(context.Context, *HealthcheckRequest) (*HealthcheckResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Healthcheck not implemented")
|
||||
}
|
||||
|
||||
// UnsafeTreeServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to TreeServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeTreeServiceServer interface {
|
||||
mustEmbedUnimplementedTreeServiceServer()
|
||||
}
|
||||
|
||||
func RegisterTreeServiceServer(s grpc.ServiceRegistrar, srv TreeServiceServer) {
|
||||
s.RegisterService(&TreeService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _TreeService_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AddRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).Add(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_Add_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).Add(ctx, req.(*AddRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_AddByPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AddByPathRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).AddByPath(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_AddByPath_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).AddByPath(ctx, req.(*AddByPathRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RemoveRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).Remove(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_Remove_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).Remove(ctx, req.(*RemoveRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_Move_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MoveRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).Move(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_Move_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).Move(ctx, req.(*MoveRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_GetNodeByPath_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNodeByPathRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).GetNodeByPath(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_GetNodeByPath_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).GetNodeByPath(ctx, req.(*GetNodeByPathRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_GetSubTree_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(GetSubTreeRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(TreeServiceServer).GetSubTree(m, &treeServiceGetSubTreeServer{stream})
|
||||
}
|
||||
|
||||
type TreeService_GetSubTreeServer interface {
|
||||
Send(*GetSubTreeResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type treeServiceGetSubTreeServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *treeServiceGetSubTreeServer) Send(m *GetSubTreeResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _TreeService_TreeList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(TreeListRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).TreeList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_TreeList_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).TreeList(ctx, req.(*TreeListRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ApplyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).Apply(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_Apply_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).Apply(ctx, req.(*ApplyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _TreeService_GetOpLog_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
m := new(GetOpLogRequest)
|
||||
if err := stream.RecvMsg(m); err != nil {
|
||||
return err
|
||||
}
|
||||
return srv.(TreeServiceServer).GetOpLog(m, &treeServiceGetOpLogServer{stream})
|
||||
}
|
||||
|
||||
type TreeService_GetOpLogServer interface {
|
||||
Send(*GetOpLogResponse) error
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type treeServiceGetOpLogServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *treeServiceGetOpLogServer) Send(m *GetOpLogResponse) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func _TreeService_Healthcheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(HealthcheckRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TreeServiceServer).Healthcheck(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: TreeService_Healthcheck_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TreeServiceServer).Healthcheck(ctx, req.(*HealthcheckRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// TreeService_ServiceDesc is the grpc.ServiceDesc for TreeService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var TreeService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "tree.TreeService",
|
||||
HandlerType: (*TreeServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Add",
|
||||
Handler: _TreeService_Add_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AddByPath",
|
||||
Handler: _TreeService_AddByPath_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Remove",
|
||||
Handler: _TreeService_Remove_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Move",
|
||||
Handler: _TreeService_Move_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetNodeByPath",
|
||||
Handler: _TreeService_GetNodeByPath_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "TreeList",
|
||||
Handler: _TreeService_TreeList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Apply",
|
||||
Handler: _TreeService_Apply_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Healthcheck",
|
||||
Handler: _TreeService_Healthcheck_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "GetSubTree",
|
||||
Handler: _TreeService_GetSubTree_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
{
|
||||
StreamName: "GetOpLog",
|
||||
Handler: _TreeService_GetOpLog_Handler,
|
||||
ServerStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "pkg/services/tree/service.proto",
|
||||
}
|
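For reference, a minimal sketch of exercising the generated client above directly: dial a tree service endpoint, create a TreeServiceClient and run a Healthcheck, mirroring what the removed hand-written client does further down. The endpoint address and the insecure transport option are placeholders for illustration.

```go
package main

import (
	"context"
	"log"
	"time"

	tree "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services/tree"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Placeholder endpoint; a real deployment would use its tree service address
	// and proper transport credentials.
	conn, err := grpc.Dial("127.0.0.1:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	client := tree.NewTreeServiceClient(conn)
	if _, err = client.Healthcheck(ctx, &tree.HealthcheckRequest{}); err != nil {
		log.Fatal("tree service is not healthy: ", err)
	}
	log.Println("tree service is healthy")
}
```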
internal/frostfs/services/tree/types.pb.go (new file, 320 lines):

@@ -0,0 +1,320 @@
//*
|
||||
// Auxiliary structures to use with tree service.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.12.4
|
||||
// source: pkg/services/tree/types.proto
|
||||
|
||||
package tree
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// KeyValue represents key-value pair attached to an object.
|
||||
type KeyValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Attribute name.
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
// Attribute value.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *KeyValue) Reset() {
|
||||
*x = KeyValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *KeyValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*KeyValue) ProtoMessage() {}
|
||||
|
||||
func (x *KeyValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
|
||||
func (*KeyValue) Descriptor() ([]byte, []int) {
|
||||
return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *KeyValue) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *KeyValue) GetValue() []byte {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LogMove represents log-entry for a single move operation.
|
||||
type LogMove struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// ID of the parent node.
|
||||
ParentId uint64 `protobuf:"varint,1,opt,name=parent_id,json=parentID,proto3" json:"parent_id,omitempty"`
|
||||
// Node meta information, including operation timestamp.
|
||||
Meta []byte `protobuf:"bytes,2,opt,name=meta,proto3" json:"meta,omitempty"`
|
||||
// ID of the node to move.
|
||||
ChildId uint64 `protobuf:"varint,3,opt,name=child_id,json=childID,proto3" json:"child_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LogMove) Reset() {
|
||||
*x = LogMove{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LogMove) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LogMove) ProtoMessage() {}
|
||||
|
||||
func (x *LogMove) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LogMove.ProtoReflect.Descriptor instead.
|
||||
func (*LogMove) Descriptor() ([]byte, []int) {
|
||||
return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *LogMove) GetParentId() uint64 {
|
||||
if x != nil {
|
||||
return x.ParentId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *LogMove) GetMeta() []byte {
|
||||
if x != nil {
|
||||
return x.Meta
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *LogMove) GetChildId() uint64 {
|
||||
if x != nil {
|
||||
return x.ChildId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Signature of a message.
|
||||
type Signature struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Serialized public key as defined in FrostFS API.
|
||||
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
// Signature of a message body.
|
||||
Sign []byte `protobuf:"bytes,2,opt,name=sign,json=signature,proto3" json:"sign,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Signature) Reset() {
|
||||
*x = Signature{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Signature) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Signature) ProtoMessage() {}
|
||||
|
||||
func (x *Signature) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_pkg_services_tree_types_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Signature.ProtoReflect.Descriptor instead.
|
||||
func (*Signature) Descriptor() ([]byte, []int) {
|
||||
return file_pkg_services_tree_types_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *Signature) GetKey() []byte {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Signature) GetSign() []byte {
|
||||
if x != nil {
|
||||
return x.Sign
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_pkg_services_tree_types_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_pkg_services_tree_types_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74,
|
||||
0x72, 0x65, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x04, 0x74, 0x72, 0x65, 0x65, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x07, 0x4c, 0x6f, 0x67,
|
||||
0x4d, 0x6f, 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49,
|
||||
0x44, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69,
|
||||
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x44,
|
||||
0x22, 0x36, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a,
|
||||
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x17, 0x0a, 0x04, 0x73, 0x69, 0x67, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73,
|
||||
0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x2e,
|
||||
0x66, 0x72, 0x6f, 0x73, 0x74, 0x66, 0x73, 0x2e, 0x69, 0x6e, 0x66, 0x6f, 0x2f, 0x54, 0x72, 0x75,
|
||||
0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x4c, 0x61, 0x62, 0x2f, 0x66, 0x72, 0x6f, 0x73, 0x74, 0x66,
|
||||
0x73, 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x65, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_pkg_services_tree_types_proto_rawDescOnce sync.Once
|
||||
file_pkg_services_tree_types_proto_rawDescData = file_pkg_services_tree_types_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_pkg_services_tree_types_proto_rawDescGZIP() []byte {
|
||||
file_pkg_services_tree_types_proto_rawDescOnce.Do(func() {
|
||||
file_pkg_services_tree_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_services_tree_types_proto_rawDescData)
|
||||
})
|
||||
return file_pkg_services_tree_types_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_pkg_services_tree_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_pkg_services_tree_types_proto_goTypes = []interface{}{
|
||||
(*KeyValue)(nil), // 0: tree.KeyValue
|
||||
(*LogMove)(nil), // 1: tree.LogMove
|
||||
(*Signature)(nil), // 2: tree.Signature
|
||||
}
|
||||
var file_pkg_services_tree_types_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_pkg_services_tree_types_proto_init() }
|
||||
func file_pkg_services_tree_types_proto_init() {
|
||||
if File_pkg_services_tree_types_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_pkg_services_tree_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*KeyValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pkg_services_tree_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*LogMove); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_pkg_services_tree_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Signature); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_pkg_services_tree_types_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_pkg_services_tree_types_proto_goTypes,
|
||||
DependencyIndexes: file_pkg_services_tree_types_proto_depIdxs,
|
||||
MessageInfos: file_pkg_services_tree_types_proto_msgTypes,
|
||||
}.Build()
|
||||
File_pkg_services_tree_types_proto = out.File
|
||||
file_pkg_services_tree_types_proto_rawDesc = nil
|
||||
file_pkg_services_tree_types_proto_goTypes = nil
|
||||
file_pkg_services_tree_types_proto_depIdxs = nil
|
||||
}
|
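As a small illustration of the KeyValue message in practice, a helper of this shape converts the map[string]string metadata used by the wrappers into the generated type. The old client removed below refers to a metaToKV helper, but its body is not shown here, so this version is only an assumption placed next to the generated types.

```go
package tree

// metaToKV converts free-form string metadata into the generated KeyValue
// messages; values are stored as raw bytes, matching the proto definition.
func metaToKV(meta map[string]string) []*KeyValue {
	result := make([]*KeyValue, 0, len(meta))
	for key, value := range meta {
		result = append(result, &KeyValue{Key: key, Value: []byte(value)})
	}
	return result
}
```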

@@ -1,407 +0,0 @@
package services
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||
grpcService "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/services/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type GetNodeByPathResponseInfoWrapper struct {
|
||||
response *grpcService.GetNodeByPathResponse_Info
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
|
||||
return n.response.GetNodeId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
|
||||
return n.response.GetParentId()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
|
||||
return n.response.GetTimestamp()
|
||||
}
|
||||
|
||||
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
|
||||
res := make([]tree.Meta, len(n.response.Meta))
|
||||
for i, value := range n.response.Meta {
|
||||
res[i] = value
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type GetSubTreeResponseBodyWrapper struct {
|
||||
response *grpcService.GetSubTreeResponse_Body
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
|
||||
return n.response.GetNodeId()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
|
||||
return n.response.GetParentId()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
|
||||
return n.response.GetTimestamp()
|
||||
}
|
||||
|
||||
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
|
||||
res := make([]tree.Meta, len(n.response.Meta))
|
||||
for i, value := range n.response.Meta {
|
||||
res[i] = value
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
type ServiceClientGRPC struct {
|
||||
key *keys.PrivateKey
|
||||
log *zap.Logger
|
||||
clients []treeClient
|
||||
startIndex int32
|
||||
}
|
||||
|
||||
type treeClient struct {
|
||||
address string
|
||||
conn *grpc.ClientConn
|
||||
service grpcService.TreeServiceClient
|
||||
}
|
||||
|
||||
func (c *ServiceClientGRPC) Endpoints() []string {
|
||||
res := make([]string, len(c.clients))
|
||||
for i, client := range c.clients {
|
||||
res[i] = client.address
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func NewTreeServiceClientGRPC(ctx context.Context, endpoints []string, key *keys.PrivateKey, log *zap.Logger, grpcOpts ...grpc.DialOption) (*ServiceClientGRPC, error) {
|
||||
res := &ServiceClientGRPC{
|
||||
key: key,
|
||||
log: log,
|
||||
}
|
||||
|
||||
for _, addr := range endpoints {
|
||||
conn, err := grpc.Dial(addr, grpcOpts...)
|
||||
if err != nil {
|
||||
log.Warn("dial node tree service", zap.String("address", addr), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
c := grpcService.NewTreeServiceClient(conn)
|
||||
if _, err = c.Healthcheck(ctx, &grpcService.HealthcheckRequest{}); err != nil {
|
||||
log.Warn("healthcheck tree service", zap.String("address", addr), zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
res.clients = append(res.clients, treeClient{
|
||||
address: addr,
|
||||
conn: conn,
|
||||
service: c,
|
||||
})
|
||||
}
|
||||
|
||||
if len(res.clients) == 0 {
|
||||
return nil, errors.New("no healthy tree grpc client")
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *ServiceClientGRPC) GetNodes(ctx context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
||||
request := &grpcService.GetNodeByPathRequest{
|
||||
Body: &grpcService.GetNodeByPathRequest_Body{
|
||||
ContainerId: p.BktInfo.CID[:],
|
||||
TreeId: p.TreeID,
|
||||
Path: p.Path,
|
||||
Attributes: p.Meta,
|
||||
PathAttribute: tree.FileNameKey,
|
||||
LatestOnly: p.LatestOnly,
|
||||
AllAttributes: p.AllAttrs,
|
||||
BearerToken: getBearer(ctx, p.BktInfo),
|
||||
},
|
||||
}
|
||||
|
||||
if err := c.signRequest(request.Body, func(key, sign []byte) {
|
||||
request.Signature = &grpcService.Signature{
|
||||
Key: key,
|
||||
Sign: sign,
|
||||
}
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resp *grpcService.GetNodeByPathResponse
|
||||
if err := c.requestWithRetry(func(client treeClient) (inErr error) {
|
||||
resp, inErr = client.service.GetNodeByPath(ctx, request)
|
||||
return handleError("failed to get node by path", inErr)
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := make([]tree.NodeResponse, len(resp.GetBody().GetNodes()))
|
||||
for i, info := range resp.GetBody().GetNodes() {
|
||||
res[i] = GetNodeByPathResponseInfoWrapper{info}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *ServiceClientGRPC) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
	request := &grpcService.GetSubTreeRequest{
		Body: &grpcService.GetSubTreeRequest_Body{
			ContainerId: bktInfo.CID[:],
			TreeId:      treeID,
			RootId:      rootID,
			Depth:       depth,
			BearerToken: getBearer(ctx, bktInfo),
		},
	}

	if err := c.signRequest(request.Body, func(key, sign []byte) {
		request.Signature = &grpcService.Signature{
			Key:  key,
			Sign: sign,
		}
	}); err != nil {
		return nil, err
	}

	var cli grpcService.TreeService_GetSubTreeClient
	if err := c.requestWithRetry(func(client treeClient) (inErr error) {
		cli, inErr = client.service.GetSubTree(ctx, request)
		return handleError("failed to get sub tree client", inErr)
	}); err != nil {
		return nil, err
	}

	var subtree []tree.NodeResponse
	for {
		resp, err := cli.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, handleError("failed to get sub tree", err)
		}
		subtree = append(subtree, GetSubTreeResponseBodyWrapper{resp.Body})
	}

	return subtree, nil
}

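// AddNode creates a child of the given parent node with the provided meta
// attributes and returns the node ID assigned by the tree service.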
func (c *ServiceClientGRPC) AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error) {
	request := &grpcService.AddRequest{
		Body: &grpcService.AddRequest_Body{
			ContainerId: bktInfo.CID[:],
			TreeId:      treeID,
			ParentId:    parent,
			Meta:        metaToKV(meta),
			BearerToken: getBearer(ctx, bktInfo),
		},
	}
	if err := c.signRequest(request.Body, func(key, sign []byte) {
		request.Signature = &grpcService.Signature{
			Key:  key,
			Sign: sign,
		}
	}); err != nil {
		return 0, err
	}

	var resp *grpcService.AddResponse
	if err := c.requestWithRetry(func(client treeClient) (inErr error) {
		resp, inErr = client.service.Add(ctx, request)
		return handleError("failed to add node", inErr)
	}); err != nil {
		return 0, err
	}

	return resp.GetBody().GetNodeId(), nil
}

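// AddNodeByPath creates a node under the given path and returns the ID of the
// added leaf, which the tree service reports first in the response.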
func (c *ServiceClientGRPC) AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error) {
	request := &grpcService.AddByPathRequest{
		Body: &grpcService.AddByPathRequest_Body{
			ContainerId:   bktInfo.CID[:],
			TreeId:        treeID,
			Path:          path,
			Meta:          metaToKV(meta),
			PathAttribute: tree.FileNameKey,
			BearerToken:   getBearer(ctx, bktInfo),
		},
	}

	if err := c.signRequest(request.Body, func(key, sign []byte) {
		request.Signature = &grpcService.Signature{
			Key:  key,
			Sign: sign,
		}
	}); err != nil {
		return 0, err
	}

	var resp *grpcService.AddByPathResponse
	if err := c.requestWithRetry(func(client treeClient) (inErr error) {
		resp, inErr = client.service.AddByPath(ctx, request)
		return handleError("failed to add node by path", inErr)
	}); err != nil {
		return 0, err
	}

	body := resp.GetBody()
	if body == nil {
		return 0, errors.New("nil body in tree service response")
	} else if len(body.Nodes) == 0 {
		return 0, errors.New("empty list of added nodes in tree service response")
	}

	// The first node is the leaf that we add, according to tree service docs.
	return body.Nodes[0], nil
}

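// MoveNode reattaches an existing node to a new parent and updates its meta attributes.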
func (c *ServiceClientGRPC) MoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID, parentID uint64, meta map[string]string) error {
	request := &grpcService.MoveRequest{
		Body: &grpcService.MoveRequest_Body{
			ContainerId: bktInfo.CID[:],
			TreeId:      treeID,
			NodeId:      nodeID,
			ParentId:    parentID,
			Meta:        metaToKV(meta),
			BearerToken: getBearer(ctx, bktInfo),
		},
	}

	if err := c.signRequest(request.Body, func(key, sign []byte) {
		request.Signature = &grpcService.Signature{
			Key:  key,
			Sign: sign,
		}
	}); err != nil {
		return err
	}

	return c.requestWithRetry(func(client treeClient) error {
		if _, err := client.service.Move(ctx, request); err != nil {
			return handleError("failed to move node", err)
		}
		return nil
	})
}

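// RemoveNode deletes the given node from the tree.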
func (c *ServiceClientGRPC) RemoveNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, nodeID uint64) error {
	request := &grpcService.RemoveRequest{
		Body: &grpcService.RemoveRequest_Body{
			ContainerId: bktInfo.CID[:],
			TreeId:      treeID,
			NodeId:      nodeID,
			BearerToken: getBearer(ctx, bktInfo),
		},
	}
	if err := c.signRequest(request.Body, func(key, sign []byte) {
		request.Signature = &grpcService.Signature{
			Key:  key,
			Sign: sign,
		}
	}); err != nil {
		return err
	}

	return c.requestWithRetry(func(client treeClient) error {
		if _, err := client.service.Remove(ctx, request); err != nil {
			return handleError("failed to remove node", err)
		}
		return nil
	})
}

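// requestWithRetry runs fn against the known endpoints in round-robin order,
// starting from the endpoint that served the previous request. It stops at the
// first result that shouldTryAgain reports as final (success or a non-retryable
// error) and remembers that endpoint's index for subsequent calls.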
func (c *ServiceClientGRPC) requestWithRetry(fn func(client treeClient) error) (err error) {
	start := int(atomic.LoadInt32(&c.startIndex))
	for i := start; i < start+len(c.clients); i++ {
		index := i % len(c.clients)
		err = fn(c.clients[index])
		if !shouldTryAgain(err) {
			atomic.StoreInt32(&c.startIndex, int32(index))
			return err
		}
		c.log.Debug("tree request error", zap.String("address", c.clients[index].address), zap.Error(err))
	}

	return err
}

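// shouldTryAgain reports whether the error is worth retrying on another endpoint:
// gRPC Unavailable/Unimplemented responses and node-side errors such as
// "not found" or a shard in read-only/degraded mode.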
func shouldTryAgain(err error) bool {
	if err == nil {
		return false
	}

	code := status.Code(unwrapErr(err))
	if code == codes.Unavailable || code == codes.Unimplemented {
		return true
	}

	errText := err.Error()
	if strings.Contains(errText, "not found") ||
		strings.Contains(errText, "shard is in read-only mode") ||
		strings.Contains(errText, "shard is in degraded mode") {
		return true
	}

	return false
}

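// unwrapErr returns the innermost error of a wrapped error chain.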
func unwrapErr(err error) error {
	for e := errors.Unwrap(err); e != nil; e = errors.Unwrap(err) {
		err = e
	}

	return err
}

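// metaToKV converts a meta map into the KeyValue list used by the tree service protocol.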
func metaToKV(meta map[string]string) []*grpcService.KeyValue {
	result := make([]*grpcService.KeyValue, 0, len(meta))

	for key, value := range meta {
		result = append(result, &grpcService.KeyValue{Key: key, Value: []byte(value)})
	}

	return result
}

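// getBearer extracts the bearer token from the request context and returns its
// binary form if the token either allows impersonation or was issued by the
// bucket owner; otherwise it returns nil.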
func getBearer(ctx context.Context, bktInfo *data.BucketInfo) []byte {
	if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil {
		if bd.Gate.BearerToken != nil {
			if bd.Gate.BearerToken.Impersonate() || bktInfo.Owner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
				return bd.Gate.BearerToken.Marshal()
			}
		}
	}
	return nil
}

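// handleError translates well-known tree service error texts into
// tree.ErrNodeNotFound and tree.ErrNodeAccessDenied; any other error is wrapped with msg.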
func handleError(msg string, err error) error {
	if err == nil {
		return nil
	}
	if strings.Contains(err.Error(), "not found") {
		return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
	} else if strings.Contains(err.Error(), "is denied by") {
		return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
	}
	return fmt.Errorf("%s: %w", msg, err)
}