forked from TrueCloudLab/frostfs-http-gw
Compare commits
73 commits
support/v0...master
11965deb41
a95dc6c8c7
f39b3aa93a
6695ebe5a0
c6383fc135
5ded105c09
88e32ddd7f
007d278caa
7ec9b34d33
5470916361
c038957649
ce4ec032f9
4049255eed
2c95250f72
5ae75eb9d8
627294bf70
0ef3e18ee1
2e28b2ac85
a375af7d98
dc8d0d4ab3
7fa973b261
1ced82a714
49d6a27562
9a5a2239bd
8bc246f8f9
9b34413e17
e61b4867c9
84eb57475b
e26577e753
d219943542
add07a21ed
40568590c7
834d5b93e5
dbc6804d27
7d47e88e36
54eadc3c31
fa28f1ff82
cc69601b32
97ac638dff
0882d344a2
6fac6341c2
d9122e2093
6f64557a4b
2ccb43bc8c
202ef5cc54
1dfbe36eca
5be537321b
2c706bec71
d0f6baa44b
d7dbff1255
61d152ee6a
9765adf844
b8944adb65
f24f39ec92
f17f6747c4
01b9df83e6
c4fe718556
cdaab4feab
8a22991326
1776db289c
1f702ad2d8
adb95642d4
3844ac83e6
f7784db146
8c3c3782f5
37dbb29535
a945cdd42c
ad05f1eb82
9eeaf44163
15b65b521b
385f336a17
6c6fd0e9a5
cc37c34396
64 changed files with 3751 additions and 2014 deletions
@@ -1,4 +1,4 @@
FROM golang:1.19-alpine as basebuilder
FROM golang:1.21-alpine as basebuilder
RUN apk add --update make bash ca-certificates

FROM basebuilder as builder
.forgejo/workflows/builds.yml (new file)
@@ -0,0 +1,23 @@
on: [pull_request]

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.20', '1.21' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Build binary
        run: make

      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
.forgejo/workflows/dco.yml (new file)
@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'

      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
.forgejo/workflows/tests.yml (new file)
@@ -0,0 +1,41 @@
on: [pull_request]

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'
          cache: true

      - name: Install linters
        run: make lint-install

      - name: Run linters
        run: make lint

  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.20', '1.21' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test
.forgejo/workflows/vulncheck.yml (new file)
@@ -0,0 +1,21 @@
on: [pull_request]

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.21'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

      - name: Run govulncheck
        run: govulncheck ./...
.github/CODEOWNERS
@@ -1 +1 @@
* @alexvanin @KirillovDenis
* @alexvanin @dkirillov
.github/workflows/builds.yml (deleted)
@@ -1,67 +0,0 @@
name: Builds

on:
  pull_request:
    branches:
      - master
      - 'support/*'
    types: [opened, synchronize]
    paths-ignore:
      - '**/*.md'

jobs:
  build_cli:
    name: Build CLI
    runs-on: ubuntu-20.04

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Update Go modules
        run: make dep

      - name: Build CLI
        run: make

      - name: Check version
        run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi

  build_image:
    needs: build_cli
    name: Build Docker image
    runs-on: ubuntu-20.04

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Update Go modules
        run: make dep

      - name: Build Docker image
        run: make image
.github/workflows/codeql-analysis.yml (deleted)
@@ -1,67 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ master, 'support/*' ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master, 'support/*' ]
  schedule:
    - cron: '35 8 * * 1'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v2
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v2

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    # and modify them (or add more) to build your code if your project
    # uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v2
.github/workflows/dco.yml (deleted)
@@ -1,22 +0,0 @@
name: DCO check

on:
  pull_request:
    branches:
      - master
      - 'support/*'

jobs:
  commits_check_job:
    runs-on: ubuntu-latest
    name: Commits Check
    steps:
    - name: Get PR Commits
      id: 'get-pr-commits'
      uses: tim-actions/get-pr-commits@master
      with:
        token: ${{ secrets.GITHUB_TOKEN }}
    - name: DCO Check
      uses: tim-actions/dco@master
      with:
        commits: ${{ steps.get-pr-commits.outputs.commits }}
.github/workflows/tests.yml (deleted)
@@ -1,86 +0,0 @@
name: Tests

on:
  pull_request:
    branches:
      - master
      - 'support/*'
    types: [opened, synchronize]
    paths-ignore:
      - '**/*.md'

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          version: latest

  cover:
    name: Coverage
    runs-on: ubuntu-20.04

    env:
      CGO_ENABLED: 1
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Update Go modules
        run: make dep

      - name: Test and write coverage profile
        run: make cover

      - name: Upload coverage results to Codecov
        uses: codecov/codecov-action@v1
        with:
          fail_ci_if_error: false
          path_to_write_report: ./coverage.txt
          verbose: true

  tests:
    name: Tests
    runs-on: ubuntu-20.04
    strategy:
      matrix:
        go_versions: [ '1.18', '1.19' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: '${{ matrix.go_versions }}'

      - name: Restore Go modules from cache
        uses: actions/cache@v2
        with:
          path: /home/runner/go/pkg/mod
          key: deps-${{ hashFiles('go.sum') }}

      - name: Update Go modules
        run: make dep

      - name: Run tests
        run: make test
@@ -4,7 +4,7 @@
# options for analysis running
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 5m
  timeout: 15m

  # include test files or not, default is true
  tests: true
@@ -24,6 +24,16 @@ linters-settings:
  govet:
    # report about shadowed variables
    check-shadowing: false
  custom:
    truecloudlab-linters:
      path: bin/external_linters.so
      original-url: git.frostfs.info/TrueCloudLab/linters.git
      settings:
        noliteral:
          enable: true
          target-methods: ["Fatal"]
          disable-packages: ["req", "r"]
          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"

linters:
  enable:
@@ -45,6 +55,7 @@ linters:
    - gofmt
    - whitespace
    - goimports
    - truecloudlab-linters
  disable-all: true
  fast: false

@@ -37,6 +37,18 @@ repos:

  - repo: local
    hooks:
      - id: make-lint-install
        name: install linters
        entry: make lint-install
        language: system
        pass_filenames: false

      - id: make-lint
        name: run linters
        entry: make lint
        language: system
        pass_filenames: false

      - id: go-unit-tests
        name: go unit tests
        entry: make test
CHANGELOG.md
@@ -4,19 +4,85 @@ This document outlines major changes between releases.

## [Unreleased]

## [0.28.1] - 2024-01-24

### Added
- Multiple configs support (TrueCloudLab#12)
- Tree pool traversal limit (#92)
- Add new `reconnect_interval` config param (#100)

### Update from 0.28.0
See new `frostfs.tree_pool_max_attempts` config parameter.

### Fixed
- Fix possibility of panic during SIGHUP (#99)
- Handle query unescape and invalid bearer token errors (#107)
- Fix HTTP/2 requests (#110)

### Added
- Support client side object cut (#70)
- Add `frostfs.client_cut` config param
- Add `frostfs.buffer_max_size_for_put` config param
- Add bucket/container caching
- Disable homomorphic hash for PUT if it's disabled in container itself
- Add new `logger.destination` config param (#89)
- Add support namespaces (#91)

### Changed
- Update go version to 1.18 (TrueCloudLab#9)

### Removed

## [0.28.0] - Academy of Sciences - 2023-12-07

### Fixed
- `grpc` schemas in tree configuration (#62)
- `GetSubTree` failures (#67)
- Debian packaging (#69, #90)
- Get latest version of tree node (#85)

### Added
- Support dump metrics descriptions (#29)
- Support impersonate bearer token (#40, #45)
- Tracing support (#20, #44, #60)
- Object name resolving with tree service (#30)
- Metrics for current endpoint status (#77)
- Soft memory limit with `runtime.soft_memory_limit` (#72)
- Add selection of the node of the latest version of the object (#85)

### Changed
- Update prometheus to v1.15.0 (#35)
- Update go version to 1.19 (#50)
- Finish rebranding (#2)
- Use gate key to form object owner (#66)
- Move log messages to constants (#36)
- Uploader and downloader refactor (#73)

### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#59)

## [0.27.0] - Karpinsky - 2023-07-12

This is a first FrostFS HTTP Gateway release named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).

### Fixed
- Require only one healthy storage server to start (#7)
- Enable gate metrics (#38)
- `Too many pings` error (#61)

### Added
- Multiple configs support (#12)

### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#8)
- Update viper to v1.15.0 (#8)
- Update go version to 1.18 (#9)
- Errors have become more detailed (#18)
- Update system attribute names (#22)
- Separate integration tests with build tags (#24)
- Changed values for `frostfs_http_gw_state_health` metric (#32)

### Updating from v0.26.0
### Updating from neofs-http-gw v0.26.0

To set system attributes use updated headers
(you can use old ones for now, but their support will be dropped in the future releases):
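The 0.28.x entries above introduce several configuration parameters (`frostfs.tree_pool_max_attempts`, `reconnect_interval`, `frostfs.client_cut`, `frostfs.buffer_max_size_for_put`, `logger.destination`). A minimal YAML sketch of how they could be wired together is shown below; the parameter names come from the changelog entries, while the section layout and the values are illustrative assumptions rather than documented defaults.

```yaml
# Sketch only: names are taken from the 0.28.x changelog entries above;
# placement and values are assumptions, not the gateway's documented defaults.
logger:
  destination: stdout            # new `logger.destination` param (#89)

reconnect_interval: 1m           # assumed top-level placement for the param from #100

frostfs:
  client_cut: false              # client-side object cut (#70)
  buffer_max_size_for_put: 1048576
  tree_pool_max_attempts: 0      # tree pool traversal limit, see the 0.28.1 update note
```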
@@ -25,276 +91,13 @@ To set system attributes use updated headers
* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*`
* `X-Attribute-neofs-*` -> `X-Attribute-system-*`

## [0.26.0] - 2022-12-28

### Fixed
- ENV config example (#236)

### Added
- Support the `Date` header on upload (#214)
- Available routes specification (#216)
- Mention caching strategy in docs (#215)
- Add error response on attribute duplicates (#221)
- Multiple server listeners (#228)

### Removed
- Deprecated linters (#239)

### Updating from v0.25.1
Make sure your configuration is valid:

If you configure application using environment variables change:
* `HTTP_GW_LISTEN_ADDRESS` -> `HTTP_GW_SERVER_0_ADDRESS`
* `HTTP_GW_TLS_CERT_FILE` -> `HTTP_GW_SERVER_0_TLS_CERT_FILE` (and set `HTTP_GW_SERVER_0_TLS_ENABLED=true`)
* `HTTP_GW_TLS_KEY_FILE` -> `HTTP_GW_SERVER_0_TLS_KEY_FILE` (and set `HTTP_GW_SERVER_0_TLS_ENABLED=true`)

If you configure application using `.yaml` file change:
* `listen_address` -> `server.0.address`
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)

## [0.25.1] - 2022-11-30

### Fixed
- Download zip archive when `FilePath` is invalid (#222)
- Only one peer must be healthy to init pool (#233)

### Added
- Debian packaging (#223)
- Timeout for individual operations in streaming RPC (#234)

## [0.25.0] - 2022-10-31

### Added
- Config reloading on SIGHUP (#200, #208)
- Stop pool dial on SIGINT (#212)
- Makefile help (#213)

### Changed
- Update NeoFS error handling (#206)
- GitHub actions updates (#205, #209)
- Unified system attribute format for GET and HEAD (#213)

## [0.24.0] - 2022-09-14

### Fixed
- Fix expiration epoch calculation (#198)
- Fix panic on go1.19 (#188)

### Added
- Exposure of pool metrics (#179, #194)

### Changed
- Help doesn't print empty parameters (#186)
- Update version calculation (#190, #199)
- Update neofs-sdk-go (#196)
- Update go version in CI and docker (#197, #202)

## [0.23.0] - 2022-08-02

### Added
- New param to configure pool error threshold (#184)

### Changed
- Pprof and prometheus metrics configuration (#171)
- Drop GO111MODULES from builds (#182)

### Updating from v0.22.0
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
If you are using the command line flags you can skip this step.

## [0.22.0] - 2022-07-25

### Added
- Default params documentation (#172)
- Health metric (#175)

### Changed
- Version output (#169)
- Updated SDK Version (#178)

## [0.21.0] - 2022-06-20

### Fixed
- Downloading ZIP archive using streaming (#163)

### Added
- New make target to build app in docker (#159)

### Changed
- Increased buffer size for file uploading (#148)
- Updated linter version to v1.46.2 (#161)
- Updated CodeQL version to v2 (#158)


## [0.20.0] - 2022-04-29

### Fixed
- Get rid of data race on server shutdown (#145)
- Improved English in docs and comments (#153)
- Use `FilePath` to download zip (#150)

### Added
- Support container name NNS resolving (#142)

### Changed
- Updated docs (#133, #140)
- Increased default read/write timeouts (#154)
- Updated SDK (#137, #139)
- Updated go version to 1.17 (#143)
- Improved error messages (#144)

## [0.19.0] - 2022-03-16

### Fixed
- Uploading object with zero payload (#122)
- Different headers format in GET and HEAD (#125)
- Fixed project name in docs (#120)

### Added
- Support object attributes with spaces (#123)

### Changed
- Updated fasthttp to v1.34.0 (#129)
- Updated NeoFS SDK to v1.0.0-rc.3 (#126, #132)
- Refactored content type detecting (#128)


## [0.18.0] - 2021-12-10

### Fixed
- System headers format (#111)

### Added
- Different formats to set object's expiration: in epoch, duration, timestamp,
  RFC3339 (#108)
- Support of nodes priority (#115)

### Changed
- Updated testcontainers dependency (#100)

## [0.17.0] - 2021-11-15

Support of bulk file download with zip streams and various bug fixes.

### Fixed
- Allow canonical `X-Attribute-Neofs-*` headers (#87)
- Responses with error message now end with `\n` character (#105)
- Application does not require all neofs endpoints to be healthy at start now
  (#103)
- Application now tracks session token errors and recreates tokens in runtime
  (#95)

### Added
- Integration tests with [all-in-one](https://github.com/nspcc-dev/neofs-aio/)
  test containers (#85, #94)
- Bulk download support with zip streams (#92, #96)

## 0.16.1 (28 Jul 2021)

New features:
* logging requests (#77)
* HEAD methods for download routes (#76)

Improvements:
* updated sdk-go dependency (#82)

Bugs fixed:
* wrong NotFound status was used (#30)

## 0.16.0 (29 Jun 2021)

We update HTTP gateway with NEP-6 wallets support, YAML configuration files
and small fixes.

New features:
* YAML configuration file (#71)

Behavior changes:
* gateway key needs to be stored in a proper NEP-6 wallet now, `-k` option is
  no longer available, see `-w` and `-a` (#68)

Bugs fixed:
* downloads were not streamed leading to excessive memory usage (#67)
* Last-Modified header incorrectly used local time (#75)

## 0.15.2 (22 Jun 2021)

New features:
* Content-Type returned for object GET requests can now be taken from
  attributes (overriding autodetection, #65)

Behavior changes:
* grpc keepalive options can no longer be changed (#60)

Improvements:
* code refactoring (more reuse between different gateways, moved some code to
  sdk-go, #47, #46, #51, #62, #63)
* documentation updates and fixes (#53, #49, #55, #59)
* updated api-go dependency (#57)

Bugs fixed:
* `-k` option wasn't accepted for key although it was documented (#50)

## 0.15.1 (24 May 2021)

This important release makes HTTP gateway compatible with NeoFS node version
0.20.0.

Behavior changes:
* neofs-api-go was updated to 1.26.1, which contains some incompatible
  changes in underlying components (#39, #44)
* `neofs-http-gw` is consistently used now for repository, binary and image
  names (#43)

Improvements:
* minor code cleanups based on stricter set of linters (#41)
* updated README (#42)

## 0.15.0 (30 Apr 2021)

This is the first public release incorporating latest NeoFS protocol support
and fixing some bugs.

New features:
* upload support (#14, #13, #29)
* ephemeral keys (#26)
* TLS server support (#28)

Behavior changes:
* node weights can now be specified as simple numbers instead of percentages
  and gateway will calculate the proportion automatically (#27)
* attributes are converted now to `X-Attribute-*` headers when retrieving
  object from gate instead of `X-*` (#29)

Improvements:
* better Makefile (#16, #24, #33, #34)
* updated documentation (#16, #29, #35, #36)
* updated neofs-api-go to v1.25.0 (#17, #20)
* updated fasthttp to v1.23.0+ (#17, #29)
* refactoring, eliminating some dependencies (#20, #29)

Bugs fixed:
* gateway attempted to work with no NeoFS peers configured (#29)
* some invalid headers could be sent for attributes using non-ASCII or
  non-printable characters (#29)

## Older versions

Please refer to [Github
releases](https://github.com/nspcc-dev/neofs-http-gw/releases/) for older
releases.
This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.

[0.17.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.16.1...v0.17.0
[0.18.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.17.0...v0.18.0
[0.19.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.18.0...v0.19.0
[0.20.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.19.0...v0.20.0
[0.21.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.20.0...v0.21.0
[0.22.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.21.0...v0.22.0
[0.23.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.22.0...v0.23.0
[0.24.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.23.0...v0.24.0
[0.25.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.24.0...v0.25.0
[0.25.1]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.0...v0.25.1
[0.26.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.1...v0.26.0
[Unreleased]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.26.0...master
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...master
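Two of the migration notes above describe configuration layout changes only by listing the renamed keys. A rough YAML sketch that ties them together follows; key names come from the 0.26.0 and 0.23.0 notes, while addresses, paths, and boolean values are placeholders, not defaults.

```yaml
# Sketch of the layouts referenced in the migration notes above.
# Addresses, file paths and values are placeholders.
server:
  - address: 0.0.0.0:8080            # was `listen_address` (0.26.0 notes)
    tls:
      enabled: true                  # must now be enabled explicitly
      cert_file: /path/to/cert.pem   # was `tls.cert_file`
      key_file: /path/to/key.pem     # was `tls.key_file`

pprof:
  enabled: true                      # replaces the old `pprof` flag (0.23.0 notes)

prometheus:
  enabled: true                      # replaces the old `metrics` flag (0.23.0 notes)
```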
@@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:

- Check the open [issues](https://github.com/TrueCloudLab/frostfs-http-gw/issues) and
  [pull requests](https://github.com/TrueCloudLab/frostfs-http-gw/pulls) for existing
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and
  [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing
  discussions.

- Open an issue first, to discuss a new feature or enhancement.
@@ -27,20 +27,20 @@ Start by forking the `frostfs-http-gw` repository, make changes in a branch and
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:

### Set up your GitHub Repository
### Set up your git repository
Fork [FrostFS HTTP Gateway
upstream](https://github.com/TrueCloudLab/frostfs-http-gw/fork) source repository
upstream](https://git.frostfs.info/repo/fork/8) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).

```sh
$ git clone https://github.com/TrueCloudLab/frostfs-http-gw
$ git clone https://git.frostfs.info/<username>/frostfs-http-gw.git
```

### Set up git remote as ``upstream``
```sh
$ cd frostfs-http-gw
$ git remote add upstream https://github.com/TrueCloudLab/frostfs-http-gw
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
$ git fetch upstream
$ git merge upstream/master
...
@@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
```

### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
Makefile
@@ -2,17 +2,24 @@

REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
GO_VERSION ?= 1.20
LINT_VERSION ?= 1.54.0
TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
BUILD ?= $(shell date -u --iso=seconds)

HUB_IMAGE ?= truecloudlab/frostfs-http-gw
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

METRICS_DUMP_OUT ?= ./metrics-dump.json

OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache

# List of binaries to build. For now just one.
BINDIR = bin
DIRS = $(BINDIR)
BINS = $(BINDIR)/frostfs-http-gw
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))

.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean

@@ -25,13 +32,12 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \

# Make all binaries
all: $(BINS)

$(BINS): $(DIRS) dep
	@echo "⇒ Build $@"
	CGO_ENABLED=0 \
	go build -v -trimpath \
	-ldflags "-X main.Version=$(VERSION)" \
	-o $@ ./
	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))

$(DIRS):
	@echo "⇒ Ensure dir: $@"
@@ -84,7 +90,7 @@ image:
	--build-arg REPO=$(REPO) \
	--build-arg VERSION=$(VERSION) \
	--rm \
	-f Dockerfile \
	-f .docker/Dockerfile \
	-t $(HUB_IMAGE):$(HUB_TAG) .

# Push Docker image to the hub
@@ -99,12 +105,26 @@ dirty-image:
	--build-arg REPO=$(REPO) \
	--build-arg VERSION=$(VERSION) \
	--rm \
	-f Dockerfile.dirty \
	-f .docker/Dockerfile.dirty \
	-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .

# Install linters
lint-install:
	@mkdir -p $(TMP_DIR)
	@rm -rf $(TMP_DIR)/linters
	@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
	@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
	@rm -rf $(TMP_DIR)/linters
	@rmdir $(TMP_DIR) 2>/dev/null || true
	@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)

# Run linters
lint:
	@golangci-lint --timeout=5m run
	@if [ ! -d "$(LINT_DIR)" ]; then \
		echo "Run make lint-install"; \
		exit 1; \
	fi
	$(LINT_DIR)/golangci-lint --timeout=5m run

# Run linters in Docker
docker/lint:
@@ -143,4 +163,10 @@ debpackage:
debclean:
	dh clean

# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
	@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))


include help.mk
README.md
@@ -7,6 +7,8 @@

---
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw)
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange)
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)

# FrostFS HTTP Gateway

@@ -484,43 +486,26 @@ the corresponding header to the upload request. Accessing the ACL protected data
works the same way.

##### Example
In order to generate a bearer token, you need to know the container owner key and
In order to generate a bearer token, you need to have wallet (which will be used to sign the token) and
the address of the sender who will do the request to FrostFS (in our case, it's a gateway wallet address).

Suppose we have:
* **KxDgvEKzgSBPPfuVfw67oPQBSjidEiqTHURKSDL1R7yGaGYAeYnr** (container owner key)
* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner address)
* **BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K** (container id)
* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner (gateway address))

Firstly, we need to encode the container id and the sender address to base64 (now it's base58).
So use **base58** and **base64** utils.

1. Encoding container id:
```
$ echo 'BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K' | base58 --decode | base64
# output: mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg=
```

2. Encoding token owner id:
1. Encoding token owner id:
```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```

Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and save it to **bearer.json**:
2. Form a Bearer token (10000 is lifetime expiration in epoch) and save it to **bearer.json**:
```
{
  "body": {
    "eaclTable": {
      "version": {
        "major": 0,
        "minor": 0
      },
      "containerID": {
        "value": "mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg="
      },
      "records": []
    },
    "allowImpersonate": true,
    "ownerID": {
      "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
    },
@@ -534,11 +519,12 @@ Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and sav
}
```

Next, sign it with the container owner key:
3. Sign it with the wallet:
```
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w ./wallet.json
```
Encoding to base64 to use via the header:

4. Encode to base64 to use in header:
```
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
VERSION
@@ -1 +1 @@
v0.26.0
v0.28.1
app.go (deleted)
@@ -1,538 +0,0 @@
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/downloader"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/uploader"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/fasthttp/router"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/cli/input"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
|
||||
app struct {
|
||||
log *zap.Logger
|
||||
logLevel zap.AtomicLevel
|
||||
pool *pool.Pool
|
||||
owner *user.ID
|
||||
cfg *viper.Viper
|
||||
webServer *fasthttp.Server
|
||||
webDone chan struct{}
|
||||
resolver *resolver.ContainerResolver
|
||||
metrics *gateMetrics
|
||||
services []*metrics.Service
|
||||
settings *appSettings
|
||||
servers []Server
|
||||
}
|
||||
|
||||
appSettings struct {
|
||||
Uploader *uploader.Settings
|
||||
Downloader *downloader.Settings
|
||||
}
|
||||
|
||||
// App is an interface for the main gateway function.
|
||||
App interface {
|
||||
Wait()
|
||||
Serve(context.Context)
|
||||
}
|
||||
|
||||
// Option is an application option.
|
||||
Option func(a *app)
|
||||
|
||||
gateMetrics struct {
|
||||
logger *zap.Logger
|
||||
provider *metrics.GateMetrics
|
||||
mu sync.RWMutex
|
||||
enabled bool
|
||||
}
|
||||
)
|
||||
|
||||
// WithLogger returns Option to set a specific logger.
|
||||
func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
|
||||
return func(a *app) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
a.log = l
|
||||
a.logLevel = lvl
|
||||
}
|
||||
}
|
||||
|
||||
// WithConfig returns Option to use specific Viper configuration.
|
||||
func WithConfig(c *viper.Viper) Option {
|
||||
return func(a *app) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
a.cfg = c
|
||||
}
|
||||
}
|
||||
|
||||
func newApp(ctx context.Context, opt ...Option) App {
|
||||
var (
|
||||
key *ecdsa.PrivateKey
|
||||
err error
|
||||
)
|
||||
|
||||
a := &app{
|
||||
log: zap.L(),
|
||||
cfg: viper.GetViper(),
|
||||
webServer: new(fasthttp.Server),
|
||||
webDone: make(chan struct{}),
|
||||
}
|
||||
for i := range opt {
|
||||
opt[i](a)
|
||||
}
|
||||
|
||||
// -- setup FastHTTP server --
|
||||
a.webServer.Name = "frost-http-gw"
|
||||
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
|
||||
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
|
||||
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
|
||||
a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
|
||||
a.webServer.DisableHeaderNamesNormalizing = true
|
||||
a.webServer.NoDefaultServerHeader = true
|
||||
a.webServer.NoDefaultContentType = true
|
||||
a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
|
||||
a.webServer.DisablePreParseMultipartForm = true
|
||||
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
|
||||
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
|
||||
key, err = getFrostFSKey(a)
|
||||
if err != nil {
|
||||
a.log.Fatal("failed to get frostfs credentials", zap.Error(err))
|
||||
}
|
||||
|
||||
var owner user.ID
|
||||
user.IDFromKey(&owner, key.PublicKey)
|
||||
a.owner = &owner
|
||||
|
||||
var prm pool.InitParameters
|
||||
prm.SetKey(key)
|
||||
prm.SetNodeDialTimeout(a.cfg.GetDuration(cfgConTimeout))
|
||||
prm.SetNodeStreamTimeout(a.cfg.GetDuration(cfgStreamTimeout))
|
||||
prm.SetHealthcheckTimeout(a.cfg.GetDuration(cfgReqTimeout))
|
||||
prm.SetClientRebalanceInterval(a.cfg.GetDuration(cfgRebalance))
|
||||
prm.SetErrorThreshold(a.cfg.GetUint32(cfgPoolErrorThreshold))
|
||||
|
||||
for i := 0; ; i++ {
|
||||
address := a.cfg.GetString(cfgPeers + "." + strconv.Itoa(i) + ".address")
|
||||
weight := a.cfg.GetFloat64(cfgPeers + "." + strconv.Itoa(i) + ".weight")
|
||||
priority := a.cfg.GetInt(cfgPeers + "." + strconv.Itoa(i) + ".priority")
|
||||
if address == "" {
|
||||
break
|
||||
}
|
||||
if weight <= 0 { // unspecified or wrong
|
||||
weight = 1
|
||||
}
|
||||
if priority <= 0 { // unspecified or wrong
|
||||
priority = 1
|
||||
}
|
||||
prm.AddNode(pool.NewNodeParam(priority, address, weight))
|
||||
a.log.Info("add connection", zap.String("address", address),
|
||||
zap.Float64("weight", weight), zap.Int("priority", priority))
|
||||
}
|
||||
|
||||
a.pool, err = pool.NewPool(prm)
|
||||
if err != nil {
|
||||
a.log.Fatal("failed to create connection pool", zap.Error(err))
|
||||
}
|
||||
|
||||
err = a.pool.Dial(ctx)
|
||||
if err != nil {
|
||||
a.log.Fatal("failed to dial pool", zap.Error(err))
|
||||
}
|
||||
|
||||
a.initAppSettings()
|
||||
a.initResolver()
|
||||
a.initMetrics()
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *app) initAppSettings() {
|
||||
a.settings = &appSettings{
|
||||
Uploader: &uploader.Settings{},
|
||||
Downloader: &downloader.Settings{},
|
||||
}
|
||||
|
||||
a.updateSettings()
|
||||
}
|
||||
|
||||
func (a *app) initResolver() {
|
||||
var err error
|
||||
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
||||
if err != nil {
|
||||
a.log.Fatal("failed to create resolver", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
||||
resolveCfg := &resolver.Config{
|
||||
FrostFS: resolver.NewFrostFSResolver(a.pool),
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
}
|
||||
|
||||
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
||||
if resolveCfg.RPCAddress == "" {
|
||||
order = remove(order, resolver.NNSResolver)
|
||||
a.log.Warn(fmt.Sprintf("resolver '%s' won't be used since '%s' isn't provided", resolver.NNSResolver, cfgRPCEndpoint))
|
||||
}
|
||||
|
||||
if len(order) == 0 {
|
||||
a.log.Info("container resolver will be disabled because of resolvers 'resolver_order' is empty")
|
||||
}
|
||||
|
||||
return order, resolveCfg
|
||||
}
|
||||
|
||||
func (a *app) initMetrics() {
|
||||
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
|
||||
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.metrics.SetHealth(metrics.HealthStatusStarting)
|
||||
}
|
||||
|
||||
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
|
||||
if !enabled {
|
||||
logger.Warn("metrics are disabled")
|
||||
}
|
||||
return &gateMetrics{
|
||||
logger: logger,
|
||||
provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *gateMetrics) SetEnabled(enabled bool) {
|
||||
if !enabled {
|
||||
m.logger.Warn("metrics are disabled")
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.enabled = enabled
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *gateMetrics) SetHealth(status metrics.HealthStatus) {
|
||||
m.mu.RLock()
|
||||
if !m.enabled {
|
||||
m.mu.RUnlock()
|
||||
return
|
||||
}
|
||||
m.mu.RUnlock()
|
||||
|
||||
m.provider.SetHealth(status)
|
||||
}
|
||||
|
||||
func (m *gateMetrics) Shutdown() {
|
||||
m.mu.Lock()
|
||||
if m.enabled {
|
||||
m.provider.SetHealth(metrics.HealthStatusShuttingDown)
|
||||
m.enabled = false
|
||||
}
|
||||
m.provider.Unregister()
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func remove(list []string, element string) []string {
|
||||
for i, item := range list {
|
||||
if item == element {
|
||||
return append(list[:i], list[i+1:]...)
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func getFrostFSKey(a *app) (*ecdsa.PrivateKey, error) {
|
||||
walletPath := a.cfg.GetString(cfgWalletPath)
|
||||
|
||||
if len(walletPath) == 0 {
|
||||
a.log.Info("no wallet path specified, creating ephemeral key automatically for this run")
|
||||
key, err := keys.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &key.PrivateKey, nil
|
||||
}
|
||||
w, err := wallet.NewWalletFromFile(walletPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var password *string
|
||||
if a.cfg.IsSet(cfgWalletPassphrase) {
|
||||
pwd := a.cfg.GetString(cfgWalletPassphrase)
|
||||
password = &pwd
|
||||
}
|
||||
|
||||
address := a.cfg.GetString(cfgWalletAddress)
|
||||
|
||||
return getKeyFromWallet(w, address, password)
|
||||
}
|
||||
|
||||
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*ecdsa.PrivateKey, error) {
|
||||
var addr util.Uint160
|
||||
var err error
|
||||
|
||||
if addrStr == "" {
|
||||
addr = w.GetChangeAddress()
|
||||
} else {
|
||||
addr, err = flags.ParseAddress(addrStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid address")
|
||||
}
|
||||
}
|
||||
|
||||
acc := w.GetAccount(addr)
|
||||
if acc == nil {
|
||||
return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr)
|
||||
}
|
||||
|
||||
if password == nil {
|
||||
pwd, err := input.ReadPassword("Enter password > ")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't read password")
|
||||
}
|
||||
password = &pwd
|
||||
}
|
||||
|
||||
if err := acc.Decrypt(*password, w.Scrypt); err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
|
||||
}
|
||||
|
||||
return &acc.PrivateKey().PrivateKey, nil
|
||||
}
|
||||
|
||||
func (a *app) Wait() {
|
||||
a.log.Info("starting application", zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
|
||||
|
||||
a.setHealthStatus()
|
||||
|
||||
<-a.webDone // wait for web-server to be stopped
|
||||
}
|
||||
|
||||
func (a *app) setHealthStatus() {
|
||||
a.metrics.SetHealth(metrics.HealthStatusReady)
|
||||
}
|
||||
|
||||
func (a *app) Serve(ctx context.Context) {
|
||||
uploadRoutes := uploader.New(ctx, a.AppParams(), a.settings.Uploader)
|
||||
downloadRoutes := downloader.New(ctx, a.AppParams(), a.settings.Downloader)
|
||||
|
||||
// Configure router.
|
||||
a.configureRouter(uploadRoutes, downloadRoutes)
|
||||
|
||||
a.startServices()
|
||||
a.initServers(ctx)
|
||||
|
||||
for i := range a.servers {
|
||||
go func(i int) {
|
||||
a.log.Info("starting server", zap.String("address", a.servers[i].Address()))
|
||||
if err := a.webServer.Serve(a.servers[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||
a.log.Fatal("listen and serve", zap.Error(err))
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGHUP)
|
||||
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break LOOP
|
||||
case <-sigs:
|
||||
a.configReload()
|
||||
}
|
||||
}
|
||||
|
||||
a.log.Info("shutting down web server", zap.Error(a.webServer.Shutdown()))
|
||||
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
|
||||
close(a.webDone)
|
||||
}
|
||||
|
||||
func (a *app) configReload() {
|
||||
a.log.Info("SIGHUP config reload started")
|
||||
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
|
||||
a.log.Warn("failed to reload config because it's missed")
|
||||
return
|
||||
}
|
||||
if err := readInConfig(a.cfg); err != nil {
|
||||
a.log.Warn("failed to reload config", zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if lvl, err := getLogLevel(a.cfg); err != nil {
|
||||
a.log.Warn("log level won't be updated", zap.Error(err))
|
||||
} else {
|
||||
a.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||
a.log.Warn("failed to update resolvers", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := a.updateServers(); err != nil {
|
||||
a.log.Warn("failed to reload server parameters", zap.Error(err))
|
||||
}
|
||||
|
||||
a.stopServices()
|
||||
a.startServices()
|
||||
|
||||
a.updateSettings()
|
||||
|
||||
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.setHealthStatus()
|
||||
|
||||
a.log.Info("SIGHUP config reload completed")
|
||||
}
|
||||
|
||||
func (a *app) updateSettings() {
|
||||
a.settings.Uploader.SetDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
|
||||
a.settings.Downloader.SetZipCompression(a.cfg.GetBool(cfgZipCompression))
|
||||
}
|
||||
|
||||
func (a *app) startServices() {
|
||||
pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
|
||||
pprofService := metrics.NewPprofService(a.log, pprofConfig)
|
||||
a.services = append(a.services, pprofService)
|
||||
go pprofService.Start()
|
||||
|
||||
prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
|
||||
prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
|
||||
a.services = append(a.services, prometheusService)
|
||||
go prometheusService.Start()
|
||||
}
|
||||
|
||||
func (a *app) stopServices() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
for _, svc := range a.services {
|
||||
svc.ShutDown(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) configureRouter(uploadRoutes *uploader.Uploader, downloadRoutes *downloader.Downloader) {
|
||||
r := router.New()
|
||||
r.RedirectTrailingSlash = true
|
||||
r.NotFound = func(r *fasthttp.RequestCtx) {
|
||||
response.Error(r, "Not found", fasthttp.StatusNotFound)
|
||||
}
|
||||
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
|
||||
response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
|
||||
}
|
||||
r.POST("/upload/{cid}", a.logger(uploadRoutes.Upload))
|
||||
a.log.Info("added path /upload/{cid}")
|
||||
r.GET("/get/{cid}/{oid}", a.logger(downloadRoutes.DownloadByAddress))
|
||||
r.HEAD("/get/{cid}/{oid}", a.logger(downloadRoutes.HeadByAddress))
|
||||
a.log.Info("added path /get/{cid}/{oid}")
|
||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.DownloadByAttribute))
|
||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.HeadByAttribute))
|
||||
a.log.Info("added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}")
|
||||
r.GET("/zip/{cid}/{prefix:*}", a.logger(downloadRoutes.DownloadZipped))
|
||||
a.log.Info("added path /zip/{cid}/{prefix}")
|
||||
|
||||
a.webServer.Handler = r.Handler
|
||||
}
|
||||
|
||||
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
return func(ctx *fasthttp.RequestCtx) {
|
||||
a.log.Info("request", zap.String("remote", ctx.RemoteAddr().String()),
|
||||
zap.ByteString("method", ctx.Method()),
|
||||
zap.ByteString("path", ctx.Path()),
|
||||
zap.ByteString("query", ctx.QueryArgs().QueryString()),
|
||||
zap.Uint64("id", ctx.ID()))
|
||||
h(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) AppParams() *utils.AppParams {
|
||||
return &utils.AppParams{
|
||||
Logger: a.log,
|
||||
Pool: a.pool,
|
||||
Owner: a.owner,
|
||||
Resolver: a.resolver,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) initServers(ctx context.Context) {
|
||||
serversInfo := fetchServers(a.cfg)
|
||||
|
||||
a.servers = make([]Server, 0, len(serversInfo))
|
||||
for _, serverInfo := range serversInfo {
|
||||
fields := []zap.Field{
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
}
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.log.Warn("failed to add server", append(fields, zap.Error(err))...)
|
||||
continue
|
||||
}
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info("add server", fields...)
|
||||
}
|
||||
|
||||
if len(a.servers) == 0 {
|
||||
a.log.Fatal("no healthy servers")
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) updateServers() error {
|
||||
serversInfo := fetchServers(a.cfg)
|
||||
|
||||
var found bool
|
||||
for _, serverInfo := range serversInfo {
|
||||
index := a.serverIndex(serverInfo.Address)
|
||||
if index == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err := a.servers[index].UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
return fmt.Errorf("failed to update tls certs: %w", err)
|
||||
}
|
||||
}
|
||||
found = true
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("invalid servers configuration: no known server found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *app) serverIndex(address string) int {
|
||||
for i := range a.servers {
|
||||
if a.servers[i].Address() == address {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
cmd/http-gw/app.go (new file)
@@ -0,0 +1,819 @@
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
"github.com/fasthttp/router"
|
||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||
"github.com/nspcc-dev/neo-go/cli/input"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
type (
|
||||
app struct {
|
||||
ctx context.Context
|
||||
log *zap.Logger
|
||||
logLevel zap.AtomicLevel
|
||||
pool *pool.Pool
|
||||
treePool *treepool.Pool
|
||||
key *keys.PrivateKey
|
||||
owner *user.ID
|
||||
cfg *viper.Viper
|
||||
webServer *fasthttp.Server
|
||||
webDone chan struct{}
|
||||
resolver *resolver.ContainerResolver
|
||||
metrics *gateMetrics
|
||||
services []*metrics.Service
|
||||
settings *appSettings
|
||||
|
||||
servers []Server
|
||||
unbindServers []ServerInfo
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// App is an interface for the main gateway function.
|
||||
App interface {
|
||||
Wait()
|
||||
Serve()
|
||||
}
|
||||
|
||||
// Option is an application option.
|
||||
Option func(a *app)
|
||||
|
||||
gateMetrics struct {
|
||||
logger *zap.Logger
|
||||
provider *metrics.GateMetrics
|
||||
mu sync.RWMutex
|
||||
enabled bool
|
||||
}
|
||||
|
||||
// appSettings stores reloading parameters, so it has to provide getters and setters which use RWMutex.
|
||||
appSettings struct {
|
||||
reconnectInterval time.Duration
|
||||
|
||||
mu sync.RWMutex
|
||||
defaultTimestamp bool
|
||||
zipCompression bool
|
||||
clientCut bool
|
||||
bufferMaxSizeForPut uint64
|
||||
namespaceHeader string
|
||||
defaultNamespaces []string
|
||||
}
|
||||
)
|
||||
|
||||
// WithLogger returns Option to set a specific logger.
|
||||
func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
|
||||
return func(a *app) {
|
||||
if l == nil {
|
||||
return
|
||||
}
|
||||
a.log = l
|
||||
a.logLevel = lvl
|
||||
}
|
||||
}
|
||||
|
||||
// WithConfig returns Option to use specific Viper configuration.
|
||||
func WithConfig(c *viper.Viper) Option {
|
||||
return func(a *app) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
a.cfg = c
|
||||
}
|
||||
}
|
||||
|
||||
func newApp(ctx context.Context, opt ...Option) App {
|
||||
a := &app{
|
||||
ctx: ctx,
|
||||
log: zap.L(),
|
||||
cfg: viper.GetViper(),
|
||||
webServer: new(fasthttp.Server),
|
||||
webDone: make(chan struct{}),
|
||||
}
|
||||
for i := range opt {
|
||||
opt[i](a)
|
||||
}
|
||||
|
||||
// -- setup FastHTTP server --
|
||||
a.webServer.Name = "frost-http-gw"
|
||||
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
|
||||
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
|
||||
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
|
||||
a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
|
||||
a.webServer.DisableHeaderNamesNormalizing = true
|
||||
a.webServer.NoDefaultServerHeader = true
|
||||
a.webServer.NoDefaultContentType = true
|
||||
a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
|
||||
a.webServer.DisablePreParseMultipartForm = true
|
||||
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
|
||||
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
|
||||
a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg)
|
||||
|
||||
var owner user.ID
|
||||
user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
|
||||
a.owner = &owner
|
||||
|
||||
a.setRuntimeParameters()
|
||||
|
||||
a.initAppSettings()
|
||||
a.initResolver()
|
||||
a.initMetrics()
|
||||
a.initTracing(ctx)
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (s *appSettings) DefaultTimestamp() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.defaultTimestamp
|
||||
}
|
||||
|
||||
func (s *appSettings) setDefaultTimestamp(val bool) {
|
||||
s.mu.Lock()
|
||||
s.defaultTimestamp = val
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) ZipCompression() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.zipCompression
|
||||
}
|
||||
|
||||
func (s *appSettings) setZipCompression(val bool) {
|
||||
s.mu.Lock()
|
||||
s.zipCompression = val
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) ClientCut() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.clientCut
|
||||
}
|
||||
|
||||
func (s *appSettings) setClientCut(val bool) {
|
||||
s.mu.Lock()
|
||||
s.clientCut = val
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) BufferMaxSizeForPut() uint64 {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.bufferMaxSizeForPut
|
||||
}
|
||||
|
||||
func (s *appSettings) setBufferMaxSizeForPut(val uint64) {
|
||||
s.mu.Lock()
|
||||
s.bufferMaxSizeForPut = val
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (a *app) initAppSettings() {
|
||||
a.settings = &appSettings{
|
||||
reconnectInterval: fetchReconnectInterval(a.cfg),
|
||||
}
|
||||
a.updateSettings()
|
||||
}
|
||||
|
||||
func (a *app) initResolver() {
|
||||
var err error
|
||||
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
||||
if err != nil {
|
||||
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
||||
resolveCfg := &resolver.Config{
|
||||
FrostFS: resolver.NewFrostFSResolver(a.pool),
|
||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||
Settings: a.settings,
|
||||
}
|
||||
|
||||
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
||||
if resolveCfg.RPCAddress == "" {
|
||||
order = remove(order, resolver.NNSResolver)
|
||||
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||
}
|
||||
|
||||
if len(order) == 0 {
|
||||
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
|
||||
}
|
||||
|
||||
return order, resolveCfg
|
||||
}
|
||||
|
||||
func (a *app) initMetrics() {
|
||||
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
|
||||
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.metrics.SetHealth(metrics.HealthStatusStarting)
|
||||
}
|
||||
|
||||
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
|
||||
if !enabled {
|
||||
logger.Warn(logs.MetricsAreDisabled)
|
||||
}
|
||||
return &gateMetrics{
|
||||
logger: logger,
|
||||
provider: provider,
|
||||
enabled: enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *gateMetrics) isEnabled() bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
|
||||
return m.enabled
|
||||
}
|
||||
|
||||
func (m *gateMetrics) SetEnabled(enabled bool) {
|
||||
if !enabled {
|
||||
m.logger.Warn(logs.MetricsAreDisabled)
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.enabled = enabled
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *gateMetrics) SetHealth(status metrics.HealthStatus) {
|
||||
if !m.isEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
m.provider.SetHealth(status)
|
||||
}
|
||||
|
||||
func (m *gateMetrics) SetVersion(ver string) {
|
||||
if !m.isEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
m.provider.SetVersion(ver)
|
||||
}
|
||||
|
||||
func (m *gateMetrics) Shutdown() {
|
||||
m.mu.Lock()
|
||||
if m.enabled {
|
||||
m.provider.SetHealth(metrics.HealthStatusShuttingDown)
|
||||
m.enabled = false
|
||||
}
|
||||
m.provider.Unregister()
|
||||
m.mu.Unlock()
|
||||
}
|
||||
|
||||
func (m *gateMetrics) MarkHealthy(endpoint string) {
|
||||
if !m.isEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
m.provider.MarkHealthy(endpoint)
|
||||
}
|
||||
|
||||
func (m *gateMetrics) MarkUnhealthy(endpoint string) {
|
||||
if !m.isEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
m.provider.MarkUnhealthy(endpoint)
|
||||
}
|
||||
|
||||
func remove(list []string, element string) []string {
|
||||
for i, item := range list {
|
||||
if item == element {
|
||||
return append(list[:i], list[i+1:]...)
|
||||
}
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error) {
|
||||
walletPath := cfg.GetString(cfgWalletPath)
|
||||
|
||||
if len(walletPath) == 0 {
|
||||
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
|
||||
key, err := keys.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
w, err := wallet.NewWalletFromFile(walletPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var password *string
|
||||
if cfg.IsSet(cfgWalletPassphrase) {
|
||||
pwd := cfg.GetString(cfgWalletPassphrase)
|
||||
password = &pwd
|
||||
}
|
||||
|
||||
address := cfg.GetString(cfgWalletAddress)
|
||||
|
||||
return getKeyFromWallet(w, address, password)
|
||||
}
|
||||
|
||||
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys.PrivateKey, error) {
|
||||
var addr util.Uint160
|
||||
var err error
|
||||
|
||||
if addrStr == "" {
|
||||
addr = w.GetChangeAddress()
|
||||
} else {
|
||||
addr, err = flags.ParseAddress(addrStr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid address")
|
||||
}
|
||||
}
|
||||
|
||||
acc := w.GetAccount(addr)
|
||||
if acc == nil {
|
||||
return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr)
|
||||
}
|
||||
|
||||
if password == nil {
|
||||
pwd, err := input.ReadPassword("Enter password > ")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't read password")
|
||||
}
|
||||
password = &pwd
|
||||
}
|
||||
|
||||
if err := acc.Decrypt(*password, w.Scrypt); err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
|
||||
}
|
||||
|
||||
return acc.PrivateKey(), nil
|
||||
}
|
||||
|
||||
func (a *app) Wait() {
|
||||
a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
|
||||
|
||||
a.metrics.SetVersion(Version)
|
||||
a.setHealthStatus()
|
||||
|
||||
<-a.webDone // wait for web-server to be stopped
|
||||
}
|
||||
|
||||
func (a *app) setHealthStatus() {
|
||||
a.metrics.SetHealth(metrics.HealthStatusReady)
|
||||
}
|
||||
|
||||
func (a *app) Serve() {
|
||||
handler := handler.New(a.AppParams(), a.settings, tree.NewTree(services.NewPoolWrapper(a.treePool)))
|
||||
|
||||
// Configure router.
|
||||
a.configureRouter(handler)
|
||||
|
||||
a.startServices()
|
||||
a.initServers(a.ctx)
|
||||
|
||||
servs := a.getServers()
|
||||
|
||||
for i := range servs {
|
||||
go func(i int) {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
|
||||
if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||
a.metrics.MarkUnhealthy(servs[i].Address())
|
||||
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
if len(a.unbindServers) != 0 {
|
||||
a.scheduleReconnect(a.ctx, a.webServer)
|
||||
}
|
||||
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGHUP)
|
||||
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-a.ctx.Done():
|
||||
break LOOP
|
||||
case <-sigs:
|
||||
a.configReload(a.ctx)
|
||||
}
|
||||
}
|
||||
|
||||
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
|
||||
|
||||
a.metrics.Shutdown()
|
||||
a.stopServices()
|
||||
a.shutdownTracing()
|
||||
|
||||
close(a.webDone)
|
||||
}
|
||||
|
||||
func (a *app) shutdownTracing() {
|
||||
const tracingShutdownTimeout = 5 * time.Second
|
||||
shdnCtx, cancel := context.WithTimeout(context.Background(), tracingShutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
if err := tracing.Shutdown(shdnCtx); err != nil {
|
||||
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) configReload(ctx context.Context) {
|
||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
|
||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
|
||||
return
|
||||
}
|
||||
if err := readInConfig(a.cfg); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
|
||||
return
|
||||
}
|
||||
|
||||
if lvl, err := getLogLevel(a.cfg); err != nil {
|
||||
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
|
||||
} else {
|
||||
a.logLevel.SetLevel(lvl)
|
||||
}
|
||||
|
||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
|
||||
}
|
||||
|
||||
if err := a.updateServers(); err != nil {
|
||||
a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
|
||||
}
|
||||
|
||||
a.setRuntimeParameters()
|
||||
|
||||
a.stopServices()
|
||||
a.startServices()
|
||||
|
||||
a.updateSettings()
|
||||
|
||||
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
|
||||
a.initTracing(ctx)
|
||||
a.setHealthStatus()
|
||||
|
||||
a.log.Info(logs.SIGHUPConfigReloadCompleted)
|
||||
}
|
||||
|
||||
func (a *app) updateSettings() {
|
||||
a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
|
||||
a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
|
||||
a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
|
||||
a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
|
||||
a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
|
||||
a.settings.setDefaultNamespaces(a.cfg.GetStringSlice(cfgResolveDefaultNamespaces))
|
||||
}
|
||||
|
||||
func (a *app) startServices() {
|
||||
pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
|
||||
pprofService := metrics.NewPprofService(a.log, pprofConfig)
|
||||
a.services = append(a.services, pprofService)
|
||||
go pprofService.Start()
|
||||
|
||||
prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
|
||||
prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
|
||||
a.services = append(a.services, prometheusService)
|
||||
go prometheusService.Start()
|
||||
}
|
||||
|
||||
func (a *app) stopServices() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||
defer cancel()
|
||||
|
||||
for _, svc := range a.services {
|
||||
svc.ShutDown(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) configureRouter(handler *handler.Handler) {
|
||||
r := router.New()
|
||||
r.RedirectTrailingSlash = true
|
||||
r.NotFound = func(r *fasthttp.RequestCtx) {
|
||||
response.Error(r, "Not found", fasthttp.StatusNotFound)
|
||||
}
|
||||
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
|
||||
response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
r.POST("/upload/{cid}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.Upload)))))
|
||||
a.log.Info(logs.AddedPathUploadCid)
|
||||
r.GET("/get/{cid}/{oid:*}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAddressOrBucketName)))))
|
||||
r.HEAD("/get/{cid}/{oid:*}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAddressOrBucketName)))))
|
||||
a.log.Info(logs.AddedPathGetCidOid)
|
||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAttribute)))))
|
||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAttribute)))))
|
||||
a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
|
||||
r.GET("/zip/{cid}/{prefix:*}", a.logger(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadZipped)))))
|
||||
a.log.Info(logs.AddedPathZipCidPrefix)
|
||||
|
||||
a.webServer.Handler = r.Handler
|
||||
}
|
||||
|
||||
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
return func(req *fasthttp.RequestCtx) {
|
||||
a.log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
|
||||
zap.ByteString("method", req.Method()),
|
||||
zap.ByteString("path", req.Path()),
|
||||
zap.ByteString("query", req.QueryArgs().QueryString()),
|
||||
zap.Uint64("id", req.ID()))
|
||||
h(req)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
return func(req *fasthttp.RequestCtx) {
|
||||
appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
|
||||
if err != nil {
|
||||
a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Uint64("id", req.ID()), zap.Error(err))
|
||||
response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
utils.SetContextToRequest(appCtx, req)
|
||||
h(req)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
return func(req *fasthttp.RequestCtx) {
|
||||
appCtx := utils.GetContextFromRequest(req)
|
||||
|
||||
appCtx, span := utils.StartHTTPServerSpan(appCtx, req, "REQUEST")
|
||||
defer func() {
|
||||
utils.SetHTTPTraceInfo(appCtx, span, req)
|
||||
span.End()
|
||||
}()
|
||||
|
||||
appCtx = treepool.SetRequestID(appCtx, strconv.FormatUint(req.ID(), 10))
|
||||
|
||||
utils.SetContextToRequest(appCtx, req)
|
||||
h(req)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||
return func(req *fasthttp.RequestCtx) {
|
||||
appCtx := utils.GetContextFromRequest(req)
|
||||
|
||||
nsBytes := req.Request.Header.Peek(a.settings.NamespaceHeader())
|
||||
appCtx = middleware.SetNamespace(appCtx, string(nsBytes))
|
||||
|
||||
utils.SetContextToRequest(appCtx, req)
|
||||
h(req)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) AppParams() *utils.AppParams {
|
||||
return &utils.AppParams{
|
||||
Logger: a.log,
|
||||
Pool: a.pool,
|
||||
Owner: a.owner,
|
||||
Resolver: a.resolver,
|
||||
Cache: cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) initServers(ctx context.Context) {
|
||||
serversInfo := fetchServers(a.cfg, a.log)
|
||||
|
||||
a.servers = make([]Server, 0, len(serversInfo))
|
||||
for _, serverInfo := range serversInfo {
|
||||
fields := []zap.Field{
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
}
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.unbindServers = append(a.unbindServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
|
||||
continue
|
||||
}
|
||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info(logs.AddServer, fields...)
|
||||
}
|
||||
|
||||
if len(a.servers) == 0 {
|
||||
a.log.Fatal(logs.NoHealthyServers)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) updateServers() error {
|
||||
serversInfo := fetchServers(a.cfg, a.log)
|
||||
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
var found bool
|
||||
for _, serverInfo := range serversInfo {
|
||||
ser := a.getServer(serverInfo.Address)
|
||||
if ser != nil {
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
return fmt.Errorf("failed to update tls certs: %w", err)
|
||||
}
|
||||
found = true
|
||||
}
|
||||
} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("invalid servers configuration: no known server found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *app) getServers() []Server {
|
||||
a.mu.RLock()
|
||||
defer a.mu.RUnlock()
|
||||
return a.servers
|
||||
}
|
||||
|
||||
func (a *app) getServer(address string) Server {
|
||||
for i := range a.servers {
|
||||
if a.servers[i].Address() == address {
|
||||
return a.servers[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *app) updateUnbindServerInfo(info ServerInfo) bool {
|
||||
for i := range a.unbindServers {
|
||||
if a.unbindServers[i].Address == info.Address {
|
||||
a.unbindServers[i] = info
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *app) initTracing(ctx context.Context) {
|
||||
instanceID := ""
|
||||
if len(a.servers) > 0 {
|
||||
instanceID = a.servers[0].Address()
|
||||
}
|
||||
cfg := tracing.Config{
|
||||
Enabled: a.cfg.GetBool(cfgTracingEnabled),
|
||||
Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
|
||||
Endpoint: a.cfg.GetString(cfgTracingEndpoint),
|
||||
Service: "frostfs-http-gw",
|
||||
InstanceID: instanceID,
|
||||
Version: Version,
|
||||
}
|
||||
updated, err := tracing.Setup(ctx, cfg)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||
}
|
||||
if updated {
|
||||
a.log.Info(logs.TracingConfigUpdated)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *app) setRuntimeParameters() {
|
||||
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||
return
|
||||
}
|
||||
|
||||
softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
|
||||
previous := debug.SetMemoryLimit(softMemoryLimit)
|
||||
if softMemoryLimit != previous {
|
||||
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
|
||||
zap.Int64("new_value", softMemoryLimit),
|
||||
zap.Int64("old_value", previous))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *appSettings) NamespaceHeader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.namespaceHeader
|
||||
}
|
||||
|
||||
func (s *appSettings) setNamespaceHeader(nsHeader string) {
|
||||
s.mu.Lock()
|
||||
s.namespaceHeader = nsHeader
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||
s.mu.RLock()
|
||||
namespaces := s.defaultNamespaces
|
||||
s.mu.RUnlock()
|
||||
if slices.Contains(namespaces, ns) {
|
||||
return v2container.SysAttributeZoneDefault, true
|
||||
}
|
||||
|
||||
return ns + ".ns", false
|
||||
}
|
||||
|
||||
func (s *appSettings) setDefaultNamespaces(namespaces []string) {
|
||||
for i := range namespaces { // trim quotes so namespaces can be set via an env variable, e.g. `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
|
||||
namespaces[i] = strings.Trim(namespaces[i], "\"")
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.defaultNamespaces = namespaces
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) {
|
||||
go func() {
|
||||
t := time.NewTicker(a.settings.reconnectInterval)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
if a.tryReconnect(ctx, srv) {
|
||||
return
|
||||
}
|
||||
t.Reset(a.settings.reconnectInterval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
|
||||
a.log.Info(logs.ServerReconnecting)
|
||||
var failedServers []ServerInfo
|
||||
|
||||
for _, serverInfo := range a.unbindServers {
|
||||
fields := []zap.Field{
|
||||
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||
}
|
||||
|
||||
srv, err := newServer(ctx, serverInfo)
|
||||
if err != nil {
|
||||
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
|
||||
failedServers = append(failedServers, serverInfo)
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
|
||||
a.metrics.MarkHealthy(serverInfo.Address)
|
||||
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||
a.log.Warn(logs.ListenAndServe, zap.Error(err))
|
||||
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||
}
|
||||
}()
|
||||
|
||||
a.servers = append(a.servers, srv)
|
||||
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
|
||||
}
|
||||
|
||||
a.unbindServers = failedServers
|
||||
|
||||
return len(a.unbindServers) == 0
|
||||
}
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type putResponse struct {
|
||||
|
@ -38,20 +39,15 @@ type putResponse struct {
|
|||
|
||||
const (
|
||||
testContainerName = "friendly"
|
||||
versionWithNativeNames = "0.27.5"
|
||||
testListenAddress = "localhost:8082"
|
||||
testHost = "http://" + testListenAddress
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
rootCtx := context.Background()
|
||||
aioImage := "nspccdev/neofs-aio-testcontainer:"
|
||||
aioImage := "truecloudlab/frostfs-aio:"
|
||||
versions := []string{
|
||||
"0.29.0",
|
||||
"0.30.0",
|
||||
"0.32.0",
|
||||
"0.34.0",
|
||||
"latest",
|
||||
"1.2.7", // frostfs-storage v0.36.0 RC
|
||||
}
|
||||
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
|
||||
require.NoError(t, err)
|
||||
|
@ -73,6 +69,7 @@ func TestIntegration(t *testing.T) {
|
|||
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
|
||||
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
|
||||
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
|
||||
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })
|
||||
|
||||
cancel()
|
||||
server.Wait()
|
||||
|
@ -86,9 +83,9 @@ func runServer() (App, context.CancelFunc) {
|
|||
cancelCtx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
v := getDefaultConfig()
|
||||
l, lvl := newLogger(v)
|
||||
l, lvl := newStdoutLogger(zapcore.DebugLevel)
|
||||
application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
|
||||
go application.Serve(cancelCtx)
|
||||
go application.Serve()
|
||||
|
||||
return application, cancel
|
||||
}
|
||||
|
@ -97,10 +94,8 @@ func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, vers
|
|||
url := testHost + "/upload/" + CID.String()
|
||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
||||
|
||||
if version >= versionWithNativeNames {
|
||||
url = testHost + "/upload/" + testContainerName
|
||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
||||
}
|
||||
}
|
||||
|
||||
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
|
||||
|
@ -223,11 +218,9 @@ func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
|||
require.NoError(t, err)
|
||||
checkGetResponse(t, resp, content, attributes)
|
||||
|
||||
if version >= versionWithNativeNames {
|
||||
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
|
||||
require.NoError(t, err)
|
||||
checkGetResponse(t, resp, content, attributes)
|
||||
}
|
||||
}
|
||||
|
||||
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
|
||||
|
@ -277,11 +270,9 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
|||
require.NoError(t, err)
|
||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
||||
|
||||
if version >= versionWithNativeNames {
|
||||
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
|
||||
require.NoError(t, err)
|
||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
||||
}
|
||||
}
|
||||
|
||||
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
||||
|
@ -296,10 +287,8 @@ func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID us
|
|||
baseURL := testHost + "/zip/" + CID.String()
|
||||
makeZipTest(t, baseURL, names, contents)
|
||||
|
||||
if version >= versionWithNativeNames {
|
||||
baseURL = testHost + "/zip/" + testContainerName
|
||||
makeZipTest(t, baseURL, names, contents)
|
||||
}
|
||||
}
|
||||
|
||||
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
|
||||
|
@ -351,6 +340,40 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
|
|||
}
|
||||
}
|
||||
|
||||
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
||||
content := "content of file"
|
||||
attributes := map[string]string{
|
||||
"some-attr": "some-get-value",
|
||||
}
|
||||
|
||||
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||
require.NoError(t, err)
|
||||
req.Header.Set(defaultNamespaceHeader, "")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
checkGetResponse(t, resp, content, attributes)
|
||||
|
||||
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||
require.NoError(t, err)
|
||||
req.Header.Set(defaultNamespaceHeader, "root")
|
||||
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
checkGetResponse(t, resp, content, attributes)
|
||||
|
||||
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||
require.NoError(t, err)
|
||||
req.Header.Set(defaultNamespaceHeader, "root2")
|
||||
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||
|
||||
}
|
||||
|
||||
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: image,
|
||||
|
@ -407,15 +430,11 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
|
|||
|
||||
container.SetCreationTime(&cnr, time.Now())
|
||||
|
||||
if version >= versionWithNativeNames {
|
||||
var domain container.Domain
|
||||
domain.SetName(testContainerName)
|
||||
|
||||
// currently node in aio image knows nothing about new sys attributes
|
||||
// todo (@dkirillov): #2 use frostfs aio images that supports new attributes
|
||||
cnr.SetAttribute(containerv2.SysAttributeNameNeoFS, domain.Name())
|
||||
cnr.SetAttribute(containerv2.SysAttributeZoneNeoFS, domain.Zone())
|
||||
}
|
||||
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
|
||||
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
|
||||
|
||||
var waitPrm pool.WaitParams
|
||||
waitPrm.SetTimeout(15 * time.Second)
|
|
@ -9,9 +9,9 @@ import (
|
|||
func main() {
|
||||
globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
v := settings()
|
||||
logger, atomicLevel := newLogger(v)
|
||||
logger, atomicLevel := pickLogger(v)
|
||||
|
||||
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
|
||||
go application.Serve(globalContext)
|
||||
go application.Serve()
|
||||
application.Wait()
|
||||
}
|
|
@ -68,11 +68,13 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
|
|||
|
||||
if serverInfo.TLS.Enabled {
|
||||
if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||
return nil, fmt.Errorf("failed to update cert: %w", err)
|
||||
lnErr := ln.Close()
|
||||
return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
|
||||
}
|
||||
|
||||
ln = tls.NewListener(ln, &tls.Config{
|
||||
GetCertificate: tlsProvider.GetCertificate,
|
||||
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
|
||||
})
|
||||
}
|
||||
|
119
cmd/http-gw/server_test.go
Normal file
|
@ -0,0 +1,119 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
const (
|
||||
expHeaderKey = "Foo"
|
||||
expHeaderValue = "Bar"
|
||||
)
|
||||
|
||||
func TestHTTP2TLS(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
certPath, keyPath := prepareTestCerts(t)
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: http.HandlerFunc(testHandler),
|
||||
}
|
||||
|
||||
tlsListener, err := newServer(ctx, ServerInfo{
|
||||
Address: ":0",
|
||||
TLS: ServerTLSInfo{
|
||||
Enabled: true,
|
||||
CertFile: certPath,
|
||||
KeyFile: keyPath,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
|
||||
addr := fmt.Sprintf("https://localhost:%d", port)
|
||||
|
||||
go func() {
|
||||
_ = srv.Serve(tlsListener.Listener())
|
||||
}()
|
||||
|
||||
// Server is running, now send HTTP/2 request
|
||||
|
||||
tlsClientConfig := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
|
||||
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
|
||||
|
||||
req, err := http.NewRequest("GET", addr, nil)
|
||||
require.NoError(t, err)
|
||||
req.Header[expHeaderKey] = []string{expHeaderValue}
|
||||
|
||||
resp, err := cliHTTP1.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
|
||||
resp, err = cliHTTP2.Do(req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||
}
|
||||
|
||||
func testHandler(resp http.ResponseWriter, req *http.Request) {
|
||||
hdr, ok := req.Header[expHeaderKey]
|
||||
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
|
||||
resp.WriteHeader(http.StatusBadRequest)
|
||||
} else {
|
||||
resp.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}
|
||||
|
||||
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
require.NoError(t, err)
|
||||
|
||||
template := x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "localhost"},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||
BasicConstraintsValid: true,
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
dir := t.TempDir()
|
||||
certPath = path.Join(dir, "cert.pem")
|
||||
keyPath = path.Join(dir, "key.pem")
|
||||
|
||||
certFile, err := os.Create(certPath)
|
||||
require.NoError(t, err)
|
||||
defer certFile.Close()
|
||||
|
||||
keyFile, err := os.Create(keyPath)
|
||||
require.NoError(t, err)
|
||||
defer keyFile.Close()
|
||||
|
||||
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
||||
require.NoError(t, err)
|
||||
|
||||
return certPath, keyPath
|
||||
}
|
|
@ -1,7 +1,10 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
|
@ -10,12 +13,26 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/ssgreg/journald"
|
||||
"github.com/valyala/fasthttp"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
const (
|
||||
destinationStdout = "stdout"
|
||||
destinationJournald = "journald"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -28,11 +45,21 @@ const (
|
|||
|
||||
defaultPoolErrorThreshold uint32 = 100
|
||||
|
||||
defaultSoftMemoryLimit = math.MaxInt64
|
||||
|
||||
defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb
|
||||
|
||||
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||
|
||||
defaultReconnectInterval = time.Minute
|
||||
|
||||
cfgServer = "server"
|
||||
cfgTLSEnabled = "tls.enabled"
|
||||
cfgTLSCertFile = "tls.cert_file"
|
||||
cfgTLSKeyFile = "tls.key_file"
|
||||
|
||||
cfgReconnectInterval = "reconnect_interval"
|
||||
|
||||
// Web.
|
||||
cfgWebReadBufferSize = "web.read_buffer_size"
|
||||
cfgWebWriteBufferSize = "web.write_buffer_size"
|
||||
|
@ -47,6 +74,11 @@ const (
|
|||
cfgPprofEnabled = "pprof.enabled"
|
||||
cfgPprofAddress = "pprof.address"
|
||||
|
||||
// Tracing ...
|
||||
cfgTracingEnabled = "tracing.enabled"
|
||||
cfgTracingExporter = "tracing.exporter"
|
||||
cfgTracingEndpoint = "tracing.endpoint"
|
||||
|
||||
// Pool config.
|
||||
cfgConTimeout = "connect_timeout"
|
||||
cfgStreamTimeout = "stream_timeout"
|
||||
|
@ -56,6 +88,7 @@ const (
|
|||
|
||||
// Logger.
|
||||
cfgLoggerLevel = "logger.level"
|
||||
cfgLoggerDestination = "logger.destination"
|
||||
|
||||
// Wallet.
|
||||
cfgWalletPassphrase = "wallet.passphrase"
|
||||
|
@ -77,6 +110,25 @@ const (
|
|||
// Zip compression.
|
||||
cfgZipCompression = "zip.compression"
|
||||
|
||||
// Runtime.
|
||||
cfgSoftMemoryLimit = "runtime.soft_memory_limit"
|
||||
|
||||
// Enables client-side object preparing for PUT operations.
cfgClientCut = "frostfs.client_cut"
// Sets the max buffer size for reading the payload in PUT operations.
cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
// Parameters of requests to FrostFS.
// Sets the max number of attempts to make a successful tree request.
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
|
||||
|
||||
// Caching.
|
||||
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
|
||||
cfgBucketsCacheSize = "cache.buckets.size"
|
||||
|
||||
// Bucket resolving options.
|
||||
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
|
||||
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
|
||||
|
||||
// Command line args.
|
||||
cmdHelp = "help"
|
||||
cmdVersion = "version"
|
||||
|
@ -134,10 +186,14 @@ func settings() *viper.Viper {
|
|||
|
||||
// logger:
|
||||
v.SetDefault(cfgLoggerLevel, "debug")
|
||||
v.SetDefault(cfgLoggerDestination, "stdout")
|
||||
|
||||
// pool:
|
||||
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
|
||||
|
||||
// frostfs:
|
||||
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
|
||||
|
||||
// web-server:
|
||||
v.SetDefault(cfgWebReadBufferSize, 4096)
|
||||
v.SetDefault(cfgWebWriteBufferSize, 4096)
|
||||
|
@ -156,6 +212,10 @@ func settings() *viper.Viper {
|
|||
v.SetDefault(cfgPprofAddress, "localhost:8083")
|
||||
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
|
||||
|
||||
// resolve bucket
|
||||
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
|
||||
v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
|
||||
|
||||
// Binding flags
|
||||
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
|
||||
panic(err)
|
||||
|
@ -312,14 +372,28 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
}
|
||||
}()
|
||||
|
||||
if err = v.MergeConfig(cfgFile); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return v.MergeConfig(cfgFile)
|
||||
}
|
||||
|
||||
// pickLogger constructs a zap.Logger instance for the current application, choosing the destination from config.
|
||||
func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
dest := v.GetString(cfgLoggerDestination)
|
||||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(lvl)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(lvl)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
}
|
||||
|
||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||
// Panics on failure.
|
||||
//
|
||||
// Logger is built from zap's production logging configuration with:
|
||||
|
@ -330,12 +404,7 @@ func mergeConfig(v *viper.Viper, fileName string) error {
|
|||
// Logger records a stack trace for all messages at or above fatal level.
|
||||
//
|
||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||
func newLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
|
||||
lvl, err := getLogLevel(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
func newStdoutLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
|
||||
c := zap.NewProductionConfig()
|
||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
||||
c.Encoding = "console"
|
||||
|
@ -351,6 +420,25 @@ func newLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
|
|||
return l, c.Level
|
||||
}
|
||||
|
||||
func newJournaldLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
|
||||
c := zap.NewProductionConfig()
|
||||
c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
||||
|
||||
encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
|
||||
|
||||
core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||
coreWithContext := core.With([]zapcore.Field{
|
||||
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||
zapjournald.SyslogIdentifier(),
|
||||
zapjournald.SyslogPid(),
|
||||
})
|
||||
|
||||
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
|
||||
|
||||
return l, c.Level
|
||||
}
|
||||
|
||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||
var lvl zapcore.Level
|
||||
lvlStr := v.GetString(cfgLoggerLevel)
|
||||
|
@ -370,8 +458,18 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
|||
return lvl, nil
|
||||
}
|
||||
|
||||
func fetchServers(v *viper.Viper) []ServerInfo {
|
||||
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
||||
reconnect := cfg.GetDuration(cfgReconnectInterval)
|
||||
if reconnect <= 0 {
|
||||
reconnect = defaultReconnectInterval
|
||||
}
|
||||
|
||||
return reconnect
|
||||
}
|
||||
|
||||
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
||||
var servers []ServerInfo
|
||||
seen := make(map[string]struct{})
|
||||
|
||||
for i := 0; ; i++ {
|
||||
key := cfgServer + "." + strconv.Itoa(i) + "."
|
||||
|
@ -386,8 +484,181 @@ func fetchServers(v *viper.Viper) []ServerInfo {
|
|||
break
|
||||
}
|
||||
|
||||
if _, ok := seen[serverInfo.Address]; ok {
|
||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
|
||||
continue
|
||||
}
|
||||
seen[serverInfo.Address] = struct{}{}
|
||||
servers = append(servers, serverInfo)
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
|
||||
key, err := getFrostFSKey(cfg, logger)
|
||||
if err != nil {
|
||||
logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
|
||||
}
|
||||
|
||||
var prm pool.InitParameters
|
||||
var prmTree treepool.InitParameters
|
||||
|
||||
prm.SetKey(&key.PrivateKey)
|
||||
prmTree.SetKey(key)
|
||||
logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
|
||||
|
||||
for _, peer := range fetchPeers(logger, cfg) {
|
||||
prm.AddNode(peer)
|
||||
prmTree.AddNode(peer)
|
||||
}
|
||||
|
||||
connTimeout := cfg.GetDuration(cfgConTimeout)
|
||||
if connTimeout <= 0 {
|
||||
connTimeout = defaultConnectTimeout
|
||||
}
|
||||
prm.SetNodeDialTimeout(connTimeout)
|
||||
prmTree.SetNodeDialTimeout(connTimeout)
|
||||
|
||||
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
|
||||
if streamTimeout <= 0 {
|
||||
streamTimeout = defaultStreamTimeout
|
||||
}
|
||||
prm.SetNodeStreamTimeout(streamTimeout)
|
||||
prmTree.SetNodeStreamTimeout(streamTimeout)
|
||||
|
||||
healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
|
||||
if healthCheckTimeout <= 0 {
|
||||
healthCheckTimeout = defaultRequestTimeout
|
||||
}
|
||||
prm.SetHealthcheckTimeout(healthCheckTimeout)
|
||||
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
|
||||
|
||||
rebalanceInterval := cfg.GetDuration(cfgRebalance)
|
||||
if rebalanceInterval <= 0 {
|
||||
rebalanceInterval = defaultRebalanceTimer
|
||||
}
|
||||
prm.SetClientRebalanceInterval(rebalanceInterval)
|
||||
prmTree.SetClientRebalanceInterval(rebalanceInterval)
|
||||
|
||||
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
|
||||
if errorThreshold <= 0 {
|
||||
errorThreshold = defaultPoolErrorThreshold
|
||||
}
|
||||
prm.SetErrorThreshold(errorThreshold)
|
||||
prm.SetLogger(logger)
|
||||
prmTree.SetLogger(logger)
|
||||
|
||||
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
|
||||
|
||||
var apiGRPCDialOpts []grpc.DialOption
|
||||
var treeGRPCDialOpts []grpc.DialOption
|
||||
if cfg.GetBool(cfgTracingEnabled) {
|
||||
interceptors := []grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||
}
|
||||
treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
|
||||
apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
|
||||
}
|
||||
prm.SetGRPCDialOptions(apiGRPCDialOpts...)
|
||||
prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
|
||||
}
|
||||
|
||||
if err = p.Dial(ctx); err != nil {
|
||||
logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
|
||||
}
|
||||
|
||||
treePool, err := treepool.NewPool(prmTree)
|
||||
if err != nil {
|
||||
logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
|
||||
}
|
||||
if err = treePool.Dial(ctx); err != nil {
|
||||
logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
|
||||
}
|
||||
|
||||
return p, treePool, key
|
||||
}
|
||||
|
||||
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
||||
var nodes []pool.NodeParam
|
||||
for i := 0; ; i++ {
|
||||
key := cfgPeers + "." + strconv.Itoa(i) + "."
|
||||
address := v.GetString(key + "address")
|
||||
weight := v.GetFloat64(key + "weight")
|
||||
priority := v.GetInt(key + "priority")
|
||||
|
||||
if address == "" {
|
||||
break
|
||||
}
|
||||
if weight <= 0 { // unspecified or wrong
|
||||
weight = 1
|
||||
}
|
||||
if priority <= 0 { // unspecified or wrong
|
||||
priority = 1
|
||||
}
|
||||
|
||||
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
|
||||
|
||||
l.Info(logs.AddedStoragePeer,
|
||||
zap.Int("priority", priority),
|
||||
zap.String("address", address),
|
||||
zap.Float64("weight", weight))
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
|
||||
softMemoryLimit := cfg.GetSizeInBytes(cfgSoftMemoryLimit)
|
||||
if softMemoryLimit <= 0 {
|
||||
softMemoryLimit = defaultSoftMemoryLimit
|
||||
}
|
||||
|
||||
return int64(softMemoryLimit)
|
||||
}
|
||||
|
||||
func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||
cacheCfg := cache.DefaultBucketConfig(l)
|
||||
|
||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
|
||||
cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size)
|
||||
|
||||
return cacheCfg
|
||||
}
|
||||
|
||||
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
|
||||
if v.IsSet(cfgEntry) {
|
||||
lifetime := v.GetDuration(cfgEntry)
|
||||
if lifetime <= 0 {
|
||||
l.Error(logs.InvalidLifetimeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Duration("value in config", lifetime),
|
||||
zap.Duration("default", defaultValue))
|
||||
} else {
|
||||
return lifetime
|
||||
}
|
||||
}
|
||||
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int {
|
||||
if v.IsSet(cfgEntry) {
|
||||
size := v.GetInt(cfgEntry)
|
||||
if size <= 0 {
|
||||
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
|
||||
zap.String("parameter", cfgEntry),
|
||||
zap.Int("value in config", size),
|
||||
zap.Int("default", defaultValue))
|
||||
} else {
|
||||
return size
|
||||
}
|
||||
}
|
||||
|
||||
return defaultValue
|
||||
}
|
|
@ -26,6 +26,9 @@ HTTP_GW_SERVER_1_TLS_ENABLED=true
|
|||
HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
|
||||
HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
|
||||
|
||||
# How often to retry binding listeners that failed to start
|
||||
HTTP_GW_RECONNECT_INTERVAL=1m
|
||||
|
||||
# Nodes configuration.
|
||||
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it is healthy. Otherwise, the gateway uses the second node (grpc://s01.frostfs.devenv:8080)
|
||||
|
@ -92,3 +95,29 @@ HTTP_GW_POOL_ERROR_THRESHOLD=100
|
|||
|
||||
# Enable zip compression to download files by common prefix.
|
||||
HTTP_GW_ZIP_COMPRESSION=false
|
||||
|
||||
HTTP_GW_TRACING_ENABLED=true
|
||||
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
|
||||
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
|
||||
|
||||
HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
|
||||
|
||||
# Parameters of requests to FrostFS
|
||||
# This flag enables client side object preparing.
|
||||
HTTP_GW_FROSTFS_CLIENT_CUT=false
|
||||
# Sets max buffer size for read payload in put operations.
|
||||
HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
|
||||
|
||||
# Caching
|
||||
# Cache which contains mapping of bucket name to bucket info
|
||||
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
|
||||
HTTP_GW_CACHE_BUCKETS_SIZE=1000
|
||||
|
||||
# Header used to determine the zone when resolving a bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
# Namespaces that should be treated as the default namespace
|
||||
HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
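
The quoted value above can be confusing: viper reads the variable as a plain string and splits it on whitespace, and the gateway then trims the surrounding quotes (see `setDefaultNamespaces` in this diff). A standalone sketch of that parsing, not the gateway's exact code:

```go
package main

import (
	"fmt"
	"strings"
)

// parseDefaultNamespaces mimics how the quoted env value becomes ["", "root"]:
// split on whitespace, then strip the surrounding double quotes.
func parseDefaultNamespaces(raw string) []string {
	fields := strings.Fields(raw)
	for i := range fields {
		fields[i] = strings.Trim(fields[i], "\"")
	}
	return fields
}

func main() {
	fmt.Printf("%q\n", parseDefaultNamespaces(`"" "root"`)) // ["" "root"]
}
```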
|
||||
|
||||
# Maximum number of attempts to make a successful tree request.
# The default value is 0, which means the number of attempts equals the number of nodes in the pool.
|
||||
HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
||||
|
|
|
@ -9,9 +9,14 @@ pprof:
|
|||
prometheus:
|
||||
enabled: false # Enable metrics.
|
||||
address: localhost:8084
|
||||
tracing:
|
||||
enabled: true
|
||||
exporter: "otlp_grpc"
|
||||
endpoint: "localhost:4317"
|
||||
|
||||
logger:
|
||||
level: debug # Log level.
|
||||
destination: stdout
|
||||
|
||||
server:
|
||||
- address: 0.0.0.0:8080
|
||||
|
@ -50,6 +55,7 @@ peers:
|
|||
priority: 2
|
||||
weight: 9
|
||||
|
||||
reconnect_interval: 1m
|
||||
|
||||
web:
|
||||
# Per-connection buffer size for requests' reading.
|
||||
|
@ -97,3 +103,27 @@ pool_error_threshold: 100 # The number of errors on connection after which node
|
|||
|
||||
zip:
|
||||
compression: false # Enable zip compression to download files by common prefix.
|
||||
|
||||
runtime:
|
||||
soft_memory_limit: 1gb
|
||||
|
||||
# Parameters of requests to FrostFS
|
||||
frostfs:
|
||||
# This flag enables client side object preparing.
|
||||
client_cut: false
|
||||
# Sets max buffer size for read payload in put operations.
|
||||
buffer_max_size_for_put: 1048576
|
||||
# Maximum number of attempts to make a successful tree request.
# The default value is 0, which means the number of attempts equals the number of nodes in the pool.
|
||||
tree_pool_max_attempts: 0
|
||||
|
||||
# Caching
|
||||
cache:
|
||||
# Cache which contains mapping of bucket name to bucket info
|
||||
buckets:
|
||||
lifetime: 1m
|
||||
size: 1000
|
||||
|
||||
resolve_bucket:
|
||||
namespace_header: X-Frostfs-Namespace
|
||||
default_namespaces: [ "", "root" ]
|
4
debian/frostfs-http-gw.postinst
vendored
|
@ -21,7 +21,7 @@ set -e
|
|||
case "$1" in
|
||||
configure)
|
||||
USERNAME=http
|
||||
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /srv/frostfs_cache --system -M -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
|
||||
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -m -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
|
||||
if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
|
||||
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
|
||||
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
|
||||
|
@ -29,7 +29,7 @@ case "$1" in
|
|||
chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
|
||||
fi
|
||||
USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
|
||||
if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
|
||||
if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
|
||||
chown -f frostfs-$USERNAME: "$USERDIR"
|
||||
fi
|
||||
;;
|
||||
|
|
|
@ -41,7 +41,7 @@ $ cat http.log
|
|||
# Structure
|
||||
|
||||
| Section | Description |
|
||||
|-----------------|-------------------------------------------------------|
|
||||
|------------------|----------------------------------------------------------------|
|
||||
| no section | [General parameters](#general-section) |
|
||||
| `wallet` | [Wallet configuration](#wallet-section) |
|
||||
| `peers` | [Nodes configuration](#peers-section) |
|
||||
|
@ -52,6 +52,11 @@ $ cat http.log
|
|||
| `zip` | [ZIP configuration](#zip-section) |
|
||||
| `pprof` | [Pprof configuration](#pprof-section) |
|
||||
| `prometheus` | [Prometheus configuration](#prometheus-section) |
|
||||
| `tracing` | [Tracing configuration](#tracing-section) |
|
||||
| `runtime` | [Runtime configuration](#runtime-section) |
|
||||
| `frostfs` | [Frostfs configuration](#frostfs-section) |
|
||||
| `cache` | [Cache configuration](#cache-section) |
|
||||
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
|
||||
|
||||
|
||||
# General section
|
||||
|
@ -67,6 +72,7 @@ stream_timeout: 10s
|
|||
request_timeout: 5s
|
||||
rebalance_timer: 30s
|
||||
pool_error_threshold: 100
|
||||
reconnect_interval: 1m
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|
@ -78,6 +84,7 @@ pool_error_threshold: 100
|
|||
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
|
||||
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
|
||||
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
||||
| `reconnect_interval` | `duration` | no | `1m` | Interval between attempts to bind listeners that failed to start. |
|
||||
|
||||
# `wallet` section
|
||||
|
||||
|
@ -156,12 +163,13 @@ server:
|
|||
```yaml
|
||||
logger:
|
||||
level: debug
|
||||
destination: stdout
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|-----------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
||||
|---------------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
||||
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
||||
|
||||
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
|
||||
|
||||
# `web` section
|
||||
|
||||
|
@ -238,3 +246,93 @@ prometheus:
|
|||
|-----------|----------|---------------|------------------|-----------------------------------------|
|
||||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
||||
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
|
||||
|
||||
# `tracing` section
|
||||
|
||||
Contains configuration for the `tracing` service.
|
||||
|
||||
```yaml
|
||||
tracing:
|
||||
enabled: true
|
||||
exporter: "otlp_grpc"
|
||||
endpoint: "localhost:4317"
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|------------|----------|---------------|------------------|---------------------------------------------------------------|
|
||||
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
|
||||
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
|
||||
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
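
A minimal sketch of how these values are wired into the tracing setup, following `initTracing` from this diff; the exporter and endpoint are the documented example values, and error handling is simplified:

```go
package main

import (
	"context"
	"log"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)

func setupTracing(ctx context.Context) {
	cfg := tracing.Config{
		Enabled:  true,
		Exporter: tracing.Exporter("otlp_grpc"),
		Endpoint: "localhost:4317",
		Service:  "frostfs-http-gw",
	}
	// Setup reports whether the tracing configuration was updated.
	if _, err := tracing.Setup(ctx, cfg); err != nil {
		log.Printf("failed to initialize tracing: %v", err)
	}
}

func main() {
	setupTracing(context.Background())
}
```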
|
||||
|
||||
# `runtime` section
|
||||
Contains runtime parameters.
|
||||
|
||||
```yaml
|
||||
runtime:
|
||||
soft_memory_limit: 1gb
|
||||
```
|
||||
|
||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||
|---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
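
A hedged sketch of how this setting is applied, mirroring `setRuntimeParameters` and `fetchSoftMemoryLimit` from this diff; the config key matches the reference above, the rest is illustrative:

```go
package main

import (
	"math"
	"os"
	"runtime/debug"

	"github.com/spf13/viper"
)

func applySoftMemoryLimit(v *viper.Viper) {
	// GOMEMLIMIT always wins: the runtime already honours it, so the
	// configured value is ignored when the env variable is set.
	if os.Getenv("GOMEMLIMIT") != "" {
		return
	}

	limit := int64(v.GetSizeInBytes("runtime.soft_memory_limit"))
	if limit <= 0 {
		limit = math.MaxInt64 // zero or no value stands for "no limit"
	}
	debug.SetMemoryLimit(limit)
}

func main() {
	applySoftMemoryLimit(viper.GetViper())
}
```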
|
||||
|
||||
# `frostfs` section

Contains parameters of requests to FrostFS.

```yaml
frostfs:
  client_cut: false
  buffer_max_size_for_put: 1048576 # 1mb
  tree_pool_max_attempts: 0
```

| Parameter                  | Type     | SIGHUP reload | Default value | Description                                                                                                                              |
|----------------------------|----------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------|
| `client_cut`               | `bool`   | yes           | `false`       | This flag enables client-side object preparation.                                                                                        |
| `buffer_max_size_for_put`  | `uint64` | yes           | `1048576`     | Sets the maximum buffer size for reading the payload in put operations.                                                                  |
| `tree_pool_max_attempts`   | `uint32` | no            | `0`           | Sets the maximum number of attempts to make a successful tree request. A value of 0 means the number of attempts equals the number of nodes in the pool. |
### `cache` section

```yaml
cache:
  buckets:
    lifetime: 1m
    size: 1000
```

| Parameter | Type                              | Default value                   | Description                                                 |
|-----------|-----------------------------------|---------------------------------|-------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |

#### `cache` subsection

```yaml
lifetime: 1m
size: 1000
```

| Parameter  | Type       | Default value    | Description                   |
|------------|------------|------------------|-------------------------------|
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
| `size`     | `int`      | depends on cache | LRU cache size.               |
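A per-entry lifetime combined with a bounded LRU size is exactly what the `gcache` dependency from `go.mod` provides. Below is a minimal sketch of such a cache; `BucketInfo` and `newBucketCache` are stand-ins invented for the example, not the gateway's real types.

```go
package main

import (
    "time"

    "github.com/bluele/gcache"
)

// BucketInfo is a placeholder for whatever the gateway caches per bucket.
type BucketInfo struct {
    Name        string
    ContainerID string
}

// newBucketCache builds an LRU cache with the lifetime/size semantics of the
// cache.buckets subsection: entries expire after lifetime, and the oldest
// entries are evicted once size is exceeded.
func newBucketCache(lifetime time.Duration, size int) gcache.Cache {
    return gcache.New(size).
        LRU().
        Expiration(lifetime).
        Build()
}
```

Lookups would then go through `Get`/`Set` on the returned cache, with `gcache` handling both expiry and eviction.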
# `resolve_bucket` section

Parameters for resolving bucket names to and from container IDs.

```yaml
resolve_bucket:
  namespace_header: X-Frostfs-Namespace
  default_namespaces: [ "", "root" ]
```

| Parameter            | Type       | SIGHUP reload | Default value         | Description                                                      |
|----------------------|------------|---------------|-----------------------|------------------------------------------------------------------|
| `namespace_header`   | `string`   | yes           | `X-Frostfs-Namespace` | Header used to determine the zone when resolving a bucket name.  |
| `default_namespaces` | `[]string` | yes           | `["","root"]`         | Namespaces that should be handled as default.                    |
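The `default_namespaces` list is easiest to read as a membership check: if the value taken from `namespace_header` matches one of the entries, the bucket name is resolved as if no namespace were given. The helper below only illustrates that check; its name and signature are invented for the sketch.

```go
package main

// isDefaultNamespace reports whether the namespace taken from the
// X-Frostfs-Namespace header should be handled as the default one,
// following the resolve_bucket.default_namespaces setting above.
func isDefaultNamespace(ns string, defaultNamespaces []string) bool {
    for _, d := range defaultNamespaces {
        if ns == d {
            return true
        }
    }
    return false
}
```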
@@ -1,498 +0,0 @@
package downloader

import (
    "archive/zip"
    "bufio"
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "github.com/valyala/fasthttp"
    "go.uber.org/atomic"
    "go.uber.org/zap"
)

type request struct {
    *fasthttp.RequestCtx
    appCtx context.Context
    log    *zap.Logger
}

func isValidToken(s string) bool {
    for _, c := range s {
        if c <= ' ' || c > 127 {
            return false
        }
        if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
            return false
        }
    }
    return true
}

func isValidValue(s string) bool {
    for _, c := range s {
        // HTTP specification allows for more technically, but we don't want to escape things.
        if c < ' ' || c > 127 || c == '"' {
            return false
        }
    }
    return true
}

type readCloser struct {
    io.Reader
    io.Closer
}

// initializes io.Reader with the limited size and detects Content-Type from it.
// Returns r's error directly. Also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
    if maxSize > sizeToDetectType {
        maxSize = sizeToDetectType
    }

    buf := make([]byte, maxSize) // maybe sync-pool the slice?

    r, err := rInit(maxSize)
    if err != nil {
        return "", nil, err
    }

    n, err := r.Read(buf)
    if err != nil && err != io.EOF {
        return "", nil, err
    }

    buf = buf[:n]

    return http.DetectContentType(buf), buf, err // to not lose io.EOF
}

func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
    var (
        err      error
        dis      = "inline"
        start    = time.Now()
        filename string
    )
    if err = tokens.StoreBearerToken(r.RequestCtx); err != nil {
        r.log.Error("could not fetch and store bearer token", zap.Error(err))
        response.Error(r.RequestCtx, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    var prm pool.PrmObjectGet
    prm.SetAddress(objectAddress)
    if btoken := bearerToken(r.RequestCtx); btoken != nil {
        prm.UseBearer(*btoken)
    }

    rObj, err := clnt.GetObject(r.appCtx, prm)
    if err != nil {
        r.handleFrostFSErr(err, start)
        return
    }

    // we can't close reader in this function, so how to do it?

    if r.Request.URI().QueryArgs().GetBool("download") {
        dis = "attachment"
    }

    payloadSize := rObj.Header.PayloadSize()

    r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
    var contentType string
    for _, attr := range rObj.Header.Attributes() {
        key := attr.Key()
        val := attr.Value()
        if !isValidToken(key) || !isValidValue(val) {
            continue
        }

        key = utils.BackwardTransformIfSystem(key)

        r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
        switch key {
        case object.AttributeFileName:
            filename = val
        case object.AttributeTimestamp:
            value, err := strconv.ParseInt(val, 10, 64)
            if err != nil {
                r.log.Info("couldn't parse creation date",
                    zap.String("key", key),
                    zap.String("val", val),
                    zap.Error(err))
                continue
            }
            r.Response.Header.Set(fasthttp.HeaderLastModified,
                time.Unix(value, 0).UTC().Format(http.TimeFormat))
        case object.AttributeContentType:
            contentType = val
        }
    }

    idsToResponse(&r.Response, &rObj.Header)

    if len(contentType) == 0 {
        // determine the Content-Type from the payload head
        var payloadHead []byte

        contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
            return rObj.Payload, nil
        })
        if err != nil && err != io.EOF {
            r.log.Error("could not detect Content-Type from payload", zap.Error(err))
            response.Error(r.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
            return
        }

        // reset payload reader since a part of the data has been read
        var headReader io.Reader = bytes.NewReader(payloadHead)

        if err != io.EOF { // otherwise, we've already read full payload
            headReader = io.MultiReader(headReader, rObj.Payload)
        }

        // note: we could do with io.Reader, but SetBodyStream below closes body stream
        // if it implements io.Closer and that's useful for us.
        rObj.Payload = readCloser{headReader, rObj.Payload}
    }
    r.SetContentType(contentType)

    r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))

    r.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}

func bearerToken(ctx context.Context) *bearer.Token {
    if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
        return tkn
    }
    return nil
}

func (r *request) handleFrostFSErr(err error, start time.Time) {
    logFields := []zap.Field{
        zap.Stringer("elapsed", time.Since(start)),
        zap.Error(err),
    }
    statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
    logFields = append(logFields, additionalFields...)

    r.log.Error("could not receive object", logFields...)
    response.Error(r.RequestCtx, msg, statusCode)
}

// Downloader is a download request handler.
type Downloader struct {
    appCtx            context.Context
    log               *zap.Logger
    pool              *pool.Pool
    containerResolver *resolver.ContainerResolver
    settings          *Settings
}

// Settings stores reloading parameters, so it has to provide atomic getters and setters.
type Settings struct {
    zipCompression atomic.Bool
}

func (s *Settings) ZipCompression() bool {
    return s.zipCompression.Load()
}

func (s *Settings) SetZipCompression(val bool) {
    s.zipCompression.Store(val)
}

// New creates an instance of Downloader using specified options.
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Downloader {
    return &Downloader{
        appCtx:            ctx,
        log:               params.Logger,
        pool:              params.Pool,
        settings:          settings,
        containerResolver: params.Resolver,
    }
}

func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
    return &request{
        RequestCtx: ctx,
        appCtx:     d.appCtx,
        log:        log,
    }
}

// DownloadByAddress handles download requests using simple cid/oid format.
func (d *Downloader) DownloadByAddress(c *fasthttp.RequestCtx) {
    d.byAddress(c, request.receiveFile)
}

// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
    var (
        idCnr, _ = c.UserValue("cid").(string)
        idObj, _ = c.UserValue("oid").(string)
        log      = d.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
    )

    cnrID, err := utils.GetContainerID(d.appCtx, idCnr, d.containerResolver)
    if err != nil {
        log.Error("wrong container id", zap.Error(err))
        response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
        return
    }

    objID := new(oid.ID)
    if err = objID.DecodeString(idObj); err != nil {
        log.Error("wrong object id", zap.Error(err))
        response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
        return
    }

    var addr oid.Address
    addr.SetContainer(*cnrID)
    addr.SetObject(*objID)

    f(*d.newRequest(c, log), d.pool, addr)
}

// DownloadByAttribute handles attribute-based download requests.
func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
    d.byAttribute(c, request.receiveFile)
}

// byAttribute is a wrapper similar to byAddress.
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
    var (
        scid, _ = c.UserValue("cid").(string)
        key, _  = url.QueryUnescape(c.UserValue("attr_key").(string))
        val, _  = url.QueryUnescape(c.UserValue("attr_val").(string))
        log     = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
    )

    containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
    if err != nil {
        log.Error("wrong container id", zap.Error(err))
        response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
        return
    }

    res, err := d.search(c, containerID, key, val, object.MatchStringEqual)
    if err != nil {
        log.Error("could not search for objects", zap.Error(err))
        response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    defer res.Close()

    buf := make([]oid.ID, 1)

    n, err := res.Read(buf)
    if n == 0 {
        if errors.Is(err, io.EOF) {
            log.Error("object not found", zap.Error(err))
            response.Error(c, "object not found", fasthttp.StatusNotFound)
            return
        }

        log.Error("read object list failed", zap.Error(err))
        response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    var addrObj oid.Address
    addrObj.SetContainer(*containerID)
    addrObj.SetObject(buf[0])

    f(*d.newRequest(c, log), d.pool, addrObj)
}

func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
    filters := object.NewSearchFilters()
    filters.AddRootFilter()
    filters.AddFilter(key, val, op)

    var prm pool.PrmObjectSearch
    prm.SetContainerID(*cid)
    prm.SetFilters(filters)
    if btoken := bearerToken(c); btoken != nil {
        prm.UseBearer(*btoken)
    }

    return d.pool.SearchObjects(d.appCtx, prm)
}

func (d *Downloader) getContainer(cnrID cid.ID) (container.Container, error) {
    var prm pool.PrmContainerGet
    prm.SetContainerID(cnrID)

    return d.pool.GetContainer(d.appCtx, prm)
}

func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
    method := zip.Store
    if d.settings.ZipCompression() {
        method = zip.Deflate
    }

    filePath := getZipFilePath(obj)
    if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
        return nil, fmt.Errorf("invalid filepath '%s'", filePath)
    }

    return zw.CreateHeader(&zip.FileHeader{
        Name:     filePath,
        Method:   method,
        Modified: time.Now(),
    })
}

// DownloadZipped handles zip by prefix requests.
func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
    scid, _ := c.UserValue("cid").(string)
    prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
    log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))

    containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
    if err != nil {
        log.Error("wrong container id", zap.Error(err))
        response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
        return
    }

    if err = tokens.StoreBearerToken(c); err != nil {
        log.Error("could not fetch and store bearer token", zap.Error(err))
        response.Error(c, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    // check if container exists here to be able to return 404 error,
    // otherwise we get this error only in object iteration step
    // and client get 200 OK.
    if _, err = d.getContainer(*containerID); err != nil {
        log.Error("could not check container existence", zap.Error(err))
        if client.IsErrContainerNotFound(err) {
            response.Error(c, "Not Found", fasthttp.StatusNotFound)
            return
        }
        response.Error(c, "could not check container existence: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    resSearch, err := d.search(c, containerID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
    if err != nil {
        log.Error("could not search for objects", zap.Error(err))
        response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
    c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
    c.Response.SetStatusCode(http.StatusOK)

    c.SetBodyStreamWriter(func(w *bufio.Writer) {
        defer resSearch.Close()

        zipWriter := zip.NewWriter(w)

        var bufZip []byte
        var addr oid.Address

        empty := true
        called := false
        btoken := bearerToken(c)
        addr.SetContainer(*containerID)

        errIter := resSearch.Iterate(func(id oid.ID) bool {
            called = true

            if empty {
                bufZip = make([]byte, 3<<20) // the same as for upload
            }
            empty = false

            addr.SetObject(id)
            if err = d.zipObject(zipWriter, addr, btoken, bufZip); err != nil {
                log.Error("failed to add object to archive", zap.String("oid", id.EncodeToString()), zap.Error(err))
            }

            return false
        })
        if errIter != nil {
            log.Error("iterating over selected objects failed", zap.Error(errIter))
        } else if !called {
            log.Error("objects not found")
        }

        if err = zipWriter.Close(); err != nil {
            log.Error("close zip writer", zap.Error(err))
        }
    })
}

func (d *Downloader) zipObject(zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
    var prm pool.PrmObjectGet
    prm.SetAddress(addr)
    if btoken != nil {
        prm.UseBearer(*btoken)
    }

    resGet, err := d.pool.GetObject(d.appCtx, prm)
    if err != nil {
        return fmt.Errorf("get FrostFS object: %v", err)
    }

    objWriter, err := d.addObjectToZip(zipWriter, &resGet.Header)
    if err != nil {
        return fmt.Errorf("zip create header: %v", err)
    }

    if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
        return fmt.Errorf("copy object payload to zip file: %v", err)
    }

    if err = resGet.Payload.Close(); err != nil {
        return fmt.Errorf("object body close error: %w", err)
    }

    if err = zipWriter.Flush(); err != nil {
        return fmt.Errorf("flush zip writer: %v", err)
    }

    return nil
}

func getZipFilePath(obj *object.Object) string {
    for _, attr := range obj.Attributes() {
        if attr.Key() == object.AttributeFilePath {
            return attr.Value()
        }
    }

    return ""
}
go.mod (84 changed lines)
@ -1,58 +1,70 @@
|
|||
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
||||
|
||||
go 1.18
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230329125804-552219b8e130
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||
github.com/bluele/gcache v0.0.2
|
||||
github.com/fasthttp/router v1.4.1
|
||||
github.com/nspcc-dev/neo-go v0.101.0
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc
|
||||
github.com/prometheus/client_golang v1.15.1
|
||||
github.com/prometheus/client_model v0.3.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/viper v1.15.0
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/ssgreg/journald v1.0.0
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/testcontainers/testcontainers-go v0.13.0
|
||||
github.com/valyala/fasthttp v1.34.0
|
||||
go.uber.org/atomic v1.10.0
|
||||
go.opentelemetry.io/otel v1.16.0
|
||||
go.opentelemetry.io/otel/trace v1.16.0
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc
|
||||
golang.org/x/net v0.10.0
|
||||
google.golang.org/grpc v1.55.0
|
||||
)
|
||||
|
||||
require (
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb // indirect
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/Microsoft/hcsshim v0.9.2 // indirect
|
||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 // indirect
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/containerd/cgroups v1.0.3 // indirect
|
||||
github.com/containerd/containerd v1.6.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.14+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.15.0 // indirect
|
||||
github.com/klauspost/compress v1.16.4 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/sys/mount v0.3.2 // indirect
|
||||
github.com/moby/sys/mountinfo v0.6.1 // indirect
|
||||
|
@ -60,7 +72,7 @@ require (
|
|||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce // indirect
|
||||
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
|
@ -68,33 +80,37 @@ require (
|
|||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/prometheus/common v0.42.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/twmb/murmur3 v1.1.8 // indirect
|
||||
github.com/urfave/cli v1.22.5 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c // indirect
|
||||
golang.org/x/net v0.4.0 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.3.0 // indirect
|
||||
golang.org/x/term v0.3.0 // indirect
|
||||
golang.org/x/text v0.5.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
|
||||
google.golang.org/grpc v1.52.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.9.0 // indirect
|
||||
golang.org/x/sync v0.2.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/text v0.9.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||

go.sum (168 changed lines)
@ -37,20 +37,24 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
|
|||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703 h1:lxe0DtZq/uFZVZu9apx6OcIXCJskQBMd/GVeYGKA3wA=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.11.2-0.20230315095236-9dc375346703/go.mod h1:gRd5iE5A84viily6AcNBsSlTx2XgoWrwRDz7z0MayDQ=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4 h1:wjLfZ3WCt7qNGsQv+Jl0TXnmtg0uVk/jToKPFTBc/jo=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20231121085847-241a9f1ad0a4/go.mod h1:uY0AYmCznjZdghDnAk7THFIe1Vlg531IxUcus7ZfUJI=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb h1:S/TrbOOu9qEXZRZ9/Ddw7crnxbBUQLo68PSzQWYrc9M=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb/go.mod h1:nkR5gaGeez3Zv2SE7aceP0YwxG2FzIB5cGKpQO2vV2o=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230329125804-552219b8e130 h1:V+3dGwEXwEvvSvseMKn8S6ZEMNhxBBYrcyx+F7VaptM=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230329125804-552219b8e130/go.mod h1:23fUGlEv/ImaOi3vck6vZj0v0b4hteOhLLPnVWHSQeA=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0 h1:KvAES7xIqmQBGd2q8KanNosD9+4BhU/zqD5Kt5KSflk=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.0/go.mod h1:mq2sbvYfO+BB6iFZwYBkgC0yc6mJNx+qZi4jW918m+Y=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939 h1:jZEepi9yWmqrWgLRQcHQu4YPJaudmd7d2AEhpmM3m4U=
|
||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20231107114540-ab75edd70939/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
|
||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
|
@ -119,8 +123,8 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
|
|||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210521073959-f0d4d129b7f1/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12 h1:npHgfD4Tl2WJS3AJaMUi5ynGDPUBfkg3U3fCzDyXZ+4=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20221202181307-76fa05c21b12/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
|
||||
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
|
@ -136,6 +140,8 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE
|
|||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
|
||||
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
|
@ -156,14 +162,15 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq
|
|||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||
|
@ -180,8 +187,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
|||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
|
||||
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
|
||||
|
@ -308,9 +318,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
|
@ -364,7 +373,7 @@ github.com/flynn-archive/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:r
|
|||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
|
||||
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
|
||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
|
@ -387,6 +396,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
|
|||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
|
||||
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
|
@ -421,6 +435,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -452,8 +468,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
|
|||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
|
@ -512,8 +529,9 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
|||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
|
@ -521,6 +539,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
|
|||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 h1:gDLXvp5S9izjldquuoAhDzccbskOL6tDC5jMSyx3zxE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2/go.mod h1:7pdNwVWBBHGiCxa9lAszqCJMbfTISJ7oMftp8+UGV08=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
|
@ -530,8 +551,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
|||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
|
@ -573,8 +594,9 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6
|
|||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
|
||||
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
|
||||
github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
|
@ -583,8 +605,8 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
|
|||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
|
@ -613,8 +635,9 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
|
|||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
|
||||
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
|
||||
|
@ -668,11 +691,11 @@ github.com/nspcc-dev/hrw v1.0.9/go.mod h1:l/W2vx83vMQo6aStyx2AuZrJ+07lGv2JQGlVkP
|
|||
github.com/nspcc-dev/neo-go v0.73.1-pre.0.20200303142215-f5a1b928ce09/go.mod h1:pPYwPZ2ks+uMnlRLUyXOpLieaDQSEaf4NM3zHVbRjmg=
|
||||
github.com/nspcc-dev/neo-go v0.98.0/go.mod h1:E3cc1x6RXSXrJb2nDWXTXjnXk3rIqVN8YdFyWv+FrqM=
|
||||
github.com/nspcc-dev/neo-go v0.99.4/go.mod h1:mKTolfRUfKjFso5HPvGSQtUZc70n0VKBMs16eGuC5gA=
|
||||
github.com/nspcc-dev/neo-go v0.101.0 h1:JPT2DpZqVjho34TMR59dm6uxvCFttOp02Nm8qCjpfaU=
|
||||
github.com/nspcc-dev/neo-go v0.101.0/go.mod h1:Q0uWKivGc2mYgdKFmTNP49LeXwMu4x6pUzHm3OIsN2I=
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc h1:fySIWvUQsitK5e5qYIHnTDCXuPpwzz89SEUEIyY11sg=
|
||||
github.com/nspcc-dev/neo-go v0.101.2-0.20230601131642-a0117042e8fc/go.mod h1:s9QhjMC784MWqTURovMbyYduIJc86mnCruxcMiAebpc=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220927123257-24c107e3a262/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb h1:GFxfkpXEYAbMIr69JpKOsQWeLOaGrd49HNAor8uDW+A=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb/go.mod h1:23bBw0v6pBYcrWs8CBEEDIEDJNbcFoIh8pGGcf2Vv8s=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce h1:vLGuUNDkmQrWMa4rr4vTd1u8ULqejWxVmNz1L7ocTEI=
|
||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230615193820-9185820289ce/go.mod h1:ZUuXOkdtHZgaC13za/zMgXfQFncZ0jLzfQTe+OsDOtg=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.11.0-pre.0.20211201134523-3604d96f3fe1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
|
||||
github.com/nspcc-dev/neofs-api-go/v2 v2.11.1/go.mod h1:oS8dycEh8PPf2Jjp6+8dlwWyEv2Dy77h/XhhcdxYEFs=
|
||||
github.com/nspcc-dev/neofs-crypto v0.2.0/go.mod h1:F/96fUzPM3wR+UGsPi3faVNmFlA9KAEAUQR7dMxZmNA=
|
||||
|
@ -762,14 +785,16 @@ github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNk
|
|||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
|
||||
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
|
@ -779,8 +804,9 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
|
|||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
|
@ -793,14 +819,15 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
|
|||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
|
@ -825,7 +852,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
|
|||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
|
@ -849,6 +875,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
|||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
|
||||
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
|
||||
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
|
||||
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
|
@ -865,8 +893,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
|
@ -882,6 +911,8 @@ github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBT
|
|||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/twmb/murmur3 v1.1.5/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
|
@ -936,7 +967,25 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
|
||||
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 h1:+XWJd3jf75RXJq29mxbuXhCXFDG3S3R4vBUeSI2P7tE=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0/go.mod h1:hqgzBPTf4yONMFgdZvL/bK42R/iinTyVQtiWihs3SZc=
|
||||
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
|
||||
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
|
||||
go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4=
|
||||
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
|
||||
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
|
@ -944,12 +993,12 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
|||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
|
@ -975,8 +1024,8 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5
|
|||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
@ -987,8 +1036,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c h1:Govq2W3bnHJimHT2ium65kXcI7ZzTniZHcFATnLJM0Q=
|
||||
golang.org/x/exp v0.0.0-20221227203929-1b447090c38c/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU=
|
||||
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -1069,8 +1118,8 @@ golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.4.0 h1:Q5QPcMlvfxFTAPV0+07Xz/MpK9NTXu2VDUuy0FeMfaU=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -1081,6 +1130,7 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1094,8 +1144,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1195,30 +1245,32 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210429154555-c04ba851c2a4/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180318012157-96caea41033d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -1357,8 +1409,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
|
||||
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
|
@ -1383,8 +1436,9 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
|||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
|
||||
google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
|
||||
google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
|
||||
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -1398,8 +1452,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/abiosoft/ishell.v2 v2.0.0/go.mod h1:sFp+cGtH6o4s1FtpVPTMcHq2yue+c4DGOVohJCPUzwY=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
|
@ -1409,7 +1464,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
internal/api/layer/tree_service.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package layer

import (
    "context"
    "errors"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// TreeService provides an interface to interact with the tree service using S3 data models.
type TreeService interface {
    GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
}

var (
    // ErrNodeNotFound is returned from Tree service in case of not found error.
    ErrNodeNotFound = errors.New("not found")

    // ErrNodeAccessDenied is returned from Tree service in case of access denied error.
    ErrNodeAccessDenied = errors.New("access denied")
)
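As an aside, a minimal sketch of how this interface can be satisfied, e.g. by an in-memory stub in tests; the stub type and its key layout are assumptions for illustration, not part of the gateway code.

package layer

import (
    "context"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// stubTree is a hypothetical in-memory TreeService used only for illustration.
type stubTree struct {
    versions map[string]*api.NodeVersion // key: "<container ID>/<object name>"
}

func (s *stubTree) GetLatestVersion(_ context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
    v, ok := s.versions[cnrID.EncodeToString()+"/"+objectName]
    if !ok {
        // The real implementation maps tree-service errors to these sentinel values as well.
        return nil, ErrNodeNotFound
    }
    return v, nil
}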
internal/api/tree.go (new file, 17 lines)
@@ -0,0 +1,17 @@
package api

import (
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// NodeVersion represents a node from the tree service.
type NodeVersion struct {
    BaseNodeVersion
    DeleteMarker bool
}

// BaseNodeVersion is minimal node info from the tree service.
// Basically used for "system" objects.
type BaseNodeVersion struct {
    OID oid.ID
}
internal/cache/buckets.go (new file, 72 lines, vendored)
@@ -0,0 +1,72 @@
package cache

import (
    "fmt"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "github.com/bluele/gcache"
    "go.uber.org/zap"
)

// BucketCache contains cache with objects and the lifetime of cache entries.
type BucketCache struct {
    cache  gcache.Cache
    logger *zap.Logger
}

// Config stores expiration params for cache.
type Config struct {
    Size     int
    Lifetime time.Duration
    Logger   *zap.Logger
}

const (
    // DefaultBucketCacheSize is a default maximum number of entries in cache.
    DefaultBucketCacheSize = 1e3
    // DefaultBucketCacheLifetime is a default lifetime of entries in cache.
    DefaultBucketCacheLifetime = time.Minute
)

// DefaultBucketConfig returns new default cache expiration values.
func DefaultBucketConfig(logger *zap.Logger) *Config {
    return &Config{
        Size:     DefaultBucketCacheSize,
        Lifetime: DefaultBucketCacheLifetime,
        Logger:   logger,
    }
}

// NewBucketCache creates an object of BucketCache.
func NewBucketCache(config *Config) *BucketCache {
    gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
    return &BucketCache{cache: gc, logger: config.Logger}
}

// Get returns a cached object.
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
    entry, err := o.cache.Get(formKey(ns, bktName))
    if err != nil {
        return nil
    }

    result, ok := entry.(*data.BucketInfo)
    if !ok {
        o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
            zap.String("expected", fmt.Sprintf("%T", result)))
        return nil
    }

    return result
}

// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
    return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}

func formKey(ns, name string) string {
    return name + "." + ns
}
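A short usage sketch of the cache, written as if inside the gateway module (these are internal packages); the bucket name and zone values are placeholders.

package main

import (
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
    "go.uber.org/zap"
)

func main() {
    logger, _ := zap.NewProduction()
    bktCache := cache.NewBucketCache(cache.DefaultBucketConfig(logger))

    // Hypothetical bucket info; in the gateway it is filled from the container's domain attributes.
    info := &data.BucketInfo{Name: "my-bucket", Zone: "container"}
    if err := bktCache.Put(info); err != nil {
        logger.Warn("couldn't cache bucket info", zap.Error(err))
    }

    // Lookups key on namespace (zone) and bucket name; a miss simply returns nil.
    if cached := bktCache.Get("container", "my-bucket"); cached != nil {
        logger.Info("cache hit", zap.Stringer("cid", cached.CID))
    }
}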
internal/data/bucket.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package data

import (
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

type BucketInfo struct {
    Name                    string // container name from system attribute
    Zone                    string // container zone from system attribute
    CID                     cid.ID
    HomomorphicHashDisabled bool
}
internal/frostfs/services/pool_wrapper.go (new file, 115 lines)
@@ -0,0 +1,115 @@
package services

import (
    "context"
    "errors"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
    treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
    grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)

type GetNodeByPathResponseInfoWrapper struct {
    response *grpcService.GetNodeByPathResponse_Info
}

func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
    return n.response.GetNodeId()
}

func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
    return n.response.GetParentId()
}

func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
    return n.response.GetTimestamp()
}

func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
    res := make([]tree.Meta, len(n.response.Meta))
    for i, value := range n.response.Meta {
        res[i] = value
    }
    return res
}

type GetSubTreeResponseBodyWrapper struct {
    response *grpcService.GetSubTreeResponse_Body
}

func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
    return n.response.GetNodeId()
}

func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
    return n.response.GetParentId()
}

func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
    return n.response.GetTimestamp()
}

func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
    res := make([]tree.Meta, len(n.response.Meta))
    for i, value := range n.response.Meta {
        res[i] = value
    }
    return res
}

type PoolWrapper struct {
    p *treepool.Pool
}

func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
    return &PoolWrapper{p: p}
}

func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
    poolPrm := treepool.GetNodesParams{
        CID:           prm.CnrID,
        TreeID:        prm.TreeID,
        Path:          prm.Path,
        Meta:          prm.Meta,
        PathAttribute: tree.FileNameKey,
        LatestOnly:    prm.LatestOnly,
        AllAttrs:      prm.AllAttrs,
        BearerToken:   getBearer(ctx),
    }

    nodes, err := w.p.GetNodes(ctx, poolPrm)
    if err != nil {
        return nil, handleError(err)
    }

    res := make([]tree.NodeResponse, len(nodes))
    for i, info := range nodes {
        res[i] = GetNodeByPathResponseInfoWrapper{info}
    }

    return res, nil
}

func getBearer(ctx context.Context) []byte {
    token, err := tokens.LoadBearerToken(ctx)
    if err != nil {
        return nil
    }
    return token.Marshal()
}

func handleError(err error) error {
    if err == nil {
        return nil
    }
    if errors.Is(err, treepool.ErrNodeNotFound) {
        return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
    }
    if errors.Is(err, treepool.ErrNodeAccessDenied) {
        return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
    }

    return err
}
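A hedged sketch of how the wrapper might be called from the gateway side; the tree name, the meta keys and the exact field types of tree.GetNodesParams are assumptions made only to illustrate the error mapping done by handleError.

package main

import (
    "context"
    "errors"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
)

// latestNodes asks the tree service for the newest node matching an object key.
func latestNodes(ctx context.Context, tp *treepool.Pool, cnrID cid.ID, key string) ([]tree.NodeResponse, error) {
    wrapper := services.NewPoolWrapper(tp)

    nodes, err := wrapper.GetNodes(ctx, &tree.GetNodesParams{
        CnrID:      cnrID,
        TreeID:     "version",       // assumed tree name
        Path:       []string{key},   // matched against the FileName path attribute
        Meta:       []string{"OID"}, // illustrative meta key
        LatestOnly: true,
    })
    if errors.Is(err, tree.ErrNodeNotFound) {
        // handleError translated treepool.ErrNodeNotFound into the gateway-level sentinel.
        return nil, err
    }
    return nodes, err
}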
internal/handler/download.go (new file, 195 lines)
@@ -0,0 +1,195 @@
package handler

import (
    "archive/zip"
    "bufio"
    "context"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "github.com/valyala/fasthttp"
    "go.uber.org/zap"
)

// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
    test, _ := c.UserValue("oid").(string)
    var id oid.ID
    err := id.DecodeString(test)
    if err != nil {
        h.byObjectName(c, h.receiveFile)
    } else {
        h.byAddress(c, h.receiveFile)
    }
}

func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
    return &request{
        RequestCtx: ctx,
        log:        log,
    }
}

// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
    h.byAttribute(c, h.receiveFile)
}

func (h *Handler) search(ctx context.Context, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
    filters := object.NewSearchFilters()
    filters.AddRootFilter()
    filters.AddFilter(key, val, op)

    var prm pool.PrmObjectSearch
    prm.SetContainerID(*cid)
    prm.SetFilters(filters)
    if btoken := bearerToken(ctx); btoken != nil {
        prm.UseBearer(*btoken)
    }

    return h.pool.SearchObjects(ctx, prm)
}

func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
    method := zip.Store
    if h.config.ZipCompression() {
        method = zip.Deflate
    }

    filePath := getZipFilePath(obj)
    if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
        return nil, fmt.Errorf("invalid filepath '%s'", filePath)
    }

    return zw.CreateHeader(&zip.FileHeader{
        Name:     filePath,
        Method:   method,
        Modified: time.Now(),
    })
}

// DownloadZipped handles zip by prefix requests.
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
    scid, _ := c.UserValue("cid").(string)
    prefix, _ := c.UserValue("prefix").(string)

    prefix, err := url.QueryUnescape(prefix)
    if err != nil {
        h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()), zap.Error(err))
        response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()))

    ctx := utils.GetContextFromRequest(c)

    bktInfo, err := h.getBucketInfo(ctx, scid, log)
    if err != nil {
        logAndSendBucketError(c, log, err)
        return
    }

    resSearch, err := h.search(ctx, &bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
    if err != nil {
        log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
        response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
    c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
    c.Response.SetStatusCode(http.StatusOK)

    c.SetBodyStreamWriter(func(w *bufio.Writer) {
        defer resSearch.Close()

        zipWriter := zip.NewWriter(w)

        var bufZip []byte
        var addr oid.Address

        empty := true
        called := false
        btoken := bearerToken(ctx)
        addr.SetContainer(bktInfo.CID)

        errIter := resSearch.Iterate(func(id oid.ID) bool {
            called = true

            if empty {
                bufZip = make([]byte, 3<<20) // the same as for upload
            }
            empty = false

            addr.SetObject(id)
            if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
                log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
            }

            return false
        })
        if errIter != nil {
            log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
        } else if !called {
            log.Error(logs.ObjectsNotFound)
        }

        if err = zipWriter.Close(); err != nil {
            log.Error(logs.CloseZipWriter, zap.Error(err))
        }
    })
}

func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
    var prm pool.PrmObjectGet
    prm.SetAddress(addr)
    if btoken != nil {
        prm.UseBearer(*btoken)
    }

    resGet, err := h.pool.GetObject(ctx, prm)
    if err != nil {
        return fmt.Errorf("get FrostFS object: %v", err)
    }

    objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
    if err != nil {
        return fmt.Errorf("zip create header: %v", err)
    }

    if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
        return fmt.Errorf("copy object payload to zip file: %v", err)
    }

    if err = resGet.Payload.Close(); err != nil {
        return fmt.Errorf("object body close error: %w", err)
    }

    if err = zipWriter.Flush(); err != nil {
        return fmt.Errorf("flush zip writer: %v", err)
    }

    return nil
}

func getZipFilePath(obj *object.Object) string {
    for _, attr := range obj.Attributes() {
        if attr.Key() == object.AttributeFilePath {
            return attr.Value()
        }
    }

    return ""
}
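For context, a sketch of a client consuming the zipped-download handler; the gateway address and the /zip/{cid}/{prefix} route layout are assumptions about the deployment, adjust them to yours.

package main

import (
    "io"
    "net/http"
    "os"
)

func main() {
    // Assumed local gateway address and prefix; "<container-id>" is a placeholder.
    resp, err := http.Get("http://localhost:8080/zip/<container-id>/photos/2023")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    out, err := os.Create("archive.zip")
    if err != nil {
        panic(err)
    }
    defer out.Close()

    // The gateway streams the archive body, so the client can copy it straight to disk.
    if _, err := io.Copy(out, resp.Body); err != nil {
        panic(err)
    }
}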
@@ -1,9 +1,10 @@
package uploader
package handler

import (
    "bytes"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    "github.com/valyala/fasthttp"
    "go.uber.org/zap"
@@ -47,7 +48,7 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]st

    result[k] = v

    l.Debug("add attribute to result object",
    l.Debug(logs.AddAttributeToResultObject,
        zap.String("key", k),
        zap.String("val", v))
})
@@ -1,6 +1,6 @@
//go:build !integration

package uploader
package handler

import (
    "testing"
internal/handler/handler.go (new file, 257 lines)
@@ -0,0 +1,257 @@
package handler

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/url"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/valyala/fasthttp"
    "go.uber.org/zap"
)

type Config interface {
    DefaultTimestamp() bool
    ZipCompression() bool
    ClientCut() bool
    BufferMaxSizeForPut() uint64
    NamespaceHeader() string
}

type Handler struct {
    log               *zap.Logger
    pool              *pool.Pool
    ownerID           *user.ID
    config            Config
    containerResolver *resolver.ContainerResolver
    tree              *tree.Tree
    cache             *cache.BucketCache
}

func New(params *utils.AppParams, config Config, tree *tree.Tree) *Handler {
    return &Handler{
        log:               params.Logger,
        pool:              params.Pool,
        ownerID:           params.Owner,
        config:            config,
        containerResolver: params.Resolver,
        tree:              tree,
        cache:             params.Cache,
    }
}

// byAddress is a wrapper for functions (e.g. request.headObject, request.receiveFile) that
// prepares the request and the object address for them.
func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
    var (
        idCnr, _ = c.UserValue("cid").(string)
        idObj, _ = c.UserValue("oid").(string)
        log      = h.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
    )

    ctx := utils.GetContextFromRequest(c)

    bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
    if err != nil {
        logAndSendBucketError(c, log, err)
        return
    }

    objID := new(oid.ID)
    if err = objID.DecodeString(idObj); err != nil {
        log.Error(logs.WrongObjectID, zap.Error(err))
        response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
        return
    }

    var addr oid.Address
    addr.SetContainer(bktInfo.CID)
    addr.SetObject(*objID)

    f(ctx, *h.newRequest(c, log), addr)
}

// byObjectName is a wrapper for functions (e.g. request.headObject, request.receiveFile) that
// prepares the request and the object address for them.
func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
    var (
        bucketname = req.UserValue("cid").(string)
        key        = req.UserValue("oid").(string)
        log        = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
    )

    ctx := utils.GetContextFromRequest(req)

    bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
    if err != nil {
        logAndSendBucketError(req, log, err)
        return
    }

    foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, key)
    if err != nil {
        if errors.Is(err, tree.ErrNodeAccessDenied) {
            response.Error(req, "Access Denied", fasthttp.StatusForbidden)
            return
        }
        log.Error(logs.GetLatestObjectVersion, zap.Error(err))

        response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
        return
    }
    if foundOid.DeleteMarker {
        log.Error(logs.ObjectWasDeleted)
        response.Error(req, "object deleted", fasthttp.StatusNotFound)
        return
    }

    var addr oid.Address
    addr.SetContainer(bktInfo.CID)
    addr.SetObject(foundOid.OID)

    f(ctx, *h.newRequest(req, log), addr)
}

// byAttribute is a wrapper similar to byAddress.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
    scid, _ := c.UserValue("cid").(string)
    key, _ := c.UserValue("attr_key").(string)
    val, _ := c.UserValue("attr_val").(string)

    key, err := url.QueryUnescape(key)
    if err != nil {
        h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Uint64("id", c.ID()), zap.Error(err))
        response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    val, err = url.QueryUnescape(val)
    if err != nil {
        h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Uint64("id", c.ID()), zap.Error(err))
        response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    log := h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))

    ctx := utils.GetContextFromRequest(c)

    bktInfo, err := h.getBucketInfo(ctx, scid, log)
    if err != nil {
        logAndSendBucketError(c, log, err)
        return
    }

    res, err := h.search(ctx, &bktInfo.CID, key, val, object.MatchStringEqual)
    if err != nil {
        log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
        response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    defer res.Close()

    buf := make([]oid.ID, 1)

    n, err := res.Read(buf)
    if n == 0 {
        if errors.Is(err, io.EOF) {
            log.Error(logs.ObjectNotFound, zap.Error(err))
            response.Error(c, "object not found", fasthttp.StatusNotFound)
            return
        }

        log.Error(logs.ReadObjectListFailed, zap.Error(err))
        response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
        return
    }

    var addrObj oid.Address
    addrObj.SetContainer(bktInfo.CID)
    addrObj.SetObject(buf[0])

    f(ctx, *h.newRequest(c, log), addrObj)
}

// resolveContainer decodes the container ID; if it's not a valid container ID,
// it tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
    cnrID := new(cid.ID)
    err := cnrID.DecodeString(containerID)
    if err != nil {
        cnrID, err = h.containerResolver.Resolve(ctx, containerID)
        if err != nil && strings.Contains(err.Error(), "not found") {
            err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
        }
    }
    return cnrID, err
}

func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
    ns, err := middleware.GetNamespace(ctx)
    if err != nil {
        return nil, err
    }

    if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil {
        return bktInfo, nil
    }

    cnrID, err := h.resolveContainer(ctx, containerName)
    if err != nil {
        return nil, err
    }

    bktInfo, err := h.readContainer(ctx, *cnrID)
    if err != nil {
        return nil, err
    }

    if err = h.cache.Put(bktInfo); err != nil {
        log.Warn(logs.CouldntPutBucketIntoCache,
            zap.String("bucket name", bktInfo.Name),
            zap.Stringer("bucket cid", bktInfo.CID),
            zap.Error(err))
    }

    return bktInfo, nil
}

func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
    prm := pool.PrmContainerGet{ContainerID: cnrID}
    res, err := h.pool.GetContainer(ctx, prm)
    if err != nil {
        return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
    }

    bktInfo := &data.BucketInfo{
        CID:  cnrID,
        Name: cnrID.EncodeToString(),
    }

    if domain := container.ReadDomain(res); domain.Name() != "" {
        bktInfo.Name = domain.Name()
        bktInfo.Zone = domain.Zone()
    }

    bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(res)

    return bktInfo, err
}
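A minimal sketch of a static Config implementation, useful to see what the handler expects from its configuration; the returned values, including the namespace header name, are placeholders, not the gateway's defaults.

// staticConfig is a hypothetical fixed-value Config, e.g. for tests.
type staticConfig struct{}

func (staticConfig) DefaultTimestamp() bool      { return false }
func (staticConfig) ZipCompression() bool        { return true }
func (staticConfig) ClientCut() bool             { return false }
func (staticConfig) BufferMaxSizeForPut() uint64 { return 3 << 20 }
func (staticConfig) NamespaceHeader() string     { return "X-Frostfs-Namespace" } // assumed header name

// Wiring it up (appParams and treeService prepared elsewhere):
// h := handler.New(appParams, staticConfig{}, treeService)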
@@ -1,13 +1,13 @@
package downloader
package handler

import (
    "context"
    "io"
    "net/http"
    "strconv"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -25,15 +25,10 @@ const (
    hdrContainerID = "X-Container-Id"
)

func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
    var start = time.Now()
    if err := tokens.StoreBearerToken(r.RequestCtx); err != nil {
        r.log.Error("could not fetch and store bearer token", zap.Error(err))
        response.Error(r.RequestCtx, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
        return
    }

    btoken := bearerToken(r.RequestCtx)
    btoken := bearerToken(ctx)

    var prm pool.PrmObjectHead
    prm.SetAddress(objectAddress)
@@ -41,13 +36,13 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
        prm.UseBearer(*btoken)
    }

    obj, err := clnt.HeadObject(r.appCtx, prm)
    obj, err := h.pool.HeadObject(ctx, prm)
    if err != nil {
        r.handleFrostFSErr(err, start)
        req.handleFrostFSErr(err, start)
        return
    }

    r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
    req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
    var contentType string
    for _, attr := range obj.Attributes() {
        key := attr.Key()
@@ -58,24 +53,24 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {

        key = utils.BackwardTransformIfSystem(key)

        r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
        req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
        switch key {
        case object.AttributeTimestamp:
            value, err := strconv.ParseInt(val, 10, 64)
            if err != nil {
                r.log.Info("couldn't parse creation date",
                req.log.Info(logs.CouldntParseCreationDate,
                    zap.String("key", key),
                    zap.String("val", val),
                    zap.Error(err))
                continue
            }
            r.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
            req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
        case object.AttributeContentType:
            contentType = val
        }
    }

    idsToResponse(&r.Response, &obj)
    idsToResponse(&req.Response, &obj)

    if len(contentType) == 0 {
        contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
@@ -86,18 +81,18 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
            prmRange.UseBearer(*btoken)
        }

        resObj, err := clnt.ObjectRange(r.appCtx, prmRange)
        resObj, err := h.pool.ObjectRange(ctx, prmRange)
        if err != nil {
            return nil, err
        }
        return &resObj, nil
    })
    if err != nil && err != io.EOF {
        r.handleFrostFSErr(err, start)
        req.handleFrostFSErr(err, start)
        return
    }
    }
    r.SetContentType(contentType)
    req.SetContentType(contentType)
}

func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
@@ -108,12 +103,20 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
    resp.Header.Set(hdrContainerID, cnrID.String())
}

// HeadByAddress handles head requests using simple cid/oid format.
func (d *Downloader) HeadByAddress(c *fasthttp.RequestCtx) {
    d.byAddress(c, request.headObject)
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
    test, _ := c.UserValue("oid").(string)
    var id oid.ID

    err := id.DecodeString(test)
    if err != nil {
        h.byObjectName(c, h.headObject)
    } else {
        h.byAddress(c, h.headObject)
    }
}

// HeadByAttribute handles attribute-based head requests.
func (d *Downloader) HeadByAttribute(c *fasthttp.RequestCtx) {
    d.byAttribute(c, request.headObject)
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
    h.byAttribute(c, h.headObject)
}
internal/handler/middleware/util.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package middleware

import (
    "context"
    "fmt"
)

// keyWrapper is a wrapper for context keys.
type keyWrapper string

const nsKey = keyWrapper("namespace")

// GetNamespace extracts the namespace from the context.
func GetNamespace(ctx context.Context) (string, error) {
    ns, ok := ctx.Value(nsKey).(string)
    if !ok {
        return "", fmt.Errorf("couldn't get namespace from context")
    }

    return ns, nil
}

// SetNamespace sets the namespace in the context.
func SetNamespace(ctx context.Context, ns string) context.Context {
    return context.WithValue(ctx, nsKey, ns)
}
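A quick round-trip of the two helpers; the namespace value is a placeholder, and in the gateway it would normally be taken from the request header named by Config.NamespaceHeader().

package main

import (
    "context"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
)

func main() {
    // Store the namespace on the context, as the gateway does before invoking a handler.
    ctx := middleware.SetNamespace(context.Background(), "tenant-1") // placeholder namespace

    ns, err := middleware.GetNamespace(ctx)
    if err != nil {
        panic(err) // returned when the context has no namespace value
    }
    fmt.Println(ns) // tenant-1
}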
@@ -1,9 +1,10 @@
package uploader
package handler

import (
    "io"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/uploader/multipart"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "go.uber.org/zap"
)

@@ -16,7 +17,8 @@ type MultipartFile interface {

func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
    // To have a custom buffer (3mb) the custom multipart reader is used.
    // https://github.com/nspcc-dev/neofs-http-gw/issues/148
    // Default reader uses 4KiB chunks, which slow down upload speed up to 400%
    // https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32
    reader := multipart.NewReader(r, boundary)

    for {
@@ -27,7 +29,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

    name := part.FormName()
    if name == "" {
        l.Debug("ignore part, empty form name")
        l.Debug(logs.IgnorePartEmptyFormName)
        continue
    }

@@ -35,7 +37,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

    // ignore multipart/form-data values
    if filename == "" {
        l.Debug("ignore part, empty filename", zap.String("form", name))
        l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))

        continue
    }
@@ -1,6 +1,6 @@
//go:build !integration

package uploader
package handler

import (
    "crypto/rand"
@@ -10,6 +10,7 @@ import (
    "os"
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap"
)
@@ -111,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

    name := part.FormName()
    if name == "" {
        l.Debug("ignore part, empty form name")
        l.Debug(logs.IgnorePartEmptyFormName)
        continue
    }

@@ -119,7 +120,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

    // ignore multipart/form-data values
    if filename == "" {
        l.Debug("ignore part, empty filename", zap.String("form", name))
        l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))

        continue
    }
internal/handler/reader.go (new file, 141 lines)
@@ -0,0 +1,141 @@
package handler

import (
    "bytes"
    "context"
    "io"
    "net/http"
    "path"
    "strconv"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "github.com/valyala/fasthttp"
    "go.uber.org/zap"
)

type readCloser struct {
    io.Reader
    io.Closer
}

// initializes io.Reader with the limited size and detects Content-Type from it.
// Returns r's error directly. Also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
    if maxSize > sizeToDetectType {
        maxSize = sizeToDetectType
    }

    buf := make([]byte, maxSize) // maybe sync-pool the slice?

    r, err := rInit(maxSize)
    if err != nil {
        return "", nil, err
    }

    n, err := r.Read(buf)
    if err != nil && err != io.EOF {
        return "", nil, err
    }

    buf = buf[:n]

    return http.DetectContentType(buf), buf, err // to not lose io.EOF
}

func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oid.Address) {
    var (
        err      error
        dis      = "inline"
        start    = time.Now()
        filename string
    )

    var prm pool.PrmObjectGet
    prm.SetAddress(objectAddress)
    if btoken := bearerToken(ctx); btoken != nil {
        prm.UseBearer(*btoken)
    }

    rObj, err := h.pool.GetObject(ctx, prm)
    if err != nil {
        req.handleFrostFSErr(err, start)
        return
    }

    // we can't close reader in this function, so how to do it?

    if req.Request.URI().QueryArgs().GetBool("download") {
        dis = "attachment"
    }

    payloadSize := rObj.Header.PayloadSize()

    req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
    var contentType string
    for _, attr := range rObj.Header.Attributes() {
        key := attr.Key()
        val := attr.Value()
        if !isValidToken(key) || !isValidValue(val) {
            continue
        }

        key = utils.BackwardTransformIfSystem(key)

        req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
        switch key {
        case object.AttributeFileName:
            filename = val
        case object.AttributeTimestamp:
            value, err := strconv.ParseInt(val, 10, 64)
            if err != nil {
                req.log.Info(logs.CouldntParseCreationDate,
                    zap.String("key", key),
                    zap.String("val", val),
                    zap.Error(err))
                continue
            }
            req.Response.Header.Set(fasthttp.HeaderLastModified,
                time.Unix(value, 0).UTC().Format(http.TimeFormat))
        case object.AttributeContentType:
            contentType = val
        }
    }

    idsToResponse(&req.Response, &rObj.Header)

    if len(contentType) == 0 {
        // determine the Content-Type from the payload head
        var payloadHead []byte

        contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
            return rObj.Payload, nil
        })
        if err != nil && err != io.EOF {
            req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
            response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
            return
        }

        // reset payload reader since a part of the data has been read
        var headReader io.Reader = bytes.NewReader(payloadHead)

        if err != io.EOF { // otherwise, we've already read full payload
            headReader = io.MultiReader(headReader, rObj.Payload)
        }

        // note: we could do with io.Reader, but SetBodyStream below closes body stream
        // if it implements io.Closer and that's useful for us.
        rObj.Payload = readCloser{headReader, rObj.Payload}
    }
    req.SetContentType(contentType)

    req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))

    req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
|
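For context, here is a minimal standalone sketch of the sniffing approach readContentType implements above: read at most the first 512 bytes (the amount http.DetectContentType inspects), detect the type, and keep the prefix so it can be replayed in front of the remaining stream. The helper name sniffPrefix and the sample payload are illustrative, not part of the gateway code.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// sniffPrefix reads up to 512 bytes from r, detects the Content-Type and
// returns a reader that replays the consumed prefix before the rest of r.
func sniffPrefix(r io.Reader) (string, io.Reader, error) {
	buf := make([]byte, 512) // http.DetectContentType never looks further than this
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return "", nil, err
	}
	buf = buf[:n]
	return http.DetectContentType(buf), io.MultiReader(bytes.NewReader(buf), r), nil
}

func main() {
	contentType, body, err := sniffPrefix(strings.NewReader("<html><body>hi</body></html>"))
	if err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(body)
	fmt.Println(contentType, len(rest)) // e.g. "text/html; charset=utf-8 28"
}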
@@ -1,6 +1,6 @@
//go:build !integration

package downloader
package handler

import (
	"io"
@@ -1,4 +1,4 @@
package uploader
package handler

import (
	"context"

@@ -8,7 +8,7 @@ import (
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"

@@ -16,9 +16,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/valyala/fasthttp"
	"go.uber.org/atomic"
	"go.uber.org/zap"
)

@@ -27,64 +25,41 @@ const (
	drainBufSize = 4096
)

// Uploader is an upload request handler.
type Uploader struct {
	appCtx            context.Context
	log               *zap.Logger
	pool              *pool.Pool
	ownerID           *user.ID
	settings          *Settings
	containerResolver *resolver.ContainerResolver
type putResponse struct {
	ObjectID    string `json:"object_id"`
	ContainerID string `json:"container_id"`
}

// Settings stores reloading parameters, so it has to provide atomic getters and setters.
type Settings struct {
	defaultTimestamp atomic.Bool
}

func (s *Settings) DefaultTimestamp() bool {
	return s.defaultTimestamp.Load()
}

func (s *Settings) SetDefaultTimestamp(val bool) {
	s.defaultTimestamp.Store(val)
}

// New creates a new Uploader using specified logger, connection pool and
// other options.
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Uploader {
	return &Uploader{
		appCtx:            ctx,
		log:               params.Logger,
		pool:              params.Pool,
		ownerID:           params.Owner,
		settings:          settings,
		containerResolver: params.Resolver,
func newPutResponse(addr oid.Address) *putResponse {
	return &putResponse{
		ObjectID:    addr.Object().EncodeToString(),
		ContainerID: addr.Container().EncodeToString(),
	}
}

func (pr *putResponse) encode(w io.Writer) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	return enc.Encode(pr)
}

// Upload handles multipart upload request.
func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
func (h *Handler) Upload(req *fasthttp.RequestCtx) {
	var (
		file       MultipartFile
		idObj      oid.ID
		addr       oid.Address
		scid, _    = c.UserValue("cid").(string)
		log        = u.log.With(zap.String("cid", scid))
		bodyStream = c.RequestBodyStream()
		scid, _    = req.UserValue("cid").(string)
		log        = h.log.With(zap.String("cid", scid))
		bodyStream = req.RequestBodyStream()
		drainBuf   = make([]byte, drainBufSize)
	)

	if err := tokens.StoreBearerToken(c); err != nil {
		log.Error("could not fetch bearer token", zap.Error(err))
		response.Error(c, "could not fetch bearer token", fasthttp.StatusBadRequest)
		return
	}
	ctx := utils.GetContextFromRequest(req)

	idCnr, err := utils.GetContainerID(u.appCtx, scid, u.containerResolver)
	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		log.Error("wrong container id", zap.Error(err))
		response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
		logAndSendBucketError(req, log, err)
		return
	}

@@ -95,37 +70,37 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
		}
		err := file.Close()
		log.Debug(
			"close temporary multipart/form file",
			logs.CloseTemporaryMultipartFormFile,
			zap.Stringer("address", addr),
			zap.String("filename", file.FileName()),
			zap.Error(err),
		)
	}()
	boundary := string(c.Request.Header.MultipartFormBoundary())
	if file, err = fetchMultipartFile(u.log, bodyStream, boundary); err != nil {
		log.Error("could not receive multipart/form", zap.Error(err))
		response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
	boundary := string(req.Request.Header.MultipartFormBoundary())
	if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
		response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}
	filtered, err := filterHeaders(u.log, &c.Request.Header)
	filtered, err := filterHeaders(h.log, &req.Request.Header)
	if err != nil {
		log.Error("could not process headers", zap.Error(err))
		response.Error(c, err.Error(), fasthttp.StatusBadRequest)
		log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
		response.Error(req, err.Error(), fasthttp.StatusBadRequest)
		return
	}

	now := time.Now()
	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
	if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
			log.Warn("could not parse client time", zap.String("Date header", string(rawHeader)), zap.Error(err))
			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
		} else {
			now = parsed
		}
	}

	if err = utils.PrepareExpirationHeader(c, u.pool, filtered, now); err != nil {
		log.Error("could not prepare expiration header", zap.Error(err))
		response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
	if err = utils.PrepareExpirationHeader(req, h.pool, filtered, now); err != nil {
		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
		response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

@@ -145,39 +120,42 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
		attributes = append(attributes, *filename)
	}
	// sets Timestamp attribute if it wasn't set from header and enabled by settings
	if _, ok := filtered[object.AttributeTimestamp]; !ok && u.settings.DefaultTimestamp() {
	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
		timestamp := object.NewAttribute()
		timestamp.SetKey(object.AttributeTimestamp)
		timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
		attributes = append(attributes, *timestamp)
	}
	id, bt := u.fetchOwnerAndBearerToken(c)

	obj := object.New()
	obj.SetContainerID(*idCnr)
	obj.SetOwnerID(id)
	obj.SetContainerID(bktInfo.CID)
	obj.SetOwnerID(h.ownerID)
	obj.SetAttributes(attributes...)

	var prm pool.PrmObjectPut
	prm.SetHeader(*obj)
	prm.SetPayload(file)
	prm.SetClientCut(h.config.ClientCut())
	prm.SetBufferMaxSize(h.config.BufferMaxSizeForPut())
	prm.WithoutHomomorphicHash(bktInfo.HomomorphicHashDisabled)

	bt := h.fetchBearerToken(ctx)
	if bt != nil {
		prm.UseBearer(*bt)
	}

	if idObj, err = u.pool.PutObject(u.appCtx, prm); err != nil {
		u.handlePutFrostFSErr(c, err)
	if idObj, err = h.pool.PutObject(ctx, prm); err != nil {
		h.handlePutFrostFSErr(req, err)
		return
	}

	addr.SetObject(idObj)
	addr.SetContainer(*idCnr)
	addr.SetContainer(bktInfo.CID)

	// Try to return the response, otherwise, if something went wrong, throw an error.
	if err = newPutResponse(addr).encode(c); err != nil {
		log.Error("could not encode response", zap.Error(err))
		response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
	if err = newPutResponse(addr).encode(req); err != nil {
		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
		response.Error(req, "could not encode response", fasthttp.StatusBadRequest)

		return
	}

@@ -194,40 +172,21 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
		}
	}
	// Report status code and content type.
	c.Response.SetStatusCode(fasthttp.StatusOK)
	c.Response.Header.SetContentType(jsonHeader)
	req.Response.SetStatusCode(fasthttp.StatusOK)
	req.Response.Header.SetContentType(jsonHeader)
}

func (u *Uploader) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
	statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)

	u.log.Error("could not store file in frostfs", logFields...)
	h.log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
	response.Error(r, msg, statusCode)
}

func (u *Uploader) fetchOwnerAndBearerToken(ctx context.Context) (*user.ID, *bearer.Token) {
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
	if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
		issuer := bearer.ResolveIssuer(*tkn)
		return &issuer, tkn
		return tkn
	}
	return u.ownerID, nil
}

type putResponse struct {
	ObjectID    string `json:"object_id"`
	ContainerID string `json:"container_id"`
}

func newPutResponse(addr oid.Address) *putResponse {
	return &putResponse{
		ObjectID:    addr.Object().EncodeToString(),
		ContainerID: addr.Container().EncodeToString(),
	}
}

func (pr *putResponse) encode(w io.Writer) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	return enc.Encode(pr)
	return nil
}
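For reference, a successful upload answers with the tab-indented JSON document written by putResponse.encode into the response body; the object and container IDs below are placeholders, not real identifiers:

{
	"object_id": "8N3o7Dtr6T1xteCt6eRwhpmJ7JhME58Hyu1dvaswuTDd",
	"container_id": "HXSaMJXk2g8C14ht8HSi7BBaiYZ1HeWh2xnWPGQCg4H6"
}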
71
internal/handler/utils.go
Normal file

@@ -0,0 +1,71 @@
package handler

import (
	"context"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

type request struct {
	*fasthttp.RequestCtx
	log *zap.Logger
}

func (r *request) handleFrostFSErr(err error, start time.Time) {
	logFields := []zap.Field{
		zap.Stringer("elapsed", time.Since(start)),
		zap.Error(err),
	}
	statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
	logFields = append(logFields, additionalFields...)

	r.log.Error(logs.CouldNotReceiveObject, logFields...)
	response.Error(r.RequestCtx, msg, statusCode)
}

func bearerToken(ctx context.Context) *bearer.Token {
	if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
		return tkn
	}
	return nil
}

func isValidToken(s string) bool {
	for _, c := range s {
		if c <= ' ' || c > 127 {
			return false
		}
		if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
			return false
		}
	}
	return true
}

func isValidValue(s string) bool {
	for _, c := range s {
		// HTTP specification allows for more technically, but we don't want to escape things.
		if c < ' ' || c > 127 || c == '"' {
			return false
		}
	}
	return true
}

func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
	log.Error(logs.CouldntGetBucket, zap.Error(err))

	if client.IsErrContainerNotFound(err) {
		response.Error(c, "Not Found", fasthttp.StatusNotFound)
		return
	}
	response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}
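A small test sketch (assuming it lives next to these helpers in the handler package) showing which attribute keys and values the validators above accept; the sample inputs are illustrative:

package handler

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAttributeValidation(t *testing.T) {
	// Keys must be ASCII header tokens: no spaces, separators or control characters.
	require.True(t, isValidToken("FileName"))
	require.False(t, isValidToken("File Name")) // space is rejected
	require.False(t, isValidToken("File{Name")) // '{' is a separator character

	// Values may contain spaces, but no control characters, double quotes or non-ASCII.
	require.True(t, isValidValue("plain text value"))
	require.False(t, isValidValue("bad\"quote"))
	require.False(t, isValidValue("значение")) // non-ASCII is rejected
}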
80
internal/logs/logs.go
Normal file

@@ -0,0 +1,80 @@
package logs

const (
	CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
	CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
	CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
	WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
	GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
	ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
	CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
	ObjectNotFound = "object not found" // Error in ../../downloader/download.go
	ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
	FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
	IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
	ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
	CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
	ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
	ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
	CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
	CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
	IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
	IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
	CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
	CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
	CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
	CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
	CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
	CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
	CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
	AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
	MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
	StartingApplication = "starting application" // Info in ../../app.go
	StartingServer = "starting server" // Info in ../../app.go
	ListenAndServe = "listen and serve" // Fatal in ../../app.go
	ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
	FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
	SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
	FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
	LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
	FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
	FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
	AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
	AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
	AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
	AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
	Request = "request" // Info in ../../app.go
	CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
	FailedToAddServer = "failed to add server" // Warn in ../../app.go
	AddServer = "add server" // Info in ../../app.go
	NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
	FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
	TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
	UsingCredentials = "using credentials" // Info in ../../settings.go
	FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
	FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
	FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
	FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
	AddedStoragePeer = "added storage peer" // Info in ../../settings.go
	CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
	CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
	InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
	FailedToUnescapeQuery = "failed to unescape query"
	ServerReconnecting = "reconnecting server..."
	ServerReconnectedSuccessfully = "server reconnected successfully"
	ServerReconnectFailed = "failed to reconnect server"
	WarnDuplicateAddress = "duplicate address"
)
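These constants only centralize the log messages; call sites keep attaching structured fields, as the handler code above does. A short illustrative sketch of a typical call site (the helper name logReceiveFailure is hypothetical):

package handler

import (
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"go.uber.org/zap"
)

// logReceiveFailure shows how a shared message constant is combined with
// structured zap fields when an object download fails.
func logReceiveFailure(log *zap.Logger, start time.Time, err error) {
	log.Error(logs.CouldNotReceiveObject,
		zap.Stringer("elapsed", time.Since(start)),
		zap.Error(err))
}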
150
metrics/desc.go
Normal file

@@ -0,0 +1,150 @@
package metrics

import (
	"encoding/json"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

var appMetricsDesc = map[string]map[string]Description{
	poolSubsystem: {
		overallErrorsMetric: Description{
			Type:      dto.MetricType_GAUGE,
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      overallErrorsMetric,
			Help:      "Total number of errors in pool",
		},
		overallNodeErrorsMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      poolSubsystem,
			Name:           overallNodeErrorsMetric,
			Help:           "Total number of errors for connection in pool",
			VariableLabels: []string{"node"},
		},
		overallNodeRequestsMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      poolSubsystem,
			Name:           overallNodeRequestsMetric,
			Help:           "Total number of requests to specific node in pool",
			VariableLabels: []string{"node"},
		},
		currentErrorMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      poolSubsystem,
			Name:           currentErrorMetric,
			Help:           "Number of errors on current connections that will be reset after the threshold",
			VariableLabels: []string{"node"},
		},
		avgRequestDurationMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      poolSubsystem,
			Name:           avgRequestDurationMetric,
			Help:           "Average request duration (in milliseconds) for specific method on node in pool",
			VariableLabels: []string{"node", "method"},
		},
	},
	stateSubsystem: {
		healthMetric: Description{
			Type:      dto.MetricType_GAUGE,
			Namespace: namespace,
			Subsystem: stateSubsystem,
			Name:      healthMetric,
			Help:      "Current HTTP gateway state",
		},
		versionInfoMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      stateSubsystem,
			Name:           versionInfoMetric,
			Help:           "Version of current FrostFS HTTP Gate instance",
			VariableLabels: []string{"version"},
		},
	},
	serverSubsystem: {
		healthMetric: Description{
			Type:           dto.MetricType_GAUGE,
			Namespace:      namespace,
			Subsystem:      serverSubsystem,
			Name:           healthMetric,
			Help:           "HTTP Server endpoint health",
			VariableLabels: []string{"endpoint"},
		},
	},
}

type Description struct {
	Type           dto.MetricType
	Namespace      string
	Subsystem      string
	Name           string
	Help           string
	ConstantLabels prometheus.Labels
	VariableLabels []string
}

func (d *Description) MarshalJSON() ([]byte, error) {
	return json.Marshal(&struct {
		Type           string            `json:"type"`
		FQName         string            `json:"name"`
		Help           string            `json:"help"`
		ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
		VariableLabels []string          `json:"variable_labels,omitempty"`
	}{
		Type:           d.Type.String(),
		FQName:         d.BuildFQName(),
		Help:           d.Help,
		ConstantLabels: d.ConstantLabels,
		VariableLabels: d.VariableLabels,
	})
}

func (d *Description) BuildFQName() string {
	return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
}

// DescribeAll returns descriptions for metrics.
func DescribeAll() []Description {
	var list []Description
	for _, m := range appMetricsDesc {
		for _, description := range m {
			list = append(list, description)
		}
	}

	return list
}

func newOpts(description Description) prometheus.Opts {
	return prometheus.Opts{
		Namespace:   description.Namespace,
		Subsystem:   description.Subsystem,
		Name:        description.Name,
		Help:        description.Help,
		ConstLabels: description.ConstantLabels,
	}
}

func mustNewGauge(description Description) prometheus.Gauge {
	if description.Type != dto.MetricType_GAUGE {
		panic("invalid metric type")
	}
	return prometheus.NewGauge(
		prometheus.GaugeOpts(newOpts(description)),
	)
}

func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
	if description.Type != dto.MetricType_GAUGE {
		panic("invalid metric type")
	}
	return prometheus.NewGaugeVec(
		prometheus.GaugeOpts(newOpts(description)),
		description.VariableLabels,
	)
}
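A short sketch (same metrics package assumed; metric name, help text and label value are hypothetical) of how a Description drives gauge construction and registration through the helpers above:

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// exampleEndpointGauge shows the intended flow: declare a Description,
// build the collector with mustNewGaugeVec, register it and set a sample.
func exampleEndpointGauge() *prometheus.GaugeVec {
	desc := Description{
		Type:           dto.MetricType_GAUGE,
		Namespace:      namespace,
		Subsystem:      serverSubsystem,
		Name:           "example_endpoint_up", // illustrative metric name
		Help:           "Illustrative per-endpoint gauge",
		VariableLabels: []string{"endpoint"},
	}

	gauge := mustNewGaugeVec(desc)
	prometheus.MustRegister(gauge)
	gauge.WithLabelValues("0.0.0.0:8080").Set(1)
	return gauge
}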
37
metrics/desc_test.go
Normal file

@@ -0,0 +1,37 @@
//go:build dump_metrics

package metrics

import (
	"encoding/json"
	"flag"
	"os"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/stretchr/testify/require"
)

type mock struct{}

func (m mock) Statistic() pool.Statistic {
	return pool.Statistic{}
}

var metricsPath = flag.String("out", "", "File to export http gateway metrics to.")

func TestDescribeAll(t *testing.T) {
	// to check correct metrics type mapping
	_ = NewGateMetrics(mock{})

	flag.Parse()

	require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")

	desc := DescribeAll()
	data, err := json.Marshal(desc)
	require.NoError(t, err)

	err = os.WriteFile(*metricsPath, data, 0644)
	require.NoError(t, err)
}
@@ -13,7 +13,23 @@ const (
	namespace      = "frostfs_http_gw"
	stateSubsystem = "state"
	poolSubsystem  = "pool"
	serverSubsystem = "server"
)

const (
	healthMetric      = "health"
	versionInfoMetric = "version_info"
)

const (
	overallErrorsMetric       = "overall_errors"
	overallNodeErrorsMetric   = "overall_node_errors"
	overallNodeRequestsMetric = "overall_node_requests"
	currentErrorMetric        = "current_errors"
	avgRequestDurationMetric  = "avg_request_duration"
)

const (
	methodGetBalance   = "get_balance"
	methodPutContainer = "put_container"
	methodGetContainer = "get_container"

@@ -41,17 +57,27 @@ const (
	HealthStatusShuttingDown HealthStatus = 3
)

type StatisticScraper interface {
	Statistic() pool.Statistic
}

type serverMetrics struct {
	endpointHealth *prometheus.GaugeVec
}

type GateMetrics struct {
	stateMetrics
	poolMetricsCollector
	serverMetrics
}

type stateMetrics struct {
	healthCheck prometheus.Gauge
	versionInfo *prometheus.GaugeVec
}

type poolMetricsCollector struct {
	pool                *pool.Pool
	scraper             StatisticScraper
	overallErrors       prometheus.Gauge
	overallNodeErrors   *prometheus.GaugeVec
	overallNodeRequests *prometheus.GaugeVec

@@ -60,113 +86,62 @@ type poolMetricsCollector struct {
}

// NewGateMetrics creates new metrics for http gate.
func NewGateMetrics(p *pool.Pool) *GateMetrics {
func NewGateMetrics(p StatisticScraper) *GateMetrics {
	stateMetric := newStateMetrics()
	stateMetric.register()

	poolMetric := newPoolMetricsCollector(p)
	poolMetric.register()

	serverMetric := newServerMetrics()
	serverMetric.register()

	return &GateMetrics{
		stateMetrics:         *stateMetric,
		poolMetricsCollector: *poolMetric,
		serverMetrics:        *serverMetric,
	}
}

func (g *GateMetrics) Unregister() {
	g.stateMetrics.unregister()
	prometheus.Unregister(&g.poolMetricsCollector)
	g.serverMetrics.unregister()
}

func newStateMetrics() *stateMetrics {
	return &stateMetrics{
		healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: stateSubsystem,
			Name:      "health",
			Help:      "Current HTTP gateway state",
		}),
		healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
		versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
	}
}

func (m stateMetrics) register() {
	prometheus.MustRegister(m.healthCheck)
	prometheus.MustRegister(m.versionInfo)
}

func (m stateMetrics) unregister() {
	prometheus.Unregister(m.healthCheck)
	prometheus.Unregister(m.versionInfo)
}

func (m stateMetrics) SetHealth(s HealthStatus) {
	m.healthCheck.Set(float64(s))
}

func newPoolMetricsCollector(p *pool.Pool) *poolMetricsCollector {
	overallErrors := prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      "overall_errors",
			Help:      "Total number of errors in pool",
		},
	)

	overallNodeErrors := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      "overall_node_errors",
			Help:      "Total number of errors for connection in pool",
		},
		[]string{
			"node",
		},
	)

	overallNodeRequests := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      "overall_node_requests",
			Help:      "Total number of requests to specific node in pool",
		},
		[]string{
			"node",
		},
	)

	currentErrors := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      "current_errors",
			Help:      "Number of errors on current connections that will be reset after the threshold",
		},
		[]string{
			"node",
		},
	)

	requestsDuration := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: poolSubsystem,
			Name:      "avg_request_duration",
			Help:      "Average request duration (in milliseconds) for specific method on node in pool",
		},
		[]string{
			"node",
			"method",
		},
	)
func (m stateMetrics) SetVersion(ver string) {
	m.versionInfo.WithLabelValues(ver).Set(1)
}

func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
	return &poolMetricsCollector{
		pool:                p,
		overallErrors:       overallErrors,
		overallNodeErrors:   overallNodeErrors,
		overallNodeRequests: overallNodeRequests,
		currentErrors:       currentErrors,
		requestDuration:     requestsDuration,
		scraper:             p,
		overallErrors:       mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]),
		overallNodeErrors:   mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]),
		overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
		currentErrors:       mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
		requestDuration:     mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
	}
}

@@ -192,7 +167,7 @@ func (m *poolMetricsCollector) register() {
}

func (m *poolMetricsCollector) updateStatistic() {
	stat := m.pool.Statistic()
	stat := m.scraper.Statistic()

	m.overallNodeErrors.Reset()
	m.overallNodeRequests.Reset()

@@ -228,6 +203,28 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
	m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
}

func newServerMetrics() *serverMetrics {
	return &serverMetrics{
		endpointHealth: mustNewGaugeVec(appMetricsDesc[serverSubsystem][healthMetric]),
	}
}

func (m serverMetrics) register() {
	prometheus.MustRegister(m.endpointHealth)
}

func (m serverMetrics) unregister() {
	prometheus.Unregister(m.endpointHealth)
}

func (m serverMetrics) MarkHealthy(endpoint string) {
	m.endpointHealth.WithLabelValues(endpoint).Set(float64(1))
}

func (m serverMetrics) MarkUnhealthy(endpoint string) {
	m.endpointHealth.WithLabelValues(endpoint).Set(float64(0))
}

// NewPrometheusService creates a new service for gathering prometheus metrics.
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
	if log == nil {
@@ -4,6 +4,7 @@ import (
	"context"
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"go.uber.org/zap"
)

@@ -24,21 +25,24 @@ type Config struct {
// Start runs http service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
	if ms.enabled {
		ms.log.Info("service is running", zap.String("endpoint", ms.Addr))
		ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
		err := ms.ListenAndServe()
		if err != nil && err != http.ErrServerClosed {
			ms.log.Warn("service couldn't start on configured port")
			ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
		}
	} else {
		ms.log.Info("service hasn't started since it's disabled")
		ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
	}
}

// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
	ms.log.Info("shutting down service", zap.String("endpoint", ms.Addr))
	ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
	err := ms.Shutdown(ctx)
	if err != nil {
		ms.log.Panic("can't shut down service")
		ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
		if err = ms.Close(); err != nil {
			ms.log.Panic(logs.CantShutDownService, zap.Error(err))
		}
	}
}
@@ -6,6 +6,7 @@ import (
	"fmt"
	"sync"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"

@@ -28,9 +29,14 @@ type FrostFS interface {
	SystemDNS(context.Context) (string, error)
}

type Settings interface {
	FormContainerZone(ns string) (zone string, isDefault bool)
}

type Config struct {
	FrostFS    FrostFS
	RPCAddress string
	Settings   Settings
}

type ContainerResolver struct {

@@ -135,29 +141,43 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
	switch name {
	case DNSResolver:
		return NewDNSResolver(cfg.FrostFS)
		return NewDNSResolver(cfg.FrostFS, cfg.Settings)
	case NNSResolver:
		return NewNNSResolver(cfg.RPCAddress)
		return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
	default:
		return nil, fmt.Errorf("unknown resolver: %s", name)
	}
}

func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
	if frostFS == nil {
		return nil, fmt.Errorf("pool must not be nil for DNS resolver")
	}
	if settings == nil {
		return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
	}

	var dns ns.DNS

	resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
		domain, err := frostFS.SystemDNS(ctx)
		var err error

		namespace, err := middleware.GetNamespace(ctx)
		if err != nil {
			return nil, err
		}

		zone, isDefault := settings.FormContainerZone(namespace)
		if isDefault {
			zone, err = frostFS.SystemDNS(ctx)
			if err != nil {
				return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
			}
		}

		domain = name + "." + domain
		domain := name + "." + zone
		cnrID, err := dns.ResolveContainerName(domain)

		if err != nil {
			return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
		}

@@ -170,17 +190,32 @@ func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
	}, nil
}

func NewNNSResolver(rpcAddress string) (*Resolver, error) {
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
	if rpcAddress == "" {
		return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
	}
	if settings == nil {
		return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
	}

	var nns ns.NNS

	if err := nns.Dial(rpcAddress); err != nil {
		return nil, fmt.Errorf("could not dial nns: %w", err)
	}

	resolveFunc := func(_ context.Context, name string) (*cid.ID, error) {
	resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
		var d container.Domain
		d.SetName(name)

		namespace, err := middleware.GetNamespace(ctx)
		if err != nil {
			return nil, err
		}

		zone, _ := settings.FormContainerZone(namespace)
		d.SetZone(zone)

		cnrID, err := nns.ResolveContainerDomain(d)
		if err != nil {
			return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
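A sketch of how a caller might satisfy the new Settings dependency; the zone mapping and the RPC endpoint below are illustrative assumptions, not the gateway's actual configuration:

package main

import (
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
)

// staticSettings is a hypothetical Settings implementation: the empty (root)
// namespace keeps the default zone, any other namespace maps to "<ns>.ns".
type staticSettings struct{}

func (staticSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
	if ns == "" {
		return "container", true
	}
	return ns + ".ns", false
}

func main() {
	// NewNNSResolver now requires both an RPC endpoint and Settings;
	// the endpoint here is a placeholder and dialing it would fail at runtime.
	r, err := resolver.NewNNSResolver("http://morph-chain.example:30333", staticSettings{})
	if err != nil {
		panic(err)
	}
	_ = r
}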
@@ -13,9 +13,11 @@ import (

type fromHandler = func(h *fasthttp.RequestHeader) []byte

type ctxKey string

const (
	bearerTokenHdr = "Bearer"
	bearerTokenKey = "__context_bearer_token_key"
	bearerTokenKey ctxKey = "__context_bearer_token_key"
)

// BearerToken usage:

@@ -48,16 +50,15 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {
	return auth
}

// StoreBearerToken extracts a bearer token from the header or cookie and stores
// it in the request context.
func StoreBearerToken(ctx *fasthttp.RequestCtx) error {
	tkn, err := fetchBearerToken(ctx)
// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
// it in the application context.
func StoreBearerTokenAppCtx(ctx context.Context, req *fasthttp.RequestCtx) (context.Context, error) {
	tkn, err := fetchBearerToken(req)
	if err != nil {
		return err
		return nil, err
	}
	// This is an analog of context.WithValue.
	ctx.SetUserValue(bearerTokenKey, tkn)
	return nil
	newCtx := context.WithValue(ctx, bearerTokenKey, tkn)
	return newCtx, nil
}

// LoadBearerToken returns a bearer token stored in the context given (if it's

@@ -3,6 +3,7 @@
package tokens

import (
	"context"
	"encoding/base64"
	"testing"

@@ -150,13 +151,14 @@ func Test_checkAndPropagateBearerToken(t *testing.T) {
	t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
	require.NotEmpty(t, t64)

	ctx := makeTestRequest(t64, "")
	req := makeTestRequest(t64, "")

	// Expect to see the token within the context.
	require.NoError(t, StoreBearerToken(ctx))
	appCtx, err := StoreBearerTokenAppCtx(context.Background(), req)
	require.NoError(t, err)

	// Expect to see the same token without errors.
	actual, err := LoadBearerToken(ctx)
	actual, err := LoadBearerToken(appCtx)
	require.NoError(t, err)
	require.Equal(t, tkn, actual)
}
189
tree/tree.go
Normal file

@@ -0,0 +1,189 @@
package tree

import (
	"context"
	"fmt"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

type (
	Tree struct {
		service ServiceClient
	}

	// ServiceClient is a client to interact with tree service.
	// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
	ServiceClient interface {
		GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
	}

	treeNode struct {
		ObjID oid.ID
		Meta  map[string]string
	}

	GetNodesParams struct {
		CnrID      cid.ID
		TreeID     string
		Path       []string
		Meta       []string
		LatestOnly bool
		AllAttrs   bool
	}
)

var (
	// ErrNodeNotFound is returned from ServiceClient in case of not found error.
	ErrNodeNotFound = layer.ErrNodeNotFound

	// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
	ErrNodeAccessDenied = layer.ErrNodeAccessDenied
)

const (
	FileNameKey = "FileName"
)

const (
	oidKV = "OID"

	// keys for delete marker nodes.
	isDeleteMarkerKV = "IsDeleteMarker"

	// versionTree -- ID of a tree with object versions.
	versionTree = "version"

	separator = "/"
)

// NewTree creates instance of Tree using provided address and create grpc connection.
func NewTree(service ServiceClient) *Tree {
	return &Tree{service: service}
}

type Meta interface {
	GetKey() string
	GetValue() []byte
}

type NodeResponse interface {
	GetMeta() []Meta
	GetTimestamp() uint64
}

func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
	treeNode := &treeNode{
		Meta: make(map[string]string, len(nodeInfo.GetMeta())),
	}

	for _, kv := range nodeInfo.GetMeta() {
		switch kv.GetKey() {
		case oidKV:
			if err := treeNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
				return nil, err
			}
		default:
			treeNode.Meta[kv.GetKey()] = string(kv.GetValue())
		}
	}

	return treeNode, nil
}

func (n *treeNode) Get(key string) (string, bool) {
	value, ok := n.Meta[key]
	return value, ok
}

func (n *treeNode) FileName() (string, bool) {
	value, ok := n.Meta[FileNameKey]
	return value, ok
}

func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
	treeNode, err := newTreeNode(node)
	if err != nil {
		return nil, fmt.Errorf("invalid tree node: %w", err)
	}

	return newNodeVersionFromTreeNode(treeNode), nil
}

func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)

	version := &api.NodeVersion{
		BaseNodeVersion: api.BaseNodeVersion{
			OID: treeNode.ObjID,
		},
		DeleteMarker: isDeleteMarker,
	}

	return version
}

func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
	meta := []string{oidKV, isDeleteMarkerKV}
	path := pathFromName(objectName)

	p := &GetNodesParams{
		CnrID:      *cnrID,
		TreeID:     versionTree,
		Path:       path,
		Meta:       meta,
		LatestOnly: false,
		AllAttrs:   false,
	}
	nodes, err := c.service.GetNodes(ctx, p)
	if err != nil {
		return nil, err
	}

	latestNode, err := getLatestNode(nodes)
	if err != nil {
		return nil, err
	}

	return newNodeVersion(latestNode)
}

func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
	var (
		maxCreationTime uint64
		targetIndexNode = -1
	)

	for i, node := range nodes {
		currentCreationTime := node.GetTimestamp()
		if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
			maxCreationTime = currentCreationTime
			targetIndexNode = i
		}
	}

	if targetIndexNode == -1 {
		return nil, layer.ErrNodeNotFound
	}

	return nodes[targetIndexNode], nil
}

func checkExistOID(meta []Meta) bool {
	for _, kv := range meta {
		if kv.GetKey() == "OID" {
			return true
		}
	}

	return false
}

// pathFromName splits name by '/'.
func pathFromName(objectName string) []string {
	return strings.Split(objectName, separator)
}
143
tree/tree_test.go
Normal file

@@ -0,0 +1,143 @@
package tree

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type nodeMeta struct {
	key   string
	value []byte
}

func (m nodeMeta) GetKey() string {
	return m.key
}

func (m nodeMeta) GetValue() []byte {
	return m.value
}

type nodeResponse struct {
	meta      []nodeMeta
	timestamp uint64
}

func (n nodeResponse) GetTimestamp() uint64 {
	return n.timestamp
}

func (n nodeResponse) GetMeta() []Meta {
	res := make([]Meta, len(n.meta))
	for i, value := range n.meta {
		res[i] = value
	}
	return res
}

func TestGetLatestNode(t *testing.T) {
	for _, tc := range []struct {
		name        string
		nodes       []NodeResponse
		exceptedOID string
		error       bool
	}{
		{
			name:  "empty",
			nodes: []NodeResponse{},
			error: true,
		},
		{
			name: "one node of the object version",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte("oid1"),
						},
					},
				},
			},
			exceptedOID: "oid1",
		},
		{
			name: "one node of the object version and one node of the secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte("oid1"),
						},
					},
				},
			},
			exceptedOID: "oid1",
		},
		{
			name: "all nodes represent a secondary object",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 5,
					meta:      []nodeMeta{},
				},
			},
			error: true,
		},
		{
			name: "several nodes of different types and with different timestamp",
			nodes: []NodeResponse{
				nodeResponse{
					timestamp: 1,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte("oid1"),
						},
					},
				},
				nodeResponse{
					timestamp: 3,
					meta:      []nodeMeta{},
				},
				nodeResponse{
					timestamp: 4,
					meta: []nodeMeta{
						{
							key:   oidKV,
							value: []byte("oid2"),
						},
					},
				},
				nodeResponse{
					timestamp: 6,
					meta:      []nodeMeta{},
				},
			},
			exceptedOID: "oid2",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			actualNode, err := getLatestNode(tc.nodes)
			if tc.error {
				require.Error(t, err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, tc.exceptedOID, string(actualNode.GetMeta()[0].GetValue()))
		})
	}
}
@@ -1,6 +1,7 @@
package utils

import (
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

@@ -12,4 +13,5 @@ type AppParams struct {
	Pool     *pool.Pool
	Owner    *user.ID
	Resolver *resolver.ContainerResolver
	Cache    *cache.BucketCache
}
83
utils/tracing.go
Normal file

@@ -0,0 +1,83 @@
package utils

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"github.com/valyala/fasthttp"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

type httpCarrier struct {
	r *fasthttp.RequestCtx
}

func (c *httpCarrier) Get(key string) string {
	bytes := c.r.Request.Header.Peek(key)
	if len(bytes) == 0 {
		return ""
	}
	return string(bytes)
}

func (c *httpCarrier) Set(key string, value string) {
	c.r.Response.Header.Set(key, value)
}

func (c *httpCarrier) Keys() []string {
	dict := make(map[string]interface{})
	c.r.Request.Header.VisitAll(
		func(key, value []byte) {
			dict[string(key)] = true
		},
	)
	c.r.Response.Header.VisitAll(
		func(key, value []byte) {
			dict[string(key)] = true
		},
	)
	result := make([]string, 0, len(dict))
	for key := range dict {
		result = append(result, key)
	}
	return result
}

func extractHTTPTraceInfo(ctx context.Context, req *fasthttp.RequestCtx) context.Context {
	if req == nil {
		return ctx
	}
	carrier := &httpCarrier{r: req}
	return tracing.Propagator.Extract(ctx, carrier)
}

// SetHTTPTraceInfo saves trace headers to response.
func SetHTTPTraceInfo(ctx context.Context, span trace.Span, req *fasthttp.RequestCtx) {
	if req == nil {
		return
	}
	if err := req.Err(); err != nil {
		span.SetStatus(codes.Error, err.Error())
	}
	span.SetAttributes(
		semconv.HTTPStatusCode(req.Response.StatusCode()),
	)
	carrier := &httpCarrier{r: req}
	tracing.Propagator.Inject(ctx, carrier)
}

// StartHTTPServerSpan starts root HTTP server span.
func StartHTTPServerSpan(ctx context.Context, req *fasthttp.RequestCtx, operationName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	ctx = extractHTTPTraceInfo(ctx, req)
	opts = append(opts, trace.WithAttributes(
		attribute.String("http.client_address", req.RemoteAddr().String()),
		attribute.String("http.path", string(req.Path())),
		semconv.HTTPMethod(string(req.Method())),
		semconv.RPCService("frostfs-http-gw"),
		attribute.String("http.query", req.QueryArgs().String()),
	), trace.WithSpanKind(trace.SpanKindServer))
	return tracing.StartSpanFromContext(ctx, operationName, opts...)
}
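A sketch of how these helpers are meant to bracket a fasthttp handler (the wrapper name and operation name are illustrative): extract incoming trace headers into a server span, stash the span's context on the request, run the handler, then record the status and inject the response headers.

package main

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"github.com/valyala/fasthttp"
)

// traced wraps a fasthttp handler with a root server span.
func traced(name string, h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		ctx, span := utils.StartHTTPServerSpan(context.Background(), req, name)
		defer span.End()

		utils.SetContextToRequest(ctx, req)
		h(req)

		utils.SetHTTPTraceInfo(ctx, span, req)
	}
}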
@@ -4,22 +4,10 @@ import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"github.com/valyala/fasthttp"
)

// GetContainerID decode container id, if it's not a valid container id
// then trey to resolve name using provided resolver.
func GetContainerID(ctx context.Context, containerID string, resolver *resolver.ContainerResolver) (*cid.ID, error) {
	cnrID := new(cid.ID)
	err := cnrID.DecodeString(containerID)
	if err != nil {
		cnrID, err = resolver.Resolve(ctx, containerID)
	}
	return cnrID, err
}

type EpochDurations struct {
	CurrentEpoch uint64
	MsPerBlock   int64

@@ -43,3 +31,13 @@ func GetEpochDurations(ctx context.Context, p *pool.Pool) (*EpochDurations, erro
	}
	return res, nil
}

// SetContextToRequest adds new context to fasthttp request.
func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) {
	c.SetUserValue("context", ctx)
}

// GetContextFromRequest returns main context from fasthttp request context.
func GetContextFromRequest(c *fasthttp.RequestCtx) context.Context {
	return c.UserValue("context").(context.Context)
}
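These two helpers carry the per-request context through fasthttp's user-value map under the "context" key. A minimal round-trip sketch (the stored request-id value and the ctxKey type are illustrative):

package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"github.com/valyala/fasthttp"
)

type ctxKey string

func main() {
	// A bare RequestCtx is enough to demonstrate the round trip.
	var req fasthttp.RequestCtx

	ctx := context.WithValue(context.Background(), ctxKey("request-id"), "abc123")
	utils.SetContextToRequest(ctx, &req)

	// ...later, inside code that only receives *fasthttp.RequestCtx:
	got := utils.GetContextFromRequest(&req)
	fmt.Println(got.Value(ctxKey("request-id"))) // abc123
}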