Compare commits

...

77 commits

Author SHA1 Message Date
6f64557a4b [#60] Use gRPC interceptor from observability package
The previous SDK implementation had an implicit gRPC interceptor
for tracing. Now the pool constructors accept arbitrary dial options,
so the gateway must pass the tracing interceptors from the
observability package explicitly.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-14 12:22:44 +03:00
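
Since the interceptor is no longer wired in by the SDK, the gateway has to build the dial options itself. A minimal Go sketch of the idea, assuming hypothetical `unary`/`stream` interceptors obtained from the observability package (the real constructor names and the exact pool option may differ):

```go
// Sketch only: the interceptor values are assumed to come from the
// observability package; the wiring is illustrative, not the exact SDK API.
package gwsketch

import "google.golang.org/grpc"

// tracingDialOptions bundles client-side tracing interceptors into gRPC dial
// options that a pool constructor accepting arbitrary dial options can take.
func tracingDialOptions(
	unary grpc.UnaryClientInterceptor, // assumed: provided by the observability package
	stream grpc.StreamClientInterceptor, // assumed: provided by the observability package
) []grpc.DialOption {
	return []grpc.DialOption{
		grpc.WithChainUnaryInterceptor(unary),
		grpc.WithChainStreamInterceptor(stream),
	}
}
```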
2ccb43bc8c [#59] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 10:06:13 +03:00
202ef5cc54 [#59] Drop sync-tree
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 10:06:10 +03:00
1dfbe36eca [#59] Use tree pool from SDK
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-06-09 09:50:04 +03:00
5be537321b [#2] Keep issue templates in .github
The only thing supported in the .forgejo dir is workflows.
It might be useful to keep the .github dir for repo mirrors.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
2c706bec71 [#2] Update CONTRIBUTING
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
d0f6baa44b [#2] Return shields
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
d7dbff1255 [#2] Use latest AIO image in integration test
The latest version provides more stability during the startup stage.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
61d152ee6a [#2] Update CHANGELOG
Replace the changelog history from before the fork
with a link to the fork source.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
9765adf844 [#2] Update CODEOWNERS
The Codeowners feature isn't supported yet in Forgejo or Gitea;
tracking github.com/go-gitea/gitea/pull/24910.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-07 15:36:16 +00:00
b8944adb65 [#55] Add govulncheck in CI
Check dependency issues on every PR.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-06 17:43:23 +03:00
f24f39ec92 [#56] Increase golangci-lint timeout
For slow actions runners.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-06-05 13:49:09 +03:00
f17f6747c4 [#54] Fix linter warnings
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 20:18:00 +03:00
01b9df83e6 [#54] Update actions for forgejo ci runner
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 19:41:44 +03:00
c4fe718556 [#54] Move files from .github to .forgejo
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-31 19:15:00 +03:00
cdaab4feab [#44] add tracing support refactoring
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2023-05-31 15:09:47 +03:00
8a22991326 [#44] add tracing support for upload
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2023-05-29 14:15:54 +03:00
1776db289c [#50] go.mod: Update min go version to 1.19
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-23 17:48:46 +03:00
1f702ad2d8 [#1] Update comments
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-19 07:29:52 +00:00
adb95642d4 [#1] Use FrostFS AIO image in integration test
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-05-19 07:29:52 +00:00
3844ac83e6 [#35] Update prometheus to v1.15.0
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-05-18 17:26:40 +03:00
f7784db146 [#40] Update forming bearer token instruction
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-15 14:56:42 +00:00
8c3c3782f5 [#30] add object name resolving
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2023-05-12 12:52:57 +03:00
37dbb29535 [#45] Update SDK to fix impersonated token
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-05-05 18:15:09 +03:00
a945cdd42c [#20] get/head: Add tracing support
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
2023-05-04 16:03:30 +03:00
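
For context, client-side tracing of the get/head and upload paths usually follows the standard OpenTelemetry pattern sketched below; the tracer name and wrapper are illustrative, and the gateway's real helpers live in its observability dependency and may differ:

```go
// Illustrative OpenTelemetry span wrapper; not the gateway's actual helper.
package tracesketch

import (
	"context"

	"go.opentelemetry.io/otel"
)

// withSpan runs fn inside a named span so get/head and upload handlers show
// up as separate operations in a trace.
func withSpan(ctx context.Context, name string, fn func(ctx context.Context) error) error {
	ctx, span := otel.Tracer("frostfs-http-gw").Start(ctx, name)
	defer span.End()
	return fn(ctx)
}
```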
ad05f1eb82 [#40] Support impersonate bearer token
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-27 17:04:01 +03:00
9eeaf44163 [#39] Enabling gate metrics
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-26 09:56:38 +03:00
15b65b521b [#29] metrics: Use map for constant labels
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:06:44 +03:00
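
The Prometheus Go client models constant labels as a plain map, which is presumably what this commit switches to; a small hedged example (metric and label values are made up):

```go
// Example only: constant labels passed as a map to the Prometheus Go client.
package metricsketch

import "github.com/prometheus/client_golang/prometheus"

var exampleGauge = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace:   "frostfs_http_gw",
	Name:        "example_metric",
	Help:        "Example metric with constant labels.",
	ConstLabels: prometheus.Labels{"version": "v0.26.0"}, // prometheus.Labels is a map[string]string
})
```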
385f336a17 [#29] Add type to metrics description
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:06:43 +03:00
6c6fd0e9a5 [#29] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:06:07 +03:00
cc37c34396 [#29] metrics: Support dump descriptions
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-20 11:06:06 +03:00
959213520e [#32] Update health metric values
Now values are:
0 - undefined
1 - starting
2 - ready
3 - shutting down

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-04-17 16:28:27 +03:00
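
A hedged sketch of how these states can be exposed through a Prometheus gauge; the metric name follows `frostfs_http_gw_state_health` mentioned in the CHANGELOG below, while the helper itself is illustrative:

```go
// Sketch of the health state gauge; the constants mirror the commit message,
// registration and the helper are illustrative.
package metricsketch

import "github.com/prometheus/client_golang/prometheus"

const (
	HealthUndefined    = 0
	HealthStarting     = 1
	HealthReady        = 2
	HealthShuttingDown = 3
)

var healthGauge = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "frostfs_http_gw",
	Subsystem: "state",
	Name:      "health",
	Help:      "Current gateway state: 0 undefined, 1 starting, 2 ready, 3 shutting down.",
})

// SetHealth records the current gateway lifecycle state.
func SetHealth(state int) {
	healthGauge.Set(float64(state))
}
```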
162738e771 [#27] Update SDK to fix handling request canceling
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-30 15:59:33 +03:00
7c16ffa250 [#26] Fix pre-commit issues
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-24 15:35:42 +03:00
6f35d7198d [#24] Use build tags to run integration tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-24 12:27:17 +03:00
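
Separating integration tests with a build tag typically looks like the sketch below; the `integration` tag matches the `--tags=integration` flag used by the Makefile target added further down, while the package, file, and test names are made up:

```go
//go:build integration

// Compiled only when tests run with the integration tag,
// e.g. `go test -tags=integration ./...`. Names here are illustrative.
package gateway

import "testing"

func TestUploadDownloadIntegration(t *testing.T) {
	t.Skip("placeholder: real integration tests talk to a FrostFS AIO container")
}
```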
81f7168a16 [#22] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-24 11:33:48 +03:00
a8ec09e76a [#22] Update system attributes prefix
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-24 11:33:01 +03:00
1f66149316 [#17] Add pre-commit config 2023-03-24 08:13:56 +00:00
e2059a8926 [#21] Add bug label
Add bug label in the bug report template

Signed-off-by: Liza <e.chichindaeva@yadro.com>
2023-03-24 06:23:39 +00:00
53ee124b19 [#18] Fix typo in error message
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-22 10:43:34 +03:00
8f6be59e23 [#18] Extract error details
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-03-13 16:14:04 +03:00
e02ee50d7b Rename package name
Due to source code relocation from GitHub.

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2023-03-07 17:08:53 +03:00
93ec4c444d [TrueCloudLab#12] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-02-06 13:56:55 +03:00
6be8d47d92 [TrueCloudLab#12] Support multiple configs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-02-06 13:56:55 +03:00
ed983f8ad0 [TrueCloudLab#11] Update SDK to renew tokens beforehand
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-02-03 17:01:01 +03:00
5f01abf300 [#8] Update neo-go and viper
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2023-01-25 15:23:50 +03:00
38aa6db041 [TrueCloudLab#9] Fix tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-25 11:24:41 +03:00
5a98df9d2d [TrueCloudLab#9] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-25 11:24:41 +03:00
361acacf07 [TrueCloudLab#9] Update go version to 1.18
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-25 11:24:41 +03:00
6909ef5382 [TrueCloudLab#7] Fix tests
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-24 17:09:14 +03:00
148b1aa7f5 [TrueCloudLab#7] Require only one healthy server
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-24 17:09:14 +03:00
7df26d9181 [#6] Update SDK
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2023-01-10 11:25:14 +03:00
913644d64a Change logo
Signed-off-by: Stanislav Bogatyrev <s.bogatyrev@yadro.com>
2023-01-09 11:05:04 +03:00
4c30ff6638 Change logo
Signed-off-by: Stanislav Bogatyrev <s.bogatyrev@yadro.com>
2023-01-08 10:37:07 +03:00
72734ab486 Release v0.26.0
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2022-12-28 17:04:00 +03:00
6abd500b11 [#2] Drop logo
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2022-12-20 18:56:02 +03:00
67c5818fc1 [#2] Rename internals
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2022-12-20 18:56:02 +03:00
de309e3264 [#2] Update deb packages
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2022-12-20 13:34:42 +03:00
31d396a125 [#2] Update building
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2022-12-20 13:34:42 +03:00
a014fb96fc [#2] Update docs
Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2022-12-20 13:34:42 +03:00
2d9eee81c2 [#1] Build HTTP Gateway with FrostFS dependencies
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2022-12-15 13:26:08 +03:00
f88fe1092d [#223] Debian packaging
Debian package includes:
 - user creation;
 - directories and permissions;
 - unit file for systemd

Signed-off-by: Dmitriy Zabolotskiy <d.zabolotskiy@yadro.com>
2022-11-29 17:37:33 +03:00
Denis Kirillov
f4fbd936bc [#239] Remove deprecated linters
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-25 16:17:22 +03:00
278376643a [#221] add error response on key duplicates
Signed-off-by: Artem Tataurov <a.tataurov@yadro.com>
2022-11-24 18:15:56 +03:00
8f1d84ba8d [#228] update docs
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2022-11-24 18:07:04 +03:00
f3d58e4ef0 [#228] add support of multiple sockets
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2022-11-24 18:07:04 +03:00
Angira Kekteeva
7b1410968e [#236] Fix config example
Signed-off-by: Angira Kekteeva <kira@nspcc.ru>
2022-11-18 09:51:04 +03:00
Denis Kirillov
408d914347 [#234] Update SDK to support timeout for stream
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-15 18:06:53 +03:00
Denis Kirillov
a6ec194c2a [#233] Update sdk
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-15 10:22:36 +03:00
Denis Kirillov
4a0188b8f9 [#215] Mention caching strategy in README.md
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-14 14:46:45 +03:00
Denis Kirillov
ef0c17372f [#216] Add routes specification
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-10 12:43:24 +03:00
Denis Kirillov
d2ced3ccbb [#216] Clarify url encoding in README.md
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-10 12:43:24 +03:00
Denis Kirillov
9fda0397e4 [#214] Update CHANGELOG.md
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-08 16:20:28 +03:00
Denis Kirillov
be47263c42 [#214] Support the Date header on upload
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-08 16:20:28 +03:00
Denis Kirillov
9dad47502e [#222] Update docs
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-08 14:34:43 +03:00
Denis Kirillov
b2db6300c4 [#222] Fix zip streaming
Skip objects with invalid FilePath

Signed-off-by: Denis Kirillov <denis@nspcc.ru>
2022-11-08 14:34:33 +03:00
131f2dbcfc [#225] Run CI on support branch PR
Signed-off-by: Alex Vanin <a.vanin@yadro.com>
2022-11-02 14:11:52 +03:00
78 changed files with 3520 additions and 2070 deletions


@ -0,0 +1,20 @@
on: [pull_request]
jobs:
builds:
name: Builds
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.19', '1.20' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '${{ matrix.go_versions }}'
- name: Build binary
run: make


@ -0,0 +1,20 @@
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.20'
- name: Run commit format checker
uses: https://git.alexvan.in/alexvanin/dco-go@v1
with:
from: adb95642d


@ -0,0 +1,34 @@
on: [pull_request]
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: golangci-lint
uses: https://github.com/golangci/golangci-lint-action@v2
with:
version: latest
tests:
name: Tests
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.19', '1.20' ]
fail-fast: false
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '${{ matrix.go_versions }}'
- name: Update Go modules
run: make dep
- name: Run tests
run: make test


@ -0,0 +1,21 @@
on: [pull_request]
jobs:
vulncheck:
name: Vulncheck
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.20'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Run govulncheck
run: govulncheck ./...

.github/CODEOWNERS vendored (2 lines changed)

@ -1 +1 @@
* @alexvanin @masterSplinter01 @KirillovDenis
* @alexvanin @dkirillov


@ -2,7 +2,7 @@
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage
labels: community, triage, bug
assignees: ''
---
@ -18,17 +18,17 @@ assignees: ''
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
<!-- Not obligatory
If no reason/fix/additions for the bug can be suggested,
uncomment the following phrase:
<-- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->


@ -7,14 +7,14 @@ assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
## Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
## Describe the solution you'd like
<!--- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
## Describe alternatives you've considered
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
**Additional context**
Add any other context or screenshots about the feature request here.
## Additional context
<!--- Add any other context or screenshots about the feature request here. -->

.github/logo.svg vendored (197 lines changed)

@ -1,129 +1,70 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
sodipodi:docname="logo_fs.svg"
inkscape:version="1.0 (4035a4fb49, 2020-05-01)"
id="svg57"
version="1.1"
viewBox="0 0 105 25"
height="25mm"
width="105mm">
<defs
id="defs51">
<clipPath
clipPathUnits="userSpaceOnUse"
id="clipPath434">
<path
d="M 0,0 H 1366 V 768 H 0 Z"
id="path432" />
</clipPath>
</defs>
<sodipodi:namedview
inkscape:window-maximized="0"
inkscape:window-y="0"
inkscape:window-x="130"
inkscape:window-height="1040"
inkscape:window-width="1274"
height="50mm"
units="mm"
showgrid="false"
inkscape:document-rotation="0"
inkscape:current-layer="layer1"
inkscape:document-units="mm"
inkscape:cy="344.49897"
inkscape:cx="468.64708"
inkscape:zoom="0.7"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
borderopacity="1.0"
bordercolor="#666666"
pagecolor="#ffffff"
id="base" />
<metadata
id="metadata54">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
id="layer1"
inkscape:groupmode="layer"
inkscape:label="Layer 1">
<g
id="g424"
transform="matrix(0.35277777,0,0,-0.35277777,63.946468,10.194047)">
<path
d="m 0,0 v -8.093 h 12.287 v -3.94 H 0 V -24.067 H -4.534 V 3.898 H 15.677 V 0 Z"
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path426" />
</g>
<g
transform="matrix(0.35277777,0,0,-0.35277777,-315.43002,107.34005)"
id="g428">
<g
id="g430"
clip-path="url(#clipPath434)">
<g
id="g436"
transform="translate(1112.874,278.2981)">
<path
d="M 0,0 C 1.822,-0.932 3.354,-2.359 4.597,-4.28 L 1.165,-7.373 c -0.791,1.695 -1.779,2.924 -2.966,3.686 -1.186,0.763 -2.768,1.145 -4.745,1.145 -1.949,0 -3.461,-0.389 -4.534,-1.166 -1.074,-0.777 -1.61,-1.772 -1.61,-2.987 0,-1.13 0.523,-2.027 1.568,-2.69 1.045,-0.664 2.909,-1.236 5.593,-1.716 2.514,-0.452 4.512,-1.024 5.995,-1.716 1.483,-0.693 2.564,-1.554 3.242,-2.585 0.677,-1.031 1.016,-2.309 1.016,-3.834 0,-1.639 -0.466,-3.079 -1.398,-4.322 -0.932,-1.243 -2.239,-2.197 -3.919,-2.86 -1.681,-0.664 -3.623,-0.996 -5.826,-0.996 -5.678,0 -9.689,1.892 -12.033,5.678 l 3.178,3.178 c 0.903,-1.695 2.068,-2.939 3.495,-3.729 1.426,-0.791 3.199,-1.186 5.318,-1.186 2.005,0 3.58,0.345 4.724,1.038 1.144,0.692 1.716,1.674 1.716,2.945 0,1.017 -0.516,1.835 -1.547,2.457 -1.031,0.621 -2.832,1.172 -5.402,1.653 -2.571,0.479 -4.618,1.073 -6.143,1.779 -1.526,0.706 -2.635,1.582 -3.326,2.627 -0.693,1.045 -1.039,2.316 -1.039,3.813 0,1.582 0.438,3.023 1.314,4.322 0.875,1.299 2.14,2.33 3.792,3.093 1.653,0.763 3.58,1.144 5.783,1.144 C -4.018,1.398 -1.822,0.932 0,0"
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path438" />
</g>
<g
id="g440"
transform="translate(993.0239,277.5454)">
<path
d="m 0,0 c 2.054,-1.831 3.083,-4.465 3.083,-7.902 v -17.935 h -4.484 v 16.366 c 0,2.914 -0.626,5.024 -1.877,6.332 -1.253,1.308 -2.924,1.962 -5.016,1.962 -1.495,0 -2.896,-0.327 -4.204,-0.981 -1.308,-0.654 -2.381,-1.719 -3.222,-3.194 -0.841,-1.477 -1.261,-3.335 -1.261,-5.576 v -14.909 h -4.484 V 1.328 l 4.086,-1.674 0.118,-1.84 c 0.933,1.681 2.222,2.923 3.867,3.727 1.643,0.803 3.493,1.205 5.548,1.205 C -4.671,2.746 -2.055,1.83 0,0"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path442" />
</g>
<g
id="g444"
transform="translate(1027.9968,264.0386)">
<path
d="m 0,0 h -21.128 c 0.261,-2.84 1.205,-5.044 2.83,-6.613 1.625,-1.57 3.727,-2.355 6.305,-2.355 2.054,0 3.763,0.356 5.128,1.065 1.363,0.71 2.288,1.738 2.774,3.083 l 3.755,-1.961 c -1.121,-1.981 -2.616,-3.495 -4.484,-4.54 -1.868,-1.046 -4.259,-1.569 -7.173,-1.569 -4.223,0 -7.538,1.289 -9.948,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.149,8.127 3.447,10.705 2.298,2.578 5.557,3.867 9.779,3.867 2.615,0 4.876,-0.58 6.782,-1.738 1.905,-1.158 3.343,-2.728 4.315,-4.707 C -0.262,7.827 0.224,5.605 0.224,3.139 0.224,2.092 0.149,1.046 0,0 m -18.298,10.144 c -1.513,-1.457 -2.438,-3.512 -2.775,-6.165 h 16.982 c -0.3,2.615 -1.159,4.661 -2.578,6.137 -1.42,1.476 -3.307,2.214 -5.661,2.214 -2.466,0 -4.455,-0.728 -5.968,-2.186"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path446" />
</g>
<g
id="g448"
transform="translate(1057.8818,276.4246)">
<path
d="m 0,0 c 2.41,-2.578 3.615,-6.147 3.615,-10.705 0,-4.558 -1.205,-8.126 -3.615,-10.704 -2.41,-2.578 -5.726,-3.867 -9.948,-3.867 -4.222,0 -7.537,1.289 -9.947,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.205,8.127 3.615,10.705 2.41,2.578 5.725,3.867 9.947,3.867 C -5.726,3.867 -2.41,2.578 0,0 m -16.617,-2.858 c -1.607,-1.906 -2.41,-4.522 -2.41,-7.847 0,-3.326 0.803,-5.94 2.41,-7.846 1.607,-1.905 3.83,-2.858 6.669,-2.858 2.839,0 5.063,0.953 6.67,2.858 1.606,1.906 2.41,4.52 2.41,7.846 0,3.325 -0.804,5.941 -2.41,7.847 C -4.885,-0.953 -7.109,0 -9.948,0 c -2.839,0 -5.062,-0.953 -6.669,-2.858"
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path450" />
</g>
</g>
</g>
<g
id="g452"
transform="matrix(0.35277777,0,0,-0.35277777,5.8329581,6.5590171)">
<path
d="m 0,0 0.001,-38.946 25.286,-9.076 V -8.753 L 52.626,1.321 27.815,10.207 Z"
style="fill:#00e599;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path454" />
</g>
<g
id="g456"
transform="matrix(0.35277777,0,0,-0.35277777,15.479008,10.041927)">
<path
d="M 0,0 V -21.306 L 25.293,-30.364 25.282,9.347 Z"
style="fill:#00b091;fill-opacity:1;fill-rule:nonzero;stroke:none"
id="path458" />
</g>
</g>
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
<style type="text/css">
.st0{display:none;}
.st1{display:inline;}
.st2{fill:#01E397;}
.st3{display:inline;fill:#010032;}
.st4{display:inline;fill:#00E599;}
.st5{display:inline;fill:#00AF92;}
.st6{fill:#00C3E5;}
</style>
<g id="Layer_2">
<g id="Layer_1-2" class="st0">
<g class="st1">
<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
</g>
<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
C119.9,17.2,117.7,18.2,116.2,19.9z"/>
<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
</g>
<g>
<g>
<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
</g>
</g>
</g>
</svg>

Image file size: 6.5 KiB before, 5.5 KiB after.


@ -1,66 +0,0 @@
name: Builds
on:
pull_request:
branches:
- master
types: [opened, synchronize]
paths-ignore:
- '**/*.md'
jobs:
build_cli:
name: Build CLI
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Update Go modules
run: make dep
- name: Build CLI
run: make
- name: Check version
run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
build_image:
needs: build_cli
name: Build Docker image
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Update Go modules
run: make dep
- name: Build Docker image
run: make image


@ -1,67 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
schedule:
- cron: '35 8 * * 1'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2


@ -1,21 +0,0 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}


@ -1,85 +0,0 @@
name: Tests
on:
pull_request:
branches:
- master
types: [opened, synchronize]
paths-ignore:
- '**/*.md'
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: golangci-lint
uses: golangci/golangci-lint-action@v2
with:
version: latest
cover:
name: Coverage
runs-on: ubuntu-20.04
env:
CGO_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.19
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Update Go modules
run: make dep
- name: Test and write coverage profile
run: make cover
- name: Upload coverage results to Codecov
uses: codecov/codecov-action@v1
with:
fail_ci_if_error: false
path_to_write_report: ./coverage.txt
verbose: true
tests:
name: Tests
runs-on: ubuntu-20.04
strategy:
matrix:
go_versions: [ '1.17', '1.18', '1.19' ]
fail-fast: false
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: '${{ matrix.go_versions }}'
- name: Restore Go modules from cache
uses: actions/cache@v2
with:
path: /home/runner/go/pkg/mod
key: deps-${{ hashFiles('go.sum') }}
- name: Update Go modules
run: make dep
- name: Run tests
run: make test

.gitignore vendored (5 lines changed)

@ -10,9 +10,12 @@ temp
test.sh
testfile
.blast.yml
.neofs-cli.yml
.frostfs-cli.yml
.cache
coverage.txt
coverage.html
# debhelpers
**/.debhelper

.gitlint Normal file (11 lines changed)

@ -0,0 +1,11 @@
[general]
fail-without-commits=True
regex-style-search=True
contrib=CC1
[title-match-regex]
regex=^\[\#[0-9Xx]+\]\s
[ignore-by-title]
regex=^Release(.*)
ignore=title-match-regex


@ -4,7 +4,7 @@
# options for analysis running
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 5m
timeout: 15m
# include test files or not, default is true
tests: true
@ -32,15 +32,12 @@ linters:
- revive
# some default golangci-lint linters
- deadcode
- errcheck
- gosimple
- ineffassign
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
# extra linters
- exhaustive

.pre-commit-config.yaml Normal file (45 lines changed)

@ -0,0 +1,45 @@
ci:
autofix_prs: false
repos:
- repo: https://github.com/jorisroovers/gitlint
rev: v0.19.1
hooks:
- id: gitlint
stages: [commit-msg]
- id: gitlint-ci
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-shebang-scripts-are-executable
- id: check-merge-conflict
- id: check-json
- id: check-xml
- id: check-yaml
- id: trailing-whitespace
args: [--markdown-linebreak-ext=md]
- id: end-of-file-fixer
exclude: ".key$"
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.2
hooks:
- id: shellcheck
- repo: https://github.com/golangci/golangci-lint
rev: v1.51.2
hooks:
- id: golangci-lint
- repo: local
hooks:
- id: go-unit-tests
name: go unit tests
entry: make test
pass_filenames: false
types: [go]
language: system


@ -4,236 +4,38 @@ This document outlines major changes between releases.
## [Unreleased]
## [0.25.0] - 2022-10-31
### Added
- Config reloading on SIGHUP (#200, #208)
- Stop pool dial on SIGINT (#212)
- Makefile help (#213)
- Multiple configs support (TrueCloudLab#12)
- Support dump metrics descriptions (#29)
- Support impersonate bearer token (#40)
### Changed
- Update NeoFS error handling (#206)
- GitHub actions updates (#205, #209)
- Unified system attribute format for GET and HEAD (#213)
- Update prometheus to v1.15.0 (#35)
- Update go version to 1.18 (TrueCloudLab#9)
- Update go version to 1.19 (#50)
- Update neo-go to v0.101.0 (#8)
- Update viper to v1.15.0 (#8)
- Errors have become more detailed (#18)
- Update system attribute names (#22)
- Separate integration tests with build tags (#24)
- Changed values for `frostfs_http_gw_state_health` metric (#32)
## [0.24.0] - 2022-09-14
### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#59)
### Fixed
- Fix expiration epoch calculation (#198)
- Fix panic on go1.19 (#188)
### Updating from neofs-http-gw v0.26.0
### Added
- Exposure of pool metrics (#179, #194)
To set system attributes use updated headers
(you can use old ones for now, but their support will be dropped in the future releases):
### Changed
- Help doesn't print empty parameters (#186)
- Update version calculation (#190, #199)
- Update neofs-sdk-go (#196)
- Update go version in CI and docker (#197, #202)
* `X-Attribute-Neofs-*` -> `X-Attribute-System-*`
* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*`
* `X-Attribute-neofs-*` -> `X-Attribute-system-*`
## [0.23.0] - 2022-08-02
### Added
- New param to configure pool error threshold (#184)
### Changed
- Pprof and prometheus metrics configuration (#171)
- Drop GO111MODULES from builds (#182)
### Updating from v0.22.0
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
If you are using the command line flags you can skip this step.
## [0.22.0] - 2022-07-25
### Added
- Default params documentation (#172)
- Health metric (#175)
### Changed
- Version output (#169)
- Updated SDK Version (#178)
## [0.21.0] - 2022-06-20
### Fixed
- Downloading ZIP archive using streaming (#163)
### Added
- New make target to build app in docker (#159)
### Changed
- Increased buffer size for file uploading (#148)
- Updated linter version to v1.46.2 (#161)
- Updated CodeQL version to v2 (#158)
## [0.20.0] - 2022-04-29
### Fixed
- Get rid of data race on server shutdown (#145)
- Improved English in docs and comments (#153)
- Use `FilePath` to download zip (#150)
### Added
- Support container name NNS resolving (#142)
### Changed
- Updated docs (#133, #140)
- Increased default read/write timeouts (#154)
- Updated SDK (#137, #139)
- Updated go version to 1.17 (#143)
- Improved error messages (#144)
## [0.19.0] - 2022-03-16
### Fixed
- Uploading object with zero payload (#122)
- Different headers format in GET and HEAD (#125)
- Fixed project name in docs (#120)
### Added
- Support object attributes with spaces (#123)
### Changed
- Updated fasthttp to v1.34.0 (#129)
- Updated NeoFS SDK to v1.0.0-rc.3 (#126, #132)
- Refactored content type detecting (#128)
## [0.18.0] - 2021-12-10
### Fixed
- System headers format (#111)
### Added
- Different formats to set object's expiration: in epoch, duration, timestamp,
RFC3339 (#108)
- Support of nodes priority (#115)
### Changed
- Updated testcontainers dependency (#100)
## [0.17.0] - 2021-11-15
Support of bulk file download with zip streams and various bug fixes.
### Fixed
- Allow canonical `X-Attribute-Neofs-*` headers (#87)
- Responses with error message now end with `\n` character (#105)
- Application does not require all neofs endpoints to be healthy at start now
(#103)
- Application now tracks session token errors and recreates tokens in runtime
(#95)
### Added
- Integration tests with [all-in-one](https://github.com/nspcc-dev/neofs-aio/)
test containers (#85, #94)
- Bulk download support with zip streams (#92, #96)
## 0.16.1 (28 Jul 2021)
New features:
* logging requests (#77)
* HEAD methods for download routes (#76)
Improvements:
* updated sdk-go dependency (#82)
Bugs fixed:
* wrong NotFound status was used (#30)
## 0.16.0 (29 Jun 2021)
We update HTTP gateway with NEP-6 wallets support, YAML configuration files
and small fixes.
New features:
* YAML configuration file (#71)
Behavior changes:
* gateway key needs to be stored in a proper NEP-6 wallet now, `-k` option is
no longer available, see `-w` and `-a` (#68)
Bugs fixed:
* downloads were not streamed leading to excessive memory usage (#67)
* Last-Modified header incorrectly used local time (#75)
## 0.15.2 (22 Jun 2021)
New features:
* Content-Type returned for object GET requests can now be taken from
attributes (overriding autodetection, #65)
Behavior changes:
* grpc keepalive options can no longer be changed (#60)
Improvements:
* code refactoring (more reuse between different gateways, moved some code to
sdk-go, #47, #46, #51, #62, #63)
* documentation updates and fixes (#53, #49, #55, #59)
* updated api-go dependency (#57)
Bugs fixed:
* `-k` option wasn't accepted for key although it was documented (#50)
## 0.15.1 (24 May 2021)
This important release makes HTTP gateway compatible with NeoFS node version
0.20.0.
Behavior changes:
* neofs-api-go was updated to 1.26.1, which contains some incompatible
changes in underlying components (#39, #44)
* `neofs-http-gw` is consistently used now for repository, binary and image
names (#43)
Improvements:
* minor code cleanups based on stricter set of linters (#41)
* updated README (#42)
## 0.15.0 (30 Apr 2021)
This is the first public release incorporating latest NeoFS protocol support
and fixing some bugs.
New features:
* upload support (#14, #13, #29)
* ephemeral keys (#26)
* TLS server support (#28)
Behavior changes:
* node weights can now be specified as simple numbers instead of percentages
and gateway will calculate the proportion automatically (#27)
* attributes are converted now to `X-Attribute-*` headers when retrieving
object from gate instead of `X-*` (#29)
Improvements:
* better Makefile (#16, #24, #33, #34)
* updated documentation (#16, #29, #35, #36)
* updated neofs-api-go to v1.25.0 (#17, #20)
* updated fasthttp to v1.23.0+ (#17, #29)
* refactoring, eliminating some dependencies (#20, #29)
Bugs fixed:
* gateway attempted to work with no NeoFS peers configured (#29)
* some invalid headers could be sent for attributes using non-ASCII or
non-printable characters (#29)
## Older versions
Please refer to [Github
releases](https://github.com/nspcc-dev/neofs-http-gw/releases/) for older
releases.
This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.
[0.17.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.16.1...v0.17.0
[0.18.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.17.0...v0.18.0
[0.19.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.18.0...v0.19.0
[0.20.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.19.0...v0.20.0
[0.21.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.20.0...v0.21.0
[0.22.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.21.0...v0.22.0
[0.23.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.22.0...v0.23.0
[0.24.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.23.0...v0.24.0
[0.25.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.24.0...v0.25.0
[Unreleased]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.0...master
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...master


@ -3,8 +3,8 @@
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://github.com/nspcc-dev/neofs-http-gw/issues) and
[pull requests](https://github.com/nspcc-dev/neofs-http-gw/pulls) for existing
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
@ -23,24 +23,24 @@ everyone. Please follow the guidelines:
## Development Workflow
Start by forking the `neofs-http-gw` repository, make changes in a branch and then
Start by forking the `frostfs-http-gw` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in details:
### Set up your GitHub Repository
Fork [NeoFS HTTP Gateway
upstream](https://github.com/nspcc-dev/neofs-http-gw/fork) source repository
### Set up your git repository
Fork [FrostFS HTTP Gateway
upstream](https://git.frostfs.info/repo/fork/8) source repository
to your own personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).
```sh
$ git clone https://github.com/nspcc-dev/neofs-http-gw
$ git clone https://git.frostfs.info/<username>/frostfs-http-gw.git
```
### Set up git remote as ``upstream``
```sh
$ cd neofs-http-gw
$ git remote add upstream https://github.com/nspcc-dev/neofs-http-gw
$ cd frostfs-http-gw
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
$ git fetch upstream
$ git merge upstream/master
...
@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via GitHub. Refer to [this
document](https://help.github.com/articles/creating-a-pull-request/) for
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
@ -107,7 +107,7 @@ contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```
This can be easily done with the `--signoff` option to `git commit`.


@ -18,6 +18,6 @@ FROM scratch
WORKDIR /
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /src/bin/neofs-http-gw /bin/neofs-http-gw
COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw
ENTRYPOINT ["/bin/neofs-http-gw"]
ENTRYPOINT ["/bin/frostfs-http-gw"]


@ -3,6 +3,6 @@ RUN apk add --update --no-cache bash ca-certificates
WORKDIR /
COPY bin/neofs-http-gw /bin/neofs-http-gw
COPY bin/frostfs-http-gw /bin/frostfs-http-gw
CMD ["neofs-http-gw"]
CMD ["frostfs-http-gw"]

Makefile Normal file → Executable file (53 lines changed)

@ -6,20 +6,28 @@ GO_VERSION ?= 1.19
LINT_VERSION ?= 1.49.0
BUILD ?= $(shell date -u --iso=seconds)
HUB_IMAGE ?= nspccdev/neofs-http-gw
HUB_IMAGE ?= truecloudlab/frostfs-http-gw
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
METRICS_DUMP_OUT ?= ./metrics-dump.json
# List of binaries to build. For now just one.
BINDIR = bin
DIRS = $(BINDIR)
BINS = $(BINDIR)/neofs-http-gw
BINS = $(BINDIR)/frostfs-http-gw
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint version clean
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean
# .deb package versioning
OS_RELEASE = $(shell lsb_release -cs)
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
sed "s/-/~/")-${OS_RELEASE}
.PHONY: debpackage debclean
# Make all binaries
all: $(BINS)
$(BINS): $(DIRS) dep
$(BINS): $(DIRS) dep
@echo "⇒ Build $@"
CGO_ENABLED=0 \
go build -v -trimpath \
@ -55,6 +63,11 @@ docker/%:
test:
@go test ./... -cover
# Run integration tests
.PHONY: integration-test
integration-test:
@go test ./... -cover --tags=integration
# Run tests with race detection and produce coverage output
cover:
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
@ -67,7 +80,7 @@ fmt:
# Build clean Docker image
image:
@echo "⇒ Build NeoFS HTTP Gateway docker image "
@echo "⇒ Build FrostFS HTTP Gateway docker image "
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
@ -82,7 +95,7 @@ image-push:
# Build dirty Docker image
dirty-image:
@echo "⇒ Build NeoFS HTTP Gateway dirty docker image "
@echo "⇒ Build FrostFS HTTP Gateway dirty docker image "
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
@ -102,6 +115,14 @@ docker/lint:
--env HOME=/src \
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
# Activate pre-commit hooks
pre-commit:
pre-commit install -t pre-commit -t commit-msg
# Deactivate pre-commit hooks
unpre-commit:
pre-commit uninstall -t pre-commit -t commit-msg
# Print version
version:
@echo $(VERSION)
@ -111,4 +132,22 @@ clean:
rm -rf vendor
rm -rf $(BINDIR)
# Package for Debian
debpackage:
dch --package frostfs-http-gw \
--controlmaint \
--newversion $(PKG_VERSION) \
--distribution $(OS_RELEASE) \
"Please see CHANGELOG.md for code changes for $(VERSION)"
dpkg-buildpackage --no-sign -b
debclean:
dh clean
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
include help.mk

README.md (237 lines changed)

@ -1,28 +1,30 @@
<p align="center">
<img src="./.github/logo.svg" width="500px" alt="NeoFS">
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://fs.neo.org">NeoFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
</p>
---
[![Report](https://goreportcard.com/badge/github.com/nspcc-dev/neofs-http-gw)](https://goreportcard.com/report/github.com/nspcc-dev/neofs-http-gw)
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nspcc-dev/neofs-http-gw?sort=semver)
![License](https://img.shields.io/github/license/nspcc-dev/neofs-http-gw.svg?style=popout)
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw)
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange)
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
# NeoFS HTTP Gateway
# FrostFS HTTP Gateway
NeoFS HTTP Gateway bridges NeoFS internal protocol and HTTP standard.
- you can download one file per request from the NeoFS Network
- you can upload one file per request into the NeoFS Network
FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
- you can download one file per request from the FrostFS Network
- you can upload one file per request into the FrostFS Network
See available routes in [specification](./docs/api.md).
## Installation
```go install github.com/nspcc-dev/neofs-http-gw```
```go install git.frostfs.info/TrueCloudLab/frostfs-http-gw```
Or you can call `make` to build it from the cloned repository (the binary will
end up in `bin/neofs-http-gw`). To build neofs-http-gw binary in clean docker
environment, call `make docker/bin/neofs-http-gw`.
end up in `bin/frostfs-http-gw`). To build frostfs-http-gw binary in clean docker
environment, call `make docker/bin/frostfs-http-gw`.
Other notable make targets:
@ -36,32 +38,32 @@ version Show current version
```
Or you can also use a [Docker
image](https://hub.docker.com/r/nspccdev/neofs-http-gw) provided for the released
image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for the released
(and occasionally unreleased) versions of the gateway (`:latest` points to the
latest stable release).
## Execution
HTTP gateway itself is not a NeoFS node, so to access NeoFS it uses node's
HTTP gateway itself is not a FrostFS node, so to access FrostFS it uses node's
gRPC interface and you need to provide some node that it will connect to. This
can be done either via `-p` parameter or via `HTTP_GW_PEERS_<N>_ADDRESS` and
`HTTP_GW_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple
NeoFS nodes with weighted load balancing).
FrostFS nodes with weighted load balancing).
If you launch HTTP gateway in bundle with [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env),
you can get the IP address of the node in the output of `make hosts` command
(with s0*.neofs.devenv name).
If you launch HTTP gateway in bundle with [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can get the IP address of the node in the output of `make hosts` command
(with s0*.frostfs.devenv name).
These two commands are functionally equivalent, they run the gate with one
backend node (and otherwise default settings):
```
$ neofs-http-gw -p 192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 neofs-http-gw
$ frostfs-http-gw -p 192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 frostfs-http-gw
```
It's also possible to specify uri scheme (grpc or grpcs) when using `-p`:
```
$ neofs-http-gw -p grpc://192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 neofs-http-gw
$ frostfs-http-gw -p grpc://192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 frostfs-http-gw
```
## Configuration
@ -72,11 +74,11 @@ environment variables (see [example](./config/config.env)), so they're not speci
### Nodes: weights and priorities
You can specify multiple `-p` options to add more NeoFS nodes, this will make
You can specify multiple `-p` options to add more FrostFS nodes, this will make
gateway spread requests equally among them (using weight 1 and priority 1 for every node):
```
$ neofs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
$ frostfs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
```
If you want some specific load distribution proportions, use weights and priorities:
@ -84,21 +86,21 @@ If you want some specific load distribution proportions, use weights and priorit
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \
HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \
HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \
neofs-http-gw
frostfs-http-gw
```
This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use
This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use
192.168.130.72 for 90% of requests and 192.168.130.73 for remaining 10%.
### Keys
You can provide a wallet via `--wallet` or `-w` flag. You can also specify the account address using `--address`
(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet.
If no wallet provided, the gateway autogenerates a key pair it will use for NeoFS requests.
You can provide a wallet via `--wallet` or `-w` flag. You can also specify the account address using `--address`
(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet.
If no wallet provided, the gateway autogenerates a key pair it will use for FrostFS requests.
```
$ neofs-http-gw -p $NEOFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
$ frostfs-http-gw -p $FROSTFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
```
Example:
```
$ neofs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
$ frostfs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
```
### Binding and TLS
@ -114,7 +116,7 @@ external redirecting solution.
Example to bind to `192.168.130.130:443` and serve TLS there:
```
$ neofs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
$ frostfs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
--tls_key=key.pem --tls_certificate=cert.pem
```
@ -132,12 +134,12 @@ request with data stream after timeout.
`HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable
request body streaming (effectively it'll make the gateway accept the file completely
first and only then try sending it to NeoFS).
first and only then try sending it to FrostFS).
`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls maximum request body size
limiting uploads to files slightly lower than this limit.
### NeoFS parameters
### FrostFS parameters
Gateway can automatically set timestamps for uploaded files based on local
time source, use `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment
@ -162,7 +164,7 @@ All timing options accept values with suffixes, so "15s" is 15 seconds and
"2m" is 2 minutes.
### Zip streaming
The gateway supports downloading files by common prefix (like dir) in zip format. You can enable compression
The gateway supports downloading files by common prefix (like dir) in zip format. You can enable compression
using config or `HTTP_GW_ZIP_COMPRESSION=true` environment variable.
### Logging
@ -172,25 +174,42 @@ HTTP_GW_LOGGER_LEVEL=debug
```
### Yaml file
Configuration file is optional and can be used instead of environment variables/other parameters.
Configuration file is optional and can be used instead of environment variables/other parameters.
It can be specified with `--config` parameter:
```
$ neofs-http-gw --config your-config.yaml
$ frostfs-http-gw --config your-config.yaml
```
See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for example.
#### Multiple configs
You can use several config files when running application. It allows you to split configuration into parts.
For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](./config)).
You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag.
Also, you can combine these flags:
```shell
$ frostfs-http-gw --config ./config/config.yaml --config /your/partial/config.yaml --config-dir ./config/dir
```
**Note:** next file in `--config` flag overwrites values from the previous one.
Files from `--config-dir` directory overwrite values from `--config` files.
So the command above run `frostfs-http-gw` to listen on `0.0.0.0:8080` address (value from `./config/config.yaml`),
applies parameters from `/your/partial/config.yaml`,
enable pprof (value from `./config/dir/pprof.yaml`) and prometheus (value from `./config/dir/prometheus.yaml`).
## HTTP API provided
This gateway intentionally provides limited feature set and doesn't try to
substitute (or completely wrap) regular gRPC NeoFS interface. You can download
substitute (or completely wrap) regular gRPC FrostFS interface. You can download
and upload objects with it, but deleting, searching, managing ACLs, creating
containers and other activities are not supported and not planned to be
supported.
### Preparation
Before uploading or downloading a file make sure you have a prepared container.
Before uploading or downloading a file make sure you have a prepared container.
You can create it with instructions below.
Also, in case of downloading, you need to have a file inside a container.
@ -204,27 +223,27 @@ Steps to start using name resolving:
1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
```yaml
rpc_endpoint: http://morph-chain.neofs.devenv:30333
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
```
2. Make sure your container is registered in NNS contract. If you use [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env)
2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
you can check if your container (e.g. with `container-name` name) is registered in NNS:
```shell
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
http://morph-chain.neofs.devenv:30333 | jq -r '.result.hash'
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
$ docker exec -it morph_chain neo-go \
contract testinvokefunction \
-r http://morph-chain.neofs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
resolve string:container-name.container int:16 \
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
| base64 -d && echo
7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
```
@ -236,36 +255,36 @@ $ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-nam
#### Create a container
You can create a container via [neofs-cli](https://github.com/nspcc-dev/neofs-node/releases):
You can create a container via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases):
```
$ neofs-cli -r $NEOFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
$ frostfs-cli -r $FROSTFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
```
where `$WALLET` is a path to user wallet,
where `$WALLET` is a path to user wallet,
`$ACL` -- hex-encoded basic ACL value or one of the keywords 'private', 'public-read', 'public-read-write' and
`$POLICY` -- QL-encoded or JSON-encoded placement policy or a path to a file with it
For example:
```
$ neofs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
```
If you have launched nodes via [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env),
you can get the key value from `wallets/wallet.json` or write the path to
If you have launched nodes via [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can get the key value from `wallets/wallet.json` or write the path to
the file `wallets/wallet.key`.
#### Prepare a file in a container
To create a file via [neofs-cli](https://github.com/nspcc-dev/neofs-node/releases), run a command below:
To create a file via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases), run a command below:
```
$ neofs-cli -r $NEOFS_NODE -k $KEY object put --file $FILENAME --cid $CID
$ frostfs-cli -r $FROSTFS_NODE -k $KEY object put --file $FILENAME --cid $CID
```
where
`$KEY` -- the key, please read the information [above](#create-a-container),
where
`$KEY` -- the key, please read the information [above](#create-a-container),
`$CID` -- container ID.
For example:
```
$ neofs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
```
@ -273,13 +292,13 @@ $ neofs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --
#### Requests
The following requests support GET/HEAD methods.
The following requests support GET/HEAD methods.
##### By IDs
Basic downloading involves container ID and object ID and is done via GET
requests to `/get/$CID/$OID` path, where `$CID` is a container ID or its name if NNS is enabled,
`$OID` is an object's (i.e. your file's) ID.
requests to `/get/$CID/$OID` path, where `$CID` is a container ID or its name if NNS is enabled,
`$OID` is an object's (i.e. your file's) ID.
For example:
@ -300,13 +319,14 @@ can be used as well. The generic syntax for it looks like this:
```/get_by_attribute/$CID/$ATTRIBUTE_NAME/$ATTRIBUTE_VALUE```
where
`$CID` is a container ID or its name if NNS is enabled,
where
`$CID` is a container ID or its name if NNS is enabled,
`$ATTRIBUTE_NAME` is the name of the attribute we want to use,
`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have.
**NB!** The attribute key and value must be url encoded, i.e., if you want to download an object with the attribute value
`a cat`, the value in the request must be `a+cat`. In the same way with the attribute key.
**NB!** The attribute key and value should be URL-encoded, i.e., if you want to download an object with the attribute value
`a cat`, the value in the request must be `a+cat`. The same applies to the attribute key. If you don't escape such values,
the request may still work (for example, you can use `d@ta` without encoding), but it's HIGHLY RECOMMENDED to encode all your attributes.
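If you need a quick way to produce such an encoded value, any URL-encoding helper will do; for instance (assuming `python3` is available on the machine issuing the request):
```shell
# Encode an attribute value for use in the request path ('a cat' -> 'a+cat')
$ python3 -c "import urllib.parse; print(urllib.parse.quote_plus('a cat'))"
# output: a+cat
```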
If multiple objects have the specified attribute with the specified value, then the
first one found is returned (and you can't get the others via this interface).
@ -328,7 +348,7 @@ Some other user-defined attributes:
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Ololo/100500
```
Or when the attribute includes special symbols:
Or when the attribute includes special symbols:
```
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Olo%2Blo/100500 # means Olo+lo
```
@ -347,8 +367,8 @@ You can download some dir (files with the same prefix) in zip (it will be compre
$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix
```
**Note:** the objects must have a `FilePath` attribute, otherwise they will not be in the zip archive.
You can upload file with this attribute using `curl`:
**Note:** the objects must have a valid `FilePath` attribute (it should not contain a trailing `/`),
otherwise they will not be included in the zip archive. You can upload a file with this attribute using `curl`:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
@ -369,10 +389,15 @@ set of reply headers generated using the following rules:
* `x-container-id` contains container ID
* `x-object-id` contains object ID
* `x-owner-id` contains owner address
* all the other NeoFS attributes are converted to `X-Attribute-*` headers (but only
* all the other FrostFS attributes are converted to `X-Attribute-*` headers (but only
if they can be safely represented in HTTP header), for example `FileName`
attribute becomes `X-Attribute-FileName` header
##### Caching strategy
HTTP Gateway doesn't control caching (it does nothing with the `Cache-Control` header). The caching strategy strictly
depends on the application use case, so it should be handled carefully by a proxy server in front of the gateway.
### Uploading
You can POST files to `/upload/$CID` path where `$CID` is a container ID or its name if NNS is enabled. The
@ -401,12 +426,12 @@ You can also add some attributes to your file using the following rules:
"X-Attribute-" prefix stripped, that is if you add "X-Attribute-Ololo:
100500" header to your request the resulting object will get "Ololo:
100500" attribute
* "X-Attribute-NEOFS-*" headers are special
(`-NEOFS-` part can also be `-neofs-` or`-Neofs-`), they're used to set internal
NeoFS attributes starting with `__NEOFS__` prefix, for these attributes all
* "X-Attribute-SYSTEM-*" headers are special
(`-SYSTEM-` part can also be `-system-` or`-System-` (and even legacy `-Neofs-` for some next releases)), they're used to set internal
FrostFS attributes starting with `__SYSTEM__` prefix, for these attributes all
dashes get converted to underscores and all letters are capitalized. For
example, you can use "X-Attribute-NEOFS-Expiration-Epoch" header to set
`__NEOFS__EXPIRATION_EPOCH` attribute
example, you can use "X-Attribute-SYSTEM-Expiration-Epoch" header to set
`__SYSTEM__EXPIRATION_EPOCH` attribute
* `FileName` attribute is set from multipart's `filename` if not set
explicitly via `X-Attribute-FileName` header
* `Timestamp` attribute can be set using gateway local time if using
@ -416,13 +441,13 @@ You can also add some attributes to your file using the following rules:
---
**NOTE**
There are some reserved headers type of `X-Attribute-NEOFS-*` (headers are arranged in descending order of priority):
1. `X-Attribute-Neofs-Expiration-Epoch: 100`
2. `X-Attribute-Neofs-Expiration-Duration: 24h30m`
3. `X-Attribute-Neofs-Expiration-Timestamp: 1637574797`
4. `X-Attribute-Neofs-Expiration-RFC3339: 2021-11-22T09:55:49Z`
There are some reserved headers of the `X-Attribute-SYSTEM-*` type (headers are arranged in descending order of priority):
1. `X-Attribute-System-Expiration-Epoch: 100`
2. `X-Attribute-System-Expiration-Duration: 24h30m`
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
which transforms to `X-Attribute-Neofs-Expiration-Epoch`. So you can provide expiration any convenient way.
each of which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide the expiration in any convenient way.
---
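As an illustration, an upload that combines a regular attribute with one of the reserved expiration headers might look like the sketch below (the gateway address and container ID reuse the sample values from the examples above):
```shell
# Hypothetical upload: 'My-Tag' becomes a regular attribute, while the System
# header is converted by the gateway into the __SYSTEM__EXPIRATION_EPOCH attribute.
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' \
    -H 'X-Attribute-My-Tag: cute' \
    -H 'X-Attribute-System-Expiration-Duration: 24h30m' \
    http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
```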
@ -444,60 +469,43 @@ operations for a request signed with your HTTP Gateway keys.
If you don't want to manage the gateway's secret keys and adjust eACL rules when
the gateway configuration changes (new gate, key rotation, etc.) or you plan to use
public services, there is an option to let your application backend (or you)
issue Bearer Tokens ans pass them from the client via gate down to NeoFS level
issue Bearer Tokens and pass them from the client via the gateway down to the FrostFS level
to grant access.
NeoFS Bearer Token basically is a container owner-signed ACL data (refer to NeoFS
A FrostFS Bearer Token is basically container owner-signed ACL data (refer to FrostFS
documentation for more details). There are two options to pass them to gateway:
* "Authorization" header with "Bearer" type and base64-encoded token in
credentials field
* "Bearer" cookie with base64-encoded token contents
For example, you have a mobile application frontend with a backend part storing
data in NeoFS. When a user authorizes in the mobile app, the backend issues a NeoFS
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
Bearer token and provides it to the frontend. Then, the mobile app may generate
some data and upload it via any available NeoFS HTTP Gateway by adding
some data and upload it via any available FrostFS HTTP Gateway by adding
the corresponding header to the upload request. Accessing the ACL protected data
works the same way.
##### Example
In order to generate a bearer token, you need to know the container owner key and
the address of the sender who will do the request to NeoFS (in our case, it's a gateway wallet address).
In order to generate a bearer token, you need to have a wallet (which will be used to sign the token) and
the address of the sender who will do the request to FrostFS (in our case, it's a gateway wallet address).
Suppose we have:
* **KxDgvEKzgSBPPfuVfw67oPQBSjidEiqTHURKSDL1R7yGaGYAeYnr** (container owner key)
* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner address)
* **BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K** (container id)
* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner (gateway address))
Firstly, we need to encode the container id and the sender address to base64 (now it's base58).
Firstly, we need to encode the container id and the sender address to base64 (they are base58-encoded now).
So use the **base58** and **base64** utils.
1. Encoding container id:
```
$ echo 'BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K' | base58 --decode | base64
# output: mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg=
```
2. Encoding token owner id:
1. Encoding token owner id:
```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```
Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and save it to **bearer.json**:
2. Form a Bearer token (10000 is lifetime expiration in epoch) and save it to **bearer.json**:
```
{
"body": {
"eaclTable": {
"version": {
"major": 0,
"minor": 0
},
"containerID": {
"value": "mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg="
},
"records": []
},
"allowImpersonate": true,
"ownerID": {
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
},
@ -511,13 +519,14 @@ Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and sav
}
```
Next, sign it with the container owner key:
3. Sign it with the wallet:
```
$ neofs-cli util sign bearer-token --from bearer.json --to signed.json -w ./wallet.json
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w ./wallet.json
```
Encoding to base64 to use via the header:
4. Encode to base64 to use in header:
```
$ base64 -w 0 signed.json
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
```
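With the base64-encoded signed token at hand, a request could pass it in the `Authorization` header as sketched below (the gateway address is the sample one used elsewhere in this document, `$OID` is a placeholder object ID):
```shell
# Hypothetical request using the signed bearer token from the previous step
$ TOKEN=$(base64 -w 0 signed.json)
$ curl -H "Authorization: Bearer $TOKEN" \
    http://localhost:8082/get/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K/$OID
```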
@ -540,12 +549,12 @@ For the token to work correctly, you need to create a container with a basic ACL
For example:
```
$ neofs-cli -w ./wallet.json --basic-acl 0x0FFFCFFF -r 192.168.130.72:8080 container create --policy "REP 3" --await
$ frostfs-cli -w ./wallet.json --basic-acl 0x0FFFCFFF -r 192.168.130.72:8080 container create --policy "REP 3" --await
```
To deny access to a container without a token, set the eACL rules:
```
$ neofs-cli -w ./wallet.json -r 192.168.130.72:8080 container set-eacl --table eacl.json --await --cid BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
$ frostfs-cli -w ./wallet.json -r 192.168.130.72:8080 container set-eacl --table eacl.json --await --cid BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
```
File **eacl.json**:
@ -576,8 +585,8 @@ File **eacl.json**:
### Metrics and Pprof
If enabled, Prometheus metrics are available at `localhost:8084` endpoint
and Pprof at `localhost:8083/debug/pprof` by default. Host and port can be configured.
If enabled, Prometheus metrics are available at `localhost:8084` endpoint
and Pprof at `localhost:8083/debug/pprof` by default. Host and port can be configured.
See [configuration](./docs/gate-configuration.md).
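A quick smoke test of both endpoints might look like this; the `/metrics` path is the conventional Prometheus handler path and is assumed here rather than taken from this document:
```shell
# Check that metrics and pprof respond on the default addresses
$ curl -s http://localhost:8084/metrics | head
$ curl -s http://localhost:8083/debug/pprof/ | head
```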
## Credits

View file

@ -1 +1 @@
v0.25.0
v0.26.0

22
api/layer/tree_service.go Normal file
View file

@ -0,0 +1,22 @@
package layer
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/api"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// TreeService provides an interface to interact with the tree service using s3 data models.
type TreeService interface {
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
}
var (
// ErrNodeNotFound is returned from Tree service in case of not found error.
ErrNodeNotFound = errors.New("not found")
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
ErrNodeAccessDenied = errors.New("access denied")
)

17
api/tree.go Normal file
View file

@ -0,0 +1,17 @@
package api
import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// NodeVersion represents a node from the tree service.
type NodeVersion struct {
BaseNodeVersion
DeleteMarker bool
}
// BaseNodeVersion is minimal node info from tree service.
// Basically used for "system" object.
type BaseNodeVersion struct {
OID oid.ID
}

391
app.go
View file

@ -2,31 +2,33 @@ package main
import (
"context"
"crypto/ecdsa"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"strconv"
"sync"
"syscall"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/downloader"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/uploader"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/fasthttp/router"
"github.com/nspcc-dev/neo-go/cli/flags"
"github.com/nspcc-dev/neo-go/cli/input"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/util"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/nspcc-dev/neofs-http-gw/downloader"
"github.com/nspcc-dev/neofs-http-gw/metrics"
"github.com/nspcc-dev/neofs-http-gw/resolver"
"github.com/nspcc-dev/neofs-http-gw/response"
"github.com/nspcc-dev/neofs-http-gw/uploader"
"github.com/nspcc-dev/neofs-http-gw/utils"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"github.com/nspcc-dev/neofs-sdk-go/user"
"github.com/spf13/viper"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
@ -34,9 +36,12 @@ import (
type (
app struct {
ctx context.Context
log *zap.Logger
logLevel zap.AtomicLevel
pool *pool.Pool
treePool *treepool.Pool
key *keys.PrivateKey
owner *user.ID
cfg *viper.Viper
webServer *fasthttp.Server
@ -45,18 +50,18 @@ type (
metrics *gateMetrics
services []*metrics.Service
settings *appSettings
servers []Server
}
appSettings struct {
Uploader *uploader.Settings
Downloader *downloader.Settings
TLSProvider *certProvider
Uploader *uploader.Settings
Downloader *downloader.Settings
}
// App is an interface for the main gateway function.
App interface {
Wait()
Serve(context.Context)
Serve()
}
// Option is an application option.
@ -64,15 +69,10 @@ type (
gateMetrics struct {
logger *zap.Logger
provider GateMetricsProvider
provider *metrics.GateMetrics
mu sync.RWMutex
enabled bool
}
GateMetricsProvider interface {
SetHealth(int32)
Unregister()
}
)
// WithLogger returns Option to set a specific logger.
@ -97,12 +97,8 @@ func WithConfig(c *viper.Viper) Option {
}
func newApp(ctx context.Context, opt ...Option) App {
var (
key *ecdsa.PrivateKey
err error
)
a := &app{
ctx: ctx,
log: zap.L(),
cfg: viper.GetViper(),
webServer: new(fasthttp.Server),
@ -113,7 +109,7 @@ func newApp(ctx context.Context, opt ...Option) App {
}
// -- setup FastHTTP server --
a.webServer.Name = "neofs-http-gw"
a.webServer.Name = "frost-http-gw"
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
@ -125,62 +121,24 @@ func newApp(ctx context.Context, opt ...Option) App {
a.webServer.DisablePreParseMultipartForm = true
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
key, err = getNeoFSKey(a)
if err != nil {
a.log.Fatal("failed to get neofs credentials", zap.Error(err))
}
a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg)
var owner user.ID
user.IDFromKey(&owner, key.PublicKey)
user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
a.owner = &owner
var prm pool.InitParameters
prm.SetKey(key)
prm.SetNodeDialTimeout(a.cfg.GetDuration(cfgConTimeout))
prm.SetHealthcheckTimeout(a.cfg.GetDuration(cfgReqTimeout))
prm.SetClientRebalanceInterval(a.cfg.GetDuration(cfgRebalance))
prm.SetErrorThreshold(a.cfg.GetUint32(cfgPoolErrorThreshold))
for i := 0; ; i++ {
address := a.cfg.GetString(cfgPeers + "." + strconv.Itoa(i) + ".address")
weight := a.cfg.GetFloat64(cfgPeers + "." + strconv.Itoa(i) + ".weight")
priority := a.cfg.GetInt(cfgPeers + "." + strconv.Itoa(i) + ".priority")
if address == "" {
break
}
if weight <= 0 { // unspecified or wrong
weight = 1
}
if priority <= 0 { // unspecified or wrong
priority = 1
}
prm.AddNode(pool.NewNodeParam(priority, address, weight))
a.log.Info("add connection", zap.String("address", address),
zap.Float64("weight", weight), zap.Int("priority", priority))
}
a.pool, err = pool.NewPool(prm)
if err != nil {
a.log.Fatal("failed to create connection pool", zap.Error(err))
}
err = a.pool.Dial(ctx)
if err != nil {
a.log.Fatal("failed to dial pool", zap.Error(err))
}
a.initAppSettings()
a.initResolver()
a.initMetrics()
a.initTracing(ctx)
return a
}
func (a *app) initAppSettings() {
a.settings = &appSettings{
Uploader: &uploader.Settings{},
Downloader: &downloader.Settings{},
TLSProvider: &certProvider{Enabled: a.cfg.IsSet(cfgTLSCertificate) || a.cfg.IsSet(cfgTLSKey)},
Uploader: &uploader.Settings{},
Downloader: &downloader.Settings{},
}
a.updateSettings()
@ -196,7 +154,7 @@ func (a *app) initResolver() {
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
resolveCfg := &resolver.Config{
NeoFS: resolver.NewNeoFSResolver(a.pool),
FrostFS: resolver.NewFrostFSResolver(a.pool),
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
}
@ -216,15 +174,17 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
func (a *app) initMetrics() {
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
a.metrics.SetHealth(metrics.HealthStatusStarting)
}
func newGateMetrics(logger *zap.Logger, provider GateMetricsProvider, enabled bool) *gateMetrics {
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
if !enabled {
logger.Warn("metrics are disabled")
}
return &gateMetrics{
logger: logger,
provider: provider,
enabled: enabled,
}
}
@ -238,7 +198,7 @@ func (m *gateMetrics) SetEnabled(enabled bool) {
m.mu.Unlock()
}
func (m *gateMetrics) SetHealth(status int32) {
func (m *gateMetrics) SetHealth(status metrics.HealthStatus) {
m.mu.RLock()
if !m.enabled {
m.mu.RUnlock()
@ -249,10 +209,21 @@ func (m *gateMetrics) SetHealth(status int32) {
m.provider.SetHealth(status)
}
func (m *gateMetrics) SetVersion(ver string) {
m.mu.RLock()
if !m.enabled {
m.mu.RUnlock()
return
}
m.mu.RUnlock()
m.provider.SetVersion(ver)
}
func (m *gateMetrics) Shutdown() {
m.mu.Lock()
if m.enabled {
m.provider.SetHealth(0)
m.provider.SetHealth(metrics.HealthStatusShuttingDown)
m.enabled = false
}
m.provider.Unregister()
@ -268,16 +239,16 @@ func remove(list []string, element string) []string {
return list
}
func getNeoFSKey(a *app) (*ecdsa.PrivateKey, error) {
walletPath := a.cfg.GetString(cfgWalletPath)
func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error) {
walletPath := cfg.GetString(cfgWalletPath)
if len(walletPath) == 0 {
a.log.Info("no wallet path specified, creating ephemeral key automatically for this run")
log.Info("no wallet path specified, creating ephemeral key automatically for this run")
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
}
return &key.PrivateKey, nil
return key, nil
}
w, err := wallet.NewWalletFromFile(walletPath)
if err != nil {
@ -285,17 +256,17 @@ func getNeoFSKey(a *app) (*ecdsa.PrivateKey, error) {
}
var password *string
if a.cfg.IsSet(cfgWalletPassphrase) {
pwd := a.cfg.GetString(cfgWalletPassphrase)
if cfg.IsSet(cfgWalletPassphrase) {
pwd := cfg.GetString(cfgWalletPassphrase)
password = &pwd
}
address := a.cfg.GetString(cfgWalletAddress)
address := cfg.GetString(cfgWalletAddress)
return getKeyFromWallet(w, address, password)
}
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*ecdsa.PrivateKey, error) {
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys.PrivateKey, error) {
var addr util.Uint160
var err error
@ -325,98 +296,40 @@ func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*ecds
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
}
return &acc.PrivateKey().PrivateKey, nil
return acc.PrivateKey(), nil
}
func (a *app) Wait() {
a.log.Info("starting application", zap.String("app_name", "neofs-http-gw"), zap.String("version", Version))
a.log.Info("starting application", zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
a.metrics.SetVersion(Version)
a.setHealthStatus()
<-a.webDone // wait for web-server to be stopped
}
func (a *app) setHealthStatus() {
a.metrics.SetHealth(1)
a.metrics.SetHealth(metrics.HealthStatusReady)
}
type certProvider struct {
Enabled bool
mu sync.RWMutex
certPath string
keyPath string
cert *tls.Certificate
}
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
if !p.Enabled {
return nil, errors.New("cert provider: disabled")
}
p.mu.RLock()
defer p.mu.RUnlock()
return p.cert, nil
}
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
if !p.Enabled {
return fmt.Errorf("tls disabled")
}
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
}
p.mu.Lock()
p.certPath = certPath
p.keyPath = keyPath
p.cert = &cert
p.mu.Unlock()
return nil
}
func (a *app) Serve(ctx context.Context) {
uploadRoutes := uploader.New(ctx, a.AppParams(), a.settings.Uploader)
downloadRoutes := downloader.New(ctx, a.AppParams(), a.settings.Downloader)
func (a *app) Serve() {
uploadRoutes := uploader.New(a.AppParams(), a.settings.Uploader)
downloadRoutes := downloader.New(a.AppParams(), a.settings.Downloader, tree.NewTree(services.NewPoolWrapper(a.treePool)))
// Configure router.
a.configureRouter(uploadRoutes, downloadRoutes)
a.startServices()
a.initServers(a.ctx)
go func() {
var err error
defer func() {
if err != nil {
a.log.Fatal("could not start server", zap.Error(err))
for i := range a.servers {
go func(i int) {
a.log.Info("starting server", zap.String("address", a.servers[i].Address()))
if err := a.webServer.Serve(a.servers[i].Listener()); err != nil && err != http.ErrServerClosed {
a.log.Fatal("listen and serve", zap.Error(err))
}
}()
bind := a.cfg.GetString(cfgListenAddress)
if a.settings.TLSProvider.Enabled {
if err = a.settings.TLSProvider.UpdateCert(a.cfg.GetString(cfgTLSCertificate), a.cfg.GetString(cfgTLSKey)); err != nil {
return
}
var lnConf net.ListenConfig
var ln net.Listener
if ln, err = lnConf.Listen(ctx, "tcp4", bind); err != nil {
return
}
lnTLS := tls.NewListener(ln, &tls.Config{
GetCertificate: a.settings.TLSProvider.GetCertificate,
})
a.log.Info("running web server (TLS-enabled)", zap.String("address", bind))
err = a.webServer.Serve(lnTLS)
} else {
a.log.Info("running web server", zap.String("address", bind))
err = a.webServer.ListenAndServe(bind)
}
}()
}(i)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGHUP)
@ -424,10 +337,10 @@ func (a *app) Serve(ctx context.Context) {
LOOP:
for {
select {
case <-ctx.Done():
case <-a.ctx.Done():
break LOOP
case <-sigs:
a.configReload()
a.configReload(a.ctx)
}
}
@ -435,20 +348,32 @@ LOOP:
a.metrics.Shutdown()
a.stopServices()
a.shutdownTracing()
close(a.webDone)
}
func (a *app) configReload() {
func (a *app) shutdownTracing() {
const tracingShutdownTimeout = 5 * time.Second
shdnCtx, cancel := context.WithTimeout(context.Background(), tracingShutdownTimeout)
defer cancel()
if err := tracing.Shutdown(shdnCtx); err != nil {
a.log.Warn("failed to shutdown tracing", zap.Error(err))
}
}
func (a *app) configReload(ctx context.Context) {
a.log.Info("SIGHUP config reload started")
if !a.cfg.IsSet(cmdConfig) {
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
a.log.Warn("failed to reload config because it's missed")
return
}
if err := readConfig(a.cfg); err != nil {
if err := readInConfig(a.cfg); err != nil {
a.log.Warn("failed to reload config", zap.Error(err))
return
}
if lvl, err := getLogLevel(a.cfg); err != nil {
a.log.Warn("log level won't be updated", zap.Error(err))
} else {
@ -459,12 +384,17 @@ func (a *app) configReload() {
a.log.Warn("failed to update resolvers", zap.Error(err))
}
if err := a.updateServers(); err != nil {
a.log.Warn("failed to reload server parameters", zap.Error(err))
}
a.stopServices()
a.startServices()
a.updateSettings()
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
a.initTracing(ctx)
a.setHealthStatus()
a.log.Info("SIGHUP config reload completed")
@ -473,10 +403,6 @@ func (a *app) configReload() {
func (a *app) updateSettings() {
a.settings.Uploader.SetDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
a.settings.Downloader.SetZipCompression(a.cfg.GetBool(cfgZipCompression))
if err := a.settings.TLSProvider.UpdateCert(a.cfg.GetString(cfgTLSCertificate), a.cfg.GetString(cfgTLSKey)); err != nil {
a.log.Warn("failed to reload TLS certs", zap.Error(err))
}
}
func (a *app) startServices() {
@ -509,28 +435,55 @@ func (a *app) configureRouter(uploadRoutes *uploader.Uploader, downloadRoutes *d
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
}
r.POST("/upload/{cid}", a.logger(uploadRoutes.Upload))
r.POST("/upload/{cid}", a.logger(a.tokenizer(a.tracer(uploadRoutes.Upload))))
a.log.Info("added path /upload/{cid}")
r.GET("/get/{cid}/{oid}", a.logger(downloadRoutes.DownloadByAddress))
r.HEAD("/get/{cid}/{oid}", a.logger(downloadRoutes.HeadByAddress))
r.GET("/get/{cid}/{oid:*}", a.logger(a.tokenizer(a.tracer(downloadRoutes.DownloadByAddressOrBucketName))))
r.HEAD("/get/{cid}/{oid:*}", a.logger(a.tokenizer(a.tracer(downloadRoutes.HeadByAddressOrBucketName))))
a.log.Info("added path /get/{cid}/{oid}")
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.DownloadByAttribute))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.HeadByAttribute))
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.tokenizer(a.tracer(downloadRoutes.DownloadByAttribute))))
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.tokenizer(a.tracer(downloadRoutes.HeadByAttribute))))
a.log.Info("added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}")
r.GET("/zip/{cid}/{prefix:*}", a.logger(downloadRoutes.DownloadZipped))
r.GET("/zip/{cid}/{prefix:*}", a.logger(a.tokenizer(a.tracer(downloadRoutes.DownloadZipped))))
a.log.Info("added path /zip/{cid}/{prefix}")
a.webServer.Handler = r.Handler
}
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(ctx *fasthttp.RequestCtx) {
a.log.Info("request", zap.String("remote", ctx.RemoteAddr().String()),
zap.ByteString("method", ctx.Method()),
zap.ByteString("path", ctx.Path()),
zap.ByteString("query", ctx.QueryArgs().QueryString()),
zap.Uint64("id", ctx.ID()))
h(ctx)
return func(req *fasthttp.RequestCtx) {
a.log.Info("request", zap.String("remote", req.RemoteAddr().String()),
zap.ByteString("method", req.Method()),
zap.ByteString("path", req.Path()),
zap.ByteString("query", req.QueryArgs().QueryString()),
zap.Uint64("id", req.ID()))
h(req)
}
}
func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(req *fasthttp.RequestCtx) {
appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
if err != nil {
a.log.Error("could not fetch and store bearer token", zap.Error(err))
response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
}
utils.SetContextToRequest(appCtx, req)
h(req)
}
}
func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
return func(req *fasthttp.RequestCtx) {
appCtx := utils.GetContextFromRequest(req)
appCtx, span := utils.StartHTTPServerSpan(appCtx, req, "REQUEST")
defer func() {
utils.SetHTTPTraceInfo(appCtx, span, req)
span.End()
}()
utils.SetContextToRequest(appCtx, req)
h(req)
}
}
@ -542,3 +495,83 @@ func (a *app) AppParams() *utils.AppParams {
Resolver: a.resolver,
}
}
func (a *app) initServers(ctx context.Context) {
serversInfo := fetchServers(a.cfg)
a.servers = make([]Server, 0, len(serversInfo))
for _, serverInfo := range serversInfo {
fields := []zap.Field{
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
}
srv, err := newServer(ctx, serverInfo)
if err != nil {
a.log.Warn("failed to add server", append(fields, zap.Error(err))...)
continue
}
a.servers = append(a.servers, srv)
a.log.Info("add server", fields...)
}
if len(a.servers) == 0 {
a.log.Fatal("no healthy servers")
}
}
func (a *app) updateServers() error {
serversInfo := fetchServers(a.cfg)
var found bool
for _, serverInfo := range serversInfo {
index := a.serverIndex(serverInfo.Address)
if index == -1 {
continue
}
if serverInfo.TLS.Enabled {
if err := a.servers[index].UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
return fmt.Errorf("failed to update tls certs: %w", err)
}
}
found = true
}
if !found {
return fmt.Errorf("invalid servers configuration: no known server found")
}
return nil
}
func (a *app) serverIndex(address string) int {
for i := range a.servers {
if a.servers[i].Address() == address {
return i
}
}
return -1
}
func (a *app) initTracing(ctx context.Context) {
instanceID := ""
if len(a.servers) > 0 {
instanceID = a.servers[0].Address()
}
cfg := tracing.Config{
Enabled: a.cfg.GetBool(cfgTracingEnabled),
Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
Endpoint: a.cfg.GetString(cfgTracingEndpoint),
Service: "frostfs-http-gw",
InstanceID: instanceID,
Version: Version,
}
updated, err := tracing.Setup(ctx, cfg)
if err != nil {
a.log.Warn("failed to initialize tracing", zap.Error(err))
}
if updated {
a.log.Info("tracing config updated")
}
}

View file

@ -17,21 +17,23 @@ HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
# Log level.
HTTP_GW_LOGGER_LEVEL=debug
# Address to bind.
HTTP_GW_LISTEN_ADDRESS=0.0.0.0:443
# Provide cert to enable TLS.
HTTP_GW_TLS_CERTIFICATE=/path/to/tls/cert
# Provide key to enable TLS.
HTTP_GW_TLS_KEY=/path/to/tls/key
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false
HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert
HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key
HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444
HTTP_GW_SERVER_1_TLS_ENABLED=true
HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
# Nodes configuration.
# This configuration make the gateway use the first node (grpc://s01.neofs.devenv:8080)
# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.neofs.devenv:8080)
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
# for 10% of requests and the third node for 90% of requests.
# Peer 1.
# Endpoint.
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
# The lower the value, the higher the priority.
@ -39,11 +41,11 @@ HTTP_GW_PEERS_0_PRIORITY=1
# Load distribution proportion for nodes with the same priority.
HTTP_GW_PEERS_0_WEIGHT=1
# Peer 2.
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.neofs.devenv:8080
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080
HTTP_GW_PEERS_1_PRIORITY=2
HTTP_GW_PEERS_1_WEIGHT=1
# Peer 3.
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.neofs.devenv:8080
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080
HTTP_GW_PEERS_2_PRIORITY=2
HTTP_GW_PEERS_2_WEIGHT=9
@ -70,7 +72,7 @@ HTTP_GW_STREAM_REQUEST_BODY=true
HTTP_GW_MAX_REQUEST_BODY_SIZE=4194304
# RPC endpoint that enables NNS container name resolving.
HTTP_GW_RPC_ENDPOINT=http://morph-chain.neofs.devenv:30333
HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333
# The order in which resolvers are used to find a container id by name.
HTTP_GW_RESOLVE_ORDER="nns dns"
@ -79,12 +81,18 @@ HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false
# Timeout to dial node.
HTTP_GW_CONNECT_TIMEOUT=5s
# Timeout for individual operations in streaming RPC.
HTTP_GW_STREAM_TIMEOUT=10s
# Timeout to check node health during rebalance.
HTTP_GW_REQUEST_TIMEOUT=5s
# Interval to check nodes health.
HTTP_GW_REBALANCE_TIMER=30s
# The number of errors on connection after which node is considered as unhealthy
S3_GW_POOL_ERROR_THRESHOLD=100
HTTP_GW_POOL_ERROR_THRESHOLD=100
# Enable zip compression to download files by common prefix.
HTTP_GW_ZIP_COMPRESSION=false
HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"

View file

@ -4,27 +4,39 @@ wallet:
passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
pprof:
enabled: true # Enable pprof.
enabled: false # Enable pprof.
address: localhost:8083
prometheus:
enabled: true # Enable metrics.
enabled: false # Enable metrics.
address: localhost:8084
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost:4317"
logger:
level: debug # Log level.
listen_address: 0.0.0.0:443 # Address to bind.
tls_certificate: /path/to/tls/cert # Provide cert to enable TLS.
tls_key: /path/to/tls/key # Provide key to enable TLS.
server:
- address: 0.0.0.0:8080
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
- address: 0.0.0.0:8081
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
# Nodes configuration.
# This configuration make the gateway use the first node (grpc://s01.neofs.devenv:8080)
# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.neofs.devenv:8080)
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
# for 10% of requests and the third node for 90% of requests.
peers:
0:
# Endpoint.
address: grpc://s01.neofs.devenv:8080
address: grpc://s01.frostfs.devenv:8080
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
@ -34,11 +46,11 @@ peers:
# Load distribution proportion for nodes with the same priority.
weight: 1
1:
address: grpc://s02.neofs.devenv:8080
address: grpc://s02.frostfs.devenv:8080
priority: 2
weight: 1
2:
address: grpc://s03.neofs.devenv:8080
address: grpc://s03.frostfs.devenv:8080
priority: 2
weight: 9
@ -72,7 +84,7 @@ web:
max_request_body_size: 4194304
# RPC endpoint that enables NNS container name resolving.
rpc_endpoint: http://morph-chain.neofs.devenv:30333
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
# The order in which resolvers are used to find a container id by name.
resolve_order:
- nns
@ -82,6 +94,7 @@ upload_header:
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
connect_timeout: 5s # Timeout to dial node.
stream_timeout: 10s # Timeout for individual operations in streaming RPC.
request_timeout: 5s # Timeout to check node health during rebalance.
rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.

3
config/dir/pprof.yaml Normal file
View file

@ -0,0 +1,3 @@
pprof:
enabled: true
address: localhost:8083

View file

@ -0,0 +1,3 @@
prometheus:
enabled: true
address: localhost:8084

5
debian/changelog vendored Normal file
View file

@ -0,0 +1,5 @@
frostfs-http-gw (0.0.0) stable; urgency=medium
* Please see CHANGELOG.md
-- TrueCloudLab <tech@frostfs.info> Wed, 24 Aug 2022 18:29:49 +0300

14
debian/control vendored Normal file
View file

@ -0,0 +1,14 @@
Source: frostfs-http-gw
Section: frostfs
Priority: optional
Maintainer: TrueCloudLab <tech@frostfs.info>
Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts
Standards-Version: 4.5.1
Homepage: https://frostfs.info/
Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
Package: frostfs-http-gw
Architecture: any
Depends: ${misc:Depends}
Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.

25
debian/copyright vendored Normal file
View file

@ -0,0 +1,25 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: frostfs-http-gw
Upstream-Contact: tech@frostfs.info
Source: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
Files: *
Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project
(https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md)
2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project
(https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/src/branch/master/CREDITS.md)
License: GPL-3
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; version 3.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program or at /usr/share/common-licenses/GPL-3.
If not, see <http://www.gnu.org/licenses/>.

2
debian/frostfs-http-gw.dirs vendored Normal file
View file

@ -0,0 +1,2 @@
etc/frostfs
srv/frostfs_cache

4
debian/frostfs-http-gw.docs vendored Normal file
View file

@ -0,0 +1,4 @@
docs/gate-configuration.md
README.md
CREDITS.md
CONTRIBUTING.md

1
debian/frostfs-http-gw.examples vendored Normal file
View file

@ -0,0 +1 @@
config/*

2
debian/frostfs-http-gw.install vendored Normal file
View file

@ -0,0 +1,2 @@
bin/frostfs-http-gw usr/bin
config/config.yaml etc/frostfs/http

51
debian/frostfs-http-gw.postinst vendored Executable file
View file

@ -0,0 +1,51 @@
#!/bin/sh
# postinst script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <postinst> `abort-remove'
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
configure)
USERNAME=http
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /srv/frostfs_cache --system -M -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
chmod -f 0750 /etc/frostfs/$USERNAME
chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
fi
USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
if ! dpkg-statoverride --list frostfs-"$USERDIR" >/dev/null; then
chown -f frostfs-$USERNAME: "$USERDIR"
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

41
debian/frostfs-http-gw.postrm vendored Executable file
View file

@ -0,0 +1,41 @@
#!/bin/sh
# postrm script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postrm> `remove'
# * <postrm> `purge'
# * <old-postrm> `upgrade' <new-version>
# * <new-postrm> `failed-upgrade' <old-version>
# * <new-postrm> `abort-install'
# * <new-postrm> `abort-install' <old-version>
# * <new-postrm> `abort-upgrade' <old-version>
# * <disappearer's-postrm> `disappear' <overwriter>
# <overwriter-version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
purge)
rm -rf /srv/frostfs_cache
;;
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

35
debian/frostfs-http-gw.preinst vendored Executable file
View file

@ -0,0 +1,35 @@
#!/bin/sh
# preinst script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <new-preinst> `install'
# * <new-preinst> `install' <old-version>
# * <new-preinst> `upgrade' <old-version>
# * <old-preinst> `abort-upgrade' <new-version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
install|upgrade)
;;
abort-upgrade)
;;
*)
echo "preinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

38
debian/frostfs-http-gw.prerm vendored Executable file
View file

@ -0,0 +1,38 @@
#!/bin/sh
# prerm script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <prerm> `remove'
# * <old-prerm> `upgrade' <new-version>
# * <new-prerm> `failed-upgrade' <old-version>
# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
# * <deconfigured's-prerm> `deconfigure' `in-favour'
# <package-being-installed> <version> `removing'
# <conflicting-package> <version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
remove|upgrade|deconfigure)
;;
failed-upgrade)
;;
*)
echo "prerm called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

16
debian/frostfs-http-gw.service vendored Normal file
View file

@ -0,0 +1,16 @@
[Unit]
Description=FrostFS HTTP Gateway
Requires=network.target
[Service]
Type=simple
ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml
User=frostfs-http
Group=frostfs-http
WorkingDirectory=/srv/frostfs_cache
Restart=always
RestartSec=5
PrivateTmp=true
[Install]
WantedBy=multi-user.target

14
debian/rules vendored Executable file
View file

@ -0,0 +1,14 @@
#!/usr/bin/make -f
# Do not try to strip Go binaries and do not run test
export DEB_BUILD_OPTIONS := nostrip nocheck
SERVICE = frostfs-http-gw
%:
dh $@
override_dh_installsystemd:
dh_installsystemd --no-enable --no-start $(SERVICE).service
override_dh_installchangelogs:
dh_installchangelogs -k CHANGELOG.md

1
debian/source/format vendored Normal file
View file

@ -0,0 +1 @@
3.0 (quilt)

309
docs/api.md Normal file
View file

@ -0,0 +1,309 @@
# HTTP Gateway Specification
| Route | Description |
|-------------------------------------------------|----------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
**Note:** the `cid` parameter can be a base58-encoded container ID or a container name
(the name must be registered in NNS, see the appropriate section in [README](../README.md#nns)).
Route parameters can be:
* `Single` - matches a single path segment (cannot contain `/` and cannot be empty)
* `Catch-All` - matches everything (such a parameter is usually the last one in a route)
* `Query` - a regular query parameter
### Bearer token
All routes can accept [bearer token](../README.md#authentication) from:
* `Authorization` header with `Bearer` type and base64-encoded token in
credentials field
* `Bearer` cookie with base64-encoded token contents
Example:
Header:
```
Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
```
Cookie:
```
cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
```
## Put object
Route: `/upload/{cid}`
| Route parameter | Type | Description |
|-----------------|--------|---------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
### Methods
#### POST
Upload file as object with attributes to FrostFS.
##### Request
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
There are some reserved headers of the `X-Attribute-SYSTEM-*` type (headers are arranged in descending order of priority):
1. `X-Attribute-System-Expiration-Epoch: 100`
2. `X-Attribute-System-Expiration-Duration: 24h30m`
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
each of which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide the expiration in any convenient way.
If you don't specify the `X-Attribute-Timestamp` header, the `Timestamp` attribute can still be set anyway
(see http-gw [configuration](gate-configuration.md#upload-header-section)).
The `X-Attribute-*` headers must be unique. If you provide several headers with the same name, only one of them will be used.
Attribute keys and values must be valid UTF-8 strings. The total size of all attributes must not exceed 3 MB.
###### Body
The body must contain a multipart form with the file.
The `filename` field from the multipart form will be set as the `FileName` attribute of the object
(it can be overridden by the `X-Attribute-FileName` header).
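A minimal sketch of such an upload request (the gateway address and `$CID` are placeholders):
```shell
# Hypothetical multipart upload; 'filename' becomes the FileName attribute
# unless X-Attribute-FileName is provided.
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' http://localhost:8082/upload/$CID
```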
##### Response
###### Status codes
| Status | Description |
|--------|----------------------------------------------|
| 200 | Object created successfully. |
| 400 | Some error occurred during object uploading. |
## Get object
Route: `/get/{cid}/{oid}?[download=true]`
| Route parameter | Type | Description |
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `oid` | Single | Base58 encoded object ID. |
| `download`      | Query  | Set the `Content-Disposition` header as `attachment` in response.<br/> This makes the browser download the object as a file instead of showing it on the page. |
### Methods
#### GET
Get an object (payload and attributes) by an address.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> `filename` is set to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected using the payload. |
| `Content-Length` | Size of object payload. |
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|------------------------------------------------|
| 200    | Object retrieved successfully.                 |
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |
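For illustration only (the gateway address and IDs are placeholders), a download that respects `Content-Disposition` could be:
```shell
# -O -J save the payload under the name suggested by the Content-Disposition header
$ curl -OJ "http://localhost:8082/get/$CID/$OID?download=true"
```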
#### HEAD
Get an object attributes by an address.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected using the payload. |
| `Content-Length` | Size of object payload. |
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|---------------------------------------------------|
| 200    | Object attributes retrieved successfully.          |
| 400 | Some error occurred during object HEAD operation. |
| 404 | Container or object not found. |
## Search object
Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]`
| Route parameter | Type | Description |
|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `attr_key` | Single | Object attribute key to search. |
| `attr_val` | Catch-All | Object attribute value to match. |
| `download`      | Query     | Set the `Content-Disposition` header as `attachment` in response. This makes the browser download the object as a file instead of showing it on the page. |
### Methods
#### GET
Find and get an object (payload and attributes) by a specific attribute.
If more than one object is found, an arbitrary one will be returned.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> `filename` is set to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected using the payload. |
| `Content-Length` | Size of object payload. |
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|------------------------------------------------|
| 200    | Object retrieved successfully.                 |
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |
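A hypothetical request searching by the `FileName` attribute (placeholders for the gateway address and container):
```shell
# Returns an arbitrary object whose FileName attribute equals 'cat.jpeg'
$ curl "http://localhost:8082/get_by_attribute/$CID/FileName/cat.jpeg"
```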
#### HEAD
Get object attributes by a specific attribute.
If more than one object is found, an arbitrary one will be used to get attributes.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected using the payload. |
| `Content-Length` | Size of object payload. |
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|---------------------------------------|
| 200    | Object attributes retrieved successfully. |
| 400 | Some error occurred during operation. |
| 404 | Container or object not found. |
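A minimal sketch of reading object attributes via a HEAD request, again with a placeholder gateway address and container ID; attribute values come back as `X-Attribute-*` headers and no payload is transferred:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Placeholder gateway address, container ID and attribute key/value.
	cid := "Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ"
	reqURL := "http://localhost:8082/get_by_attribute/" + cid + "/FileName/example.txt"

	resp, err := http.Head(reqURL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Regular and system object attributes are exposed as X-Attribute-* headers.
	for name, values := range resp.Header {
		if strings.HasPrefix(name, "X-Attribute-") {
			fmt.Println(name, "=", values[0])
		}
	}
	fmt.Println("Content-Length:", resp.Header.Get("Content-Length"))
}
```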
## Download zip
Route: `/zip/{cid}/{prefix}`
| Route parameter | Type | Description |
|-----------------|-----------|---------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. |
### Methods
#### GET
Find objects whose `FilePath` attribute matches the specified prefix and return them in a ZIP archive.
The name of each file in the archive is set to the object's `FilePath` attribute.
The modification time of each file is set to the time when the object download started.
You can download all files in a container that have the `FilePath` attribute via the `/zip/{cid}/` route.
The archive can be compressed (see the http-gw [configuration](gate-configuration.md#zip-section)).
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicates how browsers should treat the file (`attachment`). Sets `filename` to `archive.zip`.                      |
| `Content-Type`        | Indicates the content type of the response. Set to `application/zip`.                                               |
###### Status codes
| Status | Description |
|--------|-----------------------------------------------------|
| 200    | Objects retrieved successfully.                      |
| 400 | Some error occurred during object downloading. |
| 404 | Container or objects not found. |
| 500    | Some internal error (e.g. an error while streaming objects). |
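As a client-side usage sketch (assuming a gateway on `localhost:8082` and a placeholder container ID and prefix), the archive can be fetched and inspected with Go's standard `archive/zip` package:

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder gateway address, container ID and FilePath prefix.
	reqURL := "http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/zipfolder/"

	resp, err := http.Get(reqURL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// archive/zip needs random access, so buffer the whole response body.
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
	if err != nil {
		panic(err)
	}
	// Entry names correspond to the FilePath attribute of the archived objects.
	for _, f := range zr.File {
		fmt.Println(f.Name, f.UncompressedSize64)
	}
}
```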

View file

@ -0,0 +1,46 @@
# Building Debian package on host
## Prerequisites
For now, we're assuming building for Debian 11 (stable) x86_64.
Go version 1.19 or later should already be installed, i.e. this runs
successfully:
* `make all`
## Installing packaging dependencies
```shell
$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
```
Warning: the number of packages installed is pretty large considering dependencies.
## Package building
```shell
$ make debpackage
```
## Leftovers cleaning
```shell
$ make debclean
```
or
```shell
$ dh clean
```
# Package versioning
By default, the package version is based on the product version and may also contain git
tags and hashes.
The package version can be overridden by setting the `PKG_VERSION` variable before the
build; Debian package versioning rules should be respected.
```shell
$ PKG_VERSION=0.32.0 make debpackage
```

View file

@ -1,6 +1,6 @@
# NeoFS HTTP Gateway configuration file
# FrostFS HTTP Gateway configuration file
This section contains detailed NeoFS HTTP Gateway configuration file description
This section contains detailed FrostFS HTTP Gateway configuration file description
including default config values and some tips to set up configurable values.
There are some custom types used for brevity:
@ -23,19 +23,19 @@ $ kill -s SIGHUP <app_pid>
Example:
```shell
$ ./bin/neofs-http-gw --config config.yaml &> http.log &
$ ./bin/frostfs-http-gw --config config.yaml &> http.log &
[1] 998346
$ cat http.log
# ...
2022-10-03T09:37:25.826+0300 info neofs-http-gw/app.go:332 starting application {"app_name": "neofs-http-gw", "version": "v0.24.0"}
2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"}
# ...
$ kill -s SIGHUP 998346
$ cat http.log
# ...
2022-10-03T09:38:16.205+0300 info neofs-http-gw/app.go:470 SIGHUP config reload completed
2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed
```
# Structure
@ -47,38 +47,35 @@ $ cat http.log
| `peers` | [Nodes configuration](#peers-section) |
| `logger` | [Logger configuration](#logger-section) |
| `web` | [Web configuration](#web-section) |
| `server` | [Server configuration](#server-section) |
| `upload-header` | [Upload header configuration](#upload-header-section) |
| `zip` | [ZIP configuration](#zip-section) |
| `pprof` | [Pprof configuration](#pprof-section) |
| `prometheus` | [Prometheus configuration](#prometheus-section) |
| `tracing` | [Tracing configuration](#tracing-section) |
# General section
```yaml
listen_address: 0.0.0.0:8082
tls_certificate: /path/to/tls/cert
tls_key: /path/to/tls/key
rpc_endpoint: http://morph-chain.neofs.devenv:30333
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
- dns
connect_timeout: 5s
request_timeout: 5s
connect_timeout: 5s
stream_timeout: 10s
request_timeout: 5s
rebalance_timer: 30s
pool_error_threshold: 100
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
| `listen_address` | `string` | | `0.0.0.0:8082` | The address that the gateway is listening on. |
| `tls_certificate` | `string` | yes | | Path to the TLS certificate. |
| `tls_key` | `string` | yes | | Path to the TLS key. |
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
| `pool_error_threshold` | `uint32`   |               | `100`          | The number of errors on a connection after which the node is considered unhealthy.  |
@ -87,8 +84,8 @@ pool_error_threshold: 100
```yaml
wallet:
path: /path/to/wallet.json
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
path: /path/to/wallet.json
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
passphrase: pwd
```
@ -102,23 +99,23 @@ wallet:
```yaml
# Nodes configuration
# This configuration makes the gateway use the first node (node1.neofs:8080)
# while it's healthy. Otherwise, gateway uses the second node (node2.neofs:8080)
# for 10% of requests and the third node (node3.neofs:8080) for 90% of requests.
# This configuration makes the gateway use the first node (node1.frostfs:8080)
# while it's healthy. Otherwise, gateway uses the second node (node2.frostfs:8080)
# for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests.
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
# The lower the value, the higher the priority.
peers:
0:
address: node1.neofs:8080
address: node1.frostfs:8080
priority: 1
weight: 1
1:
address: node2.neofs:8080
address: node2.frostfs:8080
priority: 2
weight: 0.1
2:
address: node3.neofs:8080
address: node3.frostfs:8080
priority: 2
weight: 0.9
```
@ -129,6 +126,32 @@ peers:
| `priority` | `int`   | `1` | Allows grouping nodes: the group is not switched until all nodes with the same priority become unhealthy. The lower the value, the higher the priority. |
| `weight`   | `float` | `1` | Weight of the node within its priority group. Requests are distributed to nodes proportionally to these values. |
# `server` section
You can specify several listeners for the server, for example for `http` and `https`.
```yaml
server:
- address: 0.0.0.0:8080
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
- address: 0.0.0.0:8081
tls:
enabled: true
cert_file: /path/to/another/cert
key_file: /path/to/another/key
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------|----------|---------------|----------------|-----------------------------------------------|
| `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. |
| `tls.enabled` | `bool` | | false | Enable TLS or not. |
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
| `tls.key_file` | `string` | yes | | Path to the key. |
# `logger` section
```yaml
@ -179,7 +202,7 @@ upload_header:
```yaml
zip:
compression: false
compression: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
@ -216,3 +239,20 @@ prometheus:
|-----------|----------|---------------|------------------|-----------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
# `tracing` section
Contains configuration for the `tracing` service.
```yaml
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost:4317"
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------|----------|---------------|------------------|---------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |

View file

@ -14,19 +14,19 @@ import (
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"github.com/nspcc-dev/neofs-http-gw/resolver"
"github.com/nspcc-dev/neofs-http-gw/response"
"github.com/nspcc-dev/neofs-http-gw/tokens"
"github.com/nspcc-dev/neofs-http-gw/utils"
"github.com/nspcc-dev/neofs-sdk-go/bearer"
"github.com/nspcc-dev/neofs-sdk-go/client"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/valyala/fasthttp"
"go.uber.org/atomic"
"go.uber.org/zap"
@ -34,12 +34,9 @@ import (
type request struct {
*fasthttp.RequestCtx
appCtx context.Context
log *zap.Logger
log *zap.Logger
}
const attributeFilePath = "FilePath"
func isValidToken(s string) bool {
for _, c := range s {
if c <= ' ' || c > 127 {
@ -91,40 +88,35 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (str
return http.DetectContentType(buf), buf, err // to not lose io.EOF
}
func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
func receiveFile(ctx context.Context, req request, clnt *pool.Pool, objectAddress oid.Address) {
var (
err error
dis = "inline"
start = time.Now()
filename string
)
if err = tokens.StoreBearerToken(r.RequestCtx); err != nil {
r.log.Error("could not fetch and store bearer token", zap.Error(err))
response.Error(r.RequestCtx, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
return
}
var prm pool.PrmObjectGet
prm.SetAddress(objectAddress)
if btoken := bearerToken(r.RequestCtx); btoken != nil {
if btoken := bearerToken(ctx); btoken != nil {
prm.UseBearer(*btoken)
}
rObj, err := clnt.GetObject(r.appCtx, prm)
rObj, err := clnt.GetObject(ctx, prm)
if err != nil {
r.handleNeoFSErr(err, start)
req.handleFrostFSErr(err, start)
return
}
// we can't close reader in this function, so how to do it?
if r.Request.URI().QueryArgs().GetBool("download") {
if req.Request.URI().QueryArgs().GetBool("download") {
dis = "attachment"
}
payloadSize := rObj.Header.PayloadSize()
r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
var contentType string
for _, attr := range rObj.Header.Attributes() {
key := attr.Key()
@ -132,30 +124,30 @@ func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
if !isValidToken(key) || !isValidValue(val) {
continue
}
if strings.HasPrefix(key, utils.SystemAttributePrefix) {
key = systemBackwardTranslator(key)
}
r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
key = utils.BackwardTransformIfSystem(key)
req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
switch key {
case object.AttributeFileName:
filename = val
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
r.log.Info("couldn't parse creation date",
req.log.Info("couldn't parse creation date",
zap.String("key", key),
zap.String("val", val),
zap.Error(err))
continue
}
r.Response.Header.Set(fasthttp.HeaderLastModified,
req.Response.Header.Set(fasthttp.HeaderLastModified,
time.Unix(value, 0).UTC().Format(http.TimeFormat))
case object.AttributeContentType:
contentType = val
}
}
idsToResponse(&r.Response, &rObj.Header)
idsToResponse(&req.Response, &rObj.Header)
if len(contentType) == 0 {
// determine the Content-Type from the payload head
@ -165,8 +157,8 @@ func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
return rObj.Payload, nil
})
if err != nil && err != io.EOF {
r.log.Error("could not detect Content-Type from payload", zap.Error(err))
response.Error(r.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
req.log.Error("could not detect Content-Type from payload", zap.Error(err))
response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return
}
@ -181,41 +173,11 @@ func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
// if it implements io.Closer and that's useful for us.
rObj.Payload = readCloser{headReader, rObj.Payload}
}
r.SetContentType(contentType)
req.SetContentType(contentType)
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
r.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
// systemBackwardTranslator is used to convert headers looking like '__NEOFS__ATTR_NAME' to 'Neofs-Attr-Name'.
func systemBackwardTranslator(key string) string {
// trim specified prefix '__NEOFS__'
key = strings.TrimPrefix(key, utils.SystemAttributePrefix)
var res strings.Builder
res.WriteString("Neofs-")
strs := strings.Split(key, "_")
for i, s := range strs {
s = title(strings.ToLower(s))
res.WriteString(s)
if i != len(strs)-1 {
res.WriteString("-")
}
}
return res.String()
}
func title(str string) string {
if str == "" {
return ""
}
r, size := utf8.DecodeRuneInString(str)
r0 := unicode.ToTitle(r)
return string(r0) + str[size:]
req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
func bearerToken(ctx context.Context) *bearer.Token {
@ -225,29 +187,25 @@ func bearerToken(ctx context.Context) *bearer.Token {
return nil
}
func (r *request) handleNeoFSErr(err error, start time.Time) {
r.log.Error(
"could not receive object",
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
)
if client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err) {
response.Error(r.RequestCtx, "Not Found", fasthttp.StatusNotFound)
return
}
statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
msg := fmt.Sprintf("could not receive object: %v", err)
response.Error(r.RequestCtx, msg, fasthttp.StatusBadRequest)
r.log.Error("could not receive object", logFields...)
response.Error(r.RequestCtx, msg, statusCode)
}
// Downloader is a download request handler.
type Downloader struct {
appCtx context.Context
log *zap.Logger
pool *pool.Pool
containerResolver *resolver.ContainerResolver
settings *Settings
tree *tree.Tree
}
// Settings stores reloading parameters, so it has to provide atomic getters and setters.
@ -264,39 +222,47 @@ func (s *Settings) SetZipCompression(val bool) {
}
// New creates an instance of Downloader using specified options.
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Downloader {
func New(params *utils.AppParams, settings *Settings, tree *tree.Tree) *Downloader {
return &Downloader{
appCtx: ctx,
log: params.Logger,
pool: params.Pool,
settings: settings,
containerResolver: params.Resolver,
tree: tree,
}
}
func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
return &request{
RequestCtx: ctx,
appCtx: d.appCtx,
log: log,
}
}
// DownloadByAddress handles download requests using simple cid/oid format.
func (d *Downloader) DownloadByAddress(c *fasthttp.RequestCtx) {
d.byAddress(c, request.receiveFile)
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (d *Downloader) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
test, _ := c.UserValue("oid").(string)
var id oid.ID
err := id.DecodeString(test)
if err != nil {
d.byBucketname(c, receiveFile)
} else {
d.byAddress(c, receiveFile)
}
}
// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
var (
idCnr, _ = c.UserValue("cid").(string)
idObj, _ = c.UserValue("oid").(string)
log = d.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
)
cnrID, err := utils.GetContainerID(d.appCtx, idCnr, d.containerResolver)
ctx := utils.GetContextFromRequest(c)
cnrID, err := utils.GetContainerID(ctx, idCnr, d.containerResolver)
if err != nil {
log.Error("wrong container id", zap.Error(err))
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
@ -314,16 +280,53 @@ func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, *pool.Poo
addr.SetContainer(*cnrID)
addr.SetObject(*objID)
f(*d.newRequest(c, log), d.pool, addr)
f(ctx, *d.newRequest(c, log), d.pool, addr)
}
// byBucketname is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (d *Downloader) byBucketname(req *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
var (
bucketname = req.UserValue("cid").(string)
key = req.UserValue("oid").(string)
log = d.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
)
ctx := utils.GetContextFromRequest(req)
cnrID, err := utils.GetContainerID(ctx, bucketname, d.containerResolver)
if err != nil {
log.Error("wrong container id", zap.Error(err))
response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
return
}
foundOid, err := d.tree.GetLatestVersion(ctx, cnrID, key)
if err != nil {
log.Error("object wasn't found", zap.Error(err))
response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
return
}
if foundOid.DeleteMarker {
log.Error("object was deleted")
response.Error(req, "object deleted", fasthttp.StatusNotFound)
return
}
var addr oid.Address
addr.SetContainer(*cnrID)
addr.SetObject(foundOid.OID)
f(ctx, *d.newRequest(req, log), d.pool, addr)
}
// DownloadByAttribute handles attribute-based download requests.
func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
d.byAttribute(c, request.receiveFile)
d.byAttribute(c, receiveFile)
}
// byAttribute is a wrapper similar to byAddress.
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, *pool.Pool, oid.Address)) {
var (
scid, _ = c.UserValue("cid").(string)
key, _ = url.QueryUnescape(c.UserValue("attr_key").(string))
@ -331,14 +334,16 @@ func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.P
log = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
)
containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
ctx := utils.GetContextFromRequest(c)
containerID, err := utils.GetContainerID(ctx, scid, d.containerResolver)
if err != nil {
log.Error("wrong container id", zap.Error(err))
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
return
}
res, err := d.search(c, containerID, key, val, object.MatchStringEqual)
res, err := d.search(ctx, containerID, key, val, object.MatchStringEqual)
if err != nil {
log.Error("could not search for objects", zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
@ -366,10 +371,10 @@ func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.P
addrObj.SetContainer(*containerID)
addrObj.SetObject(buf[0])
f(*d.newRequest(c, log), d.pool, addrObj)
f(ctx, *d.newRequest(c, log), d.pool, addrObj)
}
func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
func (d *Downloader) search(ctx context.Context, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
filters := object.NewSearchFilters()
filters.AddRootFilter()
filters.AddFilter(key, val, op)
@ -377,11 +382,18 @@ func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string
var prm pool.PrmObjectSearch
prm.SetContainerID(*cid)
prm.SetFilters(filters)
if btoken := bearerToken(c); btoken != nil {
if btoken := bearerToken(ctx); btoken != nil {
prm.UseBearer(*btoken)
}
return d.pool.SearchObjects(d.appCtx, prm)
return d.pool.SearchObjects(ctx, prm)
}
func (d *Downloader) getContainer(ctx context.Context, cnrID cid.ID) (container.Container, error) {
var prm pool.PrmContainerGet
prm.SetContainerID(cnrID)
return d.pool.GetContainer(ctx, prm)
}
func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
@ -390,8 +402,13 @@ func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writ
method = zip.Deflate
}
filePath := getZipFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}
return zw.CreateHeader(&zip.FileHeader{
Name: getZipFilePath(obj),
Name: filePath,
Method: method,
Modified: time.Now(),
})
@ -403,20 +420,29 @@ func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))
containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
ctx := utils.GetContextFromRequest(c)
containerID, err := utils.GetContainerID(ctx, scid, d.containerResolver)
if err != nil {
log.Error("wrong container id", zap.Error(err))
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
return
}
if err = tokens.StoreBearerToken(c); err != nil {
log.Error("could not fetch and store bearer token", zap.Error(err))
response.Error(c, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
// check if container exists here to be able to return 404 error,
// otherwise we get this error only in object iteration step
// and client get 200 OK.
if _, err = d.getContainer(ctx, *containerID); err != nil {
log.Error("could not check container existence", zap.Error(err))
if client.IsErrContainerNotFound(err) {
response.Error(c, "Not Found", fasthttp.StatusNotFound)
return
}
response.Error(c, "could not check container existence: "+err.Error(), fasthttp.StatusBadRequest)
return
}
resSearch, err := d.search(c, containerID, attributeFilePath, prefix, object.MatchCommonPrefix)
resSearch, err := d.search(ctx, containerID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
log.Error("could not search for objects", zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
@ -437,7 +463,7 @@ func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
empty := true
called := false
btoken := bearerToken(c)
btoken := bearerToken(ctx)
addr.SetContainer(*containerID)
errIter := resSearch.Iterate(func(id oid.ID) bool {
@ -449,44 +475,34 @@ func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
empty = false
addr.SetObject(id)
if err = d.zipObject(zipWriter, addr, btoken, bufZip); err != nil {
return true
if err = d.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
log.Error("failed to add object to archive", zap.String("oid", id.EncodeToString()), zap.Error(err))
}
return false
})
if errIter != nil {
log.Error("iterating over selected objects failed", zap.Error(errIter))
response.Error(c, "iterating over selected objects: "+errIter.Error(), fasthttp.StatusBadRequest)
return
} else if !called {
log.Error("objects not found")
response.Error(c, "objects not found", fasthttp.StatusNotFound)
return
}
if err == nil {
err = zipWriter.Close()
}
if err != nil {
log.Error("file streaming failure", zap.Error(err))
response.Error(c, "file streaming failure: "+err.Error(), fasthttp.StatusInternalServerError)
return
if err = zipWriter.Close(); err != nil {
log.Error("close zip writer", zap.Error(err))
}
})
}
func (d *Downloader) zipObject(zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
func (d *Downloader) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
var prm pool.PrmObjectGet
prm.SetAddress(addr)
if btoken != nil {
prm.UseBearer(*btoken)
}
resGet, err := d.pool.GetObject(d.appCtx, prm)
resGet, err := d.pool.GetObject(ctx, prm)
if err != nil {
return fmt.Errorf("get NeoFS object: %v", err)
return fmt.Errorf("get FrostFS object: %v", err)
}
objWriter, err := d.addObjectToZip(zipWriter, &resGet.Header)
@ -511,7 +527,7 @@ func (d *Downloader) zipObject(zipWriter *zip.Writer, addr oid.Address, btoken *
func getZipFilePath(obj *object.Object) string {
for _, attr := range obj.Attributes() {
if attr.Key() == attributeFilePath {
if attr.Key() == object.AttributeFilePath {
return attr.Value()
}
}

View file

@ -1,23 +0,0 @@
package downloader
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestSystemBackwardTranslator(t *testing.T) {
input := []string{
"__NEOFS__EXPIRATION_EPOCH",
"__NEOFS__RANDOM_ATTR",
}
expected := []string{
"Neofs-Expiration-Epoch",
"Neofs-Random-Attr",
}
for i, str := range input {
res := systemBackwardTranslator(str)
require.Equal(t, expected[i], res)
}
}

View file

@ -1,18 +1,16 @@
package downloader
import (
"context"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/nspcc-dev/neofs-http-gw/response"
"github.com/nspcc-dev/neofs-http-gw/tokens"
"github.com/nspcc-dev/neofs-http-gw/utils"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
@ -26,15 +24,10 @@ const (
hdrContainerID = "X-Container-Id"
)
func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
func headObject(ctx context.Context, req request, clnt *pool.Pool, objectAddress oid.Address) {
var start = time.Now()
if err := tokens.StoreBearerToken(r.RequestCtx); err != nil {
r.log.Error("could not fetch and store bearer token", zap.Error(err))
response.Error(r.RequestCtx, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
return
}
btoken := bearerToken(r.RequestCtx)
btoken := bearerToken(ctx)
var prm pool.PrmObjectHead
prm.SetAddress(objectAddress)
@ -42,13 +35,13 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
prm.UseBearer(*btoken)
}
obj, err := clnt.HeadObject(r.appCtx, prm)
obj, err := clnt.HeadObject(ctx, prm)
if err != nil {
r.handleNeoFSErr(err, start)
req.handleFrostFSErr(err, start)
return
}
r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
var contentType string
for _, attr := range obj.Attributes() {
key := attr.Key()
@ -56,27 +49,27 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
if !isValidToken(key) || !isValidValue(val) {
continue
}
if strings.HasPrefix(key, utils.SystemAttributePrefix) {
key = systemBackwardTranslator(key)
}
r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
key = utils.BackwardTransformIfSystem(key)
req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
switch key {
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
r.log.Info("couldn't parse creation date",
req.log.Info("couldn't parse creation date",
zap.String("key", key),
zap.String("val", val),
zap.Error(err))
continue
}
r.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
case object.AttributeContentType:
contentType = val
}
}
idsToResponse(&r.Response, &obj)
idsToResponse(&req.Response, &obj)
if len(contentType) == 0 {
contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
@ -87,18 +80,18 @@ func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
prmRange.UseBearer(*btoken)
}
resObj, err := clnt.ObjectRange(r.appCtx, prmRange)
resObj, err := clnt.ObjectRange(ctx, prmRange)
if err != nil {
return nil, err
}
return &resObj, nil
})
if err != nil && err != io.EOF {
r.handleNeoFSErr(err, start)
req.handleFrostFSErr(err, start)
return
}
}
r.SetContentType(contentType)
req.SetContentType(contentType)
}
func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
@ -109,12 +102,20 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
resp.Header.Set(hdrContainerID, cnrID.String())
}
// HeadByAddress handles head requests using simple cid/oid format.
func (d *Downloader) HeadByAddress(c *fasthttp.RequestCtx) {
d.byAddress(c, request.headObject)
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (d *Downloader) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
test, _ := c.UserValue("oid").(string)
var id oid.ID
err := id.DecodeString(test)
if err != nil {
d.byBucketname(c, headObject)
} else {
d.byAddress(c, headObject)
}
}
// HeadByAttribute handles attribute-based head requests.
func (d *Downloader) HeadByAttribute(c *fasthttp.RequestCtx) {
d.byAttribute(c, request.headObject)
d.byAttribute(c, headObject)
}

View file

@ -1,3 +1,5 @@
//go:build !integration
package downloader
import (

114
go.mod
View file

@ -1,99 +1,113 @@
module github.com/nspcc-dev/neofs-http-gw
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.17
go 1.19
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230531114046-62edd68f47ac
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230608140155-9d40228cecbe
github.com/fasthttp/router v1.4.1
github.com/nspcc-dev/neo-go v0.99.2
github.com/nspcc-dev/neofs-api-go/v2 v2.13.2-0.20221005093543-3a91383f24a9
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.6.0.20221007102402-8c682641bfd2
github.com/prometheus/client_golang v1.13.0
github.com/nspcc-dev/neo-go v0.101.1
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_model v0.3.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/spf13/viper v1.15.0
github.com/stretchr/testify v1.8.3
github.com/testcontainers/testcontainers-go v0.13.0
github.com/valyala/fasthttp v1.34.0
go.uber.org/atomic v1.9.0
go.uber.org/zap v1.18.1
go.opentelemetry.io/otel v1.16.0
go.opentelemetry.io/otel/trace v1.16.0
go.uber.org/atomic v1.10.0
go.uber.org/zap v1.24.0
google.golang.org/grpc v1.55.0
)
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.0.0-20230307110621-19a8ef2d02fb // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.22.0-beta // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.14+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/klauspost/compress v1.15.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/klauspost/compress v1.16.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/sys/mount v0.3.2 // indirect
github.com/moby/sys/mountinfo v0.6.1 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
github.com/nspcc-dev/hrw v1.0.9 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220809123759-3094d3e0c14b // indirect
github.com/nspcc-dev/neofs-contract v0.15.3 // indirect
github.com/nspcc-dev/neofs-crypto v0.4.0 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20221202075445-cb5c18dc73eb // indirect
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
github.com/nspcc-dev/tzhash v1.6.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.1.1 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect
google.golang.org/grpc v1.48.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/sdk v1.16.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.2.0 // indirect
golang.org/x/sys v0.8.0 // indirect
golang.org/x/term v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

457
go.sum

File diff suppressed because it is too large

View file

@ -1,3 +1,5 @@
//go:build integration
package main
import (
@ -13,45 +15,38 @@ import (
"testing"
"time"
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neofs-sdk-go/container"
"github.com/nspcc-dev/neofs-sdk-go/container/acl"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/netmap"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"github.com/nspcc-dev/neofs-sdk-go/user"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)
const attributeFilePath = "FilePath"
type putResponse struct {
CID string `json:"container_id"`
OID string `json:"object_id"`
}
const (
testContainerName = "friendly"
versionWithNativeNames = "0.27.5"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
)
func TestIntegration(t *testing.T) {
rootCtx := context.Background()
aioImage := "nspccdev/neofs-aio-testcontainer:"
aioImage := "truecloudlab/frostfs-aio:"
versions := []string{
"0.27.5",
"0.28.1",
"0.29.0",
"0.30.0",
"0.32.0",
"latest",
"1.2.7", // frostfs-storage v0.36.0 RC
}
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
require.NoError(t, err)
@ -69,6 +64,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err, version)
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
@ -96,10 +92,8 @@ func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, vers
url := testHost + "/upload/" + CID.String()
makePutRequestAndCheck(ctx, t, p, CID, url)
if version >= versionWithNativeNames {
url = testHost + "/upload/" + testContainerName
makePutRequestAndCheck(ctx, t, p, CID, url)
}
url = testHost + "/upload/" + testContainerName
makePutRequestAndCheck(ctx, t, p, CID, url)
}
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
@ -173,6 +167,43 @@ func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnr
}
}
func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
url := testHost + "/upload/" + CID.String()
attr := "X-Attribute-User-Attribute"
content := "content of file"
valOne, valTwo := "first_value", "second_value"
fileName := "newFile.txt"
var buff bytes.Buffer
w := multipart.NewWriter(&buff)
fw, err := w.CreateFormFile("file", fileName)
require.NoError(t, err)
_, err = io.Copy(fw, bytes.NewBufferString(content))
require.NoError(t, err)
err = w.Close()
require.NoError(t, err)
request, err := http.NewRequest(http.MethodPost, url, &buff)
require.NoError(t, err)
request.Header.Set("Content-Type", w.FormDataContentType())
request.Header.Add(attr, valOne)
request.Header.Add(attr, valTwo)
resp, err := http.DefaultClient.Do(request)
require.NoError(t, err)
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
content := "content of file"
attributes := map[string]string{
@ -185,11 +216,9 @@ func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
if version >= versionWithNativeNames {
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
}
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
}
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
@ -239,18 +268,16 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
require.NoError(t, err)
checkGetByAttrResponse(t, resp, content, expectedAttr)
if version >= versionWithNativeNames {
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
require.NoError(t, err)
checkGetByAttrResponse(t, resp, content, expectedAttr)
}
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
require.NoError(t, err)
checkGetByAttrResponse(t, resp, content, expectedAttr)
}
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
contents := []string{"content of file1", "content of file2"}
attributes1 := map[string]string{attributeFilePath: names[0]}
attributes2 := map[string]string{attributeFilePath: names[1]}
attributes1 := map[string]string{object.AttributeFilePath: names[0]}
attributes2 := map[string]string{object.AttributeFilePath: names[1]}
putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
@ -258,10 +285,8 @@ func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID us
baseURL := testHost + "/zip/" + CID.String()
makeZipTest(t, baseURL, names, contents)
if version >= versionWithNativeNames {
baseURL = testHost + "/zip/" + testContainerName
makeZipTest(t, baseURL, names, contents)
}
baseURL = testHost + "/zip/" + testContainerName
makeZipTest(t, baseURL, names, contents)
}
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
@ -337,7 +362,7 @@ func getDefaultConfig() *viper.Viper {
v.SetDefault(cfgPeers+".0.priority", 1)
v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
v.SetDefault(cfgListenAddress, testListenAddress)
v.SetDefault("server.0.address", testListenAddress)
return v
}
@ -369,11 +394,11 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
container.SetCreationTime(&cnr, time.Now())
if version >= versionWithNativeNames {
var domain container.Domain
domain.SetName(testContainerName)
container.WriteDomain(&cnr, domain)
}
var domain container.Domain
domain.SetName(testContainerName)
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
var waitPrm pool.WaitParams
waitPrm.SetTimeout(15 * time.Second)

View file

@ -0,0 +1,115 @@
package services
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)
type GetNodeByPathResponseInfoWrapper struct {
response *grpcService.GetNodeByPathResponse_Info
}
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
return n.response.GetNodeId()
}
func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
return n.response.GetParentId()
}
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type GetSubTreeResponseBodyWrapper struct {
response *grpcService.GetSubTreeResponse_Body
}
func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
return n.response.GetNodeId()
}
func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
return n.response.GetParentId()
}
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
return n.response.GetTimestamp()
}
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type PoolWrapper struct {
p *treepool.Pool
}
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
return &PoolWrapper{p: p}
}
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetNodesParams{
CID: prm.CnrID,
TreeID: prm.TreeID,
Path: prm.Path,
Meta: prm.Meta,
PathAttribute: tree.FileNameKey,
LatestOnly: prm.LatestOnly,
AllAttrs: prm.AllAttrs,
BearerToken: getBearer(ctx),
}
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
res := make([]tree.NodeResponse, len(nodes))
for i, info := range nodes {
res[i] = GetNodeByPathResponseInfoWrapper{info}
}
return res, nil
}
func getBearer(ctx context.Context) []byte {
token, err := tokens.LoadBearerToken(ctx)
if err != nil {
return nil
}
return token.Marshal()
}
func handleError(err error) error {
if err == nil {
return nil
}
if errors.Is(err, treepool.ErrNodeNotFound) {
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
}
if errors.Is(err, treepool.ErrNodeAccessDenied) {
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
}
return err
}

View file

@ -12,6 +12,6 @@ func main() {
logger, atomicLevel := newLogger(v)
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
go application.Serve(globalContext)
go application.Serve()
application.Wait()
}

140
metrics/desc.go Normal file
View file

@ -0,0 +1,140 @@
package metrics
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
var appMetricsDesc = map[string]map[string]Description{
poolSubsystem: {
overallErrorsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallErrorsMetric,
Help: "Total number of errors in pool",
},
overallNodeErrorsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallNodeErrorsMetric,
Help: "Total number of errors for connection in pool",
VariableLabels: []string{"node"},
},
overallNodeRequestsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallNodeRequestsMetric,
Help: "Total number of requests to specific node in pool",
VariableLabels: []string{"node"},
},
currentErrorMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: currentErrorMetric,
Help: "Number of errors on current connections that will be reset after the threshold",
VariableLabels: []string{"node"},
},
avgRequestDurationMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: avgRequestDurationMetric,
Help: "Average request duration (in milliseconds) for specific method on node in pool",
VariableLabels: []string{"node", "method"},
},
},
stateSubsystem: {
healthMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: healthMetric,
Help: "Current HTTP gateway state",
},
versionInfoMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: versionInfoMetric,
Help: "Version of current FrostFS HTTP Gate instance",
VariableLabels: []string{"version"},
},
},
}
type Description struct {
Type dto.MetricType
Namespace string
Subsystem string
Name string
Help string
ConstantLabels prometheus.Labels
VariableLabels []string
}
func (d *Description) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Type string `json:"type"`
FQName string `json:"name"`
Help string `json:"help"`
ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
VariableLabels []string `json:"variable_labels,omitempty"`
}{
Type: d.Type.String(),
FQName: d.BuildFQName(),
Help: d.Help,
ConstantLabels: d.ConstantLabels,
VariableLabels: d.VariableLabels,
})
}
func (d *Description) BuildFQName() string {
return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
}
// DescribeAll returns descriptions for metrics.
func DescribeAll() []Description {
var list []Description
for _, m := range appMetricsDesc {
for _, description := range m {
list = append(list, description)
}
}
return list
}
func newOpts(description Description) prometheus.Opts {
return prometheus.Opts{
Namespace: description.Namespace,
Subsystem: description.Subsystem,
Name: description.Name,
Help: description.Help,
ConstLabels: description.ConstantLabels,
}
}
func mustNewGauge(description Description) prometheus.Gauge {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGauge(
prometheus.GaugeOpts(newOpts(description)),
)
}
func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGaugeVec(
prometheus.GaugeOpts(newOpts(description)),
description.VariableLabels,
)
}

37
metrics/desc_test.go Normal file
View file

@ -0,0 +1,37 @@
//go:build dump_metrics
package metrics
import (
"encoding/json"
"flag"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/stretchr/testify/require"
)
type mock struct{}
func (m mock) Statistic() pool.Statistic {
return pool.Statistic{}
}
var metricsPath = flag.String("out", "", "File to export http gateway metrics to.")
func TestDescribeAll(t *testing.T) {
// to check correct metrics type mapping
_ = NewGateMetrics(mock{})
flag.Parse()
require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")
desc := DescribeAll()
data, err := json.Marshal(desc)
require.NoError(t, err)
err = os.WriteFile(*metricsPath, data, 0644)
require.NoError(t, err)
}

View file

@ -3,17 +3,32 @@ package metrics
import (
"net/http"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
)
const (
namespace = "neofs_http_gw"
namespace = "frostfs_http_gw"
stateSubsystem = "state"
poolSubsystem = "pool"
)
const (
healthMetric = "health"
versionInfoMetric = "version_info"
)
const (
overallErrorsMetric = "overall_errors"
overallNodeErrorsMetric = "overall_node_errors"
overallNodeRequestsMetric = "overall_node_requests"
currentErrorMetric = "current_errors"
avgRequestDurationMetric = "avg_request_duration"
)
const (
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
@ -31,6 +46,20 @@ const (
methodCreateSession = "create_session"
)
// HealthStatus of the gate application.
type HealthStatus int32
const (
HealthStatusUndefined HealthStatus = 0
HealthStatusStarting HealthStatus = 1
HealthStatusReady HealthStatus = 2
HealthStatusShuttingDown HealthStatus = 3
)
type StatisticScraper interface {
Statistic() pool.Statistic
}
type GateMetrics struct {
stateMetrics
poolMetricsCollector
@ -38,10 +67,11 @@ type GateMetrics struct {
type stateMetrics struct {
healthCheck prometheus.Gauge
versionInfo *prometheus.GaugeVec
}
type poolMetricsCollector struct {
pool *pool.Pool
scraper StatisticScraper
overallErrors prometheus.Gauge
overallNodeErrors *prometheus.GaugeVec
overallNodeRequests *prometheus.GaugeVec
@ -50,7 +80,7 @@ type poolMetricsCollector struct {
}
// NewGateMetrics creates new metrics for http gate.
func NewGateMetrics(p *pool.Pool) *GateMetrics {
func NewGateMetrics(p StatisticScraper) *GateMetrics {
stateMetric := newStateMetrics()
stateMetric.register()
@ -70,93 +100,37 @@ func (g *GateMetrics) Unregister() {
func newStateMetrics() *stateMetrics {
return &stateMetrics{
healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: stateSubsystem,
Name: "health",
Help: "Current HTTP gateway state",
}),
healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
}
}
func (m stateMetrics) register() {
prometheus.MustRegister(m.healthCheck)
prometheus.MustRegister(m.versionInfo)
}
func (m stateMetrics) unregister() {
prometheus.Unregister(m.healthCheck)
prometheus.Unregister(m.versionInfo)
}
func (m stateMetrics) SetHealth(s int32) {
func (m stateMetrics) SetHealth(s HealthStatus) {
m.healthCheck.Set(float64(s))
}
func newPoolMetricsCollector(p *pool.Pool) *poolMetricsCollector {
overallErrors := prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: poolSubsystem,
Name: "overall_errors",
Help: "Total number of errors in pool",
},
)
overallNodeErrors := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: poolSubsystem,
Name: "overall_node_errors",
Help: "Total number of errors for connection in pool",
},
[]string{
"node",
},
)
overallNodeRequests := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: poolSubsystem,
Name: "overall_node_requests",
Help: "Total number of requests to specific node in pool",
},
[]string{
"node",
},
)
currentErrors := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: poolSubsystem,
Name: "current_errors",
Help: "Number of errors on current connections that will be reset after the threshold",
},
[]string{
"node",
},
)
requestsDuration := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: poolSubsystem,
Name: "avg_request_duration",
Help: "Average request duration (in milliseconds) for specific method on node in pool",
},
[]string{
"node",
"method",
},
)
func (m stateMetrics) SetVersion(ver string) {
m.versionInfo.WithLabelValues(ver).Set(1)
}
func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
return &poolMetricsCollector{
pool: p,
overallErrors: overallErrors,
overallNodeErrors: overallNodeErrors,
overallNodeRequests: overallNodeRequests,
currentErrors: currentErrors,
requestDuration: requestsDuration,
scraper: p,
overallErrors: mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]),
overallNodeErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]),
overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
currentErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
requestDuration: mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
}
}
@ -182,7 +156,7 @@ func (m *poolMetricsCollector) register() {
}
func (m *poolMetricsCollector) updateStatistic() {
stat := m.pool.Statistic()
stat := m.scraper.Statistic()
m.overallNodeErrors.Reset()
m.overallNodeRequests.Reset()

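For orientation, a minimal usage sketch of the reworked metrics API (illustrative only, not part of this diff; the package import path, helper name and version string are assumptions). Since the collector now depends on the StatisticScraper interface rather than a concrete *pool.Pool, any value with a Statistic() method can drive it.

// Illustrative sketch; assumes import "git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics".
func registerAppMetrics(p *pool.Pool, version string) *metrics.GateMetrics {
	gateMetrics := metrics.NewGateMetrics(p) // *pool.Pool satisfies StatisticScraper
	gateMetrics.SetVersion(version)
	gateMetrics.SetHealth(metrics.HealthStatusStarting)
	return gateMetrics
}
// Later: gateMetrics.SetHealth(metrics.HealthStatusReady) once servers are up,
// metrics.HealthStatusShuttingDown and gateMetrics.Unregister() on shutdown.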
resolver/frostfs.go Normal file (+35 lines)

@ -0,0 +1,35 @@
package resolver
import (
"context"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
)
// FrostFSResolver represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type FrostFSResolver struct {
pool *pool.Pool
}
// NewFrostFSResolver creates new FrostFSResolver using provided pool.Pool.
func NewFrostFSResolver(p *pool.Pool) *FrostFSResolver {
return &FrostFSResolver{pool: p}
}
// SystemDNS implements resolver.FrostFS interface method.
func (x *FrostFSResolver) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", fmt.Errorf("read network info via client: %w", err)
}
domain := networkInfo.RawNetworkParameter("SystemDNS")
if domain == nil {
return "", errors.New("system DNS parameter not found or empty")
}
return string(domain), nil
}
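A hedged wiring sketch (illustrative only; the pool variable and helper name are assumptions): FrostFSResolver satisfies the FrostFS interface consumed by NewDNSResolver further down in this diff.

// Illustrative sketch; p is assumed to be an already-dialled *pool.Pool.
func newContainerNameResolver(p *pool.Pool) (*resolver.Resolver, error) {
	frostFS := resolver.NewFrostFSResolver(p)
	return resolver.NewDNSResolver(frostFS) // FrostFSResolver implements resolver.FrostFS
}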


@ -1,35 +0,0 @@
package resolver
import (
"context"
"errors"
"fmt"
"github.com/nspcc-dev/neofs-sdk-go/pool"
)
// NeoFSResolver represents virtual connection to the NeoFS network.
// It implements resolver.NeoFS.
type NeoFSResolver struct {
pool *pool.Pool
}
// NewNeoFSResolver creates new NeoFSResolver using provided pool.Pool.
func NewNeoFSResolver(p *pool.Pool) *NeoFSResolver {
return &NeoFSResolver{pool: p}
}
// SystemDNS implements resolver.NeoFS interface method.
func (x *NeoFSResolver) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", fmt.Errorf("read network info via client: %w", err)
}
domain := networkInfo.RawNetworkParameter("SystemDNS")
if domain == nil {
return "", errors.New("system DNS parameter not found or empty")
}
return string(domain), nil
}


@ -6,8 +6,9 @@ import (
"fmt"
"sync"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/ns"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
)
const (
@ -18,9 +19,9 @@ const (
// ErrNoResolvers is returned when trying to resolve a container without any resolvers.
var ErrNoResolvers = errors.New("no resolvers")
// NeoFS represents virtual connection to the NeoFS network.
type NeoFS interface {
// SystemDNS reads system DNS network parameters of the NeoFS.
// FrostFS represents virtual connection to the FrostFS network.
type FrostFS interface {
// SystemDNS reads system DNS network parameters of the FrostFS.
//
// Returns exactly one non-zero value. Returns any error encountered
// that prevented the parameter from being read.
@ -28,7 +29,7 @@ type NeoFS interface {
}
type Config struct {
NeoFS NeoFS
FrostFS FrostFS
RPCAddress string
}
@ -134,7 +135,7 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.NeoFS)
return NewDNSResolver(cfg.FrostFS)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress)
default:
@ -142,17 +143,17 @@ func newResolver(name string, cfg *Config) (*Resolver, error) {
}
}
func NewDNSResolver(neoFS NeoFS) (*Resolver, error) {
if neoFS == nil {
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
var dns ns.DNS
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
domain, err := neoFS.SystemDNS(ctx)
domain, err := frostFS.SystemDNS(ctx)
if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the NeoFS: %w", err)
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
}
domain = name + "." + domain
@ -177,7 +178,10 @@ func NewNNSResolver(rpcAddress string) (*Resolver, error) {
}
resolveFunc := func(_ context.Context, name string) (*cid.ID, error) {
cnrID, err := nns.ResolveContainerName(name)
var d container.Domain
d.SetName(name)
cnrID, err := nns.ResolveContainerDomain(d)
if err != nil {
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
}


@ -1,7 +1,41 @@
package response
import "github.com/valyala/fasthttp"
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
func Error(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch {
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}
return statusCode, msg, logFields
}
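A hedged example of the intended call pattern (compare handlePutFrostFSErr later in this diff; the wrapper name, message text and logger are assumptions):

// Illustrative sketch; req is the current *fasthttp.RequestCtx, log an assumed *zap.Logger.
func respondWithError(req *fasthttp.RequestCtx, log *zap.Logger, err error) {
	statusCode, msg, additionalFields := response.FormErrorResponse("could not fetch object", err)
	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
	log.Error("could not fetch object", logFields...)
	response.Error(req, msg, statusCode)
}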

server.go Normal file (+122 lines)

@ -0,0 +1,122 @@
package main
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"sync"
)
type (
ServerInfo struct {
Address string
TLS ServerTLSInfo
}
ServerTLSInfo struct {
Enabled bool
CertFile string
KeyFile string
}
Server interface {
Address() string
Listener() net.Listener
UpdateCert(certFile, keyFile string) error
}
server struct {
address string
listener net.Listener
tlsProvider *certProvider
}
certProvider struct {
Enabled bool
mu sync.RWMutex
certPath string
keyPath string
cert *tls.Certificate
}
)
func (s *server) Address() string {
return s.address
}
func (s *server) Listener() net.Listener {
return s.listener
}
func (s *server) UpdateCert(certFile, keyFile string) error {
return s.tlsProvider.UpdateCert(certFile, keyFile)
}
func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
var lic net.ListenConfig
ln, err := lic.Listen(ctx, "tcp", serverInfo.Address)
if err != nil {
return nil, fmt.Errorf("could not prepare listener: %w", err)
}
tlsProvider := &certProvider{
Enabled: serverInfo.TLS.Enabled,
}
if serverInfo.TLS.Enabled {
if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
return nil, fmt.Errorf("failed to update cert: %w", err)
}
ln = tls.NewListener(ln, &tls.Config{
GetCertificate: tlsProvider.GetCertificate,
})
}
return &server{
address: serverInfo.Address,
listener: ln,
tlsProvider: tlsProvider,
}, nil
}
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
if !p.Enabled {
return nil, errors.New("cert provider: disabled")
}
p.mu.RLock()
defer p.mu.RUnlock()
return p.cert, nil
}
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
if !p.Enabled {
return fmt.Errorf("tls disabled")
}
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
}
p.mu.Lock()
p.certPath = certPath
p.keyPath = keyPath
p.cert = &cert
p.mu.Unlock()
return nil
}
func (p *certProvider) FilePaths() (string, string) {
if !p.Enabled {
return "", ""
}
p.mu.RLock()
defer p.mu.RUnlock()
return p.certPath, p.keyPath
}
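A minimal sketch of the expected usage (illustrative only; the address, file paths, logger and fasthttp server variable are assumptions): the gateway builds one server per configured listen address and can swap the TLS certificate without recreating the listener.

// Illustrative sketch, package main.
srv, err := newServer(ctx, ServerInfo{
	Address: "0.0.0.0:8443",
	TLS: ServerTLSInfo{
		Enabled:  true,
		CertFile: "/etc/frostfs/http-gw/tls.crt",
		KeyFile:  "/etc/frostfs/http-gw/tls.key",
	},
})
if err != nil {
	logger.Fatal("could not create server", zap.Error(err))
}
go func() { _ = httpServer.Serve(srv.Listener()) }() // httpServer is an assumed *fasthttp.Server
// On certificate rotation the key pair can be reloaded in place:
_ = srv.UpdateCert("/etc/frostfs/http-gw/tls.crt", "/etc/frostfs/http-gw/tls.key")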


@ -1,34 +1,45 @@
package main
import (
"context"
"encoding/hex"
"fmt"
"os"
"path"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/nspcc-dev/neofs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
const (
defaultRebalanceTimer = 60 * time.Second
defaultRequestTimeout = 15 * time.Second
defaultConnectTimeout = 10 * time.Second
defaultStreamTimeout = 10 * time.Second
defaultShutdownTimeout = 15 * time.Second
defaultPoolErrorThreshold uint32 = 100
cfgListenAddress = "listen_address"
cfgTLSCertificate = "tls_certificate"
cfgTLSKey = "tls_key"
cfgServer = "server"
cfgTLSEnabled = "tls.enabled"
cfgTLSCertFile = "tls.cert_file"
cfgTLSKeyFile = "tls.key_file"
// Web.
cfgWebReadBufferSize = "web.read_buffer_size"
@ -44,8 +55,14 @@ const (
cfgPprofEnabled = "pprof.enabled"
cfgPprofAddress = "pprof.address"
// Tracing ...
cfgTracingEnabled = "tracing.enabled"
cfgTracingExporter = "tracing.exporter"
cfgTracingEndpoint = "tracing.endpoint"
// Pool config.
cfgConTimeout = "connect_timeout"
cfgStreamTimeout = "stream_timeout"
cfgReqTimeout = "request_timeout"
cfgRebalance = "rebalance_timer"
cfgPoolErrorThreshold = "pool_error_threshold"
@ -74,13 +91,15 @@ const (
cfgZipCompression = "zip.compression"
// Command line args.
cmdHelp = "help"
cmdVersion = "version"
cmdPprof = "pprof"
cmdMetrics = "metrics"
cmdWallet = "wallet"
cmdAddress = "address"
cmdConfig = "config"
cmdHelp = "help"
cmdVersion = "version"
cmdPprof = "pprof"
cmdMetrics = "metrics"
cmdWallet = "wallet"
cmdAddress = "address"
cmdConfig = "config"
cmdConfigDir = "config-dir"
cmdListenAddress = "listen_address"
)
var ignore = map[string]struct{}{
@ -110,15 +129,17 @@ func settings() *viper.Viper {
flags.StringP(cmdWallet, "w", "", `path to the wallet`)
flags.String(cmdAddress, "", `address of wallet account`)
flags.String(cmdConfig, "", "config path")
flags.StringArray(cmdConfig, nil, "config paths")
flags.String(cmdConfigDir, "", "config dir path")
flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout")
flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
flags.String(cfgListenAddress, "0.0.0.0:8082", "address to listen")
flags.String(cfgTLSCertificate, "", "TLS certificate path")
flags.String(cfgTLSKey, "", "TLS key path")
peers := flags.StringArrayP(cfgPeers, "p", nil, "NeoFS nodes")
flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
flags.String(cfgTLSCertFile, "", "TLS certificate path")
flags.String(cfgTLSKeyFile, "", "TLS key path")
peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
@ -168,17 +189,31 @@ func settings() *viper.Viper {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
panic(err)
}
if err := flags.Parse(os.Args); err != nil {
panic(err)
}
if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
v.Set(cfgServer+".0."+cfgTLSEnabled, true)
}
if resolveMethods != nil {
v.SetDefault(cfgResolveOrder, *resolveMethods)
}
switch {
case help != nil && *help:
fmt.Printf("NeoFS HTTP Gateway %s\n", Version)
fmt.Printf("FrostFS HTTP Gateway %s\n", Version)
flags.PrintDefaults()
fmt.Println()
@ -210,14 +245,12 @@ func settings() *viper.Viper {
os.Exit(0)
case version != nil && *version:
fmt.Printf("NeoFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
os.Exit(0)
}
if v.IsSet(cmdConfig) {
if err := readConfig(v); err != nil {
panic(err)
}
if err := readInConfig(v); err != nil {
panic(err)
}
if peers != nil && len(*peers) > 0 {
@ -231,17 +264,68 @@ func settings() *viper.Viper {
return v
}
func readConfig(v *viper.Viper) error {
cfgFileName := v.GetString(cmdConfig)
cfgFile, err := os.Open(cfgFileName)
func readInConfig(v *viper.Viper) error {
if v.IsSet(cmdConfig) {
if err := readConfig(v); err != nil {
return err
}
}
if v.IsSet(cmdConfigDir) {
if err := readConfigDir(v); err != nil {
return err
}
}
return nil
}
func readConfigDir(v *viper.Viper) error {
cfgSubConfigDir := v.GetString(cmdConfigDir)
entries, err := os.ReadDir(cfgSubConfigDir)
if err != nil {
return err
}
if err = v.ReadConfig(cfgFile); err != nil {
for _, entry := range entries {
if entry.IsDir() {
continue
}
ext := path.Ext(entry.Name())
if ext != ".yaml" && ext != ".yml" {
continue
}
if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
return err
}
}
return nil
}
func readConfig(v *viper.Viper) error {
for _, fileName := range v.GetStringSlice(cmdConfig) {
if err := mergeConfig(v, fileName); err != nil {
return err
}
}
return nil
}
func mergeConfig(v *viper.Viper, fileName string) error {
cfgFile, err := os.Open(fileName)
if err != nil {
return err
}
return cfgFile.Close()
defer func() {
if errClose := cfgFile.Close(); errClose != nil {
panic(errClose)
}
}()
return v.MergeConfig(cfgFile)
}
// newLogger constructs a zap.Logger instance for current application.
@ -294,3 +378,141 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
}
return lvl, nil
}
func fetchServers(v *viper.Viper) []ServerInfo {
var servers []ServerInfo
for i := 0; ; i++ {
key := cfgServer + "." + strconv.Itoa(i) + "."
var serverInfo ServerInfo
serverInfo.Address = v.GetString(key + "address")
serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled)
serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile)
serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile)
if serverInfo.Address == "" {
break
}
servers = append(servers, serverInfo)
}
return servers
}
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
key, err := getFrostFSKey(cfg, logger)
if err != nil {
logger.Fatal("could not load FrostFS private key", zap.Error(err))
}
var prm pool.InitParameters
var prmTree treepool.InitParameters
prm.SetKey(&key.PrivateKey)
prmTree.SetKey(key)
logger.Info("using credentials", zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
for _, peer := range fetchPeers(logger, cfg) {
prm.AddNode(peer)
prmTree.AddNode(peer)
}
connTimeout := cfg.GetDuration(cfgConTimeout)
if connTimeout <= 0 {
connTimeout = defaultConnectTimeout
}
prm.SetNodeDialTimeout(connTimeout)
prmTree.SetNodeDialTimeout(connTimeout)
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
if streamTimeout <= 0 {
streamTimeout = defaultStreamTimeout
}
prm.SetNodeStreamTimeout(streamTimeout)
prmTree.SetNodeStreamTimeout(streamTimeout)
healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
if healthCheckTimeout <= 0 {
healthCheckTimeout = defaultRequestTimeout
}
prm.SetHealthcheckTimeout(healthCheckTimeout)
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
rebalanceInterval := cfg.GetDuration(cfgRebalance)
if rebalanceInterval <= 0 {
rebalanceInterval = defaultRebalanceTimer
}
prm.SetClientRebalanceInterval(rebalanceInterval)
prmTree.SetClientRebalanceInterval(rebalanceInterval)
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
if errorThreshold <= 0 {
errorThreshold = defaultPoolErrorThreshold
}
prm.SetErrorThreshold(errorThreshold)
prm.SetLogger(logger)
prmTree.SetLogger(logger)
var apiGRPCDialOpts []grpc.DialOption
var treeGRPCDialOpts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
if cfg.GetBool(cfgTracingEnabled) {
interceptors := []grpc.DialOption{
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
}
treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
}
prm.SetGRPCDialOptions(apiGRPCDialOpts...)
prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)
p, err := pool.NewPool(prm)
if err != nil {
logger.Fatal("failed to create connection pool", zap.Error(err))
}
if err = p.Dial(ctx); err != nil {
logger.Fatal("failed to dial connection pool", zap.Error(err))
}
treePool, err := treepool.NewPool(prmTree)
if err != nil {
logger.Fatal("failed to create tree pool", zap.Error(err))
}
if err = treePool.Dial(ctx); err != nil {
logger.Fatal("failed to dial tree pool", zap.Error(err))
}
return p, treePool, key
}
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
var nodes []pool.NodeParam
for i := 0; ; i++ {
key := cfgPeers + "." + strconv.Itoa(i) + "."
address := v.GetString(key + "address")
weight := v.GetFloat64(key + "weight")
priority := v.GetInt(key + "priority")
if address == "" {
break
}
if weight <= 0 { // unspecified or wrong
weight = 1
}
if priority <= 0 { // unspecified or wrong
priority = 1
}
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
l.Info("added storage peer",
zap.Int("priority", priority),
zap.String("address", address),
zap.Float64("weight", weight))
}
return nodes
}
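For reference, a hedged sketch of the configuration keys that fetchServers walks (the values are assumptions; the keys follow the constants above): listen addresses are indexed under server.N, and per-server TLS settings live under server.N.tls.*.

// Illustrative sketch, package main.
v := viper.New()
v.Set("server.0.address", "0.0.0.0:8080")
v.Set("server.1.address", "0.0.0.0:8443")
v.Set("server.1.tls.enabled", true)
v.Set("server.1.tls.cert_file", "/etc/frostfs/http-gw/tls.crt")
v.Set("server.1.tls.key_file", "/etc/frostfs/http-gw/tls.key")
servers := fetchServers(v) // two ServerInfo entries, the second with TLS enabled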


@ -7,15 +7,17 @@ import (
"errors"
"fmt"
"github.com/nspcc-dev/neofs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"github.com/valyala/fasthttp"
)
type fromHandler = func(h *fasthttp.RequestHeader) []byte
type ctxKey string
const (
bearerTokenHdr = "Bearer"
bearerTokenKey = "__context_bearer_token_key"
bearerTokenHdr = "Bearer"
bearerTokenKey ctxKey = "__context_bearer_token_key"
)
// BearerToken usage:
@ -48,16 +50,15 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {
return auth
}
// StoreBearerToken extracts a bearer token from the header or cookie and stores
// it in the request context.
func StoreBearerToken(ctx *fasthttp.RequestCtx) error {
tkn, err := fetchBearerToken(ctx)
// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
// it in the application context.
func StoreBearerTokenAppCtx(ctx context.Context, req *fasthttp.RequestCtx) (context.Context, error) {
tkn, err := fetchBearerToken(req)
if err != nil {
return err
return nil, err
}
// This is an analog of context.WithValue.
ctx.SetUserValue(bearerTokenKey, tkn)
return nil
newCtx := context.WithValue(ctx, bearerTokenKey, tkn)
return newCtx, nil
}
// LoadBearerToken returns a bearer token stored in the context given (if it's


@ -1,12 +1,15 @@
//go:build !integration
package tokens
import (
"context"
"encoding/base64"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neofs-sdk-go/bearer"
"github.com/nspcc-dev/neofs-sdk-go/user"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
)
@ -148,13 +151,14 @@ func Test_checkAndPropagateBearerToken(t *testing.T) {
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
ctx := makeTestRequest(t64, "")
req := makeTestRequest(t64, "")
// Expect to see the token within the context.
require.NoError(t, StoreBearerToken(ctx))
appCtx, err := StoreBearerTokenAppCtx(context.Background(), req)
require.NoError(t, err)
// Expect to see the same token without errors.
actual, err := LoadBearerToken(ctx)
actual, err := LoadBearerToken(appCtx)
require.NoError(t, err)
require.Equal(t, tkn, actual)
}

tree/tree.go Normal file (+156 lines)

@ -0,0 +1,156 @@
package tree
import (
"context"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/api/layer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
type (
Tree struct {
service ServiceClient
}
// ServiceClient is a client to interact with tree service.
// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
ServiceClient interface {
GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
}
treeNode struct {
ObjID oid.ID
Meta map[string]string
}
GetNodesParams struct {
CnrID cid.ID
TreeID string
Path []string
Meta []string
LatestOnly bool
AllAttrs bool
}
)
var (
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
ErrNodeNotFound = layer.ErrNodeNotFound
// ErrNodeAccessDenied is returned from ServiceClient in case of access denied error.
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
)
const (
FileNameKey = "FileName"
)
const (
oidKV = "OID"
// keys for delete marker nodes.
isDeleteMarkerKV = "IsDeleteMarker"
// versionTree -- ID of a tree with object versions.
versionTree = "version"
separator = "/"
)
// NewTree creates an instance of Tree using the provided ServiceClient.
func NewTree(service ServiceClient) *Tree {
return &Tree{service: service}
}
type Meta interface {
GetKey() string
GetValue() []byte
}
type NodeResponse interface {
GetMeta() []Meta
}
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
treeNode := &treeNode{
Meta: make(map[string]string, len(nodeInfo.GetMeta())),
}
for _, kv := range nodeInfo.GetMeta() {
switch kv.GetKey() {
case oidKV:
if err := treeNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
return nil, err
}
default:
treeNode.Meta[kv.GetKey()] = string(kv.GetValue())
}
}
return treeNode, nil
}
func (n *treeNode) Get(key string) (string, bool) {
value, ok := n.Meta[key]
return value, ok
}
func (n *treeNode) FileName() (string, bool) {
value, ok := n.Meta[FileNameKey]
return value, ok
}
func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
treeNode, err := newTreeNode(node)
if err != nil {
return nil, fmt.Errorf("invalid tree node: %w", err)
}
return newNodeVersionFromTreeNode(treeNode), nil
}
func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
version := &api.NodeVersion{
BaseNodeVersion: api.BaseNodeVersion{
OID: treeNode.ObjID,
},
DeleteMarker: isDeleteMarker,
}
return version
}
func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
meta := []string{oidKV, isDeleteMarkerKV}
path := pathFromName(objectName)
p := &GetNodesParams{
CnrID: *cnrID,
TreeID: versionTree,
Path: path,
Meta: meta,
LatestOnly: true,
AllAttrs: false,
}
nodes, err := c.service.GetNodes(ctx, p)
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, layer.ErrNodeNotFound
}
return newNodeVersion(nodes[0])
}
// pathFromName splits name by '/'.
func pathFromName(objectName string) []string {
return strings.Split(objectName, separator)
}
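A hedged usage sketch (illustrative only; the helper name, service client and object name are assumptions): the gateway asks the tree service for the latest version of an object addressed by a human-readable name.

// Illustrative sketch; svc is assumed to implement tree.ServiceClient.
func latestObjectID(ctx context.Context, t *tree.Tree, cnrID cid.ID, name string) (oid.ID, error) {
	version, err := t.GetLatestVersion(ctx, &cnrID, name)
	if err != nil {
		return oid.ID{}, err // tree.ErrNodeNotFound when no node matches the name
	}
	if version.DeleteMarker {
		return oid.ID{}, tree.ErrNodeNotFound // the latest version is a delete marker
	}
	return version.OID, nil // the object to fetch from the container
}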


@ -3,30 +3,14 @@ package uploader
import (
"bytes"
"fmt"
"math"
"strconv"
"time"
"github.com/nspcc-dev/neofs-api-go/v2/object"
"github.com/nspcc-dev/neofs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
var neofsAttributeHeaderPrefixes = [...][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")}
func systemTranslator(key, prefix []byte) []byte {
// replace the specified prefix with `__NEOFS__`
key = bytes.Replace(key, prefix, []byte(utils.SystemAttributePrefix), 1)
// replace `-` with `_`
key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
// replace with uppercase
return bytes.ToUpper(key)
}
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) map[string]string {
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) {
var err error
result := make(map[string]string)
prefix := []byte(utils.UserAttributeHeaderPrefix)
@ -42,23 +26,24 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) map[string]str
}
// removing attribute prefix
key = bytes.TrimPrefix(key, prefix)
clearKey := bytes.TrimPrefix(key, prefix)
// checks that it's a system NeoFS header
for _, system := range neofsAttributeHeaderPrefixes {
if bytes.HasPrefix(key, system) {
key = systemTranslator(key, system)
break
}
}
clearKey = utils.TransformIfSystem(clearKey)
// checks that the attribute key is not empty
if len(key) == 0 {
if len(clearKey) == 0 {
return
}
// check if key gets duplicated
// return error containing full key name (with prefix)
if _, ok := result[string(clearKey)]; ok {
err = fmt.Errorf("key duplication error: %s", string(key))
return
}
// make string representation of key / val
k, v := string(key), string(val)
k, v := string(clearKey), string(val)
result[k] = v
@ -67,73 +52,5 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) map[string]str
zap.String("val", v))
})
return result
}
func prepareExpirationHeader(headers map[string]string, epochDurations *epochDurations) error {
expirationInEpoch := headers[object.SysAttributeExpEpoch]
if timeRFC3339, ok := headers[utils.ExpirationRFC3339Attr]; ok {
expTime, err := time.Parse(time.RFC3339, timeRFC3339)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, utils.ExpirationRFC3339Attr)
}
now := time.Now().UTC()
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, utils.ExpirationRFC3339Attr)
}
updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, utils.ExpirationRFC3339Attr)
}
if timestamp, ok := headers[utils.ExpirationTimestampAttr]; ok {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timestamp, utils.ExpirationTimestampAttr)
}
expTime := time.Unix(value, 0)
now := time.Now()
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timestamp, utils.ExpirationTimestampAttr)
}
updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, utils.ExpirationTimestampAttr)
}
if duration, ok := headers[utils.ExpirationDurationAttr]; ok {
expDuration, err := time.ParseDuration(duration)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", duration, utils.ExpirationDurationAttr)
}
if expDuration <= 0 {
return fmt.Errorf("value %s of header %s must be positive", expDuration, utils.ExpirationDurationAttr)
}
updateExpirationHeader(headers, epochDurations, expDuration)
delete(headers, utils.ExpirationDurationAttr)
}
if expirationInEpoch != "" {
headers[object.SysAttributeExpEpoch] = expirationInEpoch
}
return nil
}
func updateExpirationHeader(headers map[string]string, durations *epochDurations, expDuration time.Duration) {
epochDuration := uint64(durations.msPerBlock) * durations.blockPerEpoch
currentEpoch := durations.currentEpoch
numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
if uint64(expDuration.Milliseconds())%epochDuration != 0 {
numEpoch++
}
expirationEpoch := uint64(math.MaxUint64)
if numEpoch < math.MaxUint64-currentEpoch {
expirationEpoch = currentEpoch + numEpoch
}
headers[object.SysAttributeExpEpoch] = strconv.FormatUint(expirationEpoch, 10)
return result, err
}


@ -1,13 +1,10 @@
//go:build !integration
package uploader
import (
"math"
"strconv"
"testing"
"time"
"github.com/nspcc-dev/neofs-api-go/v2/object"
"github.com/nspcc-dev/neofs-http-gw/utils"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
@ -16,175 +13,41 @@ import (
func TestFilter(t *testing.T) {
log := zap.NewNop()
t.Run("duplicate keys error", func(t *testing.T) {
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Add("X-Attribute-DupKey", "first-value")
req.Add("X-Attribute-DupKey", "second-value")
_, err := filterHeaders(log, req)
require.Error(t, err)
})
t.Run("duplicate system keys error", func(t *testing.T) {
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Add("X-Attribute-System-DupKey", "first-value")
req.Add("X-Attribute-System-DupKey", "second-value")
_, err := filterHeaders(log, req)
require.Error(t, err)
})
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Set("X-Attribute-Neofs-Expiration-Epoch1", "101")
req.Set("X-Attribute-NEOFS-Expiration-Epoch2", "102")
req.Set("X-Attribute-neofs-Expiration-Epoch3", "103")
req.Set("X-Attribute-System-Expiration-Epoch1", "101")
req.Set("X-Attribute-SYSTEM-Expiration-Epoch2", "102")
req.Set("X-Attribute-system-Expiration-Epoch3", "103")
req.Set("X-Attribute-MyAttribute", "value")
expected := map[string]string{
"__NEOFS__EXPIRATION_EPOCH1": "101",
"MyAttribute": "value",
"__NEOFS__EXPIRATION_EPOCH3": "103",
"__NEOFS__EXPIRATION_EPOCH2": "102",
"__SYSTEM__EXPIRATION_EPOCH1": "101",
"MyAttribute": "value",
"__SYSTEM__EXPIRATION_EPOCH3": "103",
"__SYSTEM__EXPIRATION_EPOCH2": "102",
}
result := filterHeaders(log, req)
result, err := filterHeaders(log, req)
require.NoError(t, err)
require.Equal(t, expected, result)
}
func TestPrepareExpirationHeader(t *testing.T) {
tomorrow := time.Now().Add(24 * time.Hour)
tomorrowUnix := tomorrow.Unix()
tomorrowUnixNano := tomorrow.UnixNano()
tomorrowUnixMilli := tomorrowUnixNano / 1e6
epoch := "100"
duration := "24h"
timestampSec := strconv.FormatInt(tomorrowUnix, 10)
timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10)
timestampNano := strconv.FormatInt(tomorrowUnixNano, 10)
defaultDurations := &epochDurations{
currentEpoch: 10,
msPerBlock: 1000,
blockPerEpoch: 101,
}
msPerBlock := defaultDurations.blockPerEpoch * uint64(defaultDurations.msPerBlock)
epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerBlock
if uint64((24*time.Hour).Milliseconds())%msPerBlock != 0 {
epochPerDay++
}
defaultExpEpoch := strconv.FormatUint(defaultDurations.currentEpoch+epochPerDay, 10)
for _, tc := range []struct {
name string
headers map[string]string
durations *epochDurations
err bool
expected map[string]string
}{
{
name: "valid epoch",
headers: map[string]string{object.SysAttributeExpEpoch: epoch},
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid epoch, valid duration",
headers: map[string]string{
object.SysAttributeExpEpoch: epoch,
utils.ExpirationDurationAttr: duration,
},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid epoch, valid rfc3339",
headers: map[string]string{
object.SysAttributeExpEpoch: epoch,
utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339),
},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid epoch, valid timestamp sec",
headers: map[string]string{
object.SysAttributeExpEpoch: epoch,
utils.ExpirationTimestampAttr: timestampSec,
},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid epoch, valid timestamp milli",
headers: map[string]string{
object.SysAttributeExpEpoch: epoch,
utils.ExpirationTimestampAttr: timestampMilli,
},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid epoch, valid timestamp nano",
headers: map[string]string{
object.SysAttributeExpEpoch: epoch,
utils.ExpirationTimestampAttr: timestampNano,
},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
},
{
name: "valid timestamp sec",
headers: map[string]string{utils.ExpirationTimestampAttr: timestampSec},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
},
{
name: "valid duration",
headers: map[string]string{utils.ExpirationDurationAttr: duration},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
},
{
name: "valid rfc3339",
headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
durations: defaultDurations,
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
},
{
name: "valid max uint 64",
headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
durations: &epochDurations{
currentEpoch: math.MaxUint64 - 1,
msPerBlock: defaultDurations.msPerBlock,
blockPerEpoch: defaultDurations.blockPerEpoch,
},
expected: map[string]string{object.SysAttributeExpEpoch: strconv.FormatUint(uint64(math.MaxUint64), 10)},
},
{
name: "invalid timestamp sec",
headers: map[string]string{utils.ExpirationTimestampAttr: "abc"},
err: true,
},
{
name: "invalid timestamp sec zero",
headers: map[string]string{utils.ExpirationTimestampAttr: "0"},
err: true,
},
{
name: "invalid duration",
headers: map[string]string{utils.ExpirationDurationAttr: "1d"},
err: true,
},
{
name: "invalid duration negative",
headers: map[string]string{utils.ExpirationDurationAttr: "-5h"},
err: true,
},
{
name: "invalid rfc3339",
headers: map[string]string{utils.ExpirationRFC3339Attr: "abc"},
err: true,
},
{
name: "invalid rfc3339 zero",
headers: map[string]string{utils.ExpirationRFC3339Attr: time.RFC3339},
err: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := prepareExpirationHeader(tc.headers, tc.durations)
if tc.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tc.expected, tc.headers)
}
})
}
}


@ -3,7 +3,7 @@ package uploader
import (
"io"
"github.com/nspcc-dev/neofs-http-gw/uploader/multipart"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/uploader/multipart"
"go.uber.org/zap"
)
@ -16,7 +16,8 @@ type MultipartFile interface {
func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
// To have a custom buffer (3 MB), the custom multipart reader is used.
// https://github.com/nspcc-dev/neofs-http-gw/issues/148
// The default reader uses 4 KiB chunks, which can slow down uploads by up to 400%.
// https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32
reader := multipart.NewReader(r, boundary)
for {


@ -1,3 +1,5 @@
//go:build !integration
package uploader
import (


@ -3,20 +3,20 @@ package uploader
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/nspcc-dev/neofs-http-gw/resolver"
"github.com/nspcc-dev/neofs-http-gw/response"
"github.com/nspcc-dev/neofs-http-gw/tokens"
"github.com/nspcc-dev/neofs-http-gw/utils"
"github.com/nspcc-dev/neofs-sdk-go/bearer"
"github.com/nspcc-dev/neofs-sdk-go/object"
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"github.com/nspcc-dev/neofs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/valyala/fasthttp"
"go.uber.org/atomic"
"go.uber.org/zap"
@ -29,7 +29,6 @@ const (
// Uploader is an upload request handler.
type Uploader struct {
appCtx context.Context
log *zap.Logger
pool *pool.Pool
ownerID *user.ID
@ -37,12 +36,6 @@ type Uploader struct {
containerResolver *resolver.ContainerResolver
}
type epochDurations struct {
currentEpoch uint64
msPerBlock int64
blockPerEpoch uint64
}
// Settings stores reloading parameters, so it has to provide atomic getters and setters.
type Settings struct {
defaultTimestamp atomic.Bool
@ -58,9 +51,8 @@ func (s *Settings) SetDefaultTimestamp(val bool) {
// New creates a new Uploader using specified logger, connection pool and
// other options.
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Uploader {
func New(params *utils.AppParams, settings *Settings) *Uploader {
return &Uploader{
appCtx: ctx,
log: params.Logger,
pool: params.Pool,
ownerID: params.Owner,
@ -70,27 +62,23 @@ func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Uplo
}
// Upload handles multipart upload request.
func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
func (u *Uploader) Upload(req *fasthttp.RequestCtx) {
var (
file MultipartFile
idObj oid.ID
addr oid.Address
scid, _ = c.UserValue("cid").(string)
scid, _ = req.UserValue("cid").(string)
log = u.log.With(zap.String("cid", scid))
bodyStream = c.RequestBodyStream()
bodyStream = req.RequestBodyStream()
drainBuf = make([]byte, drainBufSize)
)
if err := tokens.StoreBearerToken(c); err != nil {
log.Error("could not fetch bearer token", zap.Error(err))
response.Error(c, "could not fetch bearer token", fasthttp.StatusBadRequest)
return
}
ctx := utils.GetContextFromRequest(req)
idCnr, err := utils.GetContainerID(u.appCtx, scid, u.containerResolver)
idCnr, err := utils.GetContainerID(ctx, scid, u.containerResolver)
if err != nil {
log.Error("wrong container id", zap.Error(err))
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
return
}
@ -107,27 +95,34 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
zap.Error(err),
)
}()
boundary := string(c.Request.Header.MultipartFormBoundary())
boundary := string(req.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(u.log, bodyStream, boundary); err != nil {
log.Error("could not receive multipart/form", zap.Error(err))
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}
filtered := filterHeaders(u.log, &c.Request.Header)
if needParseExpiration(filtered) {
epochDuration, err := getEpochDurations(c, u.pool)
if err != nil {
log.Error("could not get epoch durations from network info", zap.Error(err))
response.Error(c, "could not get epoch durations from network info: "+err.Error(), fasthttp.StatusBadRequest)
return
}
if err = prepareExpirationHeader(filtered, epochDuration); err != nil {
log.Error("could not parse expiration header", zap.Error(err))
response.Error(c, "could not parse expiration header: "+err.Error(), fasthttp.StatusBadRequest)
return
filtered, err := filterHeaders(u.log, &req.Request.Header)
if err != nil {
log.Error("could not process headers", zap.Error(err))
response.Error(req, err.Error(), fasthttp.StatusBadRequest)
return
}
now := time.Now()
if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn("could not parse client time", zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
}
if err = utils.PrepareExpirationHeader(req, u.pool, filtered, now); err != nil {
log.Error("could not prepare expiration header", zap.Error(err))
response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
return
}
attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
@ -150,7 +145,7 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, *timestamp)
}
id, bt := u.fetchOwnerAndBearerToken(c)
id, bt := u.fetchOwnerAndBearerToken(ctx)
obj := object.New()
obj.SetContainerID(*idCnr)
@ -165,9 +160,8 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
prm.UseBearer(*bt)
}
if idObj, err = u.pool.PutObject(u.appCtx, prm); err != nil {
log.Error("could not store file in neofs", zap.Error(err))
response.Error(c, "could not store file in neofs: "+err.Error(), fasthttp.StatusBadRequest)
if idObj, err = u.pool.PutObject(ctx, prm); err != nil {
u.handlePutFrostFSErr(req, err)
return
}
@ -175,9 +169,9 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
addr.SetContainer(*idCnr)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
if err = newPutResponse(addr).encode(req); err != nil {
log.Error("could not encode response", zap.Error(err))
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
response.Error(req, "could not encode response", fasthttp.StatusBadRequest)
return
}
@ -194,8 +188,16 @@ func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
}
}
// Report status code and content type.
c.Response.SetStatusCode(fasthttp.StatusOK)
c.Response.Header.SetContentType(jsonHeader)
req.Response.SetStatusCode(fasthttp.StatusOK)
req.Response.Header.SetContentType(jsonHeader)
}
func (u *Uploader) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
u.log.Error("could not store file in frostfs", logFields...)
response.Error(r, msg, statusCode)
}
func (u *Uploader) fetchOwnerAndBearerToken(ctx context.Context) (*user.ID, *bearer.Token) {
@ -223,28 +225,3 @@ func (pr *putResponse) encode(w io.Writer) error {
enc.SetIndent("", "\t")
return enc.Encode(pr)
}
func getEpochDurations(ctx context.Context, p *pool.Pool) (*epochDurations, error) {
networkInfo, err := p.NetworkInfo(ctx)
if err != nil {
return nil, err
}
res := &epochDurations{
currentEpoch: networkInfo.CurrentEpoch(),
msPerBlock: networkInfo.MsPerBlock(),
blockPerEpoch: networkInfo.EpochDuration(),
}
if res.blockPerEpoch == 0 {
return nil, fmt.Errorf("EpochDuration is empty")
}
return res, nil
}
func needParseExpiration(headers map[string]string) bool {
_, ok1 := headers[utils.ExpirationDurationAttr]
_, ok2 := headers[utils.ExpirationRFC3339Attr]
_, ok3 := headers[utils.ExpirationTimestampAttr]
return ok1 || ok2 || ok3
}
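To make the new context flow explicit, a hedged sketch of how the handler is expected to be wired (illustrative only; variable names are assumptions; SetContextToRequest and GetContextFromRequest come from the utils changes below):

// Illustrative sketch; upl is the *Uploader built with New(params, settings),
// appCtx is the application context that the Uploader previously stored itself.
uploadHandler := func(req *fasthttp.RequestCtx) {
	utils.SetContextToRequest(appCtx, req)
	upl.Upload(req) // Upload now reads the context back via utils.GetContextFromRequest(req)
}
_ = uploadHandler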


@ -1,10 +1,250 @@
package utils
import (
"bytes"
"context"
"errors"
"fmt"
"math"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
)
const (
UserAttributeHeaderPrefix = "X-Attribute-"
SystemAttributePrefix = "__NEOFS__"
ExpirationDurationAttr = SystemAttributePrefix + "EXPIRATION_DURATION"
ExpirationTimestampAttr = SystemAttributePrefix + "EXPIRATION_TIMESTAMP"
ExpirationRFC3339Attr = SystemAttributePrefix + "EXPIRATION_RFC3339"
)
const (
systemAttributePrefix = "__SYSTEM__"
// deprecated: use systemAttributePrefix
systemAttributePrefixNeoFS = "__NEOFS__"
)
type systemTransformer struct {
prefix string
backwardPrefix string
xAttrPrefixes [][]byte
}
var transformers = []systemTransformer{
{
prefix: systemAttributePrefix,
backwardPrefix: "System-",
xAttrPrefixes: [][]byte{[]byte("System-"), []byte("SYSTEM-"), []byte("system-")},
},
{
prefix: systemAttributePrefixNeoFS,
backwardPrefix: "Neofs-",
xAttrPrefixes: [][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")},
},
}
func (t systemTransformer) existsExpirationAttributes(headers map[string]string) bool {
_, ok0 := headers[t.expirationEpochAttr()]
_, ok1 := headers[t.expirationDurationAttr()]
_, ok2 := headers[t.expirationTimestampAttr()]
_, ok3 := headers[t.expirationRFC3339Attr()]
return ok0 || ok1 || ok2 || ok3
}
func (t systemTransformer) expirationEpochAttr() string {
return t.prefix + "EXPIRATION_EPOCH"
}
func (t systemTransformer) expirationDurationAttr() string {
return t.prefix + "EXPIRATION_DURATION"
}
func (t systemTransformer) expirationTimestampAttr() string {
return t.prefix + "EXPIRATION_TIMESTAMP"
}
func (t systemTransformer) expirationRFC3339Attr() string {
return t.prefix + "EXPIRATION_RFC3339"
}
func (t systemTransformer) systemTranslator(key, prefix []byte) []byte {
// replace the specified prefix with system prefix
key = bytes.Replace(key, prefix, []byte(t.prefix), 1)
// replace `-` with `_`
key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
// replace with uppercase
return bytes.ToUpper(key)
}
func (t systemTransformer) transformIfSystem(key []byte) ([]byte, bool) {
// checks that it's a system FrostFS header
for _, system := range t.xAttrPrefixes {
if bytes.HasPrefix(key, system) {
return t.systemTranslator(key, system), true
}
}
return key, false
}
// systemBackwardTranslator is used to convert headers looking like '__PREFIX__ATTR_NAME' to 'Prefix-Attr-Name'.
func (t systemTransformer) systemBackwardTranslator(key string) string {
// trim specified prefix '__PREFIX__'
key = strings.TrimPrefix(key, t.prefix)
var res strings.Builder
res.WriteString(t.backwardPrefix)
strs := strings.Split(key, "_")
for i, s := range strs {
s = title(strings.ToLower(s))
res.WriteString(s)
if i != len(strs)-1 {
res.WriteString("-")
}
}
return res.String()
}
func (t systemTransformer) backwardTransformIfSystem(key string) (string, bool) {
if strings.HasPrefix(key, t.prefix) {
return t.systemBackwardTranslator(key), true
}
return key, false
}
func TransformIfSystem(key []byte) []byte {
for _, transformer := range transformers {
key, transformed := transformer.transformIfSystem(key)
if transformed {
return key
}
}
return key
}
func BackwardTransformIfSystem(key string) string {
for _, transformer := range transformers {
key, transformed := transformer.backwardTransformIfSystem(key)
if transformed {
return key
}
}
return key
}
func title(str string) string {
if str == "" {
return ""
}
r, size := utf8.DecodeRuneInString(str)
r0 := unicode.ToTitle(r)
return string(r0) + str[size:]
}
func PrepareExpirationHeader(ctx context.Context, p *pool.Pool, headers map[string]string, now time.Time) error {
formatsNum := 0
index := -1
for i, transformer := range transformers {
if transformer.existsExpirationAttributes(headers) {
formatsNum++
index = i
}
}
switch formatsNum {
case 0:
return nil
case 1:
epochDuration, err := GetEpochDurations(ctx, p)
if err != nil {
return fmt.Errorf("couldn't get epoch durations from network info: %w", err)
}
return transformers[index].prepareExpirationHeader(headers, epochDuration, now)
default:
return errors.New("both deprecated and new system attributes formats are used, please use only one")
}
}
func (t systemTransformer) prepareExpirationHeader(headers map[string]string, epochDurations *EpochDurations, now time.Time) error {
expirationInEpoch := headers[t.expirationEpochAttr()]
if timeRFC3339, ok := headers[t.expirationRFC3339Attr()]; ok {
expTime, err := time.Parse(time.RFC3339, timeRFC3339)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, t.expirationRFC3339Attr())
}
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, t.expirationRFC3339Attr())
}
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, t.expirationRFC3339Attr())
}
if timestamp, ok := headers[t.expirationTimestampAttr()]; ok {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timestamp, t.expirationTimestampAttr())
}
expTime := time.Unix(value, 0)
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timestamp, t.expirationTimestampAttr())
}
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, t.expirationTimestampAttr())
}
if duration, ok := headers[t.expirationDurationAttr()]; ok {
expDuration, err := time.ParseDuration(duration)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", duration, t.expirationDurationAttr())
}
if expDuration <= 0 {
return fmt.Errorf("value %s of header %s must be positive", expDuration, t.expirationDurationAttr())
}
t.updateExpirationHeader(headers, epochDurations, expDuration)
delete(headers, t.expirationDurationAttr())
}
if expirationInEpoch != "" {
expEpoch, err := strconv.ParseUint(expirationInEpoch, 10, 64)
if err != nil {
return fmt.Errorf("parse expiration epoch '%s': %w", expirationInEpoch, err)
}
if expEpoch < epochDurations.CurrentEpoch {
return fmt.Errorf("expiration epoch '%d' must be greater than current epoch '%d'", expEpoch, epochDurations.CurrentEpoch)
}
headers[t.expirationEpochAttr()] = expirationInEpoch
}
return nil
}
func (t systemTransformer) updateExpirationHeader(headers map[string]string, durations *EpochDurations, expDuration time.Duration) {
epochDuration := uint64(durations.MsPerBlock) * durations.BlockPerEpoch
currentEpoch := durations.CurrentEpoch
numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
if uint64(expDuration.Milliseconds())%epochDuration != 0 {
numEpoch++
}
expirationEpoch := uint64(math.MaxUint64)
if numEpoch < math.MaxUint64-currentEpoch {
expirationEpoch = currentEpoch + numEpoch
}
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
}
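A hedged example of what PrepareExpirationHeader does to the attribute map (illustrative only; the duration value and pool variable are assumptions): only one attribute family, __SYSTEM__* or the deprecated __NEOFS__*, may be used at a time, and relative forms are rewritten into an absolute expiration epoch within the same prefix family.

// Illustrative sketch; p is an assumed dialled *pool.Pool.
headers := map[string]string{
	"__SYSTEM__EXPIRATION_DURATION": "24h",
}
if err := utils.PrepareExpirationHeader(ctx, p, headers, time.Now()); err != nil {
	return err
}
// headers now holds only __SYSTEM__EXPIRATION_EPOCH, set to the current epoch
// plus ceil(24h / epoch duration), as computed by updateExpirationHeader.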

utils/attributes_test.go Normal file (+189 lines)

@ -0,0 +1,189 @@
//go:build !integration
package utils
import (
"math"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestPrepareExpirationHeader(t *testing.T) {
tomorrow := time.Now().Add(24 * time.Hour)
tomorrowUnix := tomorrow.Unix()
tomorrowUnixNano := tomorrow.UnixNano()
tomorrowUnixMilli := tomorrowUnixNano / 1e6
epoch := "100"
duration := "24h"
timestampSec := strconv.FormatInt(tomorrowUnix, 10)
timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10)
timestampNano := strconv.FormatInt(tomorrowUnixNano, 10)
defaultDurations := &EpochDurations{
CurrentEpoch: 10,
MsPerBlock: 1000,
BlockPerEpoch: 101,
}
msPerBlock := defaultDurations.BlockPerEpoch * uint64(defaultDurations.MsPerBlock)
epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerBlock
if uint64((24*time.Hour).Milliseconds())%msPerBlock != 0 {
epochPerDay++
}
defaultExpEpoch := strconv.FormatUint(defaultDurations.CurrentEpoch+epochPerDay, 10)
for _, transformer := range transformers {
for _, tc := range []struct {
name string
headers map[string]string
durations *EpochDurations
err bool
expected map[string]string
}{
{
name: "valid epoch",
headers: map[string]string{transformer.expirationEpochAttr(): epoch},
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
durations: defaultDurations,
},
{
name: "valid epoch, valid duration",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationDurationAttr(): duration,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid rfc3339",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339),
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp sec",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampSec,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp milli",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampMilli,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp nano",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampNano,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid timestamp sec",
headers: map[string]string{transformer.expirationTimestampAttr(): timestampSec},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid duration",
headers: map[string]string{transformer.expirationDurationAttr(): duration},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid rfc3339",
headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid max uint 64",
headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)},
durations: &EpochDurations{
CurrentEpoch: math.MaxUint64 - 1,
MsPerBlock: defaultDurations.MsPerBlock,
BlockPerEpoch: defaultDurations.BlockPerEpoch,
},
expected: map[string]string{transformer.expirationEpochAttr(): strconv.FormatUint(uint64(math.MaxUint64), 10)},
},
{
name: "invalid timestamp sec",
headers: map[string]string{transformer.expirationTimestampAttr(): "abc"},
err: true,
},
{
name: "invalid timestamp sec zero",
headers: map[string]string{transformer.expirationTimestampAttr(): "0"},
err: true,
},
{
name: "invalid duration",
headers: map[string]string{transformer.expirationDurationAttr(): "1d"},
err: true,
},
{
name: "invalid duration negative",
headers: map[string]string{transformer.expirationDurationAttr(): "-5h"},
err: true,
},
{
name: "invalid rfc3339",
headers: map[string]string{transformer.expirationRFC3339Attr(): "abc"},
err: true,
},
{
name: "invalid rfc3339 zero",
headers: map[string]string{transformer.expirationRFC3339Attr(): time.RFC3339},
err: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := transformer.prepareExpirationHeader(tc.headers, tc.durations, time.Now())
if tc.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tc.expected, tc.headers)
}
})
}
}
}
func TestSystemBackwardTranslator(t *testing.T) {
input := []string{
"__SYSTEM__EXPIRATION_EPOCH",
"__SYSTEM__RANDOM_ATTR",
"__NEOFS__EXPIRATION_EPOCH",
"__NEOFS__RANDOM_ATTR",
}
expected := []string{
"System-Expiration-Epoch",
"System-Random-Attr",
"Neofs-Expiration-Epoch",
"Neofs-Random-Attr",
}
for i, str := range input {
res := BackwardTransformIfSystem(str)
require.Equal(t, expected[i], res)
}
}


@ -1,9 +1,9 @@
package utils
import (
"github.com/nspcc-dev/neofs-http-gw/resolver"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"github.com/nspcc-dev/neofs-sdk-go/user"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
)

utils/tracing.go Normal file (+83 lines)

@ -0,0 +1,83 @@
package utils
import (
"context"
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
"github.com/valyala/fasthttp"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace"
)
type httpCarrier struct {
r *fasthttp.RequestCtx
}
func (c *httpCarrier) Get(key string) string {
bytes := c.r.Request.Header.Peek(key)
if len(bytes) == 0 {
return ""
}
return string(bytes)
}
func (c *httpCarrier) Set(key string, value string) {
c.r.Response.Header.Set(key, value)
}
func (c *httpCarrier) Keys() []string {
dict := make(map[string]interface{})
c.r.Request.Header.VisitAll(
func(key, value []byte) {
dict[string(key)] = true
},
)
c.r.Response.Header.VisitAll(
func(key, value []byte) {
dict[string(key)] = true
},
)
result := make([]string, 0, len(dict))
for key := range dict {
result = append(result, key)
}
return result
}
func extractHTTPTraceInfo(ctx context.Context, req *fasthttp.RequestCtx) context.Context {
if req == nil {
return ctx
}
carrier := &httpCarrier{r: req}
return tracing.Propagator.Extract(ctx, carrier)
}
// SetHTTPTraceInfo saves trace headers to response.
func SetHTTPTraceInfo(ctx context.Context, span trace.Span, req *fasthttp.RequestCtx) {
if req == nil {
return
}
if err := req.Err(); err != nil {
span.SetStatus(codes.Error, err.Error())
}
span.SetAttributes(
semconv.HTTPStatusCode(req.Response.StatusCode()),
)
carrier := &httpCarrier{r: req}
tracing.Propagator.Inject(ctx, carrier)
}
// StartHTTPServerSpan starts root HTTP server span.
func StartHTTPServerSpan(ctx context.Context, req *fasthttp.RequestCtx, operationName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
ctx = extractHTTPTraceInfo(ctx, req)
opts = append(opts, trace.WithAttributes(
attribute.String("http.client_address", req.RemoteAddr().String()),
attribute.String("http.path", string(req.Path())),
semconv.HTTPMethod(string(req.Method())),
semconv.RPCService("frostfs-http-gw"),
attribute.String("http.query", req.QueryArgs().String()),
), trace.WithSpanKind(trace.SpanKindServer))
return tracing.StartSpanFromContext(ctx, operationName, opts...)
}
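A hedged sketch of the intended span lifecycle around a fasthttp handler (illustrative only; the operation name and context variable are assumptions):

// Illustrative sketch; appCtx is the application context, req the incoming request.
ctx, span := utils.StartHTTPServerSpan(appCtx, req, "GET /get/{cid}/{oid}")
defer func() {
	utils.SetHTTPTraceInfo(ctx, span, req)
	span.End()
}()
// ... handle the request, propagating ctx into SDK calls ...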


@ -2,9 +2,12 @@ package utils
import (
"context"
"fmt"
"github.com/nspcc-dev/neofs-http-gw/resolver"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/valyala/fasthttp"
)
// GetContainerID decodes the container id, if it's not a valid container id
@ -17,3 +20,37 @@ func GetContainerID(ctx context.Context, containerID string, resolver *resolver.
}
return cnrID, err
}
type EpochDurations struct {
CurrentEpoch uint64
MsPerBlock int64
BlockPerEpoch uint64
}
func GetEpochDurations(ctx context.Context, p *pool.Pool) (*EpochDurations, error) {
networkInfo, err := p.NetworkInfo(ctx)
if err != nil {
return nil, err
}
res := &EpochDurations{
CurrentEpoch: networkInfo.CurrentEpoch(),
MsPerBlock: networkInfo.MsPerBlock(),
BlockPerEpoch: networkInfo.EpochDuration(),
}
if res.BlockPerEpoch == 0 {
return nil, fmt.Errorf("EpochDuration is empty")
}
return res, nil
}
// SetContextToRequest adds new context to fasthttp request.
func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) {
c.SetUserValue("context", ctx)
}
// GetContextFromRequest returns main context from fasthttp request context.
func GetContextFromRequest(c *fasthttp.RequestCtx) context.Context {
return c.UserValue("context").(context.Context)
}