forked from TrueCloudLab/frostfs-http-gw
Compare commits
146 commits
support/v0
...
master
Author | SHA1 | Date | |
---|---|---|---|
8fe8f2dcc2 | |||
77eb474581 | |||
c8473498ae | |||
a4233b006c | |||
7e80f0cce6 | |||
843708a558 | |||
77ffde58e9 | |||
ca426fff4d | |||
151e5bc1c8 | |||
5ee09790f0 | |||
fcf99d9a59 | |||
f20ea67b46 | |||
9e2d1208cb | |||
418767c8ec | |||
16545bd3b0 | |||
d9cbd302b1 | |||
1737f1d95f | |||
0f22ca43c1 | |||
27478995b5 | |||
3741e3b003 | |||
826dd0cdbe | |||
23ed3ab86e | |||
5a87ee7625 | |||
b73a4a25b3 | |||
5b7b872dcd | |||
c851c0529c | |||
16d6e6c34e | |||
11965deb41 | |||
a95dc6c8c7 | |||
f39b3aa93a | |||
6695ebe5a0 | |||
c6383fc135 | |||
5ded105c09 | |||
88e32ddd7f | |||
007d278caa | |||
7ec9b34d33 | |||
5470916361 | |||
c038957649 | |||
ce4ec032f9 | |||
4049255eed | |||
2c95250f72 | |||
5ae75eb9d8 | |||
627294bf70 | |||
0ef3e18ee1 | |||
2e28b2ac85 | |||
a375af7d98 | |||
dc8d0d4ab3 | |||
7fa973b261 | |||
1ced82a714 | |||
49d6a27562 | |||
9a5a2239bd | |||
8bc246f8f9 | |||
9b34413e17 | |||
e61b4867c9 | |||
84eb57475b | |||
e26577e753 | |||
d219943542 | |||
add07a21ed | |||
40568590c7 | |||
834d5b93e5 | |||
dbc6804d27 | |||
7d47e88e36 | |||
54eadc3c31 | |||
fa28f1ff82 | |||
cc69601b32 | |||
97ac638dff | |||
0882d344a2 | |||
6fac6341c2 | |||
d9122e2093 | |||
6f64557a4b | |||
2ccb43bc8c | |||
202ef5cc54 | |||
1dfbe36eca | |||
5be537321b | |||
2c706bec71 | |||
d0f6baa44b | |||
d7dbff1255 | |||
61d152ee6a | |||
9765adf844 | |||
b8944adb65 | |||
f24f39ec92 | |||
f17f6747c4 | |||
01b9df83e6 | |||
c4fe718556 | |||
cdaab4feab | |||
8a22991326 | |||
1776db289c | |||
1f702ad2d8 | |||
adb95642d4 | |||
3844ac83e6 | |||
f7784db146 | |||
8c3c3782f5 | |||
37dbb29535 | |||
a945cdd42c | |||
ad05f1eb82 | |||
9eeaf44163 | |||
15b65b521b | |||
385f336a17 | |||
6c6fd0e9a5 | |||
cc37c34396 | |||
959213520e | |||
162738e771 | |||
7c16ffa250 | |||
6f35d7198d | |||
81f7168a16 | |||
a8ec09e76a | |||
1f66149316 | |||
e2059a8926 | |||
53ee124b19 | |||
8f6be59e23 | |||
e02ee50d7b | |||
93ec4c444d | |||
6be8d47d92 | |||
ed983f8ad0 | |||
5f01abf300 | |||
38aa6db041 | |||
5a98df9d2d | |||
361acacf07 | |||
6909ef5382 | |||
148b1aa7f5 | |||
7df26d9181 | |||
913644d64a | |||
4c30ff6638 | |||
72734ab486 | |||
6abd500b11 | |||
67c5818fc1 | |||
de309e3264 | |||
31d396a125 | |||
a014fb96fc | |||
2d9eee81c2 | |||
f88fe1092d | |||
|
f4fbd936bc | ||
278376643a | |||
8f1d84ba8d | |||
f3d58e4ef0 | |||
|
7b1410968e | ||
|
408d914347 | ||
|
a6ec194c2a | ||
|
4a0188b8f9 | ||
|
ef0c17372f | ||
|
d2ced3ccbb | ||
|
9fda0397e4 | ||
|
be47263c42 | ||
|
9dad47502e | ||
|
b2db6300c4 | ||
131f2dbcfc |
104 changed files with 8875 additions and 3716 deletions
|
@ -1,9 +1,9 @@
|
||||||
FROM golang:1.19-alpine as basebuilder
|
FROM golang:1.22-alpine AS basebuilder
|
||||||
RUN apk add --update make bash ca-certificates
|
RUN apk add --update make bash ca-certificates
|
||||||
|
|
||||||
FROM basebuilder as builder
|
FROM basebuilder AS builder
|
||||||
ENV GOGC off
|
ENV GOGC=off
|
||||||
ENV CGO_ENABLED 0
|
ENV CGO_ENABLED=0
|
||||||
ARG BUILD=now
|
ARG BUILD=now
|
||||||
ARG VERSION=dev
|
ARG VERSION=dev
|
||||||
ARG REPO=repository
|
ARG REPO=repository
|
||||||
|
@ -18,6 +18,6 @@ FROM scratch
|
||||||
WORKDIR /
|
WORKDIR /
|
||||||
|
|
||||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||||
COPY --from=builder /src/bin/neofs-http-gw /bin/neofs-http-gw
|
COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/neofs-http-gw"]
|
ENTRYPOINT ["/bin/frostfs-http-gw"]
|
|
@ -3,6 +3,6 @@ RUN apk add --update --no-cache bash ca-certificates
|
||||||
|
|
||||||
WORKDIR /
|
WORKDIR /
|
||||||
|
|
||||||
COPY bin/neofs-http-gw /bin/neofs-http-gw
|
COPY bin/frostfs-http-gw /bin/frostfs-http-gw
|
||||||
|
|
||||||
CMD ["neofs-http-gw"]
|
CMD ["frostfs-http-gw"]
|
23
.forgejo/workflows/builds.yml
Normal file
23
.forgejo/workflows/builds.yml
Normal file
|
@ -0,0 +1,23 @@
|
||||||
|
on: [pull_request]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
builds:
|
||||||
|
name: Builds
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
go_versions: [ '1.22', '1.23' ]
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '${{ matrix.go_versions }}'
|
||||||
|
|
||||||
|
- name: Build binary
|
||||||
|
run: make
|
||||||
|
|
||||||
|
- name: Check dirty suffix
|
||||||
|
run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
|
20
.forgejo/workflows/dco.yml
Normal file
20
.forgejo/workflows/dco.yml
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
on: [pull_request]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
dco:
|
||||||
|
name: DCO
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '1.23'
|
||||||
|
|
||||||
|
- name: Run commit format checker
|
||||||
|
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
||||||
|
with:
|
||||||
|
from: 'origin/${{ github.event.pull_request.base.ref }}'
|
41
.forgejo/workflows/tests.yml
Normal file
41
.forgejo/workflows/tests.yml
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
on: [pull_request]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
lint:
|
||||||
|
name: Lint
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '1.23'
|
||||||
|
cache: true
|
||||||
|
|
||||||
|
- name: Install linters
|
||||||
|
run: make lint-install
|
||||||
|
|
||||||
|
- name: Run linters
|
||||||
|
run: make lint
|
||||||
|
|
||||||
|
tests:
|
||||||
|
name: Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
go_versions: [ '1.22', '1.23' ]
|
||||||
|
fail-fast: false
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '${{ matrix.go_versions }}'
|
||||||
|
|
||||||
|
- name: Update Go modules
|
||||||
|
run: make dep
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: make test
|
21
.forgejo/workflows/vulncheck.yml
Normal file
21
.forgejo/workflows/vulncheck.yml
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
on: [pull_request]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
vulncheck:
|
||||||
|
name: Vulncheck
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v3
|
||||||
|
with:
|
||||||
|
go-version: '1.22'
|
||||||
|
|
||||||
|
- name: Install govulncheck
|
||||||
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|
||||||
|
- name: Run govulncheck
|
||||||
|
run: govulncheck ./...
|
2
.github/CODEOWNERS
vendored
2
.github/CODEOWNERS
vendored
|
@ -1 +1 @@
|
||||||
* @alexvanin @masterSplinter01 @KirillovDenis
|
* @alexvanin @dkirillov
|
||||||
|
|
12
.github/ISSUE_TEMPLATE/bug_report.md
vendored
12
.github/ISSUE_TEMPLATE/bug_report.md
vendored
|
@ -2,7 +2,7 @@
|
||||||
name: Bug report
|
name: Bug report
|
||||||
about: Create a report to help us improve
|
about: Create a report to help us improve
|
||||||
title: ''
|
title: ''
|
||||||
labels: community, triage
|
labels: community, triage, bug
|
||||||
assignees: ''
|
assignees: ''
|
||||||
|
|
||||||
---
|
---
|
||||||
|
@ -18,17 +18,17 @@ assignees: ''
|
||||||
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
||||||
|
|
||||||
## Possible Solution
|
## Possible Solution
|
||||||
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
|
<!-- Not obligatory
|
||||||
<!--- or ideas how to implement the addition or change -->
|
If no reason/fix/additions for the bug can be suggested,
|
||||||
|
uncomment the following phrase:
|
||||||
|
|
||||||
|
<-- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
|
||||||
|
|
||||||
## Steps to Reproduce (for bugs)
|
## Steps to Reproduce (for bugs)
|
||||||
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
||||||
<!--- reproduce this bug. -->
|
<!--- reproduce this bug. -->
|
||||||
|
|
||||||
1.
|
1.
|
||||||
2.
|
|
||||||
3.
|
|
||||||
4.
|
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
<!--- How has this issue affected you? What are you trying to accomplish? -->
|
<!--- How has this issue affected you? What are you trying to accomplish? -->
|
||||||
|
|
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
16
.github/ISSUE_TEMPLATE/feature_request.md
vendored
|
@ -7,14 +7,14 @@ assignees: ''
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
**Is your feature request related to a problem? Please describe.**
|
## Is your feature request related to a problem? Please describe.
|
||||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
|
||||||
|
|
||||||
**Describe the solution you'd like**
|
## Describe the solution you'd like
|
||||||
A clear and concise description of what you want to happen.
|
<!--- A clear and concise description of what you want to happen. -->
|
||||||
|
|
||||||
**Describe alternatives you've considered**
|
## Describe alternatives you've considered
|
||||||
A clear and concise description of any alternative solutions or features you've considered.
|
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
|
||||||
|
|
||||||
**Additional context**
|
## Additional context
|
||||||
Add any other context or screenshots about the feature request here.
|
<!--- Add any other context or screenshots about the feature request here. -->
|
||||||
|
|
187
.github/logo.svg
vendored
187
.github/logo.svg
vendored
|
@ -1,129 +1,70 @@
|
||||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
<svg
|
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||||
xmlns:cc="http://creativecommons.org/ns#"
|
viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
|
||||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
<style type="text/css">
|
||||||
xmlns:svg="http://www.w3.org/2000/svg"
|
.st0{display:none;}
|
||||||
xmlns="http://www.w3.org/2000/svg"
|
.st1{display:inline;}
|
||||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
.st2{fill:#01E397;}
|
||||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
.st3{display:inline;fill:#010032;}
|
||||||
sodipodi:docname="logo_fs.svg"
|
.st4{display:inline;fill:#00E599;}
|
||||||
inkscape:version="1.0 (4035a4fb49, 2020-05-01)"
|
.st5{display:inline;fill:#00AF92;}
|
||||||
id="svg57"
|
.st6{fill:#00C3E5;}
|
||||||
version="1.1"
|
</style>
|
||||||
viewBox="0 0 105 25"
|
<g id="Layer_2">
|
||||||
height="25mm"
|
<g id="Layer_1-2" class="st0">
|
||||||
width="105mm">
|
<g class="st1">
|
||||||
<defs
|
<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
|
||||||
id="defs51">
|
<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
|
||||||
<clipPath
|
c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
|
||||||
clipPathUnits="userSpaceOnUse"
|
c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
|
||||||
id="clipPath434">
|
c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
|
||||||
<path
|
c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
|
||||||
d="M 0,0 H 1366 V 768 H 0 Z"
|
c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
|
||||||
id="path432" />
|
|
||||||
</clipPath>
|
|
||||||
</defs>
|
|
||||||
<sodipodi:namedview
|
|
||||||
inkscape:window-maximized="0"
|
|
||||||
inkscape:window-y="0"
|
|
||||||
inkscape:window-x="130"
|
|
||||||
inkscape:window-height="1040"
|
|
||||||
inkscape:window-width="1274"
|
|
||||||
height="50mm"
|
|
||||||
units="mm"
|
|
||||||
showgrid="false"
|
|
||||||
inkscape:document-rotation="0"
|
|
||||||
inkscape:current-layer="layer1"
|
|
||||||
inkscape:document-units="mm"
|
|
||||||
inkscape:cy="344.49897"
|
|
||||||
inkscape:cx="468.64708"
|
|
||||||
inkscape:zoom="0.7"
|
|
||||||
inkscape:pageshadow="2"
|
|
||||||
inkscape:pageopacity="0.0"
|
|
||||||
borderopacity="1.0"
|
|
||||||
bordercolor="#666666"
|
|
||||||
pagecolor="#ffffff"
|
|
||||||
id="base" />
|
|
||||||
<metadata
|
|
||||||
id="metadata54">
|
|
||||||
<rdf:RDF>
|
|
||||||
<cc:Work
|
|
||||||
rdf:about="">
|
|
||||||
<dc:format>image/svg+xml</dc:format>
|
|
||||||
<dc:type
|
|
||||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
|
||||||
<dc:title></dc:title>
|
|
||||||
</cc:Work>
|
|
||||||
</rdf:RDF>
|
|
||||||
</metadata>
|
|
||||||
<g
|
|
||||||
id="layer1"
|
|
||||||
inkscape:groupmode="layer"
|
|
||||||
inkscape:label="Layer 1">
|
|
||||||
<g
|
|
||||||
id="g424"
|
|
||||||
transform="matrix(0.35277777,0,0,-0.35277777,63.946468,10.194047)">
|
|
||||||
<path
|
|
||||||
d="m 0,0 v -8.093 h 12.287 v -3.94 H 0 V -24.067 H -4.534 V 3.898 H 15.677 V 0 Z"
|
|
||||||
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
|
||||||
id="path426" />
|
|
||||||
</g>
|
</g>
|
||||||
<g
|
<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
|
||||||
transform="matrix(0.35277777,0,0,-0.35277777,-315.43002,107.34005)"
|
c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
|
||||||
id="g428">
|
c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
|
||||||
<g
|
<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
|
||||||
id="g430"
|
c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
|
||||||
clip-path="url(#clipPath434)">
|
s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
|
||||||
<g
|
M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
|
||||||
id="g436"
|
<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
|
||||||
transform="translate(1112.874,278.2981)">
|
s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
|
||||||
<path
|
c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
|
||||||
d="M 0,0 C 1.822,-0.932 3.354,-2.359 4.597,-4.28 L 1.165,-7.373 c -0.791,1.695 -1.779,2.924 -2.966,3.686 -1.186,0.763 -2.768,1.145 -4.745,1.145 -1.949,0 -3.461,-0.389 -4.534,-1.166 -1.074,-0.777 -1.61,-1.772 -1.61,-2.987 0,-1.13 0.523,-2.027 1.568,-2.69 1.045,-0.664 2.909,-1.236 5.593,-1.716 2.514,-0.452 4.512,-1.024 5.995,-1.716 1.483,-0.693 2.564,-1.554 3.242,-2.585 0.677,-1.031 1.016,-2.309 1.016,-3.834 0,-1.639 -0.466,-3.079 -1.398,-4.322 -0.932,-1.243 -2.239,-2.197 -3.919,-2.86 -1.681,-0.664 -3.623,-0.996 -5.826,-0.996 -5.678,0 -9.689,1.892 -12.033,5.678 l 3.178,3.178 c 0.903,-1.695 2.068,-2.939 3.495,-3.729 1.426,-0.791 3.199,-1.186 5.318,-1.186 2.005,0 3.58,0.345 4.724,1.038 1.144,0.692 1.716,1.674 1.716,2.945 0,1.017 -0.516,1.835 -1.547,2.457 -1.031,0.621 -2.832,1.172 -5.402,1.653 -2.571,0.479 -4.618,1.073 -6.143,1.779 -1.526,0.706 -2.635,1.582 -3.326,2.627 -0.693,1.045 -1.039,2.316 -1.039,3.813 0,1.582 0.438,3.023 1.314,4.322 0.875,1.299 2.14,2.33 3.792,3.093 1.653,0.763 3.58,1.144 5.783,1.144 C -4.018,1.398 -1.822,0.932 0,0"
|
C119.9,17.2,117.7,18.2,116.2,19.9z"/>
|
||||||
style="fill:#00e396;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
|
||||||
id="path438" />
|
<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
|
||||||
</g>
|
</g>
|
||||||
<g
|
<g>
|
||||||
id="g440"
|
<g>
|
||||||
transform="translate(993.0239,277.5454)">
|
<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
|
||||||
<path
|
<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
|
||||||
d="m 0,0 c 2.054,-1.831 3.083,-4.465 3.083,-7.902 v -17.935 h -4.484 v 16.366 c 0,2.914 -0.626,5.024 -1.877,6.332 -1.253,1.308 -2.924,1.962 -5.016,1.962 -1.495,0 -2.896,-0.327 -4.204,-0.981 -1.308,-0.654 -2.381,-1.719 -3.222,-3.194 -0.841,-1.477 -1.261,-3.335 -1.261,-5.576 v -14.909 h -4.484 V 1.328 l 4.086,-1.674 0.118,-1.84 c 0.933,1.681 2.222,2.923 3.867,3.727 1.643,0.803 3.493,1.205 5.548,1.205 C -4.671,2.746 -2.055,1.83 0,0"
|
c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
|
||||||
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
|
||||||
id="path442" />
|
<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
|
||||||
|
c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
|
||||||
|
c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
|
||||||
|
s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
|
||||||
|
s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
|
||||||
|
<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
|
||||||
|
c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
|
||||||
|
c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
|
||||||
|
c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
|
||||||
|
c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
|
||||||
|
c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
|
||||||
|
<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
|
||||||
|
c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
|
||||||
|
s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
|
||||||
|
<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
|
||||||
|
<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
|
||||||
|
c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
|
||||||
|
l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
|
||||||
|
c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
|
||||||
|
c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
|
||||||
|
c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
|
||||||
|
c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
|
||||||
</g>
|
</g>
|
||||||
<g
|
|
||||||
id="g444"
|
|
||||||
transform="translate(1027.9968,264.0386)">
|
|
||||||
<path
|
|
||||||
d="m 0,0 h -21.128 c 0.261,-2.84 1.205,-5.044 2.83,-6.613 1.625,-1.57 3.727,-2.355 6.305,-2.355 2.054,0 3.763,0.356 5.128,1.065 1.363,0.71 2.288,1.738 2.774,3.083 l 3.755,-1.961 c -1.121,-1.981 -2.616,-3.495 -4.484,-4.54 -1.868,-1.046 -4.259,-1.569 -7.173,-1.569 -4.223,0 -7.538,1.289 -9.948,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.149,8.127 3.447,10.705 2.298,2.578 5.557,3.867 9.779,3.867 2.615,0 4.876,-0.58 6.782,-1.738 1.905,-1.158 3.343,-2.728 4.315,-4.707 C -0.262,7.827 0.224,5.605 0.224,3.139 0.224,2.092 0.149,1.046 0,0 m -18.298,10.144 c -1.513,-1.457 -2.438,-3.512 -2.775,-6.165 h 16.982 c -0.3,2.615 -1.159,4.661 -2.578,6.137 -1.42,1.476 -3.307,2.214 -5.661,2.214 -2.466,0 -4.455,-0.728 -5.968,-2.186"
|
|
||||||
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
|
||||||
id="path446" />
|
|
||||||
</g>
|
|
||||||
<g
|
|
||||||
id="g448"
|
|
||||||
transform="translate(1057.8818,276.4246)">
|
|
||||||
<path
|
|
||||||
d="m 0,0 c 2.41,-2.578 3.615,-6.147 3.615,-10.705 0,-4.558 -1.205,-8.126 -3.615,-10.704 -2.41,-2.578 -5.726,-3.867 -9.948,-3.867 -4.222,0 -7.537,1.289 -9.947,3.867 -2.41,2.578 -3.615,6.146 -3.615,10.704 0,4.558 1.205,8.127 3.615,10.705 2.41,2.578 5.725,3.867 9.947,3.867 C -5.726,3.867 -2.41,2.578 0,0 m -16.617,-2.858 c -1.607,-1.906 -2.41,-4.522 -2.41,-7.847 0,-3.326 0.803,-5.94 2.41,-7.846 1.607,-1.905 3.83,-2.858 6.669,-2.858 2.839,0 5.063,0.953 6.67,2.858 1.606,1.906 2.41,4.52 2.41,7.846 0,3.325 -0.804,5.941 -2.41,7.847 C -4.885,-0.953 -7.109,0 -9.948,0 c -2.839,0 -5.062,-0.953 -6.669,-2.858"
|
|
||||||
style="fill:#000033;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
|
||||||
id="path450" />
|
|
||||||
</g>
|
|
||||||
</g>
|
|
||||||
</g>
|
|
||||||
<g
|
|
||||||
id="g452"
|
|
||||||
transform="matrix(0.35277777,0,0,-0.35277777,5.8329581,6.5590171)">
|
|
||||||
<path
|
|
||||||
d="m 0,0 0.001,-38.946 25.286,-9.076 V -8.753 L 52.626,1.321 27.815,10.207 Z"
|
|
||||||
style="fill:#00e599;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
|
||||||
id="path454" />
|
|
||||||
</g>
|
|
||||||
<g
|
|
||||||
id="g456"
|
|
||||||
transform="matrix(0.35277777,0,0,-0.35277777,15.479008,10.041927)">
|
|
||||||
<path
|
|
||||||
d="M 0,0 V -21.306 L 25.293,-30.364 25.282,9.347 Z"
|
|
||||||
style="fill:#00b091;fill-opacity:1;fill-rule:nonzero;stroke:none"
|
|
||||||
id="path458" />
|
|
||||||
</g>
|
</g>
|
||||||
</g>
|
</g>
|
||||||
</svg>
|
</svg>
|
||||||
|
|
Before Width: | Height: | Size: 6.5 KiB After Width: | Height: | Size: 5.5 KiB |
66
.github/workflows/builds.yml
vendored
66
.github/workflows/builds.yml
vendored
|
@ -1,66 +0,0 @@
|
||||||
name: Builds
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
types: [opened, synchronize]
|
|
||||||
paths-ignore:
|
|
||||||
- '**/*.md'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build_cli:
|
|
||||||
name: Build CLI
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.19
|
|
||||||
|
|
||||||
- name: Restore Go modules from cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /home/runner/go/pkg/mod
|
|
||||||
key: deps-${{ hashFiles('go.sum') }}
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Build CLI
|
|
||||||
run: make
|
|
||||||
|
|
||||||
- name: Check version
|
|
||||||
run: if [[ $(make version) == *"dirty"* ]]; then exit 1; fi
|
|
||||||
|
|
||||||
build_image:
|
|
||||||
needs: build_cli
|
|
||||||
name: Build Docker image
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.19
|
|
||||||
|
|
||||||
- name: Restore Go modules from cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /home/runner/go/pkg/mod
|
|
||||||
key: deps-${{ hashFiles('go.sum') }}
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Build Docker image
|
|
||||||
run: make image
|
|
67
.github/workflows/codeql-analysis.yml
vendored
67
.github/workflows/codeql-analysis.yml
vendored
|
@ -1,67 +0,0 @@
|
||||||
# For most projects, this workflow file will not need changing; you simply need
|
|
||||||
# to commit it to your repository.
|
|
||||||
#
|
|
||||||
# You may wish to alter this file to override the set of languages analyzed,
|
|
||||||
# or to provide custom queries or build logic.
|
|
||||||
#
|
|
||||||
# ******** NOTE ********
|
|
||||||
# We have attempted to detect the languages in your repository. Please check
|
|
||||||
# the `language` matrix defined below to confirm you have the correct set of
|
|
||||||
# supported CodeQL languages.
|
|
||||||
#
|
|
||||||
name: "CodeQL"
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [ master ]
|
|
||||||
pull_request:
|
|
||||||
# The branches below must be a subset of the branches above
|
|
||||||
branches: [ master ]
|
|
||||||
schedule:
|
|
||||||
- cron: '35 8 * * 1'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
analyze:
|
|
||||||
name: Analyze
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
language: [ 'go' ]
|
|
||||||
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
|
||||||
# Learn more:
|
|
||||||
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
# Initializes the CodeQL tools for scanning.
|
|
||||||
- name: Initialize CodeQL
|
|
||||||
uses: github/codeql-action/init@v2
|
|
||||||
with:
|
|
||||||
languages: ${{ matrix.language }}
|
|
||||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
|
||||||
# By default, queries listed here will override any specified in a config file.
|
|
||||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
|
||||||
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
|
||||||
|
|
||||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
|
||||||
# If this step fails, then you should remove it and run the build manually (see below)
|
|
||||||
- name: Autobuild
|
|
||||||
uses: github/codeql-action/autobuild@v2
|
|
||||||
|
|
||||||
# ℹ️ Command-line programs to run using the OS shell.
|
|
||||||
# 📚 https://git.io/JvXDl
|
|
||||||
|
|
||||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
|
||||||
# and modify them (or add more) to build your code if your project
|
|
||||||
# uses a compiled language
|
|
||||||
|
|
||||||
#- run: |
|
|
||||||
# make bootstrap
|
|
||||||
# make release
|
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
|
||||||
uses: github/codeql-action/analyze@v2
|
|
21
.github/workflows/dco.yml
vendored
21
.github/workflows/dco.yml
vendored
|
@ -1,21 +0,0 @@
|
||||||
name: DCO check
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
commits_check_job:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
name: Commits Check
|
|
||||||
steps:
|
|
||||||
- name: Get PR Commits
|
|
||||||
id: 'get-pr-commits'
|
|
||||||
uses: tim-actions/get-pr-commits@master
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
- name: DCO Check
|
|
||||||
uses: tim-actions/dco@master
|
|
||||||
with:
|
|
||||||
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
|
85
.github/workflows/tests.yml
vendored
85
.github/workflows/tests.yml
vendored
|
@ -1,85 +0,0 @@
|
||||||
name: Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
types: [opened, synchronize]
|
|
||||||
paths-ignore:
|
|
||||||
- '**/*.md'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
name: Lint
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: golangci-lint
|
|
||||||
uses: golangci/golangci-lint-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
|
|
||||||
cover:
|
|
||||||
name: Coverage
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
|
|
||||||
env:
|
|
||||||
CGO_ENABLED: 1
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: 1.19
|
|
||||||
|
|
||||||
- name: Restore Go modules from cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /home/runner/go/pkg/mod
|
|
||||||
key: deps-${{ hashFiles('go.sum') }}
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Test and write coverage profile
|
|
||||||
run: make cover
|
|
||||||
|
|
||||||
- name: Upload coverage results to Codecov
|
|
||||||
uses: codecov/codecov-action@v1
|
|
||||||
with:
|
|
||||||
fail_ci_if_error: false
|
|
||||||
path_to_write_report: ./coverage.txt
|
|
||||||
verbose: true
|
|
||||||
|
|
||||||
tests:
|
|
||||||
name: Tests
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
go_versions: [ '1.17', '1.18', '1.19' ]
|
|
||||||
fail-fast: false
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: '${{ matrix.go_versions }}'
|
|
||||||
|
|
||||||
- name: Restore Go modules from cache
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /home/runner/go/pkg/mod
|
|
||||||
key: deps-${{ hashFiles('go.sum') }}
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: make test
|
|
5
.gitignore
vendored
5
.gitignore
vendored
|
@ -10,9 +10,12 @@ temp
|
||||||
test.sh
|
test.sh
|
||||||
testfile
|
testfile
|
||||||
.blast.yml
|
.blast.yml
|
||||||
.neofs-cli.yml
|
.frostfs-cli.yml
|
||||||
|
|
||||||
.cache
|
.cache
|
||||||
|
|
||||||
coverage.txt
|
coverage.txt
|
||||||
coverage.html
|
coverage.html
|
||||||
|
|
||||||
|
# debhelpers
|
||||||
|
**/.debhelper
|
||||||
|
|
11
.gitlint
Normal file
11
.gitlint
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
[general]
|
||||||
|
fail-without-commits=True
|
||||||
|
regex-style-search=True
|
||||||
|
contrib=CC1
|
||||||
|
|
||||||
|
[title-match-regex]
|
||||||
|
regex=^\[\#[0-9Xx]+\]\s
|
||||||
|
|
||||||
|
[ignore-by-title]
|
||||||
|
regex=^Release(.*)
|
||||||
|
ignore=title-match-regex
|
|
@ -4,7 +4,7 @@
|
||||||
# options for analysis running
|
# options for analysis running
|
||||||
run:
|
run:
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||||
timeout: 5m
|
timeout: 15m
|
||||||
|
|
||||||
# include test files or not, default is true
|
# include test files or not, default is true
|
||||||
tests: true
|
tests: true
|
||||||
|
@ -12,7 +12,8 @@ run:
|
||||||
# output configuration options
|
# output configuration options
|
||||||
output:
|
output:
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
format: tab
|
formats:
|
||||||
|
- format: tab
|
||||||
|
|
||||||
# all available settings of specific linters
|
# all available settings of specific linters
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
@ -24,6 +25,16 @@ linters-settings:
|
||||||
govet:
|
govet:
|
||||||
# report about shadowed variables
|
# report about shadowed variables
|
||||||
check-shadowing: false
|
check-shadowing: false
|
||||||
|
custom:
|
||||||
|
truecloudlab-linters:
|
||||||
|
path: bin/external_linters.so
|
||||||
|
original-url: git.frostfs.info/TrueCloudLab/linters.git
|
||||||
|
settings:
|
||||||
|
noliteral:
|
||||||
|
enable: true
|
||||||
|
target-methods: ["Fatal"]
|
||||||
|
disable-packages: ["req", "r"]
|
||||||
|
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
|
||||||
linters:
|
linters:
|
||||||
enable:
|
enable:
|
||||||
|
@ -32,15 +43,12 @@ linters:
|
||||||
- revive
|
- revive
|
||||||
|
|
||||||
# some default golangci-lint linters
|
# some default golangci-lint linters
|
||||||
- deadcode
|
|
||||||
- errcheck
|
- errcheck
|
||||||
- gosimple
|
- gosimple
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- staticcheck
|
- staticcheck
|
||||||
- structcheck
|
|
||||||
- typecheck
|
- typecheck
|
||||||
- unused
|
- unused
|
||||||
- varcheck
|
|
||||||
|
|
||||||
# extra linters
|
# extra linters
|
||||||
- exhaustive
|
- exhaustive
|
||||||
|
@ -48,6 +56,7 @@ linters:
|
||||||
- gofmt
|
- gofmt
|
||||||
- whitespace
|
- whitespace
|
||||||
- goimports
|
- goimports
|
||||||
|
- truecloudlab-linters
|
||||||
disable-all: true
|
disable-all: true
|
||||||
fast: false
|
fast: false
|
||||||
|
|
||||||
|
|
52
.pre-commit-config.yaml
Normal file
52
.pre-commit-config.yaml
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
ci:
|
||||||
|
autofix_prs: false
|
||||||
|
|
||||||
|
repos:
|
||||||
|
- repo: https://github.com/jorisroovers/gitlint
|
||||||
|
rev: v0.19.1
|
||||||
|
hooks:
|
||||||
|
- id: gitlint
|
||||||
|
stages: [commit-msg]
|
||||||
|
- id: gitlint-ci
|
||||||
|
|
||||||
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
|
rev: v4.4.0
|
||||||
|
hooks:
|
||||||
|
- id: check-added-large-files
|
||||||
|
- id: check-case-conflict
|
||||||
|
- id: check-executables-have-shebangs
|
||||||
|
- id: check-shebang-scripts-are-executable
|
||||||
|
- id: check-merge-conflict
|
||||||
|
- id: check-json
|
||||||
|
- id: check-xml
|
||||||
|
- id: check-yaml
|
||||||
|
- id: trailing-whitespace
|
||||||
|
args: [--markdown-linebreak-ext=md]
|
||||||
|
- id: end-of-file-fixer
|
||||||
|
exclude: ".key$"
|
||||||
|
|
||||||
|
- repo: https://github.com/shellcheck-py/shellcheck-py
|
||||||
|
rev: v0.9.0.2
|
||||||
|
hooks:
|
||||||
|
- id: shellcheck
|
||||||
|
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: make-lint-install
|
||||||
|
name: install linters
|
||||||
|
entry: make lint-install
|
||||||
|
language: system
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: make-lint
|
||||||
|
name: run linters
|
||||||
|
entry: make lint
|
||||||
|
language: system
|
||||||
|
pass_filenames: false
|
||||||
|
|
||||||
|
- id: go-unit-tests
|
||||||
|
name: go unit tests
|
||||||
|
entry: make test
|
||||||
|
pass_filenames: false
|
||||||
|
types: [go]
|
||||||
|
language: system
|
282
CHANGELOG.md
282
CHANGELOG.md
|
@ -3,237 +3,125 @@
|
||||||
This document outlines major changes between releases.
|
This document outlines major changes between releases.
|
||||||
|
|
||||||
## [Unreleased]
|
## [Unreleased]
|
||||||
|
- Support percent-encoding for GET queries (#134)
|
||||||
## [0.25.0] - 2022-10-31
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Config reloading on SIGHUP (#200, #208)
|
|
||||||
- Stop pool dial on SIGINT (#212)
|
|
||||||
- Makefile help (#213)
|
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Update NeoFS error handling (#206)
|
- Update go version to 1.22 (#132)
|
||||||
- GitHub actions updates (#205, #209)
|
|
||||||
- Unified system attribute format for GET and HEAD (#213)
|
|
||||||
|
|
||||||
## [0.24.0] - 2022-09-14
|
## [0.30.0] - Kangshung - 2024-07-22
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Fix expiration epoch calculation (#198)
|
- Handle query unescape and invalid bearer token errors (#107)
|
||||||
- Fix panic on go1.19 (#188)
|
- Fix HTTP/2 requests (#110)
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- Exposure of pool metrics (#179, #194)
|
- Add new `reconnect_interval` config param (#100)
|
||||||
|
- Erasure coding support in placement policy (#114)
|
||||||
|
- HTTP Header canonicalizer for well-known headers (#121)
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Help doesn't print empty parameters (#186)
|
- Improve test coverage (#112, #117)
|
||||||
- Update version calculation (#190, #199)
|
- Bumped vulnerable dependencies (#115)
|
||||||
- Update neofs-sdk-go (#196)
|
- Replace extended ACL examples with policies in README (#118)
|
||||||
- Update go version in CI and docker (#197, #202)
|
|
||||||
|
|
||||||
## [0.23.0] - 2022-08-02
|
### Removed
|
||||||
|
|
||||||
### Added
|
## [0.29.0] - Zemu - 2024-05-27
|
||||||
- New param to configure pool error threshold (#184)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Pprof and prometheus metrics configuration (#171)
|
|
||||||
- Drop GO111MODULES from builds (#182)
|
|
||||||
|
|
||||||
### Updating from v0.22.0
|
|
||||||
1. To enable pprof use `pprof.enabled` instead of `pprof` in config.
|
|
||||||
To enable prometheus metrics use `prometheus.enabled` instead of `metrics` in config.
|
|
||||||
If you are using the command line flags you can skip this step.
|
|
||||||
|
|
||||||
## [0.22.0] - 2022-07-25
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Default params documentation (#172)
|
|
||||||
- Health metric (#175)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Version output (#169)
|
|
||||||
- Updated SDK Version (#178)
|
|
||||||
|
|
||||||
## [0.21.0] - 2022-06-20
|
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Downloading ZIP archive using streaming (#163)
|
- Fix possibility of panic during SIGHUP (#99)
|
||||||
|
- Handle query unescape and invalid bearer token errors (#108)
|
||||||
|
- Fix log-level change on SIGHUP (#105)
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- New make target to build app in docker (#159)
|
- Support client side object cut (#70)
|
||||||
|
- Add `frostfs.client_cut` config param
|
||||||
|
- Add `frostfs.buffer_max_size_for_put` config param
|
||||||
|
- Add bucket/container caching
|
||||||
|
- Disable homomorphic hash for PUT if it's disabled in container itself
|
||||||
|
- Add new `logger.destination` config param with journald support (#89, #104)
|
||||||
|
- Add support namespaces (#91)
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Increased buffer size for file uploading (#148)
|
- Replace atomics with mutex for reloadable params (#74)
|
||||||
- Updated linter version to v1.46.2 (#161)
|
|
||||||
- Updated CodeQL version to v2 (#158)
|
|
||||||
|
|
||||||
|
## [0.28.1] - 2024-01-24
|
||||||
## [0.20.0] - 2022-04-29
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Get rid of data race on server shutdown (#145)
|
|
||||||
- Improved English in docs and comments (#153)
|
|
||||||
- Use `FilePath` to download zip (#150)
|
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- Support container name NNS resolving (#142)
|
- Tree pool traversal limit (#92)
|
||||||
|
|
||||||
|
### Update from 0.28.0
|
||||||
|
See new `frostfs.tree_pool_max_attempts` config parameter.
|
||||||
|
|
||||||
|
## [0.28.0] - Academy of Sciences - 2023-12-07
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- `grpc` schemas in tree configuration (#62)
|
||||||
|
- `GetSubTree` failures (#67)
|
||||||
|
- Debian packaging (#69, #90)
|
||||||
|
- Get latest version of tree node (#85)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Support dump metrics descriptions (#29)
|
||||||
|
- Support impersonate bearer token (#40, #45)
|
||||||
|
- Tracing support (#20, #44, #60)
|
||||||
|
- Object name resolving with tree service (#30)
|
||||||
|
- Metrics for current endpoint status (#77)
|
||||||
|
- Soft memory limit with `runtime.soft_memory_limit` (#72)
|
||||||
|
- Add selection of the node of the latest version of the object (#85)
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Updated docs (#133, #140)
|
- Update prometheus to v1.15.0 (#35)
|
||||||
- Increased default read/write timeouts (#154)
|
- Update go version to 1.19 (#50)
|
||||||
- Updated SDK (#137, #139)
|
- Finish rebranding (#2)
|
||||||
- Updated go version to 1.17 (#143)
|
- Use gate key to form object owner (#66)
|
||||||
- Improved error messages (#144)
|
- Move log messages to constants (#36)
|
||||||
|
- Uploader and downloader refactor (#73)
|
||||||
|
|
||||||
## [0.19.0] - 2022-03-16
|
### Removed
|
||||||
|
- Drop `tree.service` param (now endpoints from `peers` section are used) (#59)
|
||||||
|
|
||||||
|
## [0.27.0] - Karpinsky - 2023-07-12
|
||||||
|
|
||||||
|
This is a first FrostFS HTTP Gateway release named after
|
||||||
|
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
|
||||||
|
|
||||||
### Fixed
|
### Fixed
|
||||||
- Uploading object with zero payload (#122)
|
- Require only one healthy storage server to start (#7)
|
||||||
- Different headers format in GET and HEAD (#125)
|
- Enable gate metrics (#38)
|
||||||
- Fixed project name in docs (#120)
|
- `Too many pings` error (#61)
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
- Support object attributes with spaces (#123)
|
- Multiple configs support (#12)
|
||||||
|
|
||||||
### Changed
|
### Changed
|
||||||
- Updated fasthttp to v1.34.0 (#129)
|
- Repository rebranding (#1)
|
||||||
- Updated NeoFS SDK to v1.0.0-rc.3 (#126, #132)
|
- Update neo-go to v0.101.0 (#8)
|
||||||
- Refactored content type detecting (#128)
|
- Update viper to v1.15.0 (#8)
|
||||||
|
- Update go version to 1.18 (#9)
|
||||||
|
- Errors have become more detailed (#18)
|
||||||
|
- Update system attribute names (#22)
|
||||||
|
- Separate integration tests with build tags (#24)
|
||||||
|
- Changed values for `frostfs_http_gw_state_health` metric (#32)
|
||||||
|
|
||||||
|
### Updating from neofs-http-gw v0.26.0
|
||||||
|
|
||||||
## [0.18.0] - 2021-12-10
|
To set system attributes use updated headers
|
||||||
|
(you can use old ones for now, but their support will be dropped in the future releases):
|
||||||
|
|
||||||
### Fixed
|
* `X-Attribute-Neofs-*` -> `X-Attribute-System-*`
|
||||||
- System headers format (#111)
|
* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*`
|
||||||
|
* `X-Attribute-neofs-*` -> `X-Attribute-system-*`
|
||||||
|
|
||||||
### Added
|
|
||||||
- Different formats to set object's expiration: in epoch, duration, timestamp,
|
|
||||||
RFC3339 (#108)
|
|
||||||
- Support of nodes priority (#115)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Updated testcontainers dependency (#100)
|
|
||||||
|
|
||||||
## [0.17.0] - 2021-11-15
|
|
||||||
|
|
||||||
Support of bulk file download with zip streams and various bug fixes.
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Allow canonical `X-Attribute-Neofs-*` headers (#87)
|
|
||||||
- Responses with error message now end with `\n` character (#105)
|
|
||||||
- Application does not require all neofs endpoints to be healthy at start now
|
|
||||||
(#103)
|
|
||||||
- Application now tracks session token errors and recreates tokens in runtime
|
|
||||||
(#95)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Integration tests with [all-in-one](https://github.com/nspcc-dev/neofs-aio/)
|
|
||||||
test containers (#85, #94)
|
|
||||||
- Bulk download support with zip streams (#92, #96)
|
|
||||||
|
|
||||||
## 0.16.1 (28 Jul 2021)
|
|
||||||
|
|
||||||
New features:
|
|
||||||
* logging requests (#77)
|
|
||||||
* HEAD methods for download routes (#76)
|
|
||||||
|
|
||||||
Improvements:
|
|
||||||
* updated sdk-go dependency (#82)
|
|
||||||
|
|
||||||
Bugs fixed:
|
|
||||||
* wrong NotFound status was used (#30)
|
|
||||||
|
|
||||||
## 0.16.0 (29 Jun 2021)
|
|
||||||
|
|
||||||
We update HTTP gateway with NEP-6 wallets support, YAML configuration files
|
|
||||||
and small fixes.
|
|
||||||
|
|
||||||
New features:
|
|
||||||
* YAML configuration file (#71)
|
|
||||||
|
|
||||||
Behavior changes:
|
|
||||||
* gateway key needs to be stored in a proper NEP-6 wallet now, `-k` option is
|
|
||||||
no longer available, see `-w` and `-a` (#68)
|
|
||||||
|
|
||||||
Bugs fixed:
|
|
||||||
* downloads were not streamed leading to excessive memory usage (#67)
|
|
||||||
* Last-Modified header incorrectly used local time (#75)
|
|
||||||
|
|
||||||
## 0.15.2 (22 Jun 2021)
|
|
||||||
|
|
||||||
New features:
|
|
||||||
* Content-Type returned for object GET requests can now be taken from
|
|
||||||
attributes (overriding autodetection, #65)
|
|
||||||
|
|
||||||
Behavior changes:
|
|
||||||
* grpc keepalive options can no longer be changed (#60)
|
|
||||||
|
|
||||||
Improvements:
|
|
||||||
* code refactoring (more reuse between different gateways, moved some code to
|
|
||||||
sdk-go, #47, #46, #51, #62, #63)
|
|
||||||
* documentation updates and fixes (#53, #49, #55, #59)
|
|
||||||
* updated api-go dependency (#57)
|
|
||||||
|
|
||||||
Bugs fixed:
|
|
||||||
* `-k` option wasn't accepted for key although it was documented (#50)
|
|
||||||
|
|
||||||
## 0.15.1 (24 May 2021)
|
|
||||||
|
|
||||||
This important release makes HTTP gateway compatible with NeoFS node version
|
|
||||||
0.20.0.
|
|
||||||
|
|
||||||
Behavior changes:
|
|
||||||
* neofs-api-go was updated to 1.26.1, which contains some incompatible
|
|
||||||
changes in underlying components (#39, #44)
|
|
||||||
* `neofs-http-gw` is consistently used now for repository, binary and image
|
|
||||||
names (#43)
|
|
||||||
|
|
||||||
Improvements:
|
|
||||||
* minor code cleanups based on stricter set of linters (#41)
|
|
||||||
* updated README (#42)
|
|
||||||
|
|
||||||
## 0.15.0 (30 Apr 2021)
|
|
||||||
|
|
||||||
This is the first public release incorporating latest NeoFS protocol support
|
|
||||||
and fixing some bugs.
|
|
||||||
|
|
||||||
New features:
|
|
||||||
* upload support (#14, #13, #29)
|
|
||||||
* ephemeral keys (#26)
|
|
||||||
* TLS server support (#28)
|
|
||||||
|
|
||||||
Behavior changes:
|
|
||||||
* node weights can now be specified as simple numbers instead of percentages
|
|
||||||
and gateway will calculate the proportion automatically (#27)
|
|
||||||
* attributes are converted now to `X-Attribute-*` headers when retrieving
|
|
||||||
object from gate instead of `X-*` (#29)
|
|
||||||
|
|
||||||
Improvements:
|
|
||||||
* better Makefile (#16, #24, #33, #34)
|
|
||||||
* updated documentation (#16, #29, #35, #36)
|
|
||||||
* updated neofs-api-go to v1.25.0 (#17, #20)
|
|
||||||
* updated fasthttp to v1.23.0+ (#17, #29)
|
|
||||||
* refactoring, eliminating some dependencies (#20, #29)
|
|
||||||
|
|
||||||
Bugs fixed:
|
|
||||||
* gateway attempted to work with no NeoFS peers configured (#29)
|
|
||||||
* some invalid headers could be sent for attributes using non-ASCII or
|
|
||||||
non-printable characters (#29)
|
|
||||||
|
|
||||||
## Older versions
|
## Older versions
|
||||||
|
|
||||||
Please refer to [Github
|
This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0.
|
||||||
releases](https://github.com/nspcc-dev/neofs-http-gw/releases/) for older
|
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.
|
||||||
releases.
|
|
||||||
|
|
||||||
[0.17.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.16.1...v0.17.0
|
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0
|
||||||
[0.18.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.17.0...v0.18.0
|
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0
|
||||||
[0.19.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.18.0...v0.19.0
|
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
|
||||||
[0.20.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.19.0...v0.20.0
|
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0
|
||||||
[0.21.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.20.0...v0.21.0
|
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0
|
||||||
[0.22.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.21.0...v0.22.0
|
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...master
|
||||||
[0.23.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.22.0...v0.23.0
|
|
||||||
[0.24.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.23.0...v0.24.0
|
|
||||||
[0.25.0]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.24.0...v0.25.0
|
|
||||||
[Unreleased]: https://github.com/nspcc-dev/neofs-http-gw/compare/v0.25.0...master
|
|
||||||
|
|
|
@ -3,8 +3,8 @@
|
||||||
First, thank you for contributing! We love and encourage pull requests from
|
First, thank you for contributing! We love and encourage pull requests from
|
||||||
everyone. Please follow the guidelines:
|
everyone. Please follow the guidelines:
|
||||||
|
|
||||||
- Check the open [issues](https://github.com/nspcc-dev/neofs-http-gw/issues) and
|
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and
|
||||||
[pull requests](https://github.com/nspcc-dev/neofs-http-gw/pulls) for existing
|
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing
|
||||||
discussions.
|
discussions.
|
||||||
|
|
||||||
- Open an issue first, to discuss a new feature or enhancement.
|
- Open an issue first, to discuss a new feature or enhancement.
|
||||||
|
@ -23,24 +23,24 @@ everyone. Please follow the guidelines:
|
||||||
|
|
||||||
## Development Workflow
|
## Development Workflow
|
||||||
|
|
||||||
Start by forking the `neofs-http-gw` repository, make changes in a branch and then
|
Start by forking the `frostfs-http-gw` repository, make changes in a branch and then
|
||||||
send a pull request. We encourage pull requests to discuss code changes. Here
|
send a pull request. We encourage pull requests to discuss code changes. Here
|
||||||
are the steps in details:
|
are the steps in details:
|
||||||
|
|
||||||
### Set up your GitHub Repository
|
### Set up your git repository
|
||||||
Fork [NeoFS HTTP Gateway
|
Fork [FrostFS HTTP Gateway
|
||||||
upstream](https://github.com/nspcc-dev/neofs-http-gw/fork) source repository
|
upstream](https://git.frostfs.info/repo/fork/8) source repository
|
||||||
to your own personal repository. Copy the URL of your fork (you will need it for
|
to your own personal repository. Copy the URL of your fork (you will need it for
|
||||||
the `git clone` command below).
|
the `git clone` command below).
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ git clone https://github.com/nspcc-dev/neofs-http-gw
|
$ git clone https://git.frostfs.info/<username>/frostfs-http-gw.git
|
||||||
```
|
```
|
||||||
|
|
||||||
### Set up git remote as ``upstream``
|
### Set up git remote as ``upstream``
|
||||||
```sh
|
```sh
|
||||||
$ cd neofs-http-gw
|
$ cd frostfs-http-gw
|
||||||
$ git remote add upstream https://github.com/nspcc-dev/neofs-http-gw
|
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
|
||||||
$ git fetch upstream
|
$ git fetch upstream
|
||||||
$ git merge upstream/master
|
$ git merge upstream/master
|
||||||
...
|
...
|
||||||
|
@ -90,8 +90,8 @@ $ git push origin feature/123-something_awesome
|
||||||
```
|
```
|
||||||
|
|
||||||
### Create a Pull Request
|
### Create a Pull Request
|
||||||
Pull requests can be created via GitHub. Refer to [this
|
Pull requests can be created via Forgejo. Refer to [this
|
||||||
document](https://help.github.com/articles/creating-a-pull-request/) for
|
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
|
||||||
detailed steps on how to create a pull request. After a Pull Request gets peer
|
detailed steps on how to create a pull request. After a Pull Request gets peer
|
||||||
reviewed and approved, it will be merged.
|
reviewed and approved, it will be merged.
|
||||||
|
|
||||||
|
@ -107,7 +107,7 @@ contributors".
|
||||||
To sign your work, just add a line like this at the end of your commit message:
|
To sign your work, just add a line like this at the end of your commit message:
|
||||||
|
|
||||||
```
|
```
|
||||||
Signed-off-by: Samii Sakisaka <samii@nspcc.ru>
|
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
|
||||||
```
|
```
|
||||||
|
|
||||||
This can be easily done with the `--signoff` option to `git commit`.
|
This can be easily done with the `--signoff` option to `git commit`.
|
||||||
|
|
118
Makefile
Normal file → Executable file
118
Makefile
Normal file → Executable file
|
@ -2,29 +2,47 @@
|
||||||
|
|
||||||
REPO ?= $(shell go list -m)
|
REPO ?= $(shell go list -m)
|
||||||
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
||||||
GO_VERSION ?= 1.19
|
GO_VERSION ?= 1.22
|
||||||
LINT_VERSION ?= 1.49.0
|
LINT_VERSION ?= 1.60.3
|
||||||
|
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
|
||||||
BUILD ?= $(shell date -u --iso=seconds)
|
BUILD ?= $(shell date -u --iso=seconds)
|
||||||
|
|
||||||
HUB_IMAGE ?= nspccdev/neofs-http-gw
|
HUB_IMAGE ?= truecloudlab/frostfs-http-gw
|
||||||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
||||||
|
|
||||||
|
METRICS_DUMP_OUT ?= ./metrics-dump.json
|
||||||
|
|
||||||
|
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
|
||||||
|
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
|
||||||
|
TMP_DIR := .cache
|
||||||
|
|
||||||
# List of binaries to build. For now just one.
|
# List of binaries to build. For now just one.
|
||||||
BINDIR = bin
|
BINDIR = bin
|
||||||
DIRS = $(BINDIR)
|
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
||||||
BINS = $(BINDIR)/neofs-http-gw
|
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
||||||
|
|
||||||
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint version clean
|
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean
|
||||||
|
|
||||||
|
# .deb package versioning
|
||||||
|
OS_RELEASE = $(shell lsb_release -cs)
|
||||||
|
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
|
||||||
|
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
|
||||||
|
sed "s/-/~/")-${OS_RELEASE}
|
||||||
|
.PHONY: debpackage debclean
|
||||||
|
|
||||||
|
FUZZ_NGFUZZ_DIR ?= ""
|
||||||
|
FUZZ_TIMEOUT ?= 30
|
||||||
|
FUZZ_FUNCTIONS ?= "all"
|
||||||
|
FUZZ_AUX ?= ""
|
||||||
|
|
||||||
# Make all binaries
|
# Make all binaries
|
||||||
all: $(BINS)
|
all: $(BINS)
|
||||||
|
|
||||||
$(BINS): $(DIRS) dep
|
$(BINS): $(DIRS) dep
|
||||||
@echo "⇒ Build $@"
|
@echo "⇒ Build $@"
|
||||||
CGO_ENABLED=0 \
|
CGO_ENABLED=0 \
|
||||||
go build -v -trimpath \
|
go build -v -trimpath \
|
||||||
-ldflags "-X main.Version=$(VERSION)" \
|
-ldflags "-X main.Version=$(VERSION)" \
|
||||||
-o $@ ./
|
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
||||||
|
|
||||||
$(DIRS):
|
$(DIRS):
|
||||||
@echo "⇒ Ensure dir: $@"
|
@echo "⇒ Ensure dir: $@"
|
||||||
|
@ -55,11 +73,45 @@ docker/%:
|
||||||
test:
|
test:
|
||||||
@go test ./... -cover
|
@go test ./... -cover
|
||||||
|
|
||||||
|
# Run integration tests
|
||||||
|
.PHONY: integration-test
|
||||||
|
integration-test:
|
||||||
|
@go test ./... -cover --tags=integration
|
||||||
|
|
||||||
# Run tests with race detection and produce coverage output
|
# Run tests with race detection and produce coverage output
|
||||||
cover:
|
cover:
|
||||||
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
||||||
@go tool cover -html=coverage.txt -o coverage.html
|
@go tool cover -html=coverage.txt -o coverage.html
|
||||||
|
|
||||||
|
# Run fuzzing
|
||||||
|
CLANG := $(shell which clang-17 2>/dev/null)
|
||||||
|
.PHONY: check-clang all
|
||||||
|
check-clang:
|
||||||
|
ifeq ($(CLANG),)
|
||||||
|
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
|
||||||
|
@exit 1
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: check-ngfuzz all
|
||||||
|
check-ngfuzz:
|
||||||
|
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
|
||||||
|
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: install-fuzzing-deps
|
||||||
|
install-fuzzing-deps: check-clang check-ngfuzz
|
||||||
|
|
||||||
|
.PHONY: fuzz
|
||||||
|
fuzz: install-fuzzing-deps
|
||||||
|
@START_PATH=$$(pwd); \
|
||||||
|
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
|
||||||
|
cd $(FUZZ_NGFUZZ_DIR) && \
|
||||||
|
./ngfuzz -clean && \
|
||||||
|
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
|
||||||
|
./ngfuzz -report
|
||||||
|
|
||||||
|
|
||||||
# Reformat code
|
# Reformat code
|
||||||
fmt:
|
fmt:
|
||||||
@echo "⇒ Processing gofmt check"
|
@echo "⇒ Processing gofmt check"
|
||||||
|
@ -67,12 +119,12 @@ fmt:
|
||||||
|
|
||||||
# Build clean Docker image
|
# Build clean Docker image
|
||||||
image:
|
image:
|
||||||
@echo "⇒ Build NeoFS HTTP Gateway docker image "
|
@echo "⇒ Build FrostFS HTTP Gateway docker image "
|
||||||
@docker build \
|
@docker build \
|
||||||
--build-arg REPO=$(REPO) \
|
--build-arg REPO=$(REPO) \
|
||||||
--build-arg VERSION=$(VERSION) \
|
--build-arg VERSION=$(VERSION) \
|
||||||
--rm \
|
--rm \
|
||||||
-f Dockerfile \
|
-f .docker/Dockerfile \
|
||||||
-t $(HUB_IMAGE):$(HUB_TAG) .
|
-t $(HUB_IMAGE):$(HUB_TAG) .
|
||||||
|
|
||||||
# Push Docker image to the hub
|
# Push Docker image to the hub
|
||||||
|
@ -82,17 +134,31 @@ image-push:
|
||||||
|
|
||||||
# Build dirty Docker image
|
# Build dirty Docker image
|
||||||
dirty-image:
|
dirty-image:
|
||||||
@echo "⇒ Build NeoFS HTTP Gateway dirty docker image "
|
@echo "⇒ Build FrostFS HTTP Gateway dirty docker image "
|
||||||
@docker build \
|
@docker build \
|
||||||
--build-arg REPO=$(REPO) \
|
--build-arg REPO=$(REPO) \
|
||||||
--build-arg VERSION=$(VERSION) \
|
--build-arg VERSION=$(VERSION) \
|
||||||
--rm \
|
--rm \
|
||||||
-f Dockerfile.dirty \
|
-f .docker/Dockerfile.dirty \
|
||||||
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
|
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
|
||||||
|
|
||||||
|
# Install linters
|
||||||
|
lint-install:
|
||||||
|
@mkdir -p $(TMP_DIR)
|
||||||
|
@rm -rf $(TMP_DIR)/linters
|
||||||
|
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
|
||||||
|
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
||||||
|
@rm -rf $(TMP_DIR)/linters
|
||||||
|
@rmdir $(TMP_DIR) 2>/dev/null || true
|
||||||
|
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
||||||
|
|
||||||
# Run linters
|
# Run linters
|
||||||
lint:
|
lint:
|
||||||
@golangci-lint --timeout=5m run
|
@if [ ! -d "$(LINT_DIR)" ]; then \
|
||||||
|
echo "Run make lint-install"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
$(LINT_DIR)/golangci-lint --timeout=5m run
|
||||||
|
|
||||||
# Run linters in Docker
|
# Run linters in Docker
|
||||||
docker/lint:
|
docker/lint:
|
||||||
|
@ -102,6 +168,14 @@ docker/lint:
|
||||||
--env HOME=/src \
|
--env HOME=/src \
|
||||||
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
|
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
|
||||||
|
|
||||||
|
# Activate pre-commit hooks
|
||||||
|
pre-commit:
|
||||||
|
pre-commit install -t pre-commit -t commit-msg
|
||||||
|
|
||||||
|
# Deactivate pre-commit hooks
|
||||||
|
unpre-commit:
|
||||||
|
pre-commit uninstall -t pre-commit -t commit-msg
|
||||||
|
|
||||||
# Print version
|
# Print version
|
||||||
version:
|
version:
|
||||||
@echo $(VERSION)
|
@echo $(VERSION)
|
||||||
|
@ -111,4 +185,22 @@ clean:
|
||||||
rm -rf vendor
|
rm -rf vendor
|
||||||
rm -rf $(BINDIR)
|
rm -rf $(BINDIR)
|
||||||
|
|
||||||
|
# Package for Debian
|
||||||
|
debpackage:
|
||||||
|
dch --package frostfs-http-gw \
|
||||||
|
--controlmaint \
|
||||||
|
--newversion $(PKG_VERSION) \
|
||||||
|
--distribution $(OS_RELEASE) \
|
||||||
|
"Please see CHANGELOG.md for code changes for $(VERSION)"
|
||||||
|
dpkg-buildpackage --no-sign -b
|
||||||
|
|
||||||
|
debclean:
|
||||||
|
dh clean
|
||||||
|
|
||||||
|
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
|
||||||
|
.PHONY: dump-metrics
|
||||||
|
dump-metrics:
|
||||||
|
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
|
||||||
|
|
||||||
|
|
||||||
include help.mk
|
include help.mk
|
||||||
|
|
277
README.md
277
README.md
|
@ -1,28 +1,30 @@
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="./.github/logo.svg" width="500px" alt="NeoFS">
|
<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
|
||||||
</p>
|
</p>
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<a href="https://fs.neo.org">NeoFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
---
|
---
|
||||||
[![Report](https://goreportcard.com/badge/github.com/nspcc-dev/neofs-http-gw)](https://goreportcard.com/report/github.com/nspcc-dev/neofs-http-gw)
|
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw)
|
||||||
![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nspcc-dev/neofs-http-gw?sort=semver)
|
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange)
|
||||||
![License](https://img.shields.io/github/license/nspcc-dev/neofs-http-gw.svg?style=popout)
|
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
|
||||||
|
|
||||||
# NeoFS HTTP Gateway
|
# FrostFS HTTP Gateway
|
||||||
|
|
||||||
NeoFS HTTP Gateway bridges NeoFS internal protocol and HTTP standard.
|
FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
|
||||||
- you can download one file per request from the NeoFS Network
|
- you can download one file per request from the FrostFS Network
|
||||||
- you can upload one file per request into the NeoFS Network
|
- you can upload one file per request into the FrostFS Network
|
||||||
|
|
||||||
|
See available routes in [specification](./docs/api.md).
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
```go install github.com/nspcc-dev/neofs-http-gw```
|
```go install git.frostfs.info/TrueCloudLab/frostfs-http-gw```
|
||||||
|
|
||||||
Or you can call `make` to build it from the cloned repository (the binary will
|
Or you can call `make` to build it from the cloned repository (the binary will
|
||||||
end up in `bin/neofs-http-gw`). To build neofs-http-gw binary in clean docker
|
end up in `bin/frostfs-http-gw`). To build frostfs-http-gw binary in clean docker
|
||||||
environment, call `make docker/bin/neofs-http-gw`.
|
environment, call `make docker/bin/frostfs-http-gw`.
|
||||||
|
|
||||||
Other notable make targets:
|
Other notable make targets:
|
||||||
|
|
||||||
|
@ -36,32 +38,32 @@ version Show current version
|
||||||
```
|
```
|
||||||
|
|
||||||
Or you can also use a [Docker
|
Or you can also use a [Docker
|
||||||
image](https://hub.docker.com/r/nspccdev/neofs-http-gw) provided for the released
|
image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for the released
|
||||||
(and occasionally unreleased) versions of the gateway (`:latest` points to the
|
(and occasionally unreleased) versions of the gateway (`:latest` points to the
|
||||||
latest stable release).
|
latest stable release).
|
||||||
|
|
||||||
## Execution
|
## Execution
|
||||||
|
|
||||||
HTTP gateway itself is not a NeoFS node, so to access NeoFS it uses node's
|
HTTP gateway itself is not a FrostFS node, so to access FrostFS it uses node's
|
||||||
gRPC interface and you need to provide some node that it will connect to. This
|
gRPC interface and you need to provide some node that it will connect to. This
|
||||||
can be done either via `-p` parameter or via `HTTP_GW_PEERS_<N>_ADDRESS` and
|
can be done either via `-p` parameter or via `HTTP_GW_PEERS_<N>_ADDRESS` and
|
||||||
`HTTP_GW_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple
|
`HTTP_GW_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple
|
||||||
NeoFS nodes with weighted load balancing).
|
FrostFS nodes with weighted load balancing).
|
||||||
|
|
||||||
If you launch HTTP gateway in bundle with [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env),
|
If you launch HTTP gateway in bundle with [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
|
||||||
you can get the IP address of the node in the output of `make hosts` command
|
you can get the IP address of the node in the output of `make hosts` command
|
||||||
(with s0*.neofs.devenv name).
|
(with s0*.frostfs.devenv name).
|
||||||
|
|
||||||
These two commands are functionally equivalent, they run the gate with one
|
These two commands are functionally equivalent, they run the gate with one
|
||||||
backend node (and otherwise default settings):
|
backend node (and otherwise default settings):
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p 192.168.130.72:8080
|
$ frostfs-http-gw -p 192.168.130.72:8080
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 neofs-http-gw
|
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 frostfs-http-gw
|
||||||
```
|
```
|
||||||
It's also possible to specify uri scheme (grpc or grpcs) when using `-p`:
|
It's also possible to specify uri scheme (grpc or grpcs) when using `-p`:
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p grpc://192.168.130.72:8080
|
$ frostfs-http-gw -p grpc://192.168.130.72:8080
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 neofs-http-gw
|
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 frostfs-http-gw
|
||||||
```
|
```
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
@ -72,11 +74,11 @@ environment variables (see [example](./config/config.env)), so they're not speci
|
||||||
|
|
||||||
### Nodes: weights and priorities
|
### Nodes: weights and priorities
|
||||||
|
|
||||||
You can specify multiple `-p` options to add more NeoFS nodes, this will make
|
You can specify multiple `-p` options to add more FrostFS nodes, this will make
|
||||||
gateway spread requests equally among them (using weight 1 and priority 1 for every node):
|
gateway spread requests equally among them (using weight 1 and priority 1 for every node):
|
||||||
|
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
|
$ frostfs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
|
||||||
```
|
```
|
||||||
If you want some specific load distribution proportions, use weights and priorities:
|
If you want some specific load distribution proportions, use weights and priorities:
|
||||||
|
|
||||||
|
@ -84,7 +86,7 @@ If you want some specific load distribution proportions, use weights and priorit
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \
|
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \
|
||||||
HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \
|
HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \
|
||||||
HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \
|
HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \
|
||||||
neofs-http-gw
|
frostfs-http-gw
|
||||||
```
|
```
|
||||||
This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use
|
This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use
|
||||||
192.168.130.72 for 90% of requests and 192.168.130.73 for remaining 10%.
|
192.168.130.72 for 90% of requests and 192.168.130.73 for remaining 10%.
|
||||||
|
@ -92,13 +94,13 @@ This command will make gateway use 192.168.130.71 while it is healthy. Otherwise
|
||||||
### Keys
|
### Keys
|
||||||
You can provide a wallet via `--wallet` or `-w` flag. You can also specify the account address using `--address`
|
You can provide a wallet via `--wallet` or `-w` flag. You can also specify the account address using `--address`
|
||||||
(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet.
|
(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet.
|
||||||
If no wallet provided, the gateway autogenerates a key pair it will use for NeoFS requests.
|
If no wallet provided, the gateway autogenerates a key pair it will use for FrostFS requests.
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p $NEOFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
|
$ frostfs-http-gw -p $FROSTFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
|
||||||
```
|
```
|
||||||
Example:
|
Example:
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
|
$ frostfs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
|
||||||
```
|
```
|
||||||
|
|
||||||
### Binding and TLS
|
### Binding and TLS
|
||||||
|
@ -114,7 +116,7 @@ external redirecting solution.
|
||||||
Example to bind to `192.168.130.130:443` and serve TLS there:
|
Example to bind to `192.168.130.130:443` and serve TLS there:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ neofs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
|
$ frostfs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
|
||||||
--tls_key=key.pem --tls_certificate=cert.pem
|
--tls_key=key.pem --tls_certificate=cert.pem
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -132,12 +134,12 @@ request with data stream after timeout.
|
||||||
|
|
||||||
`HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable
|
`HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable
|
||||||
request body streaming (effectively it'll make the gateway accept the file completely
|
request body streaming (effectively it'll make the gateway accept the file completely
|
||||||
first and only then try sending it to NeoFS).
|
first and only then try sending it to FrostFS).
|
||||||
|
|
||||||
`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls maximum request body size
|
`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls maximum request body size
|
||||||
limiting uploads to files slightly lower than this limit.
|
limiting uploads to files slightly lower than this limit.
|
||||||
|
|
||||||
### NeoFS parameters
|
### FrostFS parameters
|
||||||
|
|
||||||
Gateway can automatically set timestamps for uploaded files based on local
|
Gateway can automatically set timestamps for uploaded files based on local
|
||||||
time source, use `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment
|
time source, use `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment
|
||||||
|
@ -175,15 +177,32 @@ HTTP_GW_LOGGER_LEVEL=debug
|
||||||
Configuration file is optional and can be used instead of environment variables/other parameters.
|
Configuration file is optional and can be used instead of environment variables/other parameters.
|
||||||
It can be specified with `--config` parameter:
|
It can be specified with `--config` parameter:
|
||||||
```
|
```
|
||||||
$ neofs-http-gw --config your-config.yaml
|
$ frostfs-http-gw --config your-config.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for example.
|
See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for example.
|
||||||
|
|
||||||
|
#### Multiple configs
|
||||||
|
|
||||||
|
You can use several config files when running application. It allows you to split configuration into parts.
|
||||||
|
For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](./config)).
|
||||||
|
You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag.
|
||||||
|
Also, you can combine these flags:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ frostfs-http-gw --config ./config/config.yaml --config /your/partial/config.yaml --config-dir ./config/dir
|
||||||
|
```
|
||||||
|
|
||||||
|
**Note:** next file in `--config` flag overwrites values from the previous one.
|
||||||
|
Files from `--config-dir` directory overwrite values from `--config` files.
|
||||||
|
So the command above runs `frostfs-http-gw` to listen on `0.0.0.0:8080` address (value from `./config/config.yaml`),
|
||||||
|
applies parameters from `/your/partial/config.yaml`,
|
||||||
|
enables pprof (value from `./config/dir/pprof.yaml`) and prometheus (value from `./config/dir/prometheus.yaml`).
|
||||||
|
|
||||||
## HTTP API provided
|
## HTTP API provided
|
||||||
|
|
||||||
This gateway intentionally provides limited feature set and doesn't try to
|
This gateway intentionally provides limited feature set and doesn't try to
|
||||||
substitute (or completely wrap) regular gRPC NeoFS interface. You can download
|
substitute (or completely wrap) regular gRPC FrostFS interface. You can download
|
||||||
and upload objects with it, but deleting, searching, managing ACLs, creating
|
and upload objects with it, but deleting, searching, managing ACLs, creating
|
||||||
containers and other activities are not supported and not planned to be
|
containers and other activities are not supported and not planned to be
|
||||||
supported.
|
supported.
|
||||||
|
@ -204,23 +223,23 @@ Steps to start using name resolving:
|
||||||
1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
|
1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
rpc_endpoint: http://morph-chain.neofs.devenv:30333
|
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
||||||
resolve_order:
|
resolve_order:
|
||||||
- nns
|
- nns
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Make sure your container is registered in NNS contract. If you use [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env)
|
2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
|
||||||
you can check if your container (e.g. with `container-name` name) is registered in NNS:
|
you can check if your container (e.g. with `container-name` name) is registered in NNS:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
|
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
|
||||||
http://morph-chain.neofs.devenv:30333 | jq -r '.result.hash'
|
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
|
||||||
|
|
||||||
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
|
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
|
||||||
|
|
||||||
$ docker exec -it morph_chain neo-go \
|
$ docker exec -it morph_chain neo-go \
|
||||||
contract testinvokefunction \
|
contract testinvokefunction \
|
||||||
-r http://morph-chain.neofs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
|
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
|
||||||
resolve string:container-name.container int:16 \
|
resolve string:container-name.container int:16 \
|
||||||
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
|
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
|
||||||
| base64 -d && echo
|
| base64 -d && echo
|
||||||
|
@ -236,9 +255,9 @@ $ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-nam
|
||||||
|
|
||||||
#### Create a container
|
#### Create a container
|
||||||
|
|
||||||
You can create a container via [neofs-cli](https://github.com/nspcc-dev/neofs-node/releases):
|
You can create a container via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases):
|
||||||
```
|
```
|
||||||
$ neofs-cli -r $NEOFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
|
$ frostfs-cli -r $FROSTFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
|
||||||
```
|
```
|
||||||
where `$WALLET` is a path to user wallet,
|
where `$WALLET` is a path to user wallet,
|
||||||
`$ACL` -- hex encoded basic ACL value or keywords 'private', 'public-read', 'public-read-write' and
|
`$ACL` -- hex encoded basic ACL value or keywords 'private', 'public-read', 'public-read-write' and
|
||||||
|
@ -246,18 +265,18 @@ where `$WALLET` is a path to user wallet,
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
```
|
```
|
||||||
$ neofs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
|
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
|
||||||
```
|
```
|
||||||
|
|
||||||
If you have launched nodes via [neofs-dev-env](https://github.com/nspcc-dev/neofs-dev-env),
|
If you have launched nodes via [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
|
||||||
you can get the key value from `wallets/wallet.json` or write the path to
|
you can get the key value from `wallets/wallet.json` or write the path to
|
||||||
the file `wallets/wallet.key`.
|
the file `wallets/wallet.key`.
|
||||||
|
|
||||||
#### Prepare a file in a container
|
#### Prepare a file in a container
|
||||||
|
|
||||||
To create a file via [neofs-cli](https://github.com/nspcc-dev/neofs-node/releases), run a command below:
|
To create a file via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases), run a command below:
|
||||||
```
|
```
|
||||||
$ neofs-cli -r $NEOFS_NODE -k $KEY object put --file $FILENAME --cid $CID
|
$ frostfs-cli -r $FROSTFS_NODE -k $KEY object put --file $FILENAME --cid $CID
|
||||||
```
|
```
|
||||||
where
|
where
|
||||||
`$KEY` -- the key, please read the information [above](#create-a-container),
|
`$KEY` -- the key, please read the information [above](#create-a-container),
|
||||||
|
@ -265,7 +284,7 @@ where
|
||||||
|
|
||||||
For example:
|
For example:
|
||||||
```
|
```
|
||||||
$ neofs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
|
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
@ -305,8 +324,9 @@ where
|
||||||
`$ATTRIBUTE_NAME` is the name of the attribute we want to use,
|
`$ATTRIBUTE_NAME` is the name of the attribute we want to use,
|
||||||
`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have.
|
`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have.
|
||||||
|
|
||||||
**NB!** The attribute key and value must be url encoded, i.e., if you want to download an object with the attribute value
|
**NB!** The attribute key and value should be url encoded, i.e., if you want to download an object with the attribute value
|
||||||
`a cat`, the value in the request must be `a+cat`. In the same way with the attribute key.
|
`a cat`, the value in the request must be `a+cat`. The same applies to the attribute key. If you don't escape such values
|
||||||
|
everything can still work (for example you can use `d@ta` without encoding) but it's HIGHLY RECOMMENDED to encode all your attributes.
|
||||||
|
|
||||||
If multiple objects have specified attribute with specified value, then the
|
If multiple objects have specified attribute with specified value, then the
|
||||||
first one of them is returned (and you can't get others via this interface).
|
first one of them is returned (and you can't get others via this interface).
|
||||||
|
@ -347,8 +367,8 @@ You can download some dir (files with the same prefix) in zip (it will be compre
|
||||||
$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix
|
$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix
|
||||||
```
|
```
|
||||||
|
|
||||||
**Note:** the objects must have a `FilePath` attribute, otherwise they will not be in the zip archive.
|
**Note:** the objects must have a valid `FilePath` attribute (it should not contain trailing `/`),
|
||||||
You can upload file with this attribute using `curl`:
|
otherwise they will not be in the zip archive. You can upload file with this attribute using `curl`:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
|
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
|
||||||
|
@ -369,10 +389,15 @@ set of reply headers generated using the following rules:
|
||||||
* `x-container-id` contains container ID
|
* `x-container-id` contains container ID
|
||||||
* `x-object-id` contains object ID
|
* `x-object-id` contains object ID
|
||||||
* `x-owner-id` contains owner address
|
* `x-owner-id` contains owner address
|
||||||
* all the other NeoFS attributes are converted to `X-Attribute-*` headers (but only
|
* all the other FrostFS attributes are converted to `X-Attribute-*` headers (but only
|
||||||
if they can be safely represented in HTTP header), for example `FileName`
|
if they can be safely represented in HTTP header), for example `FileName`
|
||||||
attribute becomes `X-Attribute-FileName` header
|
attribute becomes `X-Attribute-FileName` header
|
||||||
|
|
||||||
|
##### Caching strategy
|
||||||
|
|
||||||
|
HTTP Gateway doesn't control caching (it doesn't do anything with the `Cache-Control` header). The caching strategy strictly
|
||||||
|
depends on application use case. So it should be carefully done by proxy server.
|
||||||
|
|
||||||
### Uploading
|
### Uploading
|
||||||
|
|
||||||
You can POST files to `/upload/$CID` path where `$CID` is a container ID or its name if NNS is enabled. The
|
You can POST files to `/upload/$CID` path where `$CID` is a container ID or its name if NNS is enabled. The
|
||||||
|
@ -401,12 +426,12 @@ You can also add some attributes to your file using the following rules:
|
||||||
"X-Attribute-" prefix stripped, that is if you add "X-Attribute-Ololo:
|
"X-Attribute-" prefix stripped, that is if you add "X-Attribute-Ololo:
|
||||||
100500" header to your request the resulting object will get "Ololo:
|
100500" header to your request the resulting object will get "Ololo:
|
||||||
100500" attribute
|
100500" attribute
|
||||||
* "X-Attribute-NEOFS-*" headers are special
|
* "X-Attribute-SYSTEM-*" headers are special
|
||||||
(`-NEOFS-` part can also be `-neofs-` or`-Neofs-`), they're used to set internal
|
(`-SYSTEM-` part can also be `-system-` or `-System-` (and even the legacy `-Neofs-` for a few more releases)), they're used to set internal
|
||||||
NeoFS attributes starting with `__NEOFS__` prefix, for these attributes all
|
FrostFS attributes starting with `__SYSTEM__` prefix, for these attributes all
|
||||||
dashes get converted to underscores and all letters are capitalized. For
|
dashes get converted to underscores and all letters are capitalized. For
|
||||||
example, you can use "X-Attribute-NEOFS-Expiration-Epoch" header to set
|
example, you can use "X-Attribute-SYSTEM-Expiration-Epoch" header to set
|
||||||
`__NEOFS__EXPIRATION_EPOCH` attribute
|
`__SYSTEM__EXPIRATION_EPOCH` attribute
|
||||||
* `FileName` attribute is set from multipart's `filename` if not set
|
* `FileName` attribute is set from multipart's `filename` if not set
|
||||||
explicitly via `X-Attribute-FileName` header
|
explicitly via `X-Attribute-FileName` header
|
||||||
* `Timestamp` attribute can be set using gateway local time if using
|
* `Timestamp` attribute can be set using gateway local time if using
|
||||||
|
@ -416,13 +441,13 @@ You can also add some attributes to your file using the following rules:
|
||||||
---
|
---
|
||||||
**NOTE**
|
**NOTE**
|
||||||
|
|
||||||
There are some reserved headers type of `X-Attribute-NEOFS-*` (headers are arranged in descending order of priority):
|
There are some reserved headers type of `X-Attribute-SYSTEM-*` (headers are arranged in descending order of priority):
|
||||||
1. `X-Attribute-Neofs-Expiration-Epoch: 100`
|
1. `X-Attribute-System-Expiration-Epoch: 100`
|
||||||
2. `X-Attribute-Neofs-Expiration-Duration: 24h30m`
|
2. `X-Attribute-System-Expiration-Duration: 24h30m`
|
||||||
3. `X-Attribute-Neofs-Expiration-Timestamp: 1637574797`
|
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
|
||||||
4. `X-Attribute-Neofs-Expiration-RFC3339: 2021-11-22T09:55:49Z`
|
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
|
||||||
|
|
||||||
which transforms to `X-Attribute-Neofs-Expiration-Epoch`. So you can provide expiration any convenient way.
|
which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
@ -441,66 +466,47 @@ You can always upload files to public containers (open for anyone to put
|
||||||
objects into), but for restricted containers you need to explicitly allow PUT
|
objects into), but for restricted containers you need to explicitly allow PUT
|
||||||
operations for a request signed with your HTTP Gateway keys.
|
operations for a request signed with your HTTP Gateway keys.
|
||||||
|
|
||||||
If your don't want to manage gateway's secret keys and adjust eACL rules when
|
If you don't want to manage gateway's secret keys and adjust policies when
|
||||||
gateway configuration changes (new gate, key rotation, etc) or you plan to use
|
gateway configuration changes (new gate, key rotation, etc) or you plan to use
|
||||||
public services, there is an option to let your application backend (or you) to
|
public services, there is an option to let your application backend (or you) to
|
||||||
issue Bearer Tokens ans pass them from the client via gate down to NeoFS level
|
issue Bearer Tokens and pass them from the client via gate down to FrostFS level
|
||||||
to grant access.
|
to grant access.
|
||||||
|
|
||||||
NeoFS Bearer Token basically is a container owner-signed ACL data (refer to NeoFS
|
FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
|
||||||
documentation for more details). There are two options to pass them to gateway:
|
documentation for more details). There are two options to pass them to gateway:
|
||||||
* "Authorization" header with "Bearer" type and base64-encoded token in
|
* "Authorization" header with "Bearer" type and base64-encoded token in
|
||||||
credentials field
|
credentials field
|
||||||
* "Bearer" cookie with base64-encoded token contents
|
* "Bearer" cookie with base64-encoded token contents
|
||||||
|
|
||||||
For example, you have a mobile application frontend with a backend part storing
|
For example, you have a mobile application frontend with a backend part storing
|
||||||
data in NeoFS. When a user authorizes in the mobile app, the backend issues a NeoFS
|
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
|
||||||
Bearer token and provides it to the frontend. Then, the mobile app may generate
|
Bearer token and provides it to the frontend. Then, the mobile app may generate
|
||||||
some data and upload it via any available NeoFS HTTP Gateway by adding
|
some data and upload it via any available FrostFS HTTP Gateway by adding
|
||||||
the corresponding header to the upload request. Accessing the ACL protected data
|
the corresponding header to the upload request. Accessing policy protected data
|
||||||
works the same way.
|
works the same way.
|
||||||
|
|
||||||
##### Example
|
##### Example
|
||||||
In order to generate a bearer token, you need to know the container owner key and
|
In order to generate a bearer token, you need to have a wallet (which will be used to sign the token).
|
||||||
the address of the sender who will do the request to NeoFS (in our case, it's a gateway wallet address).
|
|
||||||
|
|
||||||
Suppose we have:
|
1. Suppose you have a container with a private policy bound to your wallet key
|
||||||
* **KxDgvEKzgSBPPfuVfw67oPQBSjidEiqTHURKSDL1R7yGaGYAeYnr** (container owner key)
|
|
||||||
* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner address)
|
|
||||||
* **BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K** (container id)
|
|
||||||
|
|
||||||
Firstly, we need to encode the container id and the sender address to base64 (now it's base58).
|
|
||||||
So use **base58** and **base64** utils.
|
|
||||||
|
|
||||||
1. Encoding container id:
|
|
||||||
```
|
```
|
||||||
$ echo 'BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K' | base58 --decode | base64
|
$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
|
||||||
# output: mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg=
|
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
|
||||||
|
|
||||||
|
$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
|
||||||
|
--target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
|
||||||
|
--rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
|
||||||
|
--chain-id <chainID>
|
||||||
```
|
```
|
||||||
|
|
||||||
2. Encoding token owner id:
|
|
||||||
```
|
|
||||||
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
|
|
||||||
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
|
|
||||||
```
|
|
||||||
|
|
||||||
Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and save it to **bearer.json**:
|
2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
|
||||||
|
the HTTP Gateway request as a wallet-signed request and save it to **bearer.json**:
|
||||||
```
|
```
|
||||||
{
|
{
|
||||||
"body": {
|
"body": {
|
||||||
"eaclTable": {
|
"allowImpersonate": true,
|
||||||
"version": {
|
|
||||||
"major": 0,
|
|
||||||
"minor": 0
|
|
||||||
},
|
|
||||||
"containerID": {
|
|
||||||
"value": "mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg="
|
|
||||||
},
|
|
||||||
"records": []
|
|
||||||
},
|
|
||||||
"ownerID": {
|
|
||||||
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
|
|
||||||
},
|
|
||||||
"lifetime": {
|
"lifetime": {
|
||||||
"exp": "10000",
|
"exp": "10000",
|
||||||
"nbf": "0",
|
"nbf": "0",
|
||||||
|
@ -511,11 +517,12 @@ Now, we can form a Bearer token (10000 is liftetime expiration in epoch) and sav
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
Next, sign it with the container owner key:
|
3. Sign it with the wallet:
|
||||||
```
|
```
|
||||||
$ neofs-cli util sign bearer-token --from bearer.json --to signed.json -w ./wallet.json
|
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
|
||||||
```
|
```
|
||||||
Encoding to base64 to use via the header:
|
|
||||||
|
4. Encode to base64 to use in header:
|
||||||
```
|
```
|
||||||
$ base64 -w 0 signed.json
|
$ base64 -w 0 signed.json
|
||||||
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
|
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
|
||||||
|
@ -533,47 +540,32 @@ $ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoEC
|
||||||
# }
|
# }
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Note
|
##### Note: Bearer Token owner
|
||||||
For the token to work correctly, you need to create a container with a basic ACL that:
|
|
||||||
1. Allow PUT operation to others
|
You can specify the exact key that can use the Bearer Token (the gateway wallet address).
|
||||||
2. Doesn't set "final" bit
|
To do this, encode the wallet address in base64 format:
|
||||||
|
|
||||||
For example:
|
|
||||||
```
|
```
|
||||||
$ neofs-cli -w ./wallet.json --basic-acl 0x0FFFCFFF -r 192.168.130.72:8080 container create --policy "REP 3" --await
|
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
|
||||||
|
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
|
||||||
```
|
```
|
||||||
|
|
||||||
To deny access to a container without a token, set the eACL rules:
|
Then specify this value in the Bearer Token JSON:
|
||||||
```
|
|
||||||
$ neofs-cli -w ./wallet.json -r 192.168.130.72:8080 container set-eacl --table eacl.json --await --cid BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
|
|
||||||
```
|
|
||||||
|
|
||||||
File **eacl.json**:
|
|
||||||
```
|
```
|
||||||
{
|
{
|
||||||
"version": {
|
"body": {
|
||||||
"major": 0,
|
"ownerID": {
|
||||||
"minor": 0
|
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
|
||||||
},
|
},
|
||||||
"containerID": {
|
...
|
||||||
"value": "mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg="
|
|
||||||
},
|
|
||||||
"records": [
|
|
||||||
{
|
|
||||||
"operation": "PUT",
|
|
||||||
"action": "DENY",
|
|
||||||
"filters": [],
|
|
||||||
"targets": [
|
|
||||||
{
|
|
||||||
"role": "OTHERS",
|
|
||||||
"keys": []
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##### Note: Policy override
|
||||||
|
|
||||||
|
Instead of impersonation, you can define the set of policies that will be applied
|
||||||
|
to the request sender. This allows you to restrict access to specific operations and
|
||||||
|
specific objects without giving full impersonation control to the token user.
|
||||||
|
|
||||||
### Metrics and Pprof
|
### Metrics and Pprof
|
||||||
|
|
||||||
If enabled, Prometheus metrics are available at `localhost:8084` endpoint
|
If enabled, Prometheus metrics are available at `localhost:8084` endpoint
|
||||||
|
@ -583,3 +575,26 @@ See [configuration](./docs/gate-configuration.md).
|
||||||
## Credits
|
## Credits
|
||||||
|
|
||||||
Please see [CREDITS](CREDITS.md) for details.
|
Please see [CREDITS](CREDITS.md) for details.
|
||||||
|
|
||||||
|
## Fuzzing
|
||||||
|
|
||||||
|
To run fuzzing tests use the following command:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make fuzz
|
||||||
|
```
|
||||||
|
|
||||||
|
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
|
||||||
|
|
||||||
|
You can also use the following arguments:
|
||||||
|
|
||||||
|
```
|
||||||
|
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
|
||||||
|
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
|
||||||
|
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
|
||||||
|
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
|
||||||
|
````
|
||||||
|
|
||||||
|
## Credits
|
||||||
|
|
||||||
|
Please see [CREDITS](CREDITS.md) for details.
|
||||||
|
|
26
SECURITY.md
Normal file
26
SECURITY.md
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
# Security Policy
|
||||||
|
|
||||||
|
|
||||||
|
## How To Report a Vulnerability
|
||||||
|
|
||||||
|
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
|
||||||
|
|
||||||
|
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
|
||||||
|
|
||||||
|
Instead, you can report it using one of the following ways:
|
||||||
|
|
||||||
|
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
|
||||||
|
|
||||||
|
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
|
||||||
|
|
||||||
|
* The type of issue (e.g., buffer overflow, or cross-site scripting)
|
||||||
|
* Affected version(s)
|
||||||
|
* Impact of the issue, including how an attacker might exploit the issue
|
||||||
|
* Step-by-step instructions to reproduce the issue
|
||||||
|
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||||
|
* Full paths of source file(s) related to the manifestation of the issue
|
||||||
|
* Any special configuration required to reproduce the issue
|
||||||
|
* Any log files that are related to this issue (if possible)
|
||||||
|
* Proof-of-concept or exploit code (if possible)
|
||||||
|
|
||||||
|
This information will help us triage your report more quickly.
|
2
VERSION
2
VERSION
|
@ -1 +1 @@
|
||||||
v0.25.0
|
v0.30.0
|
||||||
|
|
544
app.go
544
app.go
|
@ -1,544 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"os/signal"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/fasthttp/router"
|
|
||||||
"github.com/nspcc-dev/neo-go/cli/flags"
|
|
||||||
"github.com/nspcc-dev/neo-go/cli/input"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/util"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/downloader"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/metrics"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/resolver"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/response"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/uploader"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
app struct {
|
|
||||||
log *zap.Logger
|
|
||||||
logLevel zap.AtomicLevel
|
|
||||||
pool *pool.Pool
|
|
||||||
owner *user.ID
|
|
||||||
cfg *viper.Viper
|
|
||||||
webServer *fasthttp.Server
|
|
||||||
webDone chan struct{}
|
|
||||||
resolver *resolver.ContainerResolver
|
|
||||||
metrics *gateMetrics
|
|
||||||
services []*metrics.Service
|
|
||||||
settings *appSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
appSettings struct {
|
|
||||||
Uploader *uploader.Settings
|
|
||||||
Downloader *downloader.Settings
|
|
||||||
TLSProvider *certProvider
|
|
||||||
}
|
|
||||||
|
|
||||||
// App is an interface for the main gateway function.
|
|
||||||
App interface {
|
|
||||||
Wait()
|
|
||||||
Serve(context.Context)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Option is an application option.
|
|
||||||
Option func(a *app)
|
|
||||||
|
|
||||||
gateMetrics struct {
|
|
||||||
logger *zap.Logger
|
|
||||||
provider GateMetricsProvider
|
|
||||||
mu sync.RWMutex
|
|
||||||
enabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
GateMetricsProvider interface {
|
|
||||||
SetHealth(int32)
|
|
||||||
Unregister()
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// WithLogger returns Option to set a specific logger.
|
|
||||||
func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
|
|
||||||
return func(a *app) {
|
|
||||||
if l == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.log = l
|
|
||||||
a.logLevel = lvl
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithConfig returns Option to use specific Viper configuration.
|
|
||||||
func WithConfig(c *viper.Viper) Option {
|
|
||||||
return func(a *app) {
|
|
||||||
if c == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
a.cfg = c
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newApp(ctx context.Context, opt ...Option) App {
|
|
||||||
var (
|
|
||||||
key *ecdsa.PrivateKey
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
a := &app{
|
|
||||||
log: zap.L(),
|
|
||||||
cfg: viper.GetViper(),
|
|
||||||
webServer: new(fasthttp.Server),
|
|
||||||
webDone: make(chan struct{}),
|
|
||||||
}
|
|
||||||
for i := range opt {
|
|
||||||
opt[i](a)
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- setup FastHTTP server --
|
|
||||||
a.webServer.Name = "neofs-http-gw"
|
|
||||||
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
|
|
||||||
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
|
|
||||||
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
|
|
||||||
a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
|
|
||||||
a.webServer.DisableHeaderNamesNormalizing = true
|
|
||||||
a.webServer.NoDefaultServerHeader = true
|
|
||||||
a.webServer.NoDefaultContentType = true
|
|
||||||
a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
|
|
||||||
a.webServer.DisablePreParseMultipartForm = true
|
|
||||||
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
|
|
||||||
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
|
|
||||||
key, err = getNeoFSKey(a)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("failed to get neofs credentials", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
var owner user.ID
|
|
||||||
user.IDFromKey(&owner, key.PublicKey)
|
|
||||||
a.owner = &owner
|
|
||||||
|
|
||||||
var prm pool.InitParameters
|
|
||||||
prm.SetKey(key)
|
|
||||||
prm.SetNodeDialTimeout(a.cfg.GetDuration(cfgConTimeout))
|
|
||||||
prm.SetHealthcheckTimeout(a.cfg.GetDuration(cfgReqTimeout))
|
|
||||||
prm.SetClientRebalanceInterval(a.cfg.GetDuration(cfgRebalance))
|
|
||||||
prm.SetErrorThreshold(a.cfg.GetUint32(cfgPoolErrorThreshold))
|
|
||||||
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
address := a.cfg.GetString(cfgPeers + "." + strconv.Itoa(i) + ".address")
|
|
||||||
weight := a.cfg.GetFloat64(cfgPeers + "." + strconv.Itoa(i) + ".weight")
|
|
||||||
priority := a.cfg.GetInt(cfgPeers + "." + strconv.Itoa(i) + ".priority")
|
|
||||||
if address == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if weight <= 0 { // unspecified or wrong
|
|
||||||
weight = 1
|
|
||||||
}
|
|
||||||
if priority <= 0 { // unspecified or wrong
|
|
||||||
priority = 1
|
|
||||||
}
|
|
||||||
prm.AddNode(pool.NewNodeParam(priority, address, weight))
|
|
||||||
a.log.Info("add connection", zap.String("address", address),
|
|
||||||
zap.Float64("weight", weight), zap.Int("priority", priority))
|
|
||||||
}
|
|
||||||
|
|
||||||
a.pool, err = pool.NewPool(prm)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("failed to create connection pool", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
err = a.pool.Dial(ctx)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("failed to dial pool", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
a.initAppSettings()
|
|
||||||
a.initResolver()
|
|
||||||
a.initMetrics()
|
|
||||||
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) initAppSettings() {
|
|
||||||
a.settings = &appSettings{
|
|
||||||
Uploader: &uploader.Settings{},
|
|
||||||
Downloader: &downloader.Settings{},
|
|
||||||
TLSProvider: &certProvider{Enabled: a.cfg.IsSet(cfgTLSCertificate) || a.cfg.IsSet(cfgTLSKey)},
|
|
||||||
}
|
|
||||||
|
|
||||||
a.updateSettings()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) initResolver() {
|
|
||||||
var err error
|
|
||||||
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("failed to create resolver", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
|
||||||
resolveCfg := &resolver.Config{
|
|
||||||
NeoFS: resolver.NewNeoFSResolver(a.pool),
|
|
||||||
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
|
||||||
}
|
|
||||||
|
|
||||||
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
|
||||||
if resolveCfg.RPCAddress == "" {
|
|
||||||
order = remove(order, resolver.NNSResolver)
|
|
||||||
a.log.Warn(fmt.Sprintf("resolver '%s' won't be used since '%s' isn't provided", resolver.NNSResolver, cfgRPCEndpoint))
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(order) == 0 {
|
|
||||||
a.log.Info("container resolver will be disabled because of resolvers 'resolver_order' is empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
return order, resolveCfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) initMetrics() {
|
|
||||||
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
|
|
||||||
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
|
|
||||||
}
|
|
||||||
|
|
||||||
func newGateMetrics(logger *zap.Logger, provider GateMetricsProvider, enabled bool) *gateMetrics {
|
|
||||||
if !enabled {
|
|
||||||
logger.Warn("metrics are disabled")
|
|
||||||
}
|
|
||||||
return &gateMetrics{
|
|
||||||
logger: logger,
|
|
||||||
provider: provider,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *gateMetrics) SetEnabled(enabled bool) {
|
|
||||||
if !enabled {
|
|
||||||
m.logger.Warn("metrics are disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
m.mu.Lock()
|
|
||||||
m.enabled = enabled
|
|
||||||
m.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *gateMetrics) SetHealth(status int32) {
|
|
||||||
m.mu.RLock()
|
|
||||||
if !m.enabled {
|
|
||||||
m.mu.RUnlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m.mu.RUnlock()
|
|
||||||
|
|
||||||
m.provider.SetHealth(status)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *gateMetrics) Shutdown() {
|
|
||||||
m.mu.Lock()
|
|
||||||
if m.enabled {
|
|
||||||
m.provider.SetHealth(0)
|
|
||||||
m.enabled = false
|
|
||||||
}
|
|
||||||
m.provider.Unregister()
|
|
||||||
m.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func remove(list []string, element string) []string {
|
|
||||||
for i, item := range list {
|
|
||||||
if item == element {
|
|
||||||
return append(list[:i], list[i+1:]...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNeoFSKey(a *app) (*ecdsa.PrivateKey, error) {
|
|
||||||
walletPath := a.cfg.GetString(cfgWalletPath)
|
|
||||||
|
|
||||||
if len(walletPath) == 0 {
|
|
||||||
a.log.Info("no wallet path specified, creating ephemeral key automatically for this run")
|
|
||||||
key, err := keys.NewPrivateKey()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &key.PrivateKey, nil
|
|
||||||
}
|
|
||||||
w, err := wallet.NewWalletFromFile(walletPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var password *string
|
|
||||||
if a.cfg.IsSet(cfgWalletPassphrase) {
|
|
||||||
pwd := a.cfg.GetString(cfgWalletPassphrase)
|
|
||||||
password = &pwd
|
|
||||||
}
|
|
||||||
|
|
||||||
address := a.cfg.GetString(cfgWalletAddress)
|
|
||||||
|
|
||||||
return getKeyFromWallet(w, address, password)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*ecdsa.PrivateKey, error) {
|
|
||||||
var addr util.Uint160
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if addrStr == "" {
|
|
||||||
addr = w.GetChangeAddress()
|
|
||||||
} else {
|
|
||||||
addr, err = flags.ParseAddress(addrStr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid address")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
acc := w.GetAccount(addr)
|
|
||||||
if acc == nil {
|
|
||||||
return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if password == nil {
|
|
||||||
pwd, err := input.ReadPassword("Enter password > ")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("couldn't read password")
|
|
||||||
}
|
|
||||||
password = &pwd
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := acc.Decrypt(*password, w.Scrypt); err != nil {
|
|
||||||
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &acc.PrivateKey().PrivateKey, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) Wait() {
|
|
||||||
a.log.Info("starting application", zap.String("app_name", "neofs-http-gw"), zap.String("version", Version))
|
|
||||||
|
|
||||||
a.setHealthStatus()
|
|
||||||
|
|
||||||
<-a.webDone // wait for web-server to be stopped
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) setHealthStatus() {
|
|
||||||
a.metrics.SetHealth(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
type certProvider struct {
|
|
||||||
Enabled bool
|
|
||||||
|
|
||||||
mu sync.RWMutex
|
|
||||||
certPath string
|
|
||||||
keyPath string
|
|
||||||
cert *tls.Certificate
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
|
|
||||||
if !p.Enabled {
|
|
||||||
return nil, errors.New("cert provider: disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
p.mu.RLock()
|
|
||||||
defer p.mu.RUnlock()
|
|
||||||
return p.cert, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
|
|
||||||
if !p.Enabled {
|
|
||||||
return fmt.Errorf("tls disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p.mu.Lock()
|
|
||||||
p.certPath = certPath
|
|
||||||
p.keyPath = keyPath
|
|
||||||
p.cert = &cert
|
|
||||||
p.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) Serve(ctx context.Context) {
|
|
||||||
uploadRoutes := uploader.New(ctx, a.AppParams(), a.settings.Uploader)
|
|
||||||
downloadRoutes := downloader.New(ctx, a.AppParams(), a.settings.Downloader)
|
|
||||||
|
|
||||||
// Configure router.
|
|
||||||
a.configureRouter(uploadRoutes, downloadRoutes)
|
|
||||||
|
|
||||||
a.startServices()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
var err error
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("could not start server", zap.Error(err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
bind := a.cfg.GetString(cfgListenAddress)
|
|
||||||
|
|
||||||
if a.settings.TLSProvider.Enabled {
|
|
||||||
if err = a.settings.TLSProvider.UpdateCert(a.cfg.GetString(cfgTLSCertificate), a.cfg.GetString(cfgTLSKey)); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var lnConf net.ListenConfig
|
|
||||||
var ln net.Listener
|
|
||||||
if ln, err = lnConf.Listen(ctx, "tcp4", bind); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lnTLS := tls.NewListener(ln, &tls.Config{
|
|
||||||
GetCertificate: a.settings.TLSProvider.GetCertificate,
|
|
||||||
})
|
|
||||||
|
|
||||||
a.log.Info("running web server (TLS-enabled)", zap.String("address", bind))
|
|
||||||
err = a.webServer.Serve(lnTLS)
|
|
||||||
} else {
|
|
||||||
a.log.Info("running web server", zap.String("address", bind))
|
|
||||||
err = a.webServer.ListenAndServe(bind)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
sigs := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(sigs, syscall.SIGHUP)
|
|
||||||
|
|
||||||
LOOP:
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
break LOOP
|
|
||||||
case <-sigs:
|
|
||||||
a.configReload()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
a.log.Info("shutting down web server", zap.Error(a.webServer.Shutdown()))
|
|
||||||
|
|
||||||
a.metrics.Shutdown()
|
|
||||||
a.stopServices()
|
|
||||||
|
|
||||||
close(a.webDone)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) configReload() {
|
|
||||||
a.log.Info("SIGHUP config reload started")
|
|
||||||
if !a.cfg.IsSet(cmdConfig) {
|
|
||||||
a.log.Warn("failed to reload config because it's missed")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err := readConfig(a.cfg); err != nil {
|
|
||||||
a.log.Warn("failed to reload config", zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if lvl, err := getLogLevel(a.cfg); err != nil {
|
|
||||||
a.log.Warn("log level won't be updated", zap.Error(err))
|
|
||||||
} else {
|
|
||||||
a.logLevel.SetLevel(lvl)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
|
||||||
a.log.Warn("failed to update resolvers", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
a.stopServices()
|
|
||||||
a.startServices()
|
|
||||||
|
|
||||||
a.updateSettings()
|
|
||||||
|
|
||||||
a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
|
|
||||||
a.setHealthStatus()
|
|
||||||
|
|
||||||
a.log.Info("SIGHUP config reload completed")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) updateSettings() {
|
|
||||||
a.settings.Uploader.SetDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
|
|
||||||
a.settings.Downloader.SetZipCompression(a.cfg.GetBool(cfgZipCompression))
|
|
||||||
|
|
||||||
if err := a.settings.TLSProvider.UpdateCert(a.cfg.GetString(cfgTLSCertificate), a.cfg.GetString(cfgTLSKey)); err != nil {
|
|
||||||
a.log.Warn("failed to reload TLS certs", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) startServices() {
|
|
||||||
pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
|
|
||||||
pprofService := metrics.NewPprofService(a.log, pprofConfig)
|
|
||||||
a.services = append(a.services, pprofService)
|
|
||||||
go pprofService.Start()
|
|
||||||
|
|
||||||
prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
|
|
||||||
prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
|
|
||||||
a.services = append(a.services, prometheusService)
|
|
||||||
go prometheusService.Start()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) stopServices() {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
for _, svc := range a.services {
|
|
||||||
svc.ShutDown(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) configureRouter(uploadRoutes *uploader.Uploader, downloadRoutes *downloader.Downloader) {
|
|
||||||
r := router.New()
|
|
||||||
r.RedirectTrailingSlash = true
|
|
||||||
r.NotFound = func(r *fasthttp.RequestCtx) {
|
|
||||||
response.Error(r, "Not found", fasthttp.StatusNotFound)
|
|
||||||
}
|
|
||||||
r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
|
|
||||||
response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
|
|
||||||
}
|
|
||||||
r.POST("/upload/{cid}", a.logger(uploadRoutes.Upload))
|
|
||||||
a.log.Info("added path /upload/{cid}")
|
|
||||||
r.GET("/get/{cid}/{oid}", a.logger(downloadRoutes.DownloadByAddress))
|
|
||||||
r.HEAD("/get/{cid}/{oid}", a.logger(downloadRoutes.HeadByAddress))
|
|
||||||
a.log.Info("added path /get/{cid}/{oid}")
|
|
||||||
r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.DownloadByAttribute))
|
|
||||||
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(downloadRoutes.HeadByAttribute))
|
|
||||||
a.log.Info("added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}")
|
|
||||||
r.GET("/zip/{cid}/{prefix:*}", a.logger(downloadRoutes.DownloadZipped))
|
|
||||||
a.log.Info("added path /zip/{cid}/{prefix}")
|
|
||||||
|
|
||||||
a.webServer.Handler = r.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
|
||||||
return func(ctx *fasthttp.RequestCtx) {
|
|
||||||
a.log.Info("request", zap.String("remote", ctx.RemoteAddr().String()),
|
|
||||||
zap.ByteString("method", ctx.Method()),
|
|
||||||
zap.ByteString("path", ctx.Path()),
|
|
||||||
zap.ByteString("query", ctx.QueryArgs().QueryString()),
|
|
||||||
zap.Uint64("id", ctx.ID()))
|
|
||||||
h(ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) AppParams() *utils.AppParams {
|
|
||||||
return &utils.AppParams{
|
|
||||||
Logger: a.log,
|
|
||||||
Pool: a.pool,
|
|
||||||
Owner: a.owner,
|
|
||||||
Resolver: a.resolver,
|
|
||||||
}
|
|
||||||
}
|
|
924
cmd/http-gw/app.go
Normal file
924
cmd/http-gw/app.go
Normal file
|
@ -0,0 +1,924 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/x509"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"runtime/debug"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
|
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
"github.com/fasthttp/router"
|
||||||
|
"github.com/nspcc-dev/neo-go/cli/flags"
|
||||||
|
"github.com/nspcc-dev/neo-go/cli/input"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/util"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
app struct {
|
||||||
|
ctx context.Context
|
||||||
|
log *zap.Logger
|
||||||
|
logLevel zap.AtomicLevel
|
||||||
|
pool *pool.Pool
|
||||||
|
treePool *treepool.Pool
|
||||||
|
key *keys.PrivateKey
|
||||||
|
owner *user.ID
|
||||||
|
cfg *viper.Viper
|
||||||
|
webServer *fasthttp.Server
|
||||||
|
webDone chan struct{}
|
||||||
|
resolver *resolver.ContainerResolver
|
||||||
|
metrics *gateMetrics
|
||||||
|
services []*metrics.Service
|
||||||
|
settings *appSettings
|
||||||
|
|
||||||
|
servers []Server
|
||||||
|
unbindServers []ServerInfo
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// App is an interface for the main gateway function.
|
||||||
|
App interface {
|
||||||
|
Wait()
|
||||||
|
Serve()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option is an application option.
|
||||||
|
Option func(a *app)
|
||||||
|
|
||||||
|
gateMetrics struct {
|
||||||
|
logger *zap.Logger
|
||||||
|
provider *metrics.GateMetrics
|
||||||
|
mu sync.RWMutex
|
||||||
|
enabled bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// appSettings stores reloading parameters, so it has to provide getters and setters which use RWMutex.
|
||||||
|
appSettings struct {
|
||||||
|
reconnectInterval time.Duration
|
||||||
|
|
||||||
|
mu sync.RWMutex
|
||||||
|
defaultTimestamp bool
|
||||||
|
zipCompression bool
|
||||||
|
clientCut bool
|
||||||
|
returnIndexPage bool
|
||||||
|
indexPageTemplate string
|
||||||
|
bufferMaxSizeForPut uint64
|
||||||
|
namespaceHeader string
|
||||||
|
defaultNamespaces []string
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// WithLogger returns Option to set a specific logger.
|
||||||
|
func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
|
||||||
|
return func(a *app) {
|
||||||
|
if l == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.log = l
|
||||||
|
a.logLevel = lvl
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithConfig returns Option to use specific Viper configuration.
|
||||||
|
func WithConfig(c *viper.Viper) Option {
|
||||||
|
return func(a *app) {
|
||||||
|
if c == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.cfg = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newApp(ctx context.Context, opt ...Option) App {
|
||||||
|
a := &app{
|
||||||
|
ctx: ctx,
|
||||||
|
log: zap.L(),
|
||||||
|
cfg: viper.GetViper(),
|
||||||
|
webServer: new(fasthttp.Server),
|
||||||
|
webDone: make(chan struct{}),
|
||||||
|
}
|
||||||
|
for i := range opt {
|
||||||
|
opt[i](a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// -- setup FastHTTP server --
|
||||||
|
a.webServer.Name = "frost-http-gw"
|
||||||
|
a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
|
||||||
|
a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
|
||||||
|
a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
|
||||||
|
a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
|
||||||
|
a.webServer.DisableHeaderNamesNormalizing = true
|
||||||
|
a.webServer.NoDefaultServerHeader = true
|
||||||
|
a.webServer.NoDefaultContentType = true
|
||||||
|
a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
|
||||||
|
a.webServer.DisablePreParseMultipartForm = true
|
||||||
|
a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
|
||||||
|
// -- -- -- -- -- -- -- -- -- -- -- -- -- --
|
||||||
|
a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg)
|
||||||
|
|
||||||
|
var owner user.ID
|
||||||
|
user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
|
||||||
|
a.owner = &owner
|
||||||
|
|
||||||
|
a.setRuntimeParameters()
|
||||||
|
|
||||||
|
a.initAppSettings()
|
||||||
|
a.initResolver()
|
||||||
|
a.initMetrics()
|
||||||
|
a.initTracing(ctx)
|
||||||
|
a.loadIndexPageTemplate()
|
||||||
|
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) DefaultTimestamp() bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.defaultTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setDefaultTimestamp(val bool) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.defaultTimestamp = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) ZipCompression() bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.zipCompression
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) IndexPageEnabled() bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.returnIndexPage
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) IndexPageTemplate() string {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
if s.indexPageTemplate == "" {
|
||||||
|
return templates.DefaultIndexTemplate
|
||||||
|
}
|
||||||
|
return s.indexPageTemplate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setZipCompression(val bool) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.zipCompression = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setReturnIndexPage(val bool) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.returnIndexPage = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setIndexTemplate(val string) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.indexPageTemplate = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) loadIndexPageTemplate() {
|
||||||
|
if !a.settings.IndexPageEnabled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
reader, err := os.Open(a.cfg.GetString(cfgIndexPageTemplatePath))
|
||||||
|
if err != nil {
|
||||||
|
a.settings.setIndexTemplate("")
|
||||||
|
a.log.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tmpl, err := io.ReadAll(reader)
|
||||||
|
if err != nil {
|
||||||
|
a.settings.setIndexTemplate("")
|
||||||
|
a.log.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
a.settings.setIndexTemplate(string(tmpl))
|
||||||
|
a.log.Info(logs.SetCustomIndexPageTemplate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) ClientCut() bool {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.clientCut
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setClientCut(val bool) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.clientCut = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) BufferMaxSizeForPut() uint64 {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.bufferMaxSizeForPut
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setBufferMaxSizeForPut(val uint64) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.bufferMaxSizeForPut = val
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) initAppSettings() {
|
||||||
|
a.settings = &appSettings{
|
||||||
|
reconnectInterval: fetchReconnectInterval(a.cfg),
|
||||||
|
}
|
||||||
|
a.updateSettings()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) initResolver() {
|
||||||
|
var err error
|
||||||
|
a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
|
||||||
|
if err != nil {
|
||||||
|
a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) getResolverConfig() ([]string, *resolver.Config) {
|
||||||
|
resolveCfg := &resolver.Config{
|
||||||
|
FrostFS: frostfs.NewResolverFrostFS(a.pool),
|
||||||
|
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
|
||||||
|
Settings: a.settings,
|
||||||
|
}
|
||||||
|
|
||||||
|
order := a.cfg.GetStringSlice(cfgResolveOrder)
|
||||||
|
if resolveCfg.RPCAddress == "" {
|
||||||
|
order = remove(order, resolver.NNSResolver)
|
||||||
|
a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(order) == 0 {
|
||||||
|
a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
|
||||||
|
}
|
||||||
|
|
||||||
|
return order, resolveCfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) initMetrics() {
|
||||||
|
gateMetricsProvider := metrics.NewGateMetrics(a.pool)
|
||||||
|
a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
|
||||||
|
a.metrics.SetHealth(metrics.HealthStatusStarting)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
|
||||||
|
if !enabled {
|
||||||
|
logger.Warn(logs.MetricsAreDisabled)
|
||||||
|
}
|
||||||
|
return &gateMetrics{
|
||||||
|
logger: logger,
|
||||||
|
provider: provider,
|
||||||
|
enabled: enabled,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) isEnabled() bool {
|
||||||
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
|
|
||||||
|
return m.enabled
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) SetEnabled(enabled bool) {
|
||||||
|
if !enabled {
|
||||||
|
m.logger.Warn(logs.MetricsAreDisabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mu.Lock()
|
||||||
|
m.enabled = enabled
|
||||||
|
m.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) SetHealth(status metrics.HealthStatus) {
|
||||||
|
if !m.isEnabled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.provider.SetHealth(status)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) SetVersion(ver string) {
|
||||||
|
if !m.isEnabled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.provider.SetVersion(ver)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shutdown publishes the shutting-down status (only if metrics were still
// enabled), disables further updates and unregisters all collectors.
// Everything happens under the write lock so no concurrent Set* call can
// interleave between the status update and unregistration.
func (m *gateMetrics) Shutdown() {
	m.mu.Lock()
	if m.enabled {
		m.provider.SetHealth(metrics.HealthStatusShuttingDown)
		// prevent any further updates after shutdown has begun
		m.enabled = false
	}
	m.provider.Unregister()
	m.mu.Unlock()
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) MarkHealthy(endpoint string) {
|
||||||
|
if !m.isEnabled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.provider.MarkHealthy(endpoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *gateMetrics) MarkUnhealthy(endpoint string) {
|
||||||
|
if !m.isEnabled() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.provider.MarkUnhealthy(endpoint)
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove returns list without the first occurrence of element; the input is
// returned unchanged when element is absent.
//
// The previous append-based implementation (append(list[:i], list[i+1:]...))
// shifted elements in place, silently corrupting the caller's slice through
// the shared backing array. A fresh slice is allocated instead.
func remove(list []string, element string) []string {
	for i, item := range list {
		if item == element {
			result := make([]string, 0, len(list)-1)
			result = append(result, list[:i]...)
			return append(result, list[i+1:]...)
		}
	}
	return list
}
|
||||||
|
|
||||||
|
func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error) {
|
||||||
|
walletPath := cfg.GetString(cfgWalletPath)
|
||||||
|
|
||||||
|
if len(walletPath) == 0 {
|
||||||
|
log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
|
||||||
|
key, err := keys.NewPrivateKey()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
w, err := wallet.NewWalletFromFile(walletPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var password *string
|
||||||
|
if cfg.IsSet(cfgWalletPassphrase) {
|
||||||
|
pwd := cfg.GetString(cfgWalletPassphrase)
|
||||||
|
password = &pwd
|
||||||
|
}
|
||||||
|
|
||||||
|
address := cfg.GetString(cfgWalletAddress)
|
||||||
|
|
||||||
|
return getKeyFromWallet(w, address, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys.PrivateKey, error) {
|
||||||
|
var addr util.Uint160
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if addrStr == "" {
|
||||||
|
addr = w.GetChangeAddress()
|
||||||
|
} else {
|
||||||
|
addr, err = flags.ParseAddress(addrStr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid address")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
acc := w.GetAccount(addr)
|
||||||
|
if acc == nil {
|
||||||
|
return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if password == nil {
|
||||||
|
pwd, err := input.ReadPassword("Enter password > ")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("couldn't read password")
|
||||||
|
}
|
||||||
|
password = &pwd
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := acc.Decrypt(*password, w.Scrypt); err != nil {
|
||||||
|
return nil, fmt.Errorf("couldn't decrypt account: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return acc.PrivateKey(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait publishes the application version, marks the gateway as ready, and
// blocks until Serve signals completion by closing webDone.
func (a *app) Wait() {
	a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))

	a.metrics.SetVersion(Version)
	a.setHealthStatus()

	<-a.webDone // wait for web-server to be stopped
}
|
||||||
|
|
||||||
|
// setHealthStatus reports the gateway as ready to serve traffic.
func (a *app) setHealthStatus() {
	a.metrics.SetHealth(metrics.HealthStatusReady)
}
|
||||||
|
|
||||||
|
func (a *app) Serve() {
|
||||||
|
handler := handler.New(a.AppParams(), a.settings, tree.NewTree(services.NewPoolWrapper(a.treePool)))
|
||||||
|
|
||||||
|
// Configure router.
|
||||||
|
a.configureRouter(handler)
|
||||||
|
|
||||||
|
a.startServices()
|
||||||
|
a.initServers(a.ctx)
|
||||||
|
|
||||||
|
servs := a.getServers()
|
||||||
|
|
||||||
|
for i := range servs {
|
||||||
|
go func(i int) {
|
||||||
|
a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
|
||||||
|
if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
|
||||||
|
a.metrics.MarkUnhealthy(servs[i].Address())
|
||||||
|
a.log.Fatal(logs.ListenAndServe, zap.Error(err))
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(a.unbindServers) != 0 {
|
||||||
|
a.scheduleReconnect(a.ctx, a.webServer)
|
||||||
|
}
|
||||||
|
|
||||||
|
sigs := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sigs, syscall.SIGHUP)
|
||||||
|
|
||||||
|
LOOP:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-a.ctx.Done():
|
||||||
|
break LOOP
|
||||||
|
case <-sigs:
|
||||||
|
a.configReload(a.ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
|
||||||
|
|
||||||
|
a.metrics.Shutdown()
|
||||||
|
a.stopServices()
|
||||||
|
a.shutdownTracing()
|
||||||
|
|
||||||
|
close(a.webDone)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) shutdownTracing() {
|
||||||
|
const tracingShutdownTimeout = 5 * time.Second
|
||||||
|
shdnCtx, cancel := context.WithTimeout(context.Background(), tracingShutdownTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err := tracing.Shutdown(shdnCtx); err != nil {
|
||||||
|
a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// configReload re-reads the configuration on SIGHUP and applies everything
// that can change at runtime: log level, resolvers, server TLS parameters,
// runtime limits, auxiliary services, gateway settings, metrics state,
// tracing and the index page template. The steps run in this fixed order;
// each failure is logged and the remaining steps still run (except when the
// config itself cannot be re-read).
func (a *app) configReload(ctx context.Context) {
	a.log.Info(logs.SIGHUPConfigReloadStarted)
	// reload only makes sense when an explicit config file/dir was given
	if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
		a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
		return
	}
	if err := readInConfig(a.cfg); err != nil {
		a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
		return
	}

	if lvl, err := getLogLevel(a.cfg); err != nil {
		a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
	} else {
		a.logLevel.SetLevel(lvl)
	}

	if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
		a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
	}

	if err := a.updateServers(); err != nil {
		a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
	}

	a.setRuntimeParameters()

	// restart auxiliary services so they pick up new addresses/flags
	a.stopServices()
	a.startServices()

	a.updateSettings()

	a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
	a.initTracing(ctx)
	a.loadIndexPageTemplate()
	a.setHealthStatus()

	a.log.Info(logs.SIGHUPConfigReloadCompleted)
}
|
||||||
|
|
||||||
|
// updateSettings refreshes every mutable gateway setting from the current
// config. Called once at startup and again on every SIGHUP reload.
func (a *app) updateSettings() {
	a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
	a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
	a.settings.setReturnIndexPage(a.cfg.GetBool(cfgIndexPageEnabled))
	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
	a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
	a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
	a.settings.setDefaultNamespaces(a.cfg.GetStringSlice(cfgResolveDefaultNamespaces))
}
|
||||||
|
|
||||||
|
func (a *app) startServices() {
|
||||||
|
pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
|
||||||
|
pprofService := metrics.NewPprofService(a.log, pprofConfig)
|
||||||
|
a.services = append(a.services, pprofService)
|
||||||
|
go pprofService.Start()
|
||||||
|
|
||||||
|
prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
|
||||||
|
prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
|
||||||
|
a.services = append(a.services, prometheusService)
|
||||||
|
go prometheusService.Start()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) stopServices() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
for _, svc := range a.services {
|
||||||
|
svc.ShutDown(ctx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// configureRouter builds the fasthttp router and installs it on the web
// server. Every endpoint goes through the same middleware chain:
// logger -> canonicalizer -> tokenizer -> tracer -> reqNamespace -> handler.
func (a *app) configureRouter(handler *handler.Handler) {
	r := router.New()
	r.RedirectTrailingSlash = true
	r.NotFound = func(r *fasthttp.RequestCtx) {
		response.Error(r, "Not found", fasthttp.StatusNotFound)
	}
	r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
		response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
	}

	// object upload
	r.POST("/upload/{cid}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.Upload))))))
	a.log.Info(logs.AddedPathUploadCid)
	// download/head by address or bucket name
	r.GET("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAddressOrBucketName))))))
	r.HEAD("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAddressOrBucketName))))))
	a.log.Info(logs.AddedPathGetCidOid)
	// download/head by object attribute
	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAttribute))))))
	r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAttribute))))))
	a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
	// zipped download by prefix
	r.GET("/zip/{cid}/{prefix:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadZipped))))))
	a.log.Info(logs.AddedPathZipCidPrefix)

	a.webServer.Handler = r.Handler
}
|
||||||
|
|
||||||
|
func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||||
|
return func(req *fasthttp.RequestCtx) {
|
||||||
|
a.log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
|
||||||
|
zap.ByteString("method", req.Method()),
|
||||||
|
zap.ByteString("path", req.Path()),
|
||||||
|
zap.ByteString("query", req.QueryArgs().QueryString()),
|
||||||
|
zap.Uint64("id", req.ID()))
|
||||||
|
h(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// canonicalizer is middleware that normalizes the names of all headers the
// gateway relies on, while leaving user attribute headers (the
// UserAttributeHeaderPrefix ones) untouched so their original casing is kept.
func (a *app) canonicalizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		// regardless of DisableHeaderNamesNormalizing setting, some headers
		// MUST be normalized in order to process execution. They are normalized
		// here.

		toAddKeys := make([][]byte, 0, 10)
		toAddValues := make([][]byte, 0, 10)
		prefix := []byte(utils.UserAttributeHeaderPrefix)

		// collect every header except the user attribute ones
		req.Request.Header.VisitAll(func(k, v []byte) {
			if bytes.HasPrefix(k, prefix) {
				return
			}
			toAddKeys = append(toAddKeys, k)
			toAddValues = append(toAddValues, v)
		})

		// this is safe to do after all headers were read into header structure
		req.Request.Header.EnableNormalizing()

		// re-setting the collected headers rewrites their names in normalized form
		for i := range toAddKeys {
			req.Request.Header.SetBytesKV(toAddKeys[i], toAddValues[i])
		}

		// return normalization setting back
		req.Request.Header.DisableNormalizing()

		h(req)
	}
}
|
||||||
|
|
||||||
|
// tokenizer is middleware that extracts the bearer token from the request
// and stores it in the request context for downstream handlers. A request
// with a malformed token is rejected with 400 and never reaches h.
func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
		if err != nil {
			a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Uint64("id", req.ID()), zap.Error(err))
			response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}
		utils.SetContextToRequest(appCtx, req)
		h(req)
	}
}
|
||||||
|
|
||||||
|
// tracer is middleware that opens an HTTP server span for the request,
// records trace info once the handler chain finishes, and tags the tree-pool
// request ID with the fasthttp request ID.
func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		appCtx := utils.GetContextFromRequest(req)

		appCtx, span := utils.StartHTTPServerSpan(appCtx, req, "REQUEST")
		// record status/trace info after h has populated the response
		defer func() {
			utils.SetHTTPTraceInfo(appCtx, span, req)
			span.End()
		}()

		appCtx = treepool.SetRequestID(appCtx, strconv.FormatUint(req.ID(), 10))

		utils.SetContextToRequest(appCtx, req)
		h(req)
	}
}
|
||||||
|
|
||||||
|
func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
|
||||||
|
return func(req *fasthttp.RequestCtx) {
|
||||||
|
appCtx := utils.GetContextFromRequest(req)
|
||||||
|
|
||||||
|
nsBytes := req.Request.Header.Peek(a.settings.NamespaceHeader())
|
||||||
|
appCtx = middleware.SetNamespace(appCtx, string(nsBytes))
|
||||||
|
|
||||||
|
utils.SetContextToRequest(appCtx, req)
|
||||||
|
h(req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) AppParams() *handler.AppParams {
|
||||||
|
return &handler.AppParams{
|
||||||
|
Logger: a.log,
|
||||||
|
FrostFS: frostfs.NewFrostFS(a.pool),
|
||||||
|
Owner: a.owner,
|
||||||
|
Resolver: a.resolver,
|
||||||
|
Cache: cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// initServers binds a listener for every configured endpoint. Endpoints that
// fail to bind are remembered in unbindServers for later reconnect attempts
// (see scheduleReconnect) and marked unhealthy; ending up with no healthy
// server at all is fatal.
func (a *app) initServers(ctx context.Context) {
	serversInfo := fetchServers(a.cfg, a.log)

	a.servers = make([]Server, 0, len(serversInfo))
	for _, serverInfo := range serversInfo {
		fields := []zap.Field{
			zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
			zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
		}
		srv, err := newServer(ctx, serverInfo)
		if err != nil {
			// keep the info so the background reconnect loop can retry it
			a.unbindServers = append(a.unbindServers, serverInfo)
			a.metrics.MarkUnhealthy(serverInfo.Address)
			a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
			continue
		}
		a.metrics.MarkHealthy(serverInfo.Address)

		a.servers = append(a.servers, srv)
		a.log.Info(logs.AddServer, fields...)
	}

	if len(a.servers) == 0 {
		a.log.Fatal(logs.NoHealthyServers)
	}
}
|
||||||
|
|
||||||
|
// updateServers applies the freshly read server configuration on reload:
// TLS certificates are refreshed for already-bound servers, and pending
// (unbound) server entries are updated in place. It fails when none of the
// configured addresses matches a known server.
func (a *app) updateServers() error {
	serversInfo := fetchServers(a.cfg, a.log)

	a.mu.Lock()
	defer a.mu.Unlock()

	var found bool
	for _, serverInfo := range serversInfo {
		ser := a.getServer(serverInfo.Address)
		if ser != nil {
			if serverInfo.TLS.Enabled {
				if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
					return fmt.Errorf("failed to update tls certs: %w", err)
				}
				found = true
			}
		} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
			found = true
		}
	}

	if !found {
		return fmt.Errorf("invalid servers configuration: no known server found")
	}

	return nil
}
|
||||||
|
|
||||||
|
func (a *app) getServers() []Server {
|
||||||
|
a.mu.RLock()
|
||||||
|
defer a.mu.RUnlock()
|
||||||
|
return a.servers
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) getServer(address string) Server {
|
||||||
|
for i := range a.servers {
|
||||||
|
if a.servers[i].Address() == address {
|
||||||
|
return a.servers[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *app) updateUnbindServerInfo(info ServerInfo) bool {
|
||||||
|
for i := range a.unbindServers {
|
||||||
|
if a.unbindServers[i].Address == info.Address {
|
||||||
|
a.unbindServers[i] = info
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// initTracing configures (or reconfigures, on reload) the tracing exporter
// from the application config. A configured trusted CA file is loaded into
// the exporter's cert pool; any failure aborts tracing setup with a warning
// rather than stopping the application.
func (a *app) initTracing(ctx context.Context) {
	// the first bound listener address doubles as the instance ID
	instanceID := ""
	if len(a.servers) > 0 {
		instanceID = a.servers[0].Address()
	}
	cfg := tracing.Config{
		Enabled:    a.cfg.GetBool(cfgTracingEnabled),
		Exporter:   tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
		Endpoint:   a.cfg.GetString(cfgTracingEndpoint),
		Service:    "frostfs-http-gw",
		InstanceID: instanceID,
		Version:    Version,
	}

	if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
		caBytes, err := os.ReadFile(trustedCa)
		if err != nil {
			a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
			return
		}
		certPool := x509.NewCertPool()
		ok := certPool.AppendCertsFromPEM(caBytes)
		if !ok {
			a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
			return
		}
		cfg.ServerCaCertPool = certPool
	}

	updated, err := tracing.Setup(ctx, cfg)
	if err != nil {
		a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
	}
	if updated {
		a.log.Info(logs.TracingConfigUpdated)
	}
}
|
||||||
|
|
||||||
|
func (a *app) setRuntimeParameters() {
|
||||||
|
if len(os.Getenv("GOMEMLIMIT")) != 0 {
|
||||||
|
// default limit < yaml limit < app env limit < GOMEMLIMIT
|
||||||
|
a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
|
||||||
|
previous := debug.SetMemoryLimit(softMemoryLimit)
|
||||||
|
if softMemoryLimit != previous {
|
||||||
|
a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
|
||||||
|
zap.Int64("new_value", softMemoryLimit),
|
||||||
|
zap.Int64("old_value", previous))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) NamespaceHeader() string {
|
||||||
|
s.mu.RLock()
|
||||||
|
defer s.mu.RUnlock()
|
||||||
|
return s.namespaceHeader
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setNamespaceHeader(nsHeader string) {
|
||||||
|
s.mu.Lock()
|
||||||
|
s.namespaceHeader = nsHeader
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
|
||||||
|
s.mu.RLock()
|
||||||
|
namespaces := s.defaultNamespaces
|
||||||
|
s.mu.RUnlock()
|
||||||
|
if slices.Contains(namespaces, ns) {
|
||||||
|
return v2container.SysAttributeZoneDefault, true
|
||||||
|
}
|
||||||
|
|
||||||
|
return ns + ".ns", false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *appSettings) setDefaultNamespaces(namespaces []string) {
|
||||||
|
for i := range namespaces { // to be set namespaces in env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
|
||||||
|
namespaces[i] = strings.Trim(namespaces[i], "\"")
|
||||||
|
}
|
||||||
|
|
||||||
|
s.mu.Lock()
|
||||||
|
s.defaultNamespaces = namespaces
|
||||||
|
s.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// scheduleReconnect starts a background loop that retries binding the
// servers recorded in unbindServers every reconnectInterval, until all of
// them are up or ctx is cancelled.
func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) {
	go func() {
		t := time.NewTicker(a.settings.reconnectInterval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				if a.tryReconnect(ctx, srv) {
					// every pending server is bound — nothing left to retry
					return
				}
				t.Reset(a.settings.reconnectInterval)
			case <-ctx.Done():
				return
			}
		}
	}()
}
|
||||||
|
|
||||||
|
func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
|
||||||
|
a.mu.Lock()
|
||||||
|
defer a.mu.Unlock()
|
||||||
|
|
||||||
|
a.log.Info(logs.ServerReconnecting)
|
||||||
|
var failedServers []ServerInfo
|
||||||
|
|
||||||
|
for _, serverInfo := range a.unbindServers {
|
||||||
|
fields := []zap.Field{
|
||||||
|
zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
|
||||||
|
zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
|
||||||
|
}
|
||||||
|
|
||||||
|
srv, err := newServer(ctx, serverInfo)
|
||||||
|
if err != nil {
|
||||||
|
a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
|
||||||
|
failedServers = append(failedServers, serverInfo)
|
||||||
|
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
|
||||||
|
a.metrics.MarkHealthy(serverInfo.Address)
|
||||||
|
if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
a.log.Warn(logs.ListenAndServe, zap.Error(err))
|
||||||
|
a.metrics.MarkUnhealthy(serverInfo.Address)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
a.servers = append(a.servers, srv)
|
||||||
|
a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
|
||||||
|
}
|
||||||
|
|
||||||
|
a.unbindServers = failedServers
|
||||||
|
|
||||||
|
return len(a.unbindServers) == 0
|
||||||
|
}
|
|
@ -1,35 +1,42 @@
|
||||||
|
//go:build integration
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/zip"
|
"archive/zip"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"mime/multipart"
|
"mime/multipart"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/container"
|
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/container/acl"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"github.com/spf13/viper"
|
"github.com/spf13/viper"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/testcontainers/testcontainers-go"
|
"github.com/testcontainers/testcontainers-go"
|
||||||
"github.com/testcontainers/testcontainers-go/wait"
|
"github.com/testcontainers/testcontainers-go/wait"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
)
|
)
|
||||||
|
|
||||||
const attributeFilePath = "FilePath"
|
|
||||||
|
|
||||||
type putResponse struct {
|
type putResponse struct {
|
||||||
CID string `json:"container_id"`
|
CID string `json:"container_id"`
|
||||||
OID string `json:"object_id"`
|
OID string `json:"object_id"`
|
||||||
|
@ -37,25 +44,26 @@ type putResponse struct {
|
||||||
|
|
||||||
const (
|
const (
|
||||||
testContainerName = "friendly"
|
testContainerName = "friendly"
|
||||||
versionWithNativeNames = "0.27.5"
|
|
||||||
testListenAddress = "localhost:8082"
|
testListenAddress = "localhost:8082"
|
||||||
testHost = "http://" + testListenAddress
|
testHost = "http://" + testListenAddress
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestIntegration(t *testing.T) {
|
func TestIntegration(t *testing.T) {
|
||||||
rootCtx := context.Background()
|
rootCtx := context.Background()
|
||||||
aioImage := "nspccdev/neofs-aio-testcontainer:"
|
aioImage := "truecloudlab/frostfs-aio:"
|
||||||
versions := []string{
|
versions := []string{
|
||||||
"0.27.5",
|
"1.2.7",
|
||||||
"0.28.1",
|
"1.3.0",
|
||||||
"0.29.0",
|
"1.5.0",
|
||||||
"0.30.0",
|
|
||||||
"0.32.0",
|
|
||||||
"latest",
|
|
||||||
}
|
}
|
||||||
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
|
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
file, err := os.CreateTemp("", "wallet")
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer os.Remove(file.Name())
|
||||||
|
makeTempWallet(t, key, file.Name())
|
||||||
|
|
||||||
var ownerID user.ID
|
var ownerID user.ID
|
||||||
user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)
|
user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)
|
||||||
|
|
||||||
|
@ -63,15 +71,21 @@ func TestIntegration(t *testing.T) {
|
||||||
ctx, cancel2 := context.WithCancel(rootCtx)
|
ctx, cancel2 := context.WithCancel(rootCtx)
|
||||||
|
|
||||||
aioContainer := createDockerContainer(ctx, t, aioImage+version)
|
aioContainer := createDockerContainer(ctx, t, aioImage+version)
|
||||||
server, cancel := runServer()
|
server, cancel := runServer(file.Name())
|
||||||
clientPool := getPool(ctx, t, key)
|
clientPool := getPool(ctx, t, key)
|
||||||
CID, err := createContainer(ctx, t, clientPool, ownerID, version)
|
CID, err := createContainer(ctx, t, clientPool, ownerID, version)
|
||||||
require.NoError(t, err, version)
|
require.NoError(t, err, version)
|
||||||
|
|
||||||
|
token := makeBearerToken(t, key, ownerID, version)
|
||||||
|
|
||||||
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
|
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
|
||||||
|
t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
|
||||||
|
t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
|
||||||
|
t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
|
||||||
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
|
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
|
||||||
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
|
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
|
||||||
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
|
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
|
||||||
|
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })
|
||||||
|
|
||||||
cancel()
|
cancel()
|
||||||
server.Wait()
|
server.Wait()
|
||||||
|
@ -81,13 +95,16 @@ func TestIntegration(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func runServer() (App, context.CancelFunc) {
|
func runServer(pathToWallet string) (App, context.CancelFunc) {
|
||||||
cancelCtx, cancel := context.WithCancel(context.Background())
|
cancelCtx, cancel := context.WithCancel(context.Background())
|
||||||
|
|
||||||
v := getDefaultConfig()
|
v := getDefaultConfig()
|
||||||
l, lvl := newLogger(v)
|
v.Set(cfgWalletPath, pathToWallet)
|
||||||
|
v.Set(cfgWalletPassphrase, "")
|
||||||
|
|
||||||
|
l, lvl := newStdoutLogger(zapcore.DebugLevel)
|
||||||
application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
|
application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
|
||||||
go application.Serve(cancelCtx)
|
go application.Serve()
|
||||||
|
|
||||||
return application, cancel
|
return application, cancel
|
||||||
}
|
}
|
||||||
|
@ -96,13 +113,42 @@ func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, vers
|
||||||
url := testHost + "/upload/" + CID.String()
|
url := testHost + "/upload/" + CID.String()
|
||||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
makePutRequestAndCheck(ctx, t, p, CID, url)
|
||||||
|
|
||||||
if version >= versionWithNativeNames {
|
|
||||||
url = testHost + "/upload/" + testContainerName
|
url = testHost + "/upload/" + testContainerName
|
||||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
makePutRequestAndCheck(ctx, t, p, CID, url)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func putWithBearerTokenInHeader(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
|
||||||
|
url := testHost + "/upload/" + CID.String()
|
||||||
|
|
||||||
|
request, content, attributes := makePutRequest(t, url)
|
||||||
|
request.Header.Set("Authorization", "Bearer "+token)
|
||||||
|
resp, err := http.DefaultClient.Do(request)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func putWithBearerTokenInCookie(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
|
||||||
|
url := testHost + "/upload/" + CID.String()
|
||||||
|
|
||||||
|
request, content, attributes := makePutRequest(t, url)
|
||||||
|
request.AddCookie(&http.Cookie{Name: "Bearer", Value: token})
|
||||||
|
resp, err := http.DefaultClient.Do(request)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
|
||||||
}
|
}
|
||||||
|
|
||||||
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
|
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
|
||||||
|
request, content, attributes := makePutRequest(t, url)
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(request)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
checkPutResponse(ctx, t, p, cnrID, resp, content, attributes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string]string) {
|
||||||
content := "content of file"
|
content := "content of file"
|
||||||
keyAttr, valAttr := "User-Attribute", "user value"
|
keyAttr, valAttr := "User-Attribute", "user value"
|
||||||
attributes := map[string]string{
|
attributes := map[string]string{
|
||||||
|
@ -124,9 +170,10 @@ func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnr
|
||||||
request.Header.Set("Content-Type", w.FormDataContentType())
|
request.Header.Set("Content-Type", w.FormDataContentType())
|
||||||
request.Header.Set("X-Attribute-"+keyAttr, valAttr)
|
request.Header.Set("X-Attribute-"+keyAttr, valAttr)
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(request)
|
return request, content, attributes
|
||||||
require.NoError(t, err)
|
}
|
||||||
|
|
||||||
|
func checkPutResponse(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, resp *http.Response, content string, attributes map[string]string) {
|
||||||
defer func() {
|
defer func() {
|
||||||
err := resp.Body.Close()
|
err := resp.Body.Close()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -173,6 +220,43 @@ func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnr
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
|
||||||
|
url := testHost + "/upload/" + CID.String()
|
||||||
|
|
||||||
|
attr := "X-Attribute-User-Attribute"
|
||||||
|
content := "content of file"
|
||||||
|
valOne, valTwo := "first_value", "second_value"
|
||||||
|
fileName := "newFile.txt"
|
||||||
|
|
||||||
|
var buff bytes.Buffer
|
||||||
|
w := multipart.NewWriter(&buff)
|
||||||
|
fw, err := w.CreateFormFile("file", fileName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = io.Copy(fw, bytes.NewBufferString(content))
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = w.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
request, err := http.NewRequest(http.MethodPost, url, &buff)
|
||||||
|
require.NoError(t, err)
|
||||||
|
request.Header.Set("Content-Type", w.FormDataContentType())
|
||||||
|
request.Header.Add(attr, valOne)
|
||||||
|
request.Header.Add(attr, valTwo)
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(request)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := resp.Body.Close()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
body, err := io.ReadAll(resp.Body)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
|
||||||
|
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
||||||
content := "content of file"
|
content := "content of file"
|
||||||
attributes := map[string]string{
|
attributes := map[string]string{
|
||||||
|
@ -185,12 +269,10 @@ func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkGetResponse(t, resp, content, attributes)
|
checkGetResponse(t, resp, content, attributes)
|
||||||
|
|
||||||
if version >= versionWithNativeNames {
|
|
||||||
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
|
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkGetResponse(t, resp, content, attributes)
|
checkGetResponse(t, resp, content, attributes)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
|
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
|
||||||
defer func() {
|
defer func() {
|
||||||
|
@ -239,18 +321,16 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
||||||
|
|
||||||
if version >= versionWithNativeNames {
|
|
||||||
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
|
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
||||||
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
|
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
|
||||||
contents := []string{"content of file1", "content of file2"}
|
contents := []string{"content of file1", "content of file2"}
|
||||||
attributes1 := map[string]string{attributeFilePath: names[0]}
|
attributes1 := map[string]string{object.AttributeFilePath: names[0]}
|
||||||
attributes2 := map[string]string{attributeFilePath: names[1]}
|
attributes2 := map[string]string{object.AttributeFilePath: names[1]}
|
||||||
|
|
||||||
putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
|
putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
|
||||||
putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
|
putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
|
||||||
|
@ -258,11 +338,9 @@ func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID us
|
||||||
baseURL := testHost + "/zip/" + CID.String()
|
baseURL := testHost + "/zip/" + CID.String()
|
||||||
makeZipTest(t, baseURL, names, contents)
|
makeZipTest(t, baseURL, names, contents)
|
||||||
|
|
||||||
if version >= versionWithNativeNames {
|
|
||||||
baseURL = testHost + "/zip/" + testContainerName
|
baseURL = testHost + "/zip/" + testContainerName
|
||||||
makeZipTest(t, baseURL, names, contents)
|
makeZipTest(t, baseURL, names, contents)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
|
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
|
||||||
url := baseURL + "/zipfolder"
|
url := baseURL + "/zipfolder"
|
||||||
|
@ -313,6 +391,40 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
|
||||||
|
content := "content of file"
|
||||||
|
attributes := map[string]string{
|
||||||
|
"some-attr": "some-get-value",
|
||||||
|
}
|
||||||
|
|
||||||
|
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set(defaultNamespaceHeader, "")
|
||||||
|
|
||||||
|
resp, err := http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkGetResponse(t, resp, content, attributes)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set(defaultNamespaceHeader, "root")
|
||||||
|
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
checkGetResponse(t, resp, content, attributes)
|
||||||
|
|
||||||
|
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header.Set(defaultNamespaceHeader, "root2")
|
||||||
|
|
||||||
|
resp, err = http.DefaultClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
|
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
|
||||||
req := testcontainers.ContainerRequest{
|
req := testcontainers.ContainerRequest{
|
||||||
Image: image,
|
Image: image,
|
||||||
|
@ -337,7 +449,7 @@ func getDefaultConfig() *viper.Viper {
|
||||||
v.SetDefault(cfgPeers+".0.priority", 1)
|
v.SetDefault(cfgPeers+".0.priority", 1)
|
||||||
|
|
||||||
v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
|
v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
|
||||||
v.SetDefault(cfgListenAddress, testListenAddress)
|
v.SetDefault("server.0.address", testListenAddress)
|
||||||
|
|
||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
@ -369,11 +481,11 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
|
||||||
|
|
||||||
container.SetCreationTime(&cnr, time.Now())
|
container.SetCreationTime(&cnr, time.Now())
|
||||||
|
|
||||||
if version >= versionWithNativeNames {
|
|
||||||
var domain container.Domain
|
var domain container.Domain
|
||||||
domain.SetName(testContainerName)
|
domain.SetName(testContainerName)
|
||||||
container.WriteDomain(&cnr, domain)
|
|
||||||
}
|
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
|
||||||
|
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
|
||||||
|
|
||||||
var waitPrm pool.WaitParams
|
var waitPrm pool.WaitParams
|
||||||
waitPrm.SetTimeout(15 * time.Second)
|
waitPrm.SetTimeout(15 * time.Second)
|
||||||
|
@ -395,7 +507,7 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
|
||||||
func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
|
func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
|
||||||
obj := object.New()
|
obj := object.New()
|
||||||
obj.SetContainerID(CID)
|
obj.SetContainerID(CID)
|
||||||
obj.SetOwnerID(&ownerID)
|
obj.SetOwnerID(ownerID)
|
||||||
|
|
||||||
var attrs []object.Attribute
|
var attrs []object.Attribute
|
||||||
for key, val := range attributes {
|
for key, val := range attributes {
|
||||||
|
@ -415,3 +527,37 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
||||||
|
|
||||||
return id
|
return id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
|
||||||
|
tkn := new(bearer.Token)
|
||||||
|
tkn.ForUser(ownerID)
|
||||||
|
tkn.SetExp(10000)
|
||||||
|
|
||||||
|
if version == "1.2.7" {
|
||||||
|
tkn.SetEACLTable(*eacl.NewTable())
|
||||||
|
} else {
|
||||||
|
tkn.SetImpersonate(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := tkn.Sign(key.PrivateKey)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
||||||
|
require.NotEmpty(t, t64)
|
||||||
|
|
||||||
|
return t64
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
|
||||||
|
w, err := wallet.NewWallet(path)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
acc := wallet.NewAccountFromPrivateKey(key)
|
||||||
|
err = acc.Encrypt("", w.Scrypt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
w.AddAccount(acc)
|
||||||
|
|
||||||
|
err = w.Save()
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
|
@ -9,9 +9,9 @@ import (
|
||||||
func main() {
|
func main() {
|
||||||
globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||||
v := settings()
|
v := settings()
|
||||||
logger, atomicLevel := newLogger(v)
|
logger, atomicLevel := pickLogger(v)
|
||||||
|
|
||||||
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
|
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
|
||||||
go application.Serve(globalContext)
|
go application.Serve()
|
||||||
application.Wait()
|
application.Wait()
|
||||||
}
|
}
|
124
cmd/http-gw/server.go
Normal file
124
cmd/http-gw/server.go
Normal file
|
@ -0,0 +1,124 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/tls"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
ServerInfo struct {
|
||||||
|
Address string
|
||||||
|
TLS ServerTLSInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
ServerTLSInfo struct {
|
||||||
|
Enabled bool
|
||||||
|
CertFile string
|
||||||
|
KeyFile string
|
||||||
|
}
|
||||||
|
|
||||||
|
Server interface {
|
||||||
|
Address() string
|
||||||
|
Listener() net.Listener
|
||||||
|
UpdateCert(certFile, keyFile string) error
|
||||||
|
}
|
||||||
|
|
||||||
|
server struct {
|
||||||
|
address string
|
||||||
|
listener net.Listener
|
||||||
|
tlsProvider *certProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
certProvider struct {
|
||||||
|
Enabled bool
|
||||||
|
|
||||||
|
mu sync.RWMutex
|
||||||
|
certPath string
|
||||||
|
keyPath string
|
||||||
|
cert *tls.Certificate
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *server) Address() string {
|
||||||
|
return s.address
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) Listener() net.Listener {
|
||||||
|
return s.listener
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) UpdateCert(certFile, keyFile string) error {
|
||||||
|
return s.tlsProvider.UpdateCert(certFile, keyFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
|
||||||
|
var lic net.ListenConfig
|
||||||
|
ln, err := lic.Listen(ctx, "tcp", serverInfo.Address)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not prepare listener: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tlsProvider := &certProvider{
|
||||||
|
Enabled: serverInfo.TLS.Enabled,
|
||||||
|
}
|
||||||
|
|
||||||
|
if serverInfo.TLS.Enabled {
|
||||||
|
if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||||
|
lnErr := ln.Close()
|
||||||
|
return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ln = tls.NewListener(ln, &tls.Config{
|
||||||
|
GetCertificate: tlsProvider.GetCertificate,
|
||||||
|
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &server{
|
||||||
|
address: serverInfo.Address,
|
||||||
|
listener: ln,
|
||||||
|
tlsProvider: tlsProvider,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||||
|
if !p.Enabled {
|
||||||
|
return nil, errors.New("cert provider: disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
p.mu.RLock()
|
||||||
|
defer p.mu.RUnlock()
|
||||||
|
return p.cert, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
|
||||||
|
if !p.Enabled {
|
||||||
|
return fmt.Errorf("tls disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p.mu.Lock()
|
||||||
|
p.certPath = certPath
|
||||||
|
p.keyPath = keyPath
|
||||||
|
p.cert = &cert
|
||||||
|
p.mu.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *certProvider) FilePaths() (string, string) {
|
||||||
|
if !p.Enabled {
|
||||||
|
return "", ""
|
||||||
|
}
|
||||||
|
|
||||||
|
p.mu.RLock()
|
||||||
|
defer p.mu.RUnlock()
|
||||||
|
return p.certPath, p.keyPath
|
||||||
|
}
|
119
cmd/http-gw/server_test.go
Normal file
119
cmd/http-gw/server_test.go
Normal file
|
@ -0,0 +1,119 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"crypto/x509/pkix"
|
||||||
|
"encoding/pem"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
expHeaderKey = "Foo"
|
||||||
|
expHeaderValue = "Bar"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHTTP2TLS(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
certPath, keyPath := prepareTestCerts(t)
|
||||||
|
|
||||||
|
srv := &http.Server{
|
||||||
|
Handler: http.HandlerFunc(testHandler),
|
||||||
|
}
|
||||||
|
|
||||||
|
tlsListener, err := newServer(ctx, ServerInfo{
|
||||||
|
Address: ":0",
|
||||||
|
TLS: ServerTLSInfo{
|
||||||
|
Enabled: true,
|
||||||
|
CertFile: certPath,
|
||||||
|
KeyFile: keyPath,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
|
||||||
|
addr := fmt.Sprintf("https://localhost:%d", port)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
_ = srv.Serve(tlsListener.Listener())
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Server is running, now send HTTP/2 request
|
||||||
|
|
||||||
|
tlsClientConfig := &tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
|
||||||
|
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
|
||||||
|
|
||||||
|
req, err := http.NewRequest("GET", addr, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
req.Header[expHeaderKey] = []string{expHeaderValue}
|
||||||
|
|
||||||
|
resp, err := cliHTTP1.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
|
||||||
|
resp, err = cliHTTP2.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, http.StatusOK, resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testHandler(resp http.ResponseWriter, req *http.Request) {
|
||||||
|
hdr, ok := req.Header[expHeaderKey]
|
||||||
|
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
|
||||||
|
resp.WriteHeader(http.StatusBadRequest)
|
||||||
|
} else {
|
||||||
|
resp.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
|
||||||
|
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
template := x509.Certificate{
|
||||||
|
SerialNumber: big.NewInt(1),
|
||||||
|
Subject: pkix.Name{CommonName: "localhost"},
|
||||||
|
NotBefore: time.Now(),
|
||||||
|
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
||||||
|
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
||||||
|
BasicConstraintsValid: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
dir := t.TempDir()
|
||||||
|
certPath = path.Join(dir, "cert.pem")
|
||||||
|
keyPath = path.Join(dir, "key.pem")
|
||||||
|
|
||||||
|
certFile, err := os.Create(certPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer certFile.Close()
|
||||||
|
|
||||||
|
keyFile, err := os.Create(keyPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer keyFile.Close()
|
||||||
|
|
||||||
|
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
return certPath, keyPath
|
||||||
|
}
|
702
cmd/http-gw/settings.go
Normal file
702
cmd/http-gw/settings.go
Normal file
|
@ -0,0 +1,702 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||||
|
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
|
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/zapjournald"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
"github.com/ssgreg/journald"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
"go.uber.org/zap/zapcore"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
destinationStdout = "stdout"
|
||||||
|
destinationJournald = "journald"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultRebalanceTimer = 60 * time.Second
|
||||||
|
defaultRequestTimeout = 15 * time.Second
|
||||||
|
defaultConnectTimeout = 10 * time.Second
|
||||||
|
defaultStreamTimeout = 10 * time.Second
|
||||||
|
|
||||||
|
defaultLoggerSamplerInterval = 1 * time.Second
|
||||||
|
|
||||||
|
defaultShutdownTimeout = 15 * time.Second
|
||||||
|
|
||||||
|
defaultPoolErrorThreshold uint32 = 100
|
||||||
|
|
||||||
|
defaultSoftMemoryLimit = math.MaxInt64
|
||||||
|
|
||||||
|
defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb
|
||||||
|
|
||||||
|
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||||
|
|
||||||
|
defaultReconnectInterval = time.Minute
|
||||||
|
|
||||||
|
cfgServer = "server"
|
||||||
|
cfgTLSEnabled = "tls.enabled"
|
||||||
|
cfgTLSCertFile = "tls.cert_file"
|
||||||
|
cfgTLSKeyFile = "tls.key_file"
|
||||||
|
|
||||||
|
cfgReconnectInterval = "reconnect_interval"
|
||||||
|
|
||||||
|
cfgIndexPageEnabled = "index_page.enabled"
|
||||||
|
cfgIndexPageTemplatePath = "index_page.template_path"
|
||||||
|
|
||||||
|
// Web.
|
||||||
|
cfgWebReadBufferSize = "web.read_buffer_size"
|
||||||
|
cfgWebWriteBufferSize = "web.write_buffer_size"
|
||||||
|
cfgWebReadTimeout = "web.read_timeout"
|
||||||
|
cfgWebWriteTimeout = "web.write_timeout"
|
||||||
|
cfgWebStreamRequestBody = "web.stream_request_body"
|
||||||
|
cfgWebMaxRequestBodySize = "web.max_request_body_size"
|
||||||
|
|
||||||
|
// Metrics / Profiler.
|
||||||
|
cfgPrometheusEnabled = "prometheus.enabled"
|
||||||
|
cfgPrometheusAddress = "prometheus.address"
|
||||||
|
cfgPprofEnabled = "pprof.enabled"
|
||||||
|
cfgPprofAddress = "pprof.address"
|
||||||
|
|
||||||
|
// Tracing ...
|
||||||
|
cfgTracingEnabled = "tracing.enabled"
|
||||||
|
cfgTracingExporter = "tracing.exporter"
|
||||||
|
cfgTracingEndpoint = "tracing.endpoint"
|
||||||
|
cfgTracingTrustedCa = "tracing.trusted_ca"
|
||||||
|
|
||||||
|
// Pool config.
|
||||||
|
cfgConTimeout = "connect_timeout"
|
||||||
|
cfgStreamTimeout = "stream_timeout"
|
||||||
|
cfgReqTimeout = "request_timeout"
|
||||||
|
cfgRebalance = "rebalance_timer"
|
||||||
|
cfgPoolErrorThreshold = "pool_error_threshold"
|
||||||
|
|
||||||
|
// Logger.
|
||||||
|
cfgLoggerLevel = "logger.level"
|
||||||
|
cfgLoggerDestination = "logger.destination"
|
||||||
|
|
||||||
|
cfgLoggerSamplingEnabled = "logger.sampling.enabled"
|
||||||
|
cfgLoggerSamplingInitial = "logger.sampling.initial"
|
||||||
|
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
|
||||||
|
cfgLoggerSamplingInterval = "logger.sampling.interval"
|
||||||
|
|
||||||
|
// Wallet.
|
||||||
|
cfgWalletPassphrase = "wallet.passphrase"
|
||||||
|
cfgWalletPath = "wallet.path"
|
||||||
|
cfgWalletAddress = "wallet.address"
|
||||||
|
|
||||||
|
// Uploader Header.
|
||||||
|
cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp"
|
||||||
|
|
||||||
|
// Peers.
|
||||||
|
cfgPeers = "peers"
|
||||||
|
|
||||||
|
// NeoGo.
|
||||||
|
cfgRPCEndpoint = "rpc_endpoint"
|
||||||
|
|
||||||
|
// Resolving.
|
||||||
|
cfgResolveOrder = "resolve_order"
|
||||||
|
|
||||||
|
// Zip compression.
|
||||||
|
cfgZipCompression = "zip.compression"
|
||||||
|
|
||||||
|
// Runtime.
|
||||||
|
cfgSoftMemoryLimit = "runtime.soft_memory_limit"
|
||||||
|
|
||||||
|
// Enabling client side object preparing for PUT operations.
|
||||||
|
cfgClientCut = "frostfs.client_cut"
|
||||||
|
// Sets max buffer size for read payload in put operations.
|
||||||
|
cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
|
||||||
|
// Configuration of parameters of requests to FrostFS.
|
||||||
|
// Sets max attempt to make successful tree request.
|
||||||
|
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
|
||||||
|
|
||||||
|
// Caching.
|
||||||
|
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
|
||||||
|
cfgBucketsCacheSize = "cache.buckets.size"
|
||||||
|
|
||||||
|
// Bucket resolving options.
|
||||||
|
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
|
||||||
|
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
|
||||||
|
|
||||||
|
// Command line args.
|
||||||
|
cmdHelp = "help"
|
||||||
|
cmdVersion = "version"
|
||||||
|
cmdPprof = "pprof"
|
||||||
|
cmdMetrics = "metrics"
|
||||||
|
cmdWallet = "wallet"
|
||||||
|
cmdAddress = "address"
|
||||||
|
cmdConfig = "config"
|
||||||
|
cmdConfigDir = "config-dir"
|
||||||
|
cmdListenAddress = "listen_address"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ignore = map[string]struct{}{
|
||||||
|
cfgPeers: {},
|
||||||
|
cmdHelp: {},
|
||||||
|
cmdVersion: {},
|
||||||
|
}
|
||||||
|
|
||||||
|
func settings() *viper.Viper {
|
||||||
|
v := viper.New()
|
||||||
|
v.AutomaticEnv()
|
||||||
|
v.SetEnvPrefix(Prefix)
|
||||||
|
v.AllowEmptyEnv(true)
|
||||||
|
v.SetConfigType("yaml")
|
||||||
|
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||||
|
|
||||||
|
// flags setup:
|
||||||
|
flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
|
||||||
|
flags.SetOutput(os.Stdout)
|
||||||
|
flags.SortFlags = false
|
||||||
|
|
||||||
|
flags.Bool(cmdPprof, false, "enable pprof")
|
||||||
|
flags.Bool(cmdMetrics, false, "enable prometheus")
|
||||||
|
|
||||||
|
help := flags.BoolP(cmdHelp, "h", false, "show help")
|
||||||
|
version := flags.BoolP(cmdVersion, "v", false, "show version")
|
||||||
|
|
||||||
|
flags.StringP(cmdWallet, "w", "", `path to the wallet`)
|
||||||
|
flags.String(cmdAddress, "", `address of wallet account`)
|
||||||
|
flags.StringArray(cmdConfig, nil, "config paths")
|
||||||
|
flags.String(cmdConfigDir, "", "config dir path")
|
||||||
|
flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
|
||||||
|
flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout")
|
||||||
|
flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
|
||||||
|
flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
|
||||||
|
|
||||||
|
flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
|
||||||
|
flags.String(cfgTLSCertFile, "", "TLS certificate path")
|
||||||
|
flags.String(cfgTLSKeyFile, "", "TLS key path")
|
||||||
|
peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
|
||||||
|
|
||||||
|
resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
|
||||||
|
|
||||||
|
// set defaults:
|
||||||
|
|
||||||
|
// logger:
|
||||||
|
v.SetDefault(cfgLoggerLevel, "debug")
|
||||||
|
v.SetDefault(cfgLoggerDestination, "stdout")
|
||||||
|
v.SetDefault(cfgLoggerSamplingEnabled, false)
|
||||||
|
v.SetDefault(cfgLoggerSamplingThereafter, 100)
|
||||||
|
v.SetDefault(cfgLoggerSamplingInitial, 100)
|
||||||
|
v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
|
||||||
|
|
||||||
|
// pool:
|
||||||
|
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
|
||||||
|
|
||||||
|
v.SetDefault(cfgIndexPageEnabled, false)
|
||||||
|
v.SetDefault(cfgIndexPageTemplatePath, "")
|
||||||
|
|
||||||
|
// frostfs:
|
||||||
|
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
|
||||||
|
|
||||||
|
// web-server:
|
||||||
|
v.SetDefault(cfgWebReadBufferSize, 4096)
|
||||||
|
v.SetDefault(cfgWebWriteBufferSize, 4096)
|
||||||
|
v.SetDefault(cfgWebReadTimeout, time.Minute*10)
|
||||||
|
v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
|
||||||
|
v.SetDefault(cfgWebStreamRequestBody, true)
|
||||||
|
v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
|
||||||
|
|
||||||
|
// upload header
|
||||||
|
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
|
||||||
|
|
||||||
|
// zip:
|
||||||
|
v.SetDefault(cfgZipCompression, false)
|
||||||
|
|
||||||
|
// metrics
|
||||||
|
v.SetDefault(cfgPprofAddress, "localhost:8083")
|
||||||
|
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
|
||||||
|
|
||||||
|
// resolve bucket
|
||||||
|
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
|
||||||
|
v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
|
||||||
|
|
||||||
|
// Binding flags
|
||||||
|
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := v.BindPFlags(flags); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := flags.Parse(os.Args); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
|
||||||
|
v.Set(cfgServer+".0."+cfgTLSEnabled, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resolveMethods != nil {
|
||||||
|
v.SetDefault(cfgResolveOrder, *resolveMethods)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case help != nil && *help:
|
||||||
|
fmt.Printf("FrostFS HTTP Gateway %s\n", Version)
|
||||||
|
flags.PrintDefaults()
|
||||||
|
|
||||||
|
fmt.Println()
|
||||||
|
fmt.Println("Default environments:")
|
||||||
|
fmt.Println()
|
||||||
|
keys := v.AllKeys()
|
||||||
|
sort.Strings(keys)
|
||||||
|
|
||||||
|
for i := range keys {
|
||||||
|
if _, ok := ignore[keys[i]]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
defaultValue := v.GetString(keys[i])
|
||||||
|
if len(defaultValue) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
k := strings.Replace(keys[i], ".", "_", -1)
|
||||||
|
fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println()
|
||||||
|
fmt.Println("Peers preset:")
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers))
|
||||||
|
fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers))
|
||||||
|
|
||||||
|
os.Exit(0)
|
||||||
|
case version != nil && *version:
|
||||||
|
fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := readInConfig(v); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if peers != nil && len(*peers) > 0 {
|
||||||
|
for i := range *peers {
|
||||||
|
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
|
||||||
|
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
|
||||||
|
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
func readInConfig(v *viper.Viper) error {
|
||||||
|
if v.IsSet(cmdConfig) {
|
||||||
|
if err := readConfig(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.IsSet(cmdConfigDir) {
|
||||||
|
if err := readConfigDir(v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readConfigDir(v *viper.Viper) error {
|
||||||
|
cfgSubConfigDir := v.GetString(cmdConfigDir)
|
||||||
|
entries, err := os.ReadDir(cfgSubConfigDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ext := path.Ext(entry.Name())
|
||||||
|
if ext != ".yaml" && ext != ".yml" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readConfig(v *viper.Viper) error {
|
||||||
|
for _, fileName := range v.GetStringSlice(cmdConfig) {
|
||||||
|
if err := mergeConfig(v, fileName); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeConfig(v *viper.Viper, fileName string) error {
|
||||||
|
cfgFile, err := os.Open(fileName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if errClose := cfgFile.Close(); errClose != nil {
|
||||||
|
panic(errClose)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return v.MergeConfig(cfgFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
|
||||||
|
lvl, err := getLogLevel(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
dest := v.GetString(cfgLoggerDestination)
|
||||||
|
|
||||||
|
switch dest {
|
||||||
|
case destinationStdout:
|
||||||
|
return newStdoutLogger(v, lvl)
|
||||||
|
case destinationJournald:
|
||||||
|
return newJournaldLogger(v, lvl)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newStdoutLogger constructs a zap.Logger instance for current application.
|
||||||
|
// Panics on failure.
|
||||||
|
//
|
||||||
|
// Logger is built from zap's production logging configuration with:
|
||||||
|
// - parameterized level (debug by default)
|
||||||
|
// - console encoding
|
||||||
|
// - ISO8601 time encoding
|
||||||
|
//
|
||||||
|
// Logger records a stack trace for all messages at or above fatal level.
|
||||||
|
//
|
||||||
|
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
||||||
|
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
|
||||||
|
stdout := zapcore.AddSync(os.Stderr)
|
||||||
|
level := zap.NewAtomicLevelAt(lvl)
|
||||||
|
|
||||||
|
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
|
||||||
|
consoleOutCore = samplingEnabling(v, consoleOutCore)
|
||||||
|
|
||||||
|
l := zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
|
||||||
|
return l, level
|
||||||
|
}
|
||||||
|
|
||||||
|
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
|
||||||
|
level := zap.NewAtomicLevelAt(lvl)
|
||||||
|
|
||||||
|
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
|
||||||
|
|
||||||
|
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
|
||||||
|
coreWithContext := core.With([]zapcore.Field{
|
||||||
|
zapjournald.SyslogFacility(zapjournald.LogDaemon),
|
||||||
|
zapjournald.SyslogIdentifier(),
|
||||||
|
zapjournald.SyslogPid(),
|
||||||
|
})
|
||||||
|
|
||||||
|
coreWithContext = samplingEnabling(v, coreWithContext)
|
||||||
|
|
||||||
|
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
|
||||||
|
|
||||||
|
return l, level
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLogEncoder() zapcore.Encoder {
|
||||||
|
c := zap.NewProductionEncoderConfig()
|
||||||
|
c.EncodeTime = zapcore.ISO8601TimeEncoder
|
||||||
|
|
||||||
|
return zapcore.NewConsoleEncoder(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
|
||||||
|
// Zap samples by logging the first cgfLoggerSamplingInitial entries with a given level
|
||||||
|
// and message within the specified time interval.
|
||||||
|
// In the above config, only the first cgfLoggerSamplingInitial log entries with the same level and message
|
||||||
|
// are recorded in cfgLoggerSamplingInterval interval. Every other log entry will be dropped within the interval since
|
||||||
|
// cfgLoggerSamplingThereafter is specified here.
|
||||||
|
if v.GetBool(cfgLoggerSamplingEnabled) {
|
||||||
|
core = zapcore.NewSamplerWithOptions(
|
||||||
|
core,
|
||||||
|
v.GetDuration(cfgLoggerSamplingInterval),
|
||||||
|
v.GetInt(cfgLoggerSamplingInitial),
|
||||||
|
v.GetInt(cfgLoggerSamplingThereafter),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
return core
|
||||||
|
}
|
||||||
|
|
||||||
|
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
||||||
|
var lvl zapcore.Level
|
||||||
|
lvlStr := v.GetString(cfgLoggerLevel)
|
||||||
|
err := lvl.UnmarshalText([]byte(lvlStr))
|
||||||
|
if err != nil {
|
||||||
|
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
||||||
|
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
||||||
|
zapcore.DebugLevel,
|
||||||
|
zapcore.InfoLevel,
|
||||||
|
zapcore.WarnLevel,
|
||||||
|
zapcore.ErrorLevel,
|
||||||
|
zapcore.DPanicLevel,
|
||||||
|
zapcore.PanicLevel,
|
||||||
|
zapcore.FatalLevel,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return lvl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
||||||
|
reconnect := cfg.GetDuration(cfgReconnectInterval)
|
||||||
|
if reconnect <= 0 {
|
||||||
|
reconnect = defaultReconnectInterval
|
||||||
|
}
|
||||||
|
|
||||||
|
return reconnect
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
||||||
|
var servers []ServerInfo
|
||||||
|
seen := make(map[string]struct{})
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
key := cfgServer + "." + strconv.Itoa(i) + "."
|
||||||
|
|
||||||
|
var serverInfo ServerInfo
|
||||||
|
serverInfo.Address = v.GetString(key + "address")
|
||||||
|
serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled)
|
||||||
|
serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile)
|
||||||
|
serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile)
|
||||||
|
|
||||||
|
if serverInfo.Address == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := seen[serverInfo.Address]; ok {
|
||||||
|
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[serverInfo.Address] = struct{}{}
|
||||||
|
servers = append(servers, serverInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
return servers
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
|
||||||
|
key, err := getFrostFSKey(cfg, logger)
|
||||||
|
if err != nil {
|
||||||
|
logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
var prm pool.InitParameters
|
||||||
|
var prmTree treepool.InitParameters
|
||||||
|
|
||||||
|
prm.SetKey(&key.PrivateKey)
|
||||||
|
prmTree.SetKey(key)
|
||||||
|
logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
|
||||||
|
|
||||||
|
for _, peer := range fetchPeers(logger, cfg) {
|
||||||
|
prm.AddNode(peer)
|
||||||
|
prmTree.AddNode(peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
connTimeout := cfg.GetDuration(cfgConTimeout)
|
||||||
|
if connTimeout <= 0 {
|
||||||
|
connTimeout = defaultConnectTimeout
|
||||||
|
}
|
||||||
|
prm.SetNodeDialTimeout(connTimeout)
|
||||||
|
prmTree.SetNodeDialTimeout(connTimeout)
|
||||||
|
|
||||||
|
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
|
||||||
|
if streamTimeout <= 0 {
|
||||||
|
streamTimeout = defaultStreamTimeout
|
||||||
|
}
|
||||||
|
prm.SetNodeStreamTimeout(streamTimeout)
|
||||||
|
prmTree.SetNodeStreamTimeout(streamTimeout)
|
||||||
|
|
||||||
|
healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
|
||||||
|
if healthCheckTimeout <= 0 {
|
||||||
|
healthCheckTimeout = defaultRequestTimeout
|
||||||
|
}
|
||||||
|
prm.SetHealthcheckTimeout(healthCheckTimeout)
|
||||||
|
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
|
||||||
|
|
||||||
|
rebalanceInterval := cfg.GetDuration(cfgRebalance)
|
||||||
|
if rebalanceInterval <= 0 {
|
||||||
|
rebalanceInterval = defaultRebalanceTimer
|
||||||
|
}
|
||||||
|
prm.SetClientRebalanceInterval(rebalanceInterval)
|
||||||
|
prmTree.SetClientRebalanceInterval(rebalanceInterval)
|
||||||
|
|
||||||
|
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
|
||||||
|
if errorThreshold <= 0 {
|
||||||
|
errorThreshold = defaultPoolErrorThreshold
|
||||||
|
}
|
||||||
|
prm.SetErrorThreshold(errorThreshold)
|
||||||
|
prm.SetLogger(logger)
|
||||||
|
prmTree.SetLogger(logger)
|
||||||
|
|
||||||
|
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
|
||||||
|
|
||||||
|
var apiGRPCDialOpts []grpc.DialOption
|
||||||
|
var treeGRPCDialOpts []grpc.DialOption
|
||||||
|
if cfg.GetBool(cfgTracingEnabled) {
|
||||||
|
interceptors := []grpc.DialOption{
|
||||||
|
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||||
|
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||||
|
}
|
||||||
|
treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
|
||||||
|
apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
|
||||||
|
}
|
||||||
|
prm.SetGRPCDialOptions(apiGRPCDialOpts...)
|
||||||
|
prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)
|
||||||
|
|
||||||
|
p, err := pool.NewPool(prm)
|
||||||
|
if err != nil {
|
||||||
|
logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = p.Dial(ctx); err != nil {
|
||||||
|
logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
treePool, err := treepool.NewPool(prmTree)
|
||||||
|
if err != nil {
|
||||||
|
logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
|
||||||
|
}
|
||||||
|
if err = treePool.Dial(ctx); err != nil {
|
||||||
|
logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
return p, treePool, key
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
||||||
|
var nodes []pool.NodeParam
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
key := cfgPeers + "." + strconv.Itoa(i) + "."
|
||||||
|
address := v.GetString(key + "address")
|
||||||
|
weight := v.GetFloat64(key + "weight")
|
||||||
|
priority := v.GetInt(key + "priority")
|
||||||
|
|
||||||
|
if address == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if weight <= 0 { // unspecified or wrong
|
||||||
|
weight = 1
|
||||||
|
}
|
||||||
|
if priority <= 0 { // unspecified or wrong
|
||||||
|
priority = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
|
||||||
|
|
||||||
|
l.Info(logs.AddedStoragePeer,
|
||||||
|
zap.Int("priority", priority),
|
||||||
|
zap.String("address", address),
|
||||||
|
zap.Float64("weight", weight))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
|
||||||
|
softMemoryLimit := cfg.GetSizeInBytes(cfgSoftMemoryLimit)
|
||||||
|
if softMemoryLimit <= 0 {
|
||||||
|
softMemoryLimit = defaultSoftMemoryLimit
|
||||||
|
}
|
||||||
|
|
||||||
|
return int64(softMemoryLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||||
|
cacheCfg := cache.DefaultBucketConfig(l)
|
||||||
|
|
||||||
|
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
|
||||||
|
cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size)
|
||||||
|
|
||||||
|
return cacheCfg
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
|
||||||
|
if v.IsSet(cfgEntry) {
|
||||||
|
lifetime := v.GetDuration(cfgEntry)
|
||||||
|
if lifetime <= 0 {
|
||||||
|
l.Error(logs.InvalidLifetimeUsingDefaultValue,
|
||||||
|
zap.String("parameter", cfgEntry),
|
||||||
|
zap.Duration("value in config", lifetime),
|
||||||
|
zap.Duration("default", defaultValue))
|
||||||
|
} else {
|
||||||
|
return lifetime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultValue
|
||||||
|
}
|
||||||
|
|
||||||
|
func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int {
|
||||||
|
if v.IsSet(cfgEntry) {
|
||||||
|
size := v.GetInt(cfgEntry)
|
||||||
|
if size <= 0 {
|
||||||
|
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
|
||||||
|
zap.String("parameter", cfgEntry),
|
||||||
|
zap.Int("value in config", size),
|
||||||
|
zap.Int("default", defaultValue))
|
||||||
|
} else {
|
||||||
|
return size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultValue
|
||||||
|
}
|
|
@ -14,24 +14,33 @@ HTTP_GW_PPROF_ADDRESS=localhost:8083
|
||||||
HTTP_GW_PROMETHEUS_ENABLED=true
|
HTTP_GW_PROMETHEUS_ENABLED=true
|
||||||
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
|
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
|
||||||
|
|
||||||
# Log level.
|
# Logger.
|
||||||
HTTP_GW_LOGGER_LEVEL=debug
|
HTTP_GW_LOGGER_LEVEL=debug
|
||||||
|
HTTP_GW_LOGGER_SAMPLING_ENABLED=false
|
||||||
|
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
|
||||||
|
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
|
||||||
|
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
|
||||||
|
|
||||||
# Address to bind.
|
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
|
||||||
HTTP_GW_LISTEN_ADDRESS=0.0.0.0:443
|
HTTP_GW_SERVER_0_TLS_ENABLED=false
|
||||||
# Provide cert to enable TLS.
|
HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert
|
||||||
HTTP_GW_TLS_CERTIFICATE=/path/to/tls/cert
|
HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key
|
||||||
# Provide key to enable TLS.
|
HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444
|
||||||
HTTP_GW_TLS_KEY=/path/to/tls/key
|
HTTP_GW_SERVER_1_TLS_ENABLED=true
|
||||||
|
HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
|
||||||
|
HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
|
||||||
|
|
||||||
|
# How often to reconnect to the servers
|
||||||
|
HTTP_GW_RECONNECT_INTERVAL=1m
|
||||||
|
|
||||||
# Nodes configuration.
|
# Nodes configuration.
|
||||||
# This configuration make the gateway use the first node (grpc://s01.neofs.devenv:8080)
|
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
|
||||||
# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.neofs.devenv:8080)
|
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
|
||||||
# for 10% of requests and the third node for 90% of requests.
|
# for 10% of requests and the third node for 90% of requests.
|
||||||
|
|
||||||
# Peer 1.
|
# Peer 1.
|
||||||
# Endpoint.
|
# Endpoint.
|
||||||
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.neofs.devenv:8080
|
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080
|
||||||
# Until nodes with the same priority level are healthy
|
# Until nodes with the same priority level are healthy
|
||||||
# nodes with other priority are not used.
|
# nodes with other priority are not used.
|
||||||
# The lower the value, the higher the priority.
|
# The lower the value, the higher the priority.
|
||||||
|
@ -39,11 +48,11 @@ HTTP_GW_PEERS_0_PRIORITY=1
|
||||||
# Load distribution proportion for nodes with the same priority.
|
# Load distribution proportion for nodes with the same priority.
|
||||||
HTTP_GW_PEERS_0_WEIGHT=1
|
HTTP_GW_PEERS_0_WEIGHT=1
|
||||||
# Peer 2.
|
# Peer 2.
|
||||||
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.neofs.devenv:8080
|
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080
|
||||||
HTTP_GW_PEERS_1_PRIORITY=2
|
HTTP_GW_PEERS_1_PRIORITY=2
|
||||||
HTTP_GW_PEERS_1_WEIGHT=1
|
HTTP_GW_PEERS_1_WEIGHT=1
|
||||||
# Peer 3.
|
# Peer 3.
|
||||||
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.neofs.devenv:8080
|
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080
|
||||||
HTTP_GW_PEERS_2_PRIORITY=2
|
HTTP_GW_PEERS_2_PRIORITY=2
|
||||||
HTTP_GW_PEERS_2_WEIGHT=9
|
HTTP_GW_PEERS_2_WEIGHT=9
|
||||||
|
|
||||||
|
@ -70,7 +79,7 @@ HTTP_GW_STREAM_REQUEST_BODY=true
|
||||||
HTTP_GW_MAX_REQUEST_BODY_SIZE=4194304
|
HTTP_GW_MAX_REQUEST_BODY_SIZE=4194304
|
||||||
|
|
||||||
# RPC endpoint to be able to use nns container resolving.
|
# RPC endpoint to be able to use nns container resolving.
|
||||||
HTTP_GW_RPC_ENDPOINT=http://morph-chain.neofs.devenv:30333
|
HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333
|
||||||
# The order in which resolvers are used to find an container id by name.
|
# The order in which resolvers are used to find a container id by name.
|
||||||
HTTP_GW_RESOLVE_ORDER="nns dns"
|
HTTP_GW_RESOLVE_ORDER="nns dns"
|
||||||
|
|
||||||
|
@ -79,12 +88,41 @@ HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false
|
||||||
|
|
||||||
# Timeout to dial node.
|
# Timeout to dial node.
|
||||||
HTTP_GW_CONNECT_TIMEOUT=5s
|
HTTP_GW_CONNECT_TIMEOUT=5s
|
||||||
|
# Timeout for individual operations in streaming RPC.
|
||||||
|
HTTP_GW_STREAM_TIMEOUT=10s
|
||||||
# Timeout to check node health during rebalance.
|
# Timeout to check node health during rebalance.
|
||||||
HTTP_GW_REQUEST_TIMEOUT=5s
|
HTTP_GW_REQUEST_TIMEOUT=5s
|
||||||
# Interval to check nodes health.
|
# Interval to check nodes health.
|
||||||
HTTP_GW_REBALANCE_TIMER=30s
|
HTTP_GW_REBALANCE_TIMER=30s
|
||||||
# The number of errors on connection after which node is considered as unhealthy
|
# The number of errors on connection after which node is considered as unhealthy
|
||||||
S3_GW_POOL_ERROR_THRESHOLD=100
|
HTTP_GW_POOL_ERROR_THRESHOLD=100
|
||||||
|
|
||||||
# Enable zip compression to download files by common prefix.
|
# Enable zip compression to download files by common prefix.
|
||||||
HTTP_GW_ZIP_COMPRESSION=false
|
HTTP_GW_ZIP_COMPRESSION=false
|
||||||
|
|
||||||
|
HTTP_GW_TRACING_ENABLED=true
|
||||||
|
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
|
||||||
|
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
|
||||||
|
HTTP_GW_TRACING_TRUSTED_CA=""
|
||||||
|
|
||||||
|
HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
|
||||||
|
|
||||||
|
# Parameters of requests to FrostFS
|
||||||
|
# This flag enables client side object preparing.
|
||||||
|
HTTP_GW_FROSTFS_CLIENT_CUT=false
|
||||||
|
# Sets max buffer size for read payload in put operations.
|
||||||
|
HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
|
||||||
|
|
||||||
|
# Caching
|
||||||
|
# Cache which contains mapping of bucket name to bucket info
|
||||||
|
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
|
||||||
|
HTTP_GW_CACHE_BUCKETS_SIZE=1000
|
||||||
|
|
||||||
|
# Header to determine zone to resolve bucket name
|
||||||
|
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
|
||||||
|
# Namespaces that should be handled as default
|
||||||
|
HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
|
||||||
|
|
||||||
|
# Max attempt to make successful tree request.
|
||||||
|
# default value is 0, which means the number of attempts equals the number of nodes in the pool.
|
||||||
|
HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
||||||
|
|
|
@ -4,27 +4,46 @@ wallet:
|
||||||
passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
|
passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
|
||||||
|
|
||||||
pprof:
|
pprof:
|
||||||
enabled: true # Enable pprof.
|
enabled: false # Enable pprof.
|
||||||
address: localhost:8083
|
address: localhost:8083
|
||||||
prometheus:
|
prometheus:
|
||||||
enabled: true # Enable metrics.
|
enabled: false # Enable metrics.
|
||||||
address: localhost:8084
|
address: localhost:8084
|
||||||
|
tracing:
|
||||||
|
enabled: true
|
||||||
|
exporter: "otlp_grpc"
|
||||||
|
endpoint: "localhost:4317"
|
||||||
|
trusted_ca: ""
|
||||||
|
|
||||||
logger:
|
logger:
|
||||||
level: debug # Log level.
|
level: debug # Log level.
|
||||||
|
destination: stdout
|
||||||
|
sampling:
|
||||||
|
enabled: false
|
||||||
|
initial: 100
|
||||||
|
thereafter: 100
|
||||||
|
interval: 1s
|
||||||
|
|
||||||
listen_address: 0.0.0.0:443 # Address to bind.
|
server:
|
||||||
tls_certificate: /path/to/tls/cert # Provide cert to enable TLS.
|
- address: 0.0.0.0:8080
|
||||||
tls_key: /path/to/tls/key # Provide key to enable TLS.
|
tls:
|
||||||
|
enabled: false
|
||||||
|
cert_file: /path/to/cert
|
||||||
|
key_file: /path/to/key
|
||||||
|
- address: 0.0.0.0:8081
|
||||||
|
tls:
|
||||||
|
enabled: false
|
||||||
|
cert_file: /path/to/cert
|
||||||
|
key_file: /path/to/key
|
||||||
|
|
||||||
# Nodes configuration.
|
# Nodes configuration.
|
||||||
# This configuration make the gateway use the first node (grpc://s01.neofs.devenv:8080)
|
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
|
||||||
# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.neofs.devenv:8080)
|
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
|
||||||
# for 10% of requests and the third node for 90% of requests.
|
# for 10% of requests and the third node for 90% of requests.
|
||||||
peers:
|
peers:
|
||||||
0:
|
0:
|
||||||
# Endpoint.
|
# Endpoint.
|
||||||
address: grpc://s01.neofs.devenv:8080
|
address: grpc://s01.frostfs.devenv:8080
|
||||||
|
|
||||||
# Until nodes with the same priority level are healthy
|
# Until nodes with the same priority level are healthy
|
||||||
# nodes with other priority are not used.
|
# nodes with other priority are not used.
|
||||||
|
@ -34,14 +53,15 @@ peers:
|
||||||
# Load distribution proportion for nodes with the same priority.
|
# Load distribution proportion for nodes with the same priority.
|
||||||
weight: 1
|
weight: 1
|
||||||
1:
|
1:
|
||||||
address: grpc://s02.neofs.devenv:8080
|
address: grpc://s02.frostfs.devenv:8080
|
||||||
priority: 2
|
priority: 2
|
||||||
weight: 1
|
weight: 1
|
||||||
2:
|
2:
|
||||||
address: grpc://s03.neofs.devenv:8080
|
address: grpc://s03.frostfs.devenv:8080
|
||||||
priority: 2
|
priority: 2
|
||||||
weight: 9
|
weight: 9
|
||||||
|
|
||||||
|
reconnect_interval: 1m
|
||||||
|
|
||||||
web:
|
web:
|
||||||
# Per-connection buffer size for requests' reading.
|
# Per-connection buffer size for requests' reading.
|
||||||
|
@ -72,7 +92,7 @@ web:
|
||||||
max_request_body_size: 4194304
|
max_request_body_size: 4194304
|
||||||
|
|
||||||
# RPC endpoint to be able to use nns container resolving.
|
# RPC endpoint to be able to use nns container resolving.
|
||||||
rpc_endpoint: http://morph-chain.neofs.devenv:30333
|
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
||||||
# The order in which resolvers are used to find an container id by name.
|
# The order in which resolvers are used to find a container id by name.
|
||||||
resolve_order:
|
resolve_order:
|
||||||
- nns
|
- nns
|
||||||
|
@ -82,9 +102,39 @@ upload_header:
|
||||||
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
|
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
|
||||||
|
|
||||||
connect_timeout: 5s # Timeout to dial node.
|
connect_timeout: 5s # Timeout to dial node.
|
||||||
|
stream_timeout: 10s # Timeout for individual operations in streaming RPC.
|
||||||
request_timeout: 5s # Timeout to check node health during rebalance.
|
request_timeout: 5s # Timeout to check node health during rebalance.
|
||||||
rebalance_timer: 30s # Interval to check nodes health.
|
rebalance_timer: 30s # Interval to check nodes health.
|
||||||
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
|
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
|
||||||
|
|
||||||
|
# Enable index page to see objects list for specified container and prefix
|
||||||
|
index_page:
|
||||||
|
enabled: false
|
||||||
|
template_path: internal/handler/templates/index.gotmpl
|
||||||
|
|
||||||
zip:
|
zip:
|
||||||
compression: false # Enable zip compression to download files by common prefix.
|
compression: false # Enable zip compression to download files by common prefix.
|
||||||
|
|
||||||
|
runtime:
|
||||||
|
soft_memory_limit: 1gb
|
||||||
|
|
||||||
|
# Parameters of requests to FrostFS
|
||||||
|
frostfs:
|
||||||
|
# This flag enables client side object preparing.
|
||||||
|
client_cut: false
|
||||||
|
# Sets max buffer size for read payload in put operations.
|
||||||
|
buffer_max_size_for_put: 1048576
|
||||||
|
# Max attempt to make successful tree request.
|
||||||
|
# default value is 0 that means the number of attempts equals to number of nodes in pool.
|
||||||
|
tree_pool_max_attempts: 0
|
||||||
|
|
||||||
|
# Caching
|
||||||
|
cache:
|
||||||
|
# Cache which contains mapping of bucket name to bucket info
|
||||||
|
buckets:
|
||||||
|
lifetime: 1m
|
||||||
|
size: 1000
|
||||||
|
|
||||||
|
resolve_bucket:
|
||||||
|
namespace_header: X-Frostfs-Namespace
|
||||||
|
default_namespaces: [ "", "root" ]
|
||||||
|
|
3
config/dir/pprof.yaml
Normal file
3
config/dir/pprof.yaml
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
pprof:
|
||||||
|
enabled: true
|
||||||
|
address: localhost:8083
|
3
config/dir/prometheus.yaml
Normal file
3
config/dir/prometheus.yaml
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
prometheus:
|
||||||
|
enabled: true
|
||||||
|
address: localhost:8084
|
5
debian/changelog
vendored
Normal file
5
debian/changelog
vendored
Normal file
|
@ -0,0 +1,5 @@
|
||||||
|
frostfs-http-gw (0.0.0) stable; urgency=medium
|
||||||
|
|
||||||
|
* Please see CHANGELOG.md
|
||||||
|
|
||||||
|
-- TrueCloudLab <tech@frostfs.info> Wed, 24 Aug 2022 18:29:49 +0300
|
14
debian/control
vendored
Normal file
14
debian/control
vendored
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
Source: frostfs-http-gw
|
||||||
|
Section: frostfs
|
||||||
|
Priority: optional
|
||||||
|
Maintainer: TrueCloudLab <tech@frostfs.info>
|
||||||
|
Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts
|
||||||
|
Standards-Version: 4.5.1
|
||||||
|
Homepage: https://frostfs.info/
|
||||||
|
Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
|
||||||
|
Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
||||||
|
|
||||||
|
Package: frostfs-http-gw
|
||||||
|
Architecture: any
|
||||||
|
Depends: ${misc:Depends}
|
||||||
|
Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
|
25
debian/copyright
vendored
Normal file
25
debian/copyright
vendored
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||||
|
Upstream-Name: frostfs-http-gw
|
||||||
|
Upstream-Contact: tech@frostfs.info
|
||||||
|
Source: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
||||||
|
|
||||||
|
Files: *
|
||||||
|
Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project
|
||||||
|
(https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md)
|
||||||
|
2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project
|
||||||
|
(https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/src/branch/master/CREDITS.md)
|
||||||
|
|
||||||
|
|
||||||
|
License: GPL-3
|
||||||
|
This program is free software: you can redistribute it and/or modify it
|
||||||
|
under the terms of the GNU General Public License as published
|
||||||
|
by the Free Software Foundation; version 3.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||||
|
General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program or at /usr/share/common-licenses/GPL-3.
|
||||||
|
If not, see <http://www.gnu.org/licenses/>.
|
2
debian/frostfs-http-gw.dirs
vendored
Normal file
2
debian/frostfs-http-gw.dirs
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
etc/frostfs
|
||||||
|
srv/frostfs_cache
|
4
debian/frostfs-http-gw.docs
vendored
Normal file
4
debian/frostfs-http-gw.docs
vendored
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
docs/gate-configuration.md
|
||||||
|
README.md
|
||||||
|
CREDITS.md
|
||||||
|
CONTRIBUTING.md
|
1
debian/frostfs-http-gw.examples
vendored
Normal file
1
debian/frostfs-http-gw.examples
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
config/*
|
2
debian/frostfs-http-gw.install
vendored
Normal file
2
debian/frostfs-http-gw.install
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
bin/frostfs-http-gw usr/bin
|
||||||
|
config/config.yaml etc/frostfs/http
|
51
debian/frostfs-http-gw.postinst
vendored
Executable file
51
debian/frostfs-http-gw.postinst
vendored
Executable file
|
@ -0,0 +1,51 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# postinst script for frostfs-http-gw
|
||||||
|
#
|
||||||
|
# see: dh_installdeb(1)
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# summary of how this script can be called:
|
||||||
|
# * <postinst> `configure' <most-recently-configured-version>
|
||||||
|
# * <old-postinst> `abort-upgrade' <new version>
|
||||||
|
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
|
||||||
|
# <new-version>
|
||||||
|
# * <postinst> `abort-remove'
|
||||||
|
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
|
||||||
|
# <failed-install-package> <version> `removing'
|
||||||
|
# <conflicting-package> <version>
|
||||||
|
# for details, see https://www.debian.org/doc/debian-policy/ or
|
||||||
|
# the debian-policy package
|
||||||
|
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
configure)
|
||||||
|
USERNAME=http
|
||||||
|
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -m -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
|
||||||
|
if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
|
||||||
|
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
|
||||||
|
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
|
||||||
|
chmod -f 0750 /etc/frostfs/$USERNAME
|
||||||
|
chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
|
||||||
|
fi
|
||||||
|
USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
|
||||||
|
if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
|
||||||
|
chown -f frostfs-$USERNAME: "$USERDIR"
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
|
||||||
|
abort-upgrade|abort-remove|abort-deconfigure)
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "postinst called with unknown argument \`$1'" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# dh_installdeb will replace this with shell code automatically
|
||||||
|
# generated by other debhelper scripts.
|
||||||
|
|
||||||
|
#DEBHELPER#
|
||||||
|
|
||||||
|
exit 0
|
41
debian/frostfs-http-gw.postrm
vendored
Executable file
41
debian/frostfs-http-gw.postrm
vendored
Executable file
|
@ -0,0 +1,41 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# postrm script for frostfs-http-gw
|
||||||
|
#
|
||||||
|
# see: dh_installdeb(1)
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# summary of how this script can be called:
|
||||||
|
# * <postrm> `remove'
|
||||||
|
# * <postrm> `purge'
|
||||||
|
# * <old-postrm> `upgrade' <new-version>
|
||||||
|
# * <new-postrm> `failed-upgrade' <old-version>
|
||||||
|
# * <new-postrm> `abort-install'
|
||||||
|
# * <new-postrm> `abort-install' <old-version>
|
||||||
|
# * <new-postrm> `abort-upgrade' <old-version>
|
||||||
|
# * <disappearer's-postrm> `disappear' <overwriter>
|
||||||
|
# <overwriter-version>
|
||||||
|
# for details, see https://www.debian.org/doc/debian-policy/ or
|
||||||
|
# the debian-policy package
|
||||||
|
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
purge)
|
||||||
|
rm -rf /srv/frostfs_cache
|
||||||
|
;;
|
||||||
|
|
||||||
|
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "postrm called with unknown argument \`$1'" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# dh_installdeb will replace this with shell code automatically
|
||||||
|
# generated by other debhelper scripts.
|
||||||
|
|
||||||
|
#DEBHELPER#
|
||||||
|
|
||||||
|
exit 0
|
35
debian/frostfs-http-gw.preinst
vendored
Executable file
35
debian/frostfs-http-gw.preinst
vendored
Executable file
|
@ -0,0 +1,35 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# preinst script for frostfs-http-gw
|
||||||
|
#
|
||||||
|
# see: dh_installdeb(1)
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# summary of how this script can be called:
|
||||||
|
# * <new-preinst> `install'
|
||||||
|
# * <new-preinst> `install' <old-version>
|
||||||
|
# * <new-preinst> `upgrade' <old-version>
|
||||||
|
# * <old-preinst> `abort-upgrade' <new-version>
|
||||||
|
# for details, see https://www.debian.org/doc/debian-policy/ or
|
||||||
|
# the debian-policy package
|
||||||
|
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
install|upgrade)
|
||||||
|
;;
|
||||||
|
|
||||||
|
abort-upgrade)
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "preinst called with unknown argument \`$1'" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# dh_installdeb will replace this with shell code automatically
|
||||||
|
# generated by other debhelper scripts.
|
||||||
|
|
||||||
|
#DEBHELPER#
|
||||||
|
|
||||||
|
exit 0
|
38
debian/frostfs-http-gw.prerm
vendored
Executable file
38
debian/frostfs-http-gw.prerm
vendored
Executable file
|
@ -0,0 +1,38 @@
|
||||||
|
#!/bin/sh
|
||||||
|
# prerm script for frostfs-http-gw
|
||||||
|
#
|
||||||
|
# see: dh_installdeb(1)
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# summary of how this script can be called:
|
||||||
|
# * <prerm> `remove'
|
||||||
|
# * <old-prerm> `upgrade' <new-version>
|
||||||
|
# * <new-prerm> `failed-upgrade' <old-version>
|
||||||
|
# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
|
||||||
|
# * <deconfigured's-prerm> `deconfigure' `in-favour'
|
||||||
|
# <package-being-installed> <version> `removing'
|
||||||
|
# <conflicting-package> <version>
|
||||||
|
# for details, see https://www.debian.org/doc/debian-policy/ or
|
||||||
|
# the debian-policy package
|
||||||
|
|
||||||
|
|
||||||
|
case "$1" in
|
||||||
|
remove|upgrade|deconfigure)
|
||||||
|
;;
|
||||||
|
|
||||||
|
failed-upgrade)
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "prerm called with unknown argument \`$1'" >&2
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
|
||||||
|
# dh_installdeb will replace this with shell code automatically
|
||||||
|
# generated by other debhelper scripts.
|
||||||
|
|
||||||
|
#DEBHELPER#
|
||||||
|
|
||||||
|
exit 0
|
16
debian/frostfs-http-gw.service
vendored
Normal file
16
debian/frostfs-http-gw.service
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
[Unit]
|
||||||
|
Description=FrostFS HTTP Gateway
|
||||||
|
Requires=network.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml
|
||||||
|
User=frostfs-http
|
||||||
|
Group=frostfs-http
|
||||||
|
WorkingDirectory=/srv/frostfs_cache
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5
|
||||||
|
PrivateTmp=true
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
14
debian/rules
vendored
Executable file
14
debian/rules
vendored
Executable file
|
@ -0,0 +1,14 @@
|
||||||
|
#!/usr/bin/make -f
|
||||||
|
|
||||||
|
# Do not try to strip Go binaries and do not run test
|
||||||
|
export DEB_BUILD_OPTIONS := nostrip nocheck
|
||||||
|
SERVICE = frostfs-http-gw
|
||||||
|
|
||||||
|
%:
|
||||||
|
dh $@
|
||||||
|
|
||||||
|
override_dh_installsystemd:
|
||||||
|
dh_installsystemd --no-enable --no-start $(SERVICE).service
|
||||||
|
|
||||||
|
override_dh_installchangelogs:
|
||||||
|
dh_installchangelogs -k CHANGELOG.md
|
1
debian/source/format
vendored
Normal file
1
debian/source/format
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
3.0 (quilt)
|
316
docs/api.md
Normal file
316
docs/api.md
Normal file
|
@ -0,0 +1,316 @@
|
||||||
|
# HTTP Gateway Specification
|
||||||
|
|
||||||
|
| Route | Description |
|
||||||
|
|-------------------------------------------------|----------------------------------------------|
|
||||||
|
| `/upload/{cid}` | [Put object](#put-object) |
|
||||||
|
| `/get/{cid}/{oid}` | [Get object](#get-object) |
|
||||||
|
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
|
||||||
|
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
|
||||||
|
|
||||||
|
**Note:** `cid` parameter can be base58 encoded container ID or container name
|
||||||
|
(the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).
|
||||||
|
|
||||||
|
Route parameters can be:
|
||||||
|
|
||||||
|
* `Single` - match a single path segment (cannot contain `/` and be empty)
|
||||||
|
* `Catch-All` - match everything (such parameter usually the last one in routes)
|
||||||
|
* `Query` - regular query parameter
|
||||||
|
|
||||||
|
### Bearer token
|
||||||
|
|
||||||
|
All routes can accept [bearer token](../README.md#authentication) from:
|
||||||
|
|
||||||
|
* `Authorization` header with `Bearer` type and base64-encoded token in
|
||||||
|
credentials field
|
||||||
|
* `Bearer` cookie with base64-encoded token contents
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
Header:
|
||||||
|
|
||||||
|
```
|
||||||
|
Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
|
||||||
|
```
|
||||||
|
|
||||||
|
Cookie:
|
||||||
|
|
||||||
|
```
|
||||||
|
cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
|
||||||
|
```
|
||||||
|
|
||||||
|
## Put object
|
||||||
|
|
||||||
|
Route: `/upload/{cid}`
|
||||||
|
|
||||||
|
| Route parameter | Type | Description |
|
||||||
|
|-----------------|--------|---------------------------------------------------------|
|
||||||
|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
||||||
|
|
||||||
|
### Methods
|
||||||
|
|
||||||
|
#### POST
|
||||||
|
|
||||||
|
Upload file as object with attributes to FrostFS.
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
|
||||||
|
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
|
||||||
|
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
|
||||||
|
|
||||||
|
There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):
|
||||||
|
|
||||||
|
1. `X-Attribute-System-Expiration-Epoch: 100`
|
||||||
|
2. `X-Attribute-System-Expiration-Duration: 24h30m`
|
||||||
|
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
|
||||||
|
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
|
||||||
|
|
||||||
|
which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way.
|
||||||
|
|
||||||
|
If you don't specify the `X-Attribute-Timestamp` header the `Timestamp` attribute can be set anyway
|
||||||
|
(see http-gw [configuration](gate-configuration.md#upload-header-section)).
|
||||||
|
|
||||||
|
The `X-Attribute-*` headers must be unique. If you provide several the same headers only one will be used.
|
||||||
|
Attribute key and value must be valid utf8 string. All attributes in sum must not be greater than 3mb.
|
||||||
|
|
||||||
|
###### Body
|
||||||
|
|
||||||
|
Body must contain multipart form with file.
|
||||||
|
The `filename` field from the multipart form will be set as `FileName` attribute of object
|
||||||
|
(can be overridden by `X-Attribute-FileName` header).
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|----------------------------------------------|
|
||||||
|
| 200 | Object created successfully. |
|
||||||
|
| 400 | Some error occurred during object uploading. |
|
||||||
|
|
||||||
|
## Get object
|
||||||
|
|
||||||
|
Route: `/get/{cid}/{oid}?[download=false]`
|
||||||
|
|
||||||
|
| Route parameter | Type | Description |
|
||||||
|
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
|
||||||
|
| `oid` | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. |
|
||||||
|
| `download`      | Query  | Set the `Content-Disposition` header as `attachment` in response.<br/> This makes the browser download the object as a file instead of showing it on the page. |
|
||||||
|
|
||||||
|
### Methods
|
||||||
|
|
||||||
|
#### GET
|
||||||
|
|
||||||
|
Get an object (payload and attributes) by an address.
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|----------------|------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
||||||
|
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
||||||
|
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> Sets `filename` to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
|
||||||
|
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
||||||
|
| `Content-Length` | Size of object payload. |
|
||||||
|
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
||||||
|
| `X-Owner-Id` | Base58 encoded owner ID. |
|
||||||
|
| `X-Container-Id` | Base58 encoded container ID. |
|
||||||
|
| `X-Object-Id` | Base58 encoded object ID. |
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|------------------------------------------------|
|
||||||
|
| 200 | Object got successfully. |
|
||||||
|
| 400 | Some error occurred during object downloading. |
|
||||||
|
| 404 | Container or object not found. |
|
||||||
|
|
||||||
|
###### Body
|
||||||
|
|
||||||
|
Returns object data. If request performed from browser, either displays raw data or downloads it as
|
||||||
|
attachment if `download` query parameter is set to `true`.
|
||||||
|
If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified
|
||||||
|
S3-name was found.
|
||||||
|
|
||||||
|
#### HEAD
|
||||||
|
|
||||||
|
Get an object attributes by an address.
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|----------------|------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
||||||
|
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
||||||
|
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
||||||
|
| `Content-Length` | Size of object payload. |
|
||||||
|
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
||||||
|
| `X-Owner-Id` | Base58 encoded owner ID. |
|
||||||
|
| `X-Container-Id` | Base58 encoded container ID. |
|
||||||
|
| `X-Object-Id` | Base58 encoded object ID. |
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|---------------------------------------------------|
|
||||||
|
| 200    | Object head retrieved successfully.               |
|
||||||
|
| 400 | Some error occurred during object HEAD operation. |
|
||||||
|
| 404 | Container or object not found. |
|
||||||
|
|
||||||
|
## Search object
|
||||||
|
|
||||||
|
Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]`
|
||||||
|
|
||||||
|
| Route parameter | Type | Description |
|
||||||
|
|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
||||||
|
| `attr_key` | Single | Object attribute key to search. |
|
||||||
|
| `attr_val` | Catch-All | Object attribute value to match. |
|
||||||
|
| `download`      | Query     | Set the `Content-Disposition` header as `attachment` in response. This makes the browser download the object as a file instead of showing it on the page. |
|
||||||
|
|
||||||
|
### Methods
|
||||||
|
|
||||||
|
#### GET
|
||||||
|
|
||||||
|
Find and get an object (payload and attributes) by a specific attribute.
|
||||||
|
If more than one object is found, an arbitrary one will be returned.
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|----------------|------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
||||||
|
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
||||||
|
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> Sets `filename` to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
|
||||||
|
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
||||||
|
| `Content-Length` | Size of object payload. |
|
||||||
|
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
||||||
|
| `X-Owner-Id` | Base58 encoded owner ID. |
|
||||||
|
| `X-Container-Id` | Base58 encoded container ID. |
|
||||||
|
| `X-Object-Id` | Base58 encoded object ID. |
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|------------------------------------------------|
|
||||||
|
| 200 | Object got successfully. |
|
||||||
|
| 400 | Some error occurred during object downloading. |
|
||||||
|
| 404 | Container or object not found. |
|
||||||
|
|
||||||
|
#### HEAD
|
||||||
|
|
||||||
|
Get object attributes by a specific attribute.
|
||||||
|
If more than one object is found, an arbitrary one will be used to get attributes.
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|----------------|------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
||||||
|
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
||||||
|
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
||||||
|
| `Content-Length` | Size of object payload. |
|
||||||
|
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
||||||
|
| `X-Owner-Id` | Base58 encoded owner ID. |
|
||||||
|
| `X-Container-Id` | Base58 encoded container ID. |
|
||||||
|
| `X-Object-Id` | Base58 encoded object ID. |
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|---------------------------------------|
|
||||||
|
| 200    | Object head retrieved successfully.   |
|
||||||
|
| 400 | Some error occurred during operation. |
|
||||||
|
| 404 | Container or object not found. |
|
||||||
|
|
||||||
|
## Download zip
|
||||||
|
|
||||||
|
Route: `/zip/{cid}/{prefix}`
|
||||||
|
|
||||||
|
| Route parameter | Type | Description |
|
||||||
|
|-----------------|-----------|---------------------------------------------------------|
|
||||||
|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
||||||
|
| `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. |
|
||||||
|
|
||||||
|
### Methods
|
||||||
|
|
||||||
|
#### GET
|
||||||
|
|
||||||
|
Find objects by prefix for `FilePath` attributes. Return found objects in zip archive.
|
||||||
|
Name of files in archive sets to `FilePath` attribute of objects.
|
||||||
|
Time of files sets to time when object has started downloading.
|
||||||
|
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` route.
|
||||||
|
|
||||||
|
Archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).
|
||||||
|
|
||||||
|
##### Request
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|----------------|------------------------------------|
|
||||||
|
| Common headers | See [bearer token](#bearer-token). |
|
||||||
|
|
||||||
|
##### Response
|
||||||
|
|
||||||
|
###### Headers
|
||||||
|
|
||||||
|
| Header | Description |
|
||||||
|
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `Content-Disposition` | Indicates how browsers should treat the file (`attachment`). Sets `filename` to `archive.zip`.                                    |
|
||||||
|
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
|
||||||
|
|
||||||
|
###### Status codes
|
||||||
|
|
||||||
|
| Status | Description |
|
||||||
|
|--------|-----------------------------------------------------|
|
||||||
|
| 200 | Object got successfully. |
|
||||||
|
| 400 | Some error occurred during object downloading. |
|
||||||
|
| 404 | Container or objects not found. |
|
||||||
|
| 500 | Some inner error (e.g. error on streaming objects). |
|
46
docs/building-deb-package.md
Normal file
46
docs/building-deb-package.md
Normal file
|
@ -0,0 +1,46 @@
|
||||||
|
# Building Debian package on host
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
For now, we're assuming building for Debian 11 (stable) x86_64.
|
||||||
|
|
||||||
|
Go version 1.18.4 or later should already be installed, i.e. this runs
|
||||||
|
successfully:
|
||||||
|
|
||||||
|
* `make all`
|
||||||
|
|
||||||
|
## Installing packaging dependencies
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
|
||||||
|
```
|
||||||
|
|
||||||
|
Warning: the number of packages installed is pretty large considering dependencies.
|
||||||
|
|
||||||
|
## Package building
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make debpackage
|
||||||
|
```
|
||||||
|
|
||||||
|
## Leftovers cleaning
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ make debclean
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```shell
|
||||||
|
$ dh clean
|
||||||
|
```
|
||||||
|
|
||||||
|
# Package versioning
|
||||||
|
|
||||||
|
By default, package version is based on product version and may also contain git
|
||||||
|
tags and hashes.
|
||||||
|
|
||||||
|
Package version could be overwritten by setting `PKG_VERSION` variable before
|
||||||
|
build, Debian package versioning rules should be respected.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ PKG_VERSION=0.32.0 make debpackage
|
||||||
|
```
|
|
@ -1,6 +1,6 @@
|
||||||
# NeoFS HTTP Gateway configuration file
|
# FrostFS HTTP Gateway configuration file
|
||||||
|
|
||||||
This section contains detailed NeoFS HTTP Gateway configuration file description
|
This section contains detailed FrostFS HTTP Gateway configuration file description
|
||||||
including default config values and some tips to set up configurable values.
|
including default config values and some tips to set up configurable values.
|
||||||
|
|
||||||
There are some custom types used for brevity:
|
There are some custom types used for brevity:
|
||||||
|
@ -23,65 +23,69 @@ $ kill -s SIGHUP <app_pid>
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ ./bin/neofs-http-gw --config config.yaml &> http.log &
|
$ ./bin/frostfs-http-gw --config config.yaml &> http.log &
|
||||||
[1] 998346
|
[1] 998346
|
||||||
|
|
||||||
$ cat http.log
|
$ cat http.log
|
||||||
# ...
|
# ...
|
||||||
2022-10-03T09:37:25.826+0300 info neofs-http-gw/app.go:332 starting application {"app_name": "neofs-http-gw", "version": "v0.24.0"}
|
2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"}
|
||||||
# ...
|
# ...
|
||||||
|
|
||||||
$ kill -s SIGHUP 998346
|
$ kill -s SIGHUP 998346
|
||||||
|
|
||||||
$ cat http.log
|
$ cat http.log
|
||||||
# ...
|
# ...
|
||||||
2022-10-03T09:38:16.205+0300 info neofs-http-gw/app.go:470 SIGHUP config reload completed
|
2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed
|
||||||
```
|
```
|
||||||
|
|
||||||
# Structure
|
# Structure
|
||||||
|
|
||||||
| Section | Description |
|
| Section | Description |
|
||||||
|-----------------|-------------------------------------------------------|
|
|------------------|----------------------------------------------------------------|
|
||||||
| no section | [General parameters](#general-section) |
|
| no section | [General parameters](#general-section) |
|
||||||
| `wallet` | [Wallet configuration](#wallet-section) |
|
| `wallet` | [Wallet configuration](#wallet-section) |
|
||||||
| `peers` | [Nodes configuration](#peers-section) |
|
| `peers` | [Nodes configuration](#peers-section) |
|
||||||
| `logger` | [Logger configuration](#logger-section) |
|
| `logger` | [Logger configuration](#logger-section) |
|
||||||
| `web` | [Web configuration](#web-section) |
|
| `web` | [Web configuration](#web-section) |
|
||||||
|
| `server` | [Server configuration](#server-section) |
|
||||||
| `upload-header` | [Upload header configuration](#upload-header-section) |
|
| `upload-header` | [Upload header configuration](#upload-header-section) |
|
||||||
| `zip` | [ZIP configuration](#zip-section) |
|
| `zip` | [ZIP configuration](#zip-section) |
|
||||||
| `pprof` | [Pprof configuration](#pprof-section) |
|
| `pprof` | [Pprof configuration](#pprof-section) |
|
||||||
| `prometheus` | [Prometheus configuration](#prometheus-section) |
|
| `prometheus` | [Prometheus configuration](#prometheus-section) |
|
||||||
|
| `tracing` | [Tracing configuration](#tracing-section) |
|
||||||
|
| `runtime` | [Runtime configuration](#runtime-section) |
|
||||||
|
| `frostfs` | [Frostfs configuration](#frostfs-section) |
|
||||||
|
| `cache` | [Cache configuration](#cache-section) |
|
||||||
|
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
|
||||||
|
| `index_page` | [Index page configuration](#index_page-section) |
|
||||||
|
|
||||||
|
|
||||||
# General section
|
# General section
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
listen_address: 0.0.0.0:8082
|
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
||||||
tls_certificate: /path/to/tls/cert
|
|
||||||
tls_key: /path/to/tls/key
|
|
||||||
|
|
||||||
rpc_endpoint: http://morph-chain.neofs.devenv:30333
|
|
||||||
resolve_order:
|
resolve_order:
|
||||||
- nns
|
- nns
|
||||||
- dns
|
- dns
|
||||||
|
|
||||||
connect_timeout: 5s
|
connect_timeout: 5s
|
||||||
|
stream_timeout: 10s
|
||||||
request_timeout: 5s
|
request_timeout: 5s
|
||||||
rebalance_timer: 30s
|
rebalance_timer: 30s
|
||||||
pool_error_threshold: 100
|
pool_error_threshold: 100
|
||||||
|
reconnect_interval: 1m
|
||||||
```
|
```
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
|
|------------------------|------------|---------------|---------------|-------------------------------------------------------------------------------------------------|
|
||||||
| `listen_address` | `string` | | `0.0.0.0:8082` | The address that the gateway is listening on. |
|
|
||||||
| `tls_certificate` | `string` | yes | | Path to the TLS certificate. |
|
|
||||||
| `tls_key` | `string` | yes | | Path to the TLS key. |
|
|
||||||
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
|
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
|
||||||
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
|
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
|
||||||
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
|
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
|
||||||
|
| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
|
||||||
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
|
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
|
||||||
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
|
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
|
||||||
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
||||||
|
| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
|
||||||
|
|
||||||
# `wallet` section
|
# `wallet` section
|
||||||
|
|
||||||
|
@ -102,23 +106,23 @@ wallet:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# Nodes configuration
|
# Nodes configuration
|
||||||
# This configuration makes the gateway use the first node (node1.neofs:8080)
|
# This configuration makes the gateway use the first node (node1.frostfs:8080)
|
||||||
# while it's healthy. Otherwise, gateway uses the second node (node2.neofs:8080)
|
# while it's healthy. Otherwise, gateway uses the second node (node2.frostfs:8080)
|
||||||
# for 10% of requests and the third node (node3.neofs:8080) for 90% of requests.
|
# for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests.
|
||||||
# Until nodes with the same priority level are healthy
|
# Until nodes with the same priority level are healthy
|
||||||
# nodes with other priority are not used.
|
# nodes with other priority are not used.
|
||||||
# The lower the value, the higher the priority.
|
# The lower the value, the higher the priority.
|
||||||
peers:
|
peers:
|
||||||
0:
|
0:
|
||||||
address: node1.neofs:8080
|
address: node1.frostfs:8080
|
||||||
priority: 1
|
priority: 1
|
||||||
weight: 1
|
weight: 1
|
||||||
1:
|
1:
|
||||||
address: node2.neofs:8080
|
address: node2.frostfs:8080
|
||||||
priority: 2
|
priority: 2
|
||||||
weight: 0.1
|
weight: 0.1
|
||||||
2:
|
2:
|
||||||
address: node3.neofs:8080
|
address: node3.frostfs:8080
|
||||||
priority: 2
|
priority: 2
|
||||||
weight: 0.9
|
weight: 0.9
|
||||||
```
|
```
|
||||||
|
@ -129,17 +133,53 @@ peers:
|
||||||
| `priority` | `int` | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. |
|
| `priority` | `int` | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. |
|
||||||
| `weight` | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values. |
|
| `weight` | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values. |
|
||||||
|
|
||||||
|
# `server` section
|
||||||
|
|
||||||
|
You can specify several listeners for server. For example, for `http` and `https`.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
server:
|
||||||
|
- address: 0.0.0.0:8080
|
||||||
|
tls:
|
||||||
|
enabled: false
|
||||||
|
cert_file: /path/to/cert
|
||||||
|
key_file: /path/to/key
|
||||||
|
- address: 0.0.0.0:8081
|
||||||
|
tls:
|
||||||
|
enabled: true
|
||||||
|
cert_file: /path/to/another/cert
|
||||||
|
key_file: /path/to/another/key
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|-----------------|----------|---------------|----------------|-----------------------------------------------|
|
||||||
|
| `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. |
|
||||||
|
| `tls.enabled` | `bool` | | false | Enable TLS or not. |
|
||||||
|
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
|
||||||
|
| `tls.key_file` | `string` | yes | | Path to the key. |
|
||||||
|
|
||||||
|
|
||||||
# `logger` section
|
# `logger` section
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
logger:
|
logger:
|
||||||
level: debug
|
level: debug
|
||||||
|
destination: stdout
|
||||||
|
sampling:
|
||||||
|
enabled: false
|
||||||
|
initial: 100
|
||||||
|
thereafter: 100
|
||||||
|
interval: 1s
|
||||||
```
|
```
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|-----------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
||||||
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
||||||
|
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
|
||||||
|
| `sampling.enabled` | `bool` | no | false | Sampling enabling flag. |
|
||||||
|
| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. |
|
||||||
|
| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. |
|
||||||
|
| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. |
|
||||||
|
|
||||||
# `web` section
|
# `web` section
|
||||||
|
|
||||||
|
@ -216,3 +256,110 @@ prometheus:
|
||||||
|-----------|----------|---------------|------------------|-----------------------------------------|
|
|-----------|----------|---------------|------------------|-----------------------------------------|
|
||||||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
||||||
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
|
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
|
||||||
|
|
||||||
|
# `tracing` section
|
||||||
|
|
||||||
|
Contains configuration for the `tracing` service.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tracing:
|
||||||
|
enabled: true
|
||||||
|
exporter: "otlp_grpc"
|
||||||
|
endpoint: "localhost:4317"
|
||||||
|
trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem"
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|--------------|----------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `enabled` | `bool` | no | `false` | Flag to enable the tracing. |
|
||||||
|
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
|
||||||
|
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
|
||||||
|
| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
|
||||||
|
|
||||||
|
# `runtime` section
|
||||||
|
Contains runtime parameters.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
runtime:
|
||||||
|
soft_memory_limit: 1gb
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
|
||||||
|
|
||||||
|
# `frostfs` section
|
||||||
|
|
||||||
|
Contains parameters of requests to FrostFS.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
frostfs:
|
||||||
|
client_cut: false
|
||||||
|
buffer_max_size_for_put: 1048576 # 1mb
|
||||||
|
tree_pool_max_attempts: 0
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|---------------------------|----------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
|
||||||
|
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
|
||||||
|
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
|
||||||
|
|
||||||
|
|
||||||
|
### `cache` section
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
cache:
|
||||||
|
buckets:
|
||||||
|
lifetime: 1m
|
||||||
|
size: 1000
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | Default value | Description |
|
||||||
|
|-----------------|-----------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
|
||||||
|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
|
||||||
|
|
||||||
|
|
||||||
|
#### `cache` subsection
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
lifetime: 1m
|
||||||
|
size: 1000
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | Default value | Description |
|
||||||
|
|------------|------------|------------------|-------------------------------|
|
||||||
|
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
|
||||||
|
| `size` | `int` | depends on cache | LRU cache size. |
|
||||||
|
|
||||||
|
|
||||||
|
# `resolve_bucket` section
|
||||||
|
|
||||||
|
Bucket name resolving parameters from and to container ID.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
resolve_bucket:
|
||||||
|
namespace_header: X-Frostfs-Namespace
|
||||||
|
default_namespaces: [ "", "root" ]
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
|
||||||
|
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
|
||||||
|
|
||||||
|
# `index_page` section
|
||||||
|
|
||||||
|
Parameters for index HTML-page output with S3-bucket or S3-subdir content for `Get object` request
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
index_page:
|
||||||
|
enabled: false
|
||||||
|
template_path: ""
|
||||||
|
```
|
||||||
|
|
||||||
|
| Parameter | Type | SIGHUP reload | Default value | Description |
|
||||||
|
|-----------------|----------|---------------|---------------|---------------------------------------------------------------------------------|
|
||||||
|
| `enabled` | `bool` | yes | `false` | Flag to enable index_page return if no object with specified S3-name was found. |
|
||||||
|
| `template_path` | `string` | yes | `""` | Path to .gotmpl file with html template for index_page. |
|
||||||
|
|
|
@ -1,520 +0,0 @@
|
||||||
package downloader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/resolver"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/response"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/tokens"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/client"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/atomic"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type request struct {
|
|
||||||
*fasthttp.RequestCtx
|
|
||||||
appCtx context.Context
|
|
||||||
log *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
const attributeFilePath = "FilePath"
|
|
||||||
|
|
||||||
func isValidToken(s string) bool {
|
|
||||||
for _, c := range s {
|
|
||||||
if c <= ' ' || c > 127 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValidValue(s string) bool {
|
|
||||||
for _, c := range s {
|
|
||||||
// HTTP specification allows for more technically, but we don't want to escape things.
|
|
||||||
if c < ' ' || c > 127 || c == '"' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type readCloser struct {
|
|
||||||
io.Reader
|
|
||||||
io.Closer
|
|
||||||
}
|
|
||||||
|
|
||||||
// initializes io.Reader with the limited size and detects Content-Type from it.
|
|
||||||
// Returns r's error directly. Also returns the processed data.
|
|
||||||
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
|
|
||||||
if maxSize > sizeToDetectType {
|
|
||||||
maxSize = sizeToDetectType
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, maxSize) // maybe sync-pool the slice?
|
|
||||||
|
|
||||||
r, err := rInit(maxSize)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := r.Read(buf)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = buf[:n]
|
|
||||||
|
|
||||||
return http.DetectContentType(buf), buf, err // to not lose io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r request) receiveFile(clnt *pool.Pool, objectAddress oid.Address) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
dis = "inline"
|
|
||||||
start = time.Now()
|
|
||||||
filename string
|
|
||||||
)
|
|
||||||
if err = tokens.StoreBearerToken(r.RequestCtx); err != nil {
|
|
||||||
r.log.Error("could not fetch and store bearer token", zap.Error(err))
|
|
||||||
response.Error(r.RequestCtx, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var prm pool.PrmObjectGet
|
|
||||||
prm.SetAddress(objectAddress)
|
|
||||||
if btoken := bearerToken(r.RequestCtx); btoken != nil {
|
|
||||||
prm.UseBearer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
rObj, err := clnt.GetObject(r.appCtx, prm)
|
|
||||||
if err != nil {
|
|
||||||
r.handleNeoFSErr(err, start)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// we can't close reader in this function, so how to do it?
|
|
||||||
|
|
||||||
if r.Request.URI().QueryArgs().GetBool("download") {
|
|
||||||
dis = "attachment"
|
|
||||||
}
|
|
||||||
|
|
||||||
payloadSize := rObj.Header.PayloadSize()
|
|
||||||
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
|
|
||||||
var contentType string
|
|
||||||
for _, attr := range rObj.Header.Attributes() {
|
|
||||||
key := attr.Key()
|
|
||||||
val := attr.Value()
|
|
||||||
if !isValidToken(key) || !isValidValue(val) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(key, utils.SystemAttributePrefix) {
|
|
||||||
key = systemBackwardTranslator(key)
|
|
||||||
}
|
|
||||||
r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
|
|
||||||
switch key {
|
|
||||||
case object.AttributeFileName:
|
|
||||||
filename = val
|
|
||||||
case object.AttributeTimestamp:
|
|
||||||
value, err := strconv.ParseInt(val, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
r.log.Info("couldn't parse creation date",
|
|
||||||
zap.String("key", key),
|
|
||||||
zap.String("val", val),
|
|
||||||
zap.Error(err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderLastModified,
|
|
||||||
time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
|
||||||
case object.AttributeContentType:
|
|
||||||
contentType = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
idsToResponse(&r.Response, &rObj.Header)
|
|
||||||
|
|
||||||
if len(contentType) == 0 {
|
|
||||||
// determine the Content-Type from the payload head
|
|
||||||
var payloadHead []byte
|
|
||||||
|
|
||||||
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
|
|
||||||
return rObj.Payload, nil
|
|
||||||
})
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
r.log.Error("could not detect Content-Type from payload", zap.Error(err))
|
|
||||||
response.Error(r.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset payload reader since a part of the data has been read
|
|
||||||
var headReader io.Reader = bytes.NewReader(payloadHead)
|
|
||||||
|
|
||||||
if err != io.EOF { // otherwise, we've already read full payload
|
|
||||||
headReader = io.MultiReader(headReader, rObj.Payload)
|
|
||||||
}
|
|
||||||
|
|
||||||
// note: we could do with io.Reader, but SetBodyStream below closes body stream
|
|
||||||
// if it implements io.Closer and that's useful for us.
|
|
||||||
rObj.Payload = readCloser{headReader, rObj.Payload}
|
|
||||||
}
|
|
||||||
r.SetContentType(contentType)
|
|
||||||
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
|
|
||||||
|
|
||||||
r.Response.SetBodyStream(rObj.Payload, int(payloadSize))
|
|
||||||
}
|
|
||||||
|
|
||||||
// systemBackwardTranslator is used to convert headers looking like '__NEOFS__ATTR_NAME' to 'Neofs-Attr-Name'.
|
|
||||||
func systemBackwardTranslator(key string) string {
|
|
||||||
// trim specified prefix '__NEOFS__'
|
|
||||||
key = strings.TrimPrefix(key, utils.SystemAttributePrefix)
|
|
||||||
|
|
||||||
var res strings.Builder
|
|
||||||
res.WriteString("Neofs-")
|
|
||||||
|
|
||||||
strs := strings.Split(key, "_")
|
|
||||||
for i, s := range strs {
|
|
||||||
s = title(strings.ToLower(s))
|
|
||||||
res.WriteString(s)
|
|
||||||
if i != len(strs)-1 {
|
|
||||||
res.WriteString("-")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func title(str string) string {
|
|
||||||
if str == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
r, size := utf8.DecodeRuneInString(str)
|
|
||||||
r0 := unicode.ToTitle(r)
|
|
||||||
return string(r0) + str[size:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func bearerToken(ctx context.Context) *bearer.Token {
|
|
||||||
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
|
|
||||||
return tkn
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *request) handleNeoFSErr(err error, start time.Time) {
|
|
||||||
r.log.Error(
|
|
||||||
"could not receive object",
|
|
||||||
zap.Stringer("elapsed", time.Since(start)),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
|
|
||||||
if client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err) {
|
|
||||||
response.Error(r.RequestCtx, "Not Found", fasthttp.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := fmt.Sprintf("could not receive object: %v", err)
|
|
||||||
response.Error(r.RequestCtx, msg, fasthttp.StatusBadRequest)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Downloader is a download request handler.
|
|
||||||
type Downloader struct {
|
|
||||||
appCtx context.Context
|
|
||||||
log *zap.Logger
|
|
||||||
pool *pool.Pool
|
|
||||||
containerResolver *resolver.ContainerResolver
|
|
||||||
settings *Settings
|
|
||||||
}
|
|
||||||
|
|
||||||
// Settings stores reloading parameters, so it has to provide atomic getters and setters.
|
|
||||||
type Settings struct {
|
|
||||||
zipCompression atomic.Bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Settings) ZipCompression() bool {
|
|
||||||
return s.zipCompression.Load()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Settings) SetZipCompression(val bool) {
|
|
||||||
s.zipCompression.Store(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates an instance of Downloader using specified options.
|
|
||||||
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Downloader {
|
|
||||||
return &Downloader{
|
|
||||||
appCtx: ctx,
|
|
||||||
log: params.Logger,
|
|
||||||
pool: params.Pool,
|
|
||||||
settings: settings,
|
|
||||||
containerResolver: params.Resolver,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
|
|
||||||
return &request{
|
|
||||||
RequestCtx: ctx,
|
|
||||||
appCtx: d.appCtx,
|
|
||||||
log: log,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadByAddress handles download requests using simple cid/oid format.
|
|
||||||
func (d *Downloader) DownloadByAddress(c *fasthttp.RequestCtx) {
|
|
||||||
d.byAddress(c, request.receiveFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
|
||||||
// prepares request and object address to it.
|
|
||||||
func (d *Downloader) byAddress(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
|
|
||||||
var (
|
|
||||||
idCnr, _ = c.UserValue("cid").(string)
|
|
||||||
idObj, _ = c.UserValue("oid").(string)
|
|
||||||
log = d.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
|
|
||||||
)
|
|
||||||
|
|
||||||
cnrID, err := utils.GetContainerID(d.appCtx, idCnr, d.containerResolver)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("wrong container id", zap.Error(err))
|
|
||||||
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
objID := new(oid.ID)
|
|
||||||
if err = objID.DecodeString(idObj); err != nil {
|
|
||||||
log.Error("wrong object id", zap.Error(err))
|
|
||||||
response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var addr oid.Address
|
|
||||||
addr.SetContainer(*cnrID)
|
|
||||||
addr.SetObject(*objID)
|
|
||||||
|
|
||||||
f(*d.newRequest(c, log), d.pool, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadByAttribute handles attribute-based download requests.
|
|
||||||
func (d *Downloader) DownloadByAttribute(c *fasthttp.RequestCtx) {
|
|
||||||
d.byAttribute(c, request.receiveFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byAttribute is a wrapper similar to byAddress.
|
|
||||||
func (d *Downloader) byAttribute(c *fasthttp.RequestCtx, f func(request, *pool.Pool, oid.Address)) {
|
|
||||||
var (
|
|
||||||
scid, _ = c.UserValue("cid").(string)
|
|
||||||
key, _ = url.QueryUnescape(c.UserValue("attr_key").(string))
|
|
||||||
val, _ = url.QueryUnescape(c.UserValue("attr_val").(string))
|
|
||||||
log = d.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
|
|
||||||
)
|
|
||||||
|
|
||||||
containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("wrong container id", zap.Error(err))
|
|
||||||
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := d.search(c, containerID, key, val, object.MatchStringEqual)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("could not search for objects", zap.Error(err))
|
|
||||||
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
defer res.Close()
|
|
||||||
|
|
||||||
buf := make([]oid.ID, 1)
|
|
||||||
|
|
||||||
n, err := res.Read(buf)
|
|
||||||
if n == 0 {
|
|
||||||
if errors.Is(err, io.EOF) {
|
|
||||||
log.Error("object not found", zap.Error(err))
|
|
||||||
response.Error(c, "object not found", fasthttp.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Error("read object list failed", zap.Error(err))
|
|
||||||
response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var addrObj oid.Address
|
|
||||||
addrObj.SetContainer(*containerID)
|
|
||||||
addrObj.SetObject(buf[0])
|
|
||||||
|
|
||||||
f(*d.newRequest(c, log), d.pool, addrObj)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) search(c *fasthttp.RequestCtx, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
|
|
||||||
filters := object.NewSearchFilters()
|
|
||||||
filters.AddRootFilter()
|
|
||||||
filters.AddFilter(key, val, op)
|
|
||||||
|
|
||||||
var prm pool.PrmObjectSearch
|
|
||||||
prm.SetContainerID(*cid)
|
|
||||||
prm.SetFilters(filters)
|
|
||||||
if btoken := bearerToken(c); btoken != nil {
|
|
||||||
prm.UseBearer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.pool.SearchObjects(d.appCtx, prm)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
|
|
||||||
method := zip.Store
|
|
||||||
if d.settings.ZipCompression() {
|
|
||||||
method = zip.Deflate
|
|
||||||
}
|
|
||||||
|
|
||||||
return zw.CreateHeader(&zip.FileHeader{
|
|
||||||
Name: getZipFilePath(obj),
|
|
||||||
Method: method,
|
|
||||||
Modified: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadZipped handles zip by prefix requests.
|
|
||||||
func (d *Downloader) DownloadZipped(c *fasthttp.RequestCtx) {
|
|
||||||
scid, _ := c.UserValue("cid").(string)
|
|
||||||
prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
|
|
||||||
log := d.log.With(zap.String("cid", scid), zap.String("prefix", prefix))
|
|
||||||
|
|
||||||
containerID, err := utils.GetContainerID(d.appCtx, scid, d.containerResolver)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("wrong container id", zap.Error(err))
|
|
||||||
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = tokens.StoreBearerToken(c); err != nil {
|
|
||||||
log.Error("could not fetch and store bearer token", zap.Error(err))
|
|
||||||
response.Error(c, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resSearch, err := d.search(c, containerID, attributeFilePath, prefix, object.MatchCommonPrefix)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("could not search for objects", zap.Error(err))
|
|
||||||
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
|
|
||||||
c.Response.SetStatusCode(http.StatusOK)
|
|
||||||
|
|
||||||
c.SetBodyStreamWriter(func(w *bufio.Writer) {
|
|
||||||
defer resSearch.Close()
|
|
||||||
|
|
||||||
zipWriter := zip.NewWriter(w)
|
|
||||||
|
|
||||||
var bufZip []byte
|
|
||||||
var addr oid.Address
|
|
||||||
|
|
||||||
empty := true
|
|
||||||
called := false
|
|
||||||
btoken := bearerToken(c)
|
|
||||||
addr.SetContainer(*containerID)
|
|
||||||
|
|
||||||
errIter := resSearch.Iterate(func(id oid.ID) bool {
|
|
||||||
called = true
|
|
||||||
|
|
||||||
if empty {
|
|
||||||
bufZip = make([]byte, 3<<20) // the same as for upload
|
|
||||||
}
|
|
||||||
empty = false
|
|
||||||
|
|
||||||
addr.SetObject(id)
|
|
||||||
if err = d.zipObject(zipWriter, addr, btoken, bufZip); err != nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
if errIter != nil {
|
|
||||||
log.Error("iterating over selected objects failed", zap.Error(errIter))
|
|
||||||
response.Error(c, "iterating over selected objects: "+errIter.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
} else if !called {
|
|
||||||
log.Error("objects not found")
|
|
||||||
response.Error(c, "objects not found", fasthttp.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
err = zipWriter.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Error("file streaming failure", zap.Error(err))
|
|
||||||
response.Error(c, "file streaming failure: "+err.Error(), fasthttp.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Downloader) zipObject(zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
|
|
||||||
var prm pool.PrmObjectGet
|
|
||||||
prm.SetAddress(addr)
|
|
||||||
if btoken != nil {
|
|
||||||
prm.UseBearer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
resGet, err := d.pool.GetObject(d.appCtx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("get NeoFS object: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
objWriter, err := d.addObjectToZip(zipWriter, &resGet.Header)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("zip create header: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
|
|
||||||
return fmt.Errorf("copy object payload to zip file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = resGet.Payload.Close(); err != nil {
|
|
||||||
return fmt.Errorf("object body close error: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = zipWriter.Flush(); err != nil {
|
|
||||||
return fmt.Errorf("flush zip writer: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getZipFilePath(obj *object.Object) string {
|
|
||||||
for _, attr := range obj.Attributes() {
|
|
||||||
if attr.Key() == attributeFilePath {
|
|
||||||
return attr.Value()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
|
@ -1,23 +0,0 @@
|
||||||
package downloader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSystemBackwardTranslator(t *testing.T) {
|
|
||||||
input := []string{
|
|
||||||
"__NEOFS__EXPIRATION_EPOCH",
|
|
||||||
"__NEOFS__RANDOM_ATTR",
|
|
||||||
}
|
|
||||||
expected := []string{
|
|
||||||
"Neofs-Expiration-Epoch",
|
|
||||||
"Neofs-Random-Attr",
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, str := range input {
|
|
||||||
res := systemBackwardTranslator(str)
|
|
||||||
require.Equal(t, expected[i], res)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,120 +0,0 @@
|
||||||
package downloader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/response"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/tokens"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// max bytes needed to detect content type according to http.DetectContentType docs.
|
|
||||||
const sizeToDetectType = 512
|
|
||||||
|
|
||||||
const (
|
|
||||||
hdrObjectID = "X-Object-Id"
|
|
||||||
hdrOwnerID = "X-Owner-Id"
|
|
||||||
hdrContainerID = "X-Container-Id"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r request) headObject(clnt *pool.Pool, objectAddress oid.Address) {
|
|
||||||
var start = time.Now()
|
|
||||||
if err := tokens.StoreBearerToken(r.RequestCtx); err != nil {
|
|
||||||
r.log.Error("could not fetch and store bearer token", zap.Error(err))
|
|
||||||
response.Error(r.RequestCtx, "could not fetch and store bearer token", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
btoken := bearerToken(r.RequestCtx)
|
|
||||||
|
|
||||||
var prm pool.PrmObjectHead
|
|
||||||
prm.SetAddress(objectAddress)
|
|
||||||
if btoken != nil {
|
|
||||||
prm.UseBearer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
obj, err := clnt.HeadObject(r.appCtx, prm)
|
|
||||||
if err != nil {
|
|
||||||
r.handleNeoFSErr(err, start)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
|
|
||||||
var contentType string
|
|
||||||
for _, attr := range obj.Attributes() {
|
|
||||||
key := attr.Key()
|
|
||||||
val := attr.Value()
|
|
||||||
if !isValidToken(key) || !isValidValue(val) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(key, utils.SystemAttributePrefix) {
|
|
||||||
key = systemBackwardTranslator(key)
|
|
||||||
}
|
|
||||||
r.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
|
|
||||||
switch key {
|
|
||||||
case object.AttributeTimestamp:
|
|
||||||
value, err := strconv.ParseInt(val, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
r.log.Info("couldn't parse creation date",
|
|
||||||
zap.String("key", key),
|
|
||||||
zap.String("val", val),
|
|
||||||
zap.Error(err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
|
||||||
case object.AttributeContentType:
|
|
||||||
contentType = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
idsToResponse(&r.Response, &obj)
|
|
||||||
|
|
||||||
if len(contentType) == 0 {
|
|
||||||
contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
|
|
||||||
var prmRange pool.PrmObjectRange
|
|
||||||
prmRange.SetAddress(objectAddress)
|
|
||||||
prmRange.SetLength(sz)
|
|
||||||
if btoken != nil {
|
|
||||||
prmRange.UseBearer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
resObj, err := clnt.ObjectRange(r.appCtx, prmRange)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &resObj, nil
|
|
||||||
})
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
r.handleNeoFSErr(err, start)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r.SetContentType(contentType)
|
|
||||||
}
|
|
||||||
|
|
||||||
func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
|
|
||||||
objID, _ := obj.ID()
|
|
||||||
cnrID, _ := obj.ContainerID()
|
|
||||||
resp.Header.Set(hdrObjectID, objID.String())
|
|
||||||
resp.Header.Set(hdrOwnerID, obj.OwnerID().String())
|
|
||||||
resp.Header.Set(hdrContainerID, cnrID.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadByAddress handles head requests using simple cid/oid format.
|
|
||||||
func (d *Downloader) HeadByAddress(c *fasthttp.RequestCtx) {
|
|
||||||
d.byAddress(c, request.headObject)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadByAttribute handles attribute-based head requests.
|
|
||||||
func (d *Downloader) HeadByAttribute(c *fasthttp.RequestCtx) {
|
|
||||||
d.byAttribute(c, request.headObject)
|
|
||||||
}
|
|
129
go.mod
129
go.mod
|
@ -1,99 +1,118 @@
|
||||||
module github.com/nspcc-dev/neofs-http-gw
|
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
||||||
|
|
||||||
go 1.17
|
go 1.22
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240916093537-13fa0da3741e
|
||||||
|
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20240909114314-666d326cc573
|
||||||
|
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240918095938-e580ee991d98
|
||||||
|
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
||||||
|
github.com/bluele/gcache v0.0.2
|
||||||
|
github.com/docker/go-units v0.4.0
|
||||||
github.com/fasthttp/router v1.4.1
|
github.com/fasthttp/router v1.4.1
|
||||||
github.com/nspcc-dev/neo-go v0.99.2
|
github.com/nspcc-dev/neo-go v0.106.2
|
||||||
github.com/nspcc-dev/neofs-api-go/v2 v2.13.2-0.20221005093543-3a91383f24a9
|
github.com/prometheus/client_golang v1.19.0
|
||||||
github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.6.0.20221007102402-8c682641bfd2
|
github.com/prometheus/client_model v0.5.0
|
||||||
github.com/prometheus/client_golang v1.13.0
|
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/spf13/viper v1.8.1
|
github.com/spf13/viper v1.15.0
|
||||||
github.com/stretchr/testify v1.7.0
|
github.com/ssgreg/journald v1.0.0
|
||||||
|
github.com/stretchr/testify v1.9.0
|
||||||
github.com/testcontainers/testcontainers-go v0.13.0
|
github.com/testcontainers/testcontainers-go v0.13.0
|
||||||
|
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
|
||||||
github.com/valyala/fasthttp v1.34.0
|
github.com/valyala/fasthttp v1.34.0
|
||||||
go.uber.org/atomic v1.9.0
|
go.opentelemetry.io/otel v1.28.0
|
||||||
go.uber.org/zap v1.18.1
|
go.opentelemetry.io/otel/trace v1.28.0
|
||||||
|
go.uber.org/zap v1.27.0
|
||||||
|
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
|
||||||
|
golang.org/x/net v0.26.0
|
||||||
|
google.golang.org/grpc v1.66.2
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
|
||||||
|
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
||||||
|
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
||||||
|
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
||||||
|
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.9.2 // indirect
|
github.com/Microsoft/hcsshim v0.9.2 // indirect
|
||||||
|
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
github.com/andybalholm/brotli v1.0.4 // indirect
|
||||||
github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
|
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/btcsuite/btcd v0.22.0-beta // indirect
|
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||||
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
|
||||||
github.com/containerd/cgroups v1.0.3 // indirect
|
github.com/containerd/cgroups v1.0.3 // indirect
|
||||||
github.com/containerd/containerd v1.6.2 // indirect
|
github.com/containerd/containerd v1.6.2 // indirect
|
||||||
github.com/coreos/go-semver v0.3.0 // indirect
|
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
|
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||||
github.com/docker/docker v20.10.14+incompatible // indirect
|
github.com/docker/docker v20.10.14+incompatible // indirect
|
||||||
github.com/docker/go-connections v0.4.0 // indirect
|
github.com/docker/go-connections v0.4.0 // indirect
|
||||||
github.com/docker/go-units v0.4.0 // indirect
|
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
github.com/go-logr/logr v1.4.2 // indirect
|
||||||
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/gorilla/mux v1.8.0 // indirect
|
github.com/gorilla/mux v1.8.0 // indirect
|
||||||
github.com/gorilla/websocket v1.4.2 // indirect
|
github.com/gorilla/websocket v1.5.1 // indirect
|
||||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||||
github.com/klauspost/compress v1.15.0 // indirect
|
github.com/josharian/intern v1.0.0 // indirect
|
||||||
github.com/magiconair/properties v1.8.6 // indirect
|
github.com/klauspost/compress v1.16.4 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
github.com/magiconair/properties v1.8.7 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
github.com/mailru/easyjson v0.7.7 // indirect
|
||||||
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
github.com/moby/sys/mount v0.3.2 // indirect
|
github.com/moby/sys/mount v0.3.2 // indirect
|
||||||
github.com/moby/sys/mountinfo v0.6.1 // indirect
|
github.com/moby/sys/mountinfo v0.6.1 // indirect
|
||||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||||
github.com/morikuni/aec v1.0.0 // indirect
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
github.com/mr-tron/base58 v1.2.0 // indirect
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
|
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
|
||||||
github.com/nspcc-dev/hrw v1.0.9 // indirect
|
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220809123759-3094d3e0c14b // indirect
|
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
|
||||||
github.com/nspcc-dev/neofs-contract v0.15.3 // indirect
|
|
||||||
github.com/nspcc-dev/neofs-crypto v0.4.0 // indirect
|
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
|
|
||||||
github.com/nspcc-dev/tzhash v1.6.1 // indirect
|
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||||
github.com/opencontainers/runc v1.1.1 // indirect
|
github.com/opencontainers/runc v1.1.1 // indirect
|
||||||
github.com/pelletier/go-toml v1.9.3 // indirect
|
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
github.com/prometheus/common v0.48.0 // indirect
|
||||||
github.com/prometheus/common v0.37.0 // indirect
|
github.com/prometheus/procfs v0.12.0 // indirect
|
||||||
github.com/prometheus/procfs v0.8.0 // indirect
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||||
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
|
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
|
||||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
github.com/spf13/afero v1.9.3 // indirect
|
||||||
github.com/spf13/afero v1.6.0 // indirect
|
github.com/spf13/cast v1.5.0 // indirect
|
||||||
github.com/spf13/cast v1.3.1 // indirect
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||||
github.com/subosito/gotenv v1.2.0 // indirect
|
github.com/subosito/gotenv v1.4.2 // indirect
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||||
|
github.com/twmb/murmur3 v1.1.8 // indirect
|
||||||
github.com/urfave/cli v1.22.5 // indirect
|
github.com/urfave/cli v1.22.5 // indirect
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||||
go.opencensus.io v0.23.0 // indirect
|
go.etcd.io/bbolt v1.3.9 // indirect
|
||||||
go.uber.org/multierr v1.7.0 // indirect
|
go.opencensus.io v0.24.0 // indirect
|
||||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
||||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
|
||||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
|
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
|
||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
go.opentelemetry.io/otel/metric v1.28.0 // indirect
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4 // indirect
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
google.golang.org/grpc v1.48.0 // indirect
|
golang.org/x/crypto v0.24.0 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
golang.org/x/sync v0.7.0 // indirect
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
golang.org/x/sys v0.22.0 // indirect
|
||||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
golang.org/x/term v0.21.0 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
golang.org/x/text v0.16.0 // indirect
|
||||||
|
golang.org/x/time v0.3.0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
|
||||||
|
google.golang.org/protobuf v1.34.2 // indirect
|
||||||
|
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
|
22
internal/api/layer/tree_service.go
Normal file
22
internal/api/layer/tree_service.go
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
package layer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TreeService provide interface to interact with tree service using s3 data models.
|
||||||
|
type TreeService interface {
|
||||||
|
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNodeNotFound is returned from Tree service in case of not found error.
|
||||||
|
ErrNodeNotFound = errors.New("not found")
|
||||||
|
|
||||||
|
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
|
||||||
|
ErrNodeAccessDenied = errors.New("access denied")
|
||||||
|
)
|
18
internal/api/tree.go
Normal file
18
internal/api/tree.go
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NodeVersion represent node from tree service.
|
||||||
|
type NodeVersion struct {
|
||||||
|
BaseNodeVersion
|
||||||
|
DeleteMarker bool
|
||||||
|
IsPrefixNode bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// BaseNodeVersion is minimal node info from tree service.
|
||||||
|
// Basically used for "system" object.
|
||||||
|
type BaseNodeVersion struct {
|
||||||
|
OID oid.ID
|
||||||
|
}
|
72
internal/cache/buckets.go
vendored
Normal file
72
internal/cache/buckets.go
vendored
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
package cache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"github.com/bluele/gcache"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BucketCache contains cache with objects and the lifetime of cache entries.
|
||||||
|
type BucketCache struct {
|
||||||
|
cache gcache.Cache
|
||||||
|
logger *zap.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config stores expiration params for cache.
|
||||||
|
type Config struct {
|
||||||
|
Size int
|
||||||
|
Lifetime time.Duration
|
||||||
|
Logger *zap.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DefaultBucketCacheSize is a default maximum number of entries in cache.
|
||||||
|
DefaultBucketCacheSize = 1e3
|
||||||
|
// DefaultBucketCacheLifetime is a default lifetime of entries in cache.
|
||||||
|
DefaultBucketCacheLifetime = time.Minute
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultBucketConfig returns new default cache expiration values.
|
||||||
|
func DefaultBucketConfig(logger *zap.Logger) *Config {
|
||||||
|
return &Config{
|
||||||
|
Size: DefaultBucketCacheSize,
|
||||||
|
Lifetime: DefaultBucketCacheLifetime,
|
||||||
|
Logger: logger,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBucketCache creates an object of BucketCache.
|
||||||
|
func NewBucketCache(config *Config) *BucketCache {
|
||||||
|
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
||||||
|
return &BucketCache{cache: gc, logger: config.Logger}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a cached object.
|
||||||
|
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
||||||
|
entry, err := o.cache.Get(formKey(ns, bktName))
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
result, ok := entry.(*data.BucketInfo)
|
||||||
|
if !ok {
|
||||||
|
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
||||||
|
zap.String("expected", fmt.Sprintf("%T", result)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Put puts an object to cache.
|
||||||
|
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
||||||
|
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func formKey(ns, name string) string {
|
||||||
|
return name + "." + ns
|
||||||
|
}
|
12
internal/data/bucket.go
Normal file
12
internal/data/bucket.go
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
package data
|
||||||
|
|
||||||
|
import (
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
)
|
||||||
|
|
||||||
|
type BucketInfo struct {
|
||||||
|
Name string // container name from system attribute
|
||||||
|
Zone string // container zone from system attribute
|
||||||
|
CID cid.ID
|
||||||
|
HomomorphicHashDisabled bool
|
||||||
|
}
|
245
internal/frostfs/frostfs.go
Normal file
245
internal/frostfs/frostfs.go
Normal file
|
@ -0,0 +1,245 @@
|
||||||
|
package frostfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FrostFS represents virtual connection to the FrostFS network.
|
||||||
|
// It is used to provide an interface to dependent packages
|
||||||
|
// which work with FrostFS.
|
||||||
|
type FrostFS struct {
|
||||||
|
pool *pool.Pool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFrostFS creates new FrostFS using provided pool.Pool.
|
||||||
|
func NewFrostFS(p *pool.Pool) *FrostFS {
|
||||||
|
return &FrostFS{
|
||||||
|
pool: p,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Container implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) Container(ctx context.Context, layerPrm handler.PrmContainer) (*container.Container, error) {
|
||||||
|
prm := pool.PrmContainerGet{
|
||||||
|
ContainerID: layerPrm.ContainerID,
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := x.pool.GetContainer(ctx, prm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleObjectError("read container via connection pool", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateObject implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
|
||||||
|
var prmPut pool.PrmObjectPut
|
||||||
|
prmPut.SetHeader(*prm.Object)
|
||||||
|
prmPut.SetPayload(prm.Payload)
|
||||||
|
prmPut.SetClientCut(prm.ClientCut)
|
||||||
|
prmPut.WithoutHomomorphicHash(prm.WithoutHomomorphicHash)
|
||||||
|
prmPut.SetBufferMaxSize(prm.BufferMaxSize)
|
||||||
|
|
||||||
|
if prm.BearerToken != nil {
|
||||||
|
prmPut.UseBearer(*prm.BearerToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
idObj, err := x.pool.PutObject(ctx, prmPut)
|
||||||
|
if err != nil {
|
||||||
|
return oid.ID{}, handleObjectError("save object via connection pool", err)
|
||||||
|
}
|
||||||
|
return idObj.ObjectID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// wraps io.ReadCloser and transforms Read errors related to access violation
|
||||||
|
// to frostfs.ErrAccessDenied.
|
||||||
|
type payloadReader struct {
|
||||||
|
io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x payloadReader) Read(p []byte) (int, error) {
|
||||||
|
n, err := x.ReadCloser.Read(p)
|
||||||
|
if err != nil && errors.Is(err, io.EOF) {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
return n, handleObjectError("read payload", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadObject implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
|
||||||
|
var prmHead pool.PrmObjectHead
|
||||||
|
prmHead.SetAddress(prm.Address)
|
||||||
|
|
||||||
|
if prm.BearerToken != nil {
|
||||||
|
prmHead.UseBearer(*prm.BearerToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := x.pool.HeadObject(ctx, prmHead)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleObjectError("read object header via connection pool", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetObject implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
|
||||||
|
var prmGet pool.PrmObjectGet
|
||||||
|
prmGet.SetAddress(prm.Address)
|
||||||
|
|
||||||
|
if prm.BearerToken != nil {
|
||||||
|
prmGet.UseBearer(*prm.BearerToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := x.pool.GetObject(ctx, prmGet)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleObjectError("init full object reading via connection pool", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &handler.Object{
|
||||||
|
Header: res.Header,
|
||||||
|
Payload: res.Payload,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RangeObject implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
|
||||||
|
var prmRange pool.PrmObjectRange
|
||||||
|
prmRange.SetAddress(prm.Address)
|
||||||
|
prmRange.SetOffset(prm.PayloadRange[0])
|
||||||
|
prmRange.SetLength(prm.PayloadRange[1])
|
||||||
|
|
||||||
|
if prm.BearerToken != nil {
|
||||||
|
prmRange.UseBearer(*prm.BearerToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := x.pool.ObjectRange(ctx, prmRange)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleObjectError("init payload range reading via connection pool", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return payloadReader{&res}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SearchObjects implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
|
||||||
|
var prmSearch pool.PrmObjectSearch
|
||||||
|
prmSearch.SetContainerID(prm.Container)
|
||||||
|
prmSearch.SetFilters(prm.Filters)
|
||||||
|
|
||||||
|
if prm.BearerToken != nil {
|
||||||
|
prmSearch.UseBearer(*prm.BearerToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
res, err := x.pool.SearchObjects(ctx, prmSearch)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleObjectError("init object search via connection pool", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEpochDurations implements frostfs.FrostFS interface method.
|
||||||
|
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
|
||||||
|
networkInfo, err := x.pool.NetworkInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
res := &utils.EpochDurations{
|
||||||
|
CurrentEpoch: networkInfo.CurrentEpoch(),
|
||||||
|
MsPerBlock: networkInfo.MsPerBlock(),
|
||||||
|
BlockPerEpoch: networkInfo.EpochDuration(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.BlockPerEpoch == 0 {
|
||||||
|
return nil, fmt.Errorf("EpochDuration is empty")
|
||||||
|
}
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolverFrostFS represents virtual connection to the FrostFS network.
|
||||||
|
// It implements resolver.FrostFS.
|
||||||
|
type ResolverFrostFS struct {
|
||||||
|
pool *pool.Pool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewResolverFrostFS creates new ResolverFrostFS using provided pool.Pool.
|
||||||
|
func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
|
||||||
|
return &ResolverFrostFS{pool: p}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SystemDNS implements resolver.FrostFS interface method.
|
||||||
|
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
|
||||||
|
networkInfo, err := x.pool.NetworkInfo(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", handleObjectError("read network info via client", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := networkInfo.RawNetworkParameter("SystemDNS")
|
||||||
|
if domain == nil {
|
||||||
|
return "", errors.New("system DNS parameter not found or empty")
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(domain), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleObjectError(msg string, err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if reason, ok := IsErrObjectAccessDenied(err); ok {
|
||||||
|
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
|
||||||
|
}
|
||||||
|
|
||||||
|
if IsTimeoutError(err) {
|
||||||
|
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("%s: %w", msg, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func UnwrapErr(err error) error {
|
||||||
|
unwrappedErr := errors.Unwrap(err)
|
||||||
|
for unwrappedErr != nil {
|
||||||
|
err = unwrappedErr
|
||||||
|
unwrappedErr = errors.Unwrap(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsErrObjectAccessDenied(err error) (string, bool) {
|
||||||
|
err = UnwrapErr(err)
|
||||||
|
switch err := err.(type) {
|
||||||
|
default:
|
||||||
|
return "", false
|
||||||
|
case *apistatus.ObjectAccessDenied:
|
||||||
|
return err.Reason(), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsTimeoutError(err error) bool {
|
||||||
|
if strings.Contains(err.Error(), "timeout") ||
|
||||||
|
errors.Is(err, context.DeadlineExceeded) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded
|
||||||
|
}
|
163
internal/frostfs/services/pool_wrapper.go
Normal file
163
internal/frostfs/services/pool_wrapper.go
Normal file
|
@ -0,0 +1,163 @@
|
||||||
|
package services
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
||||||
|
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
|
||||||
|
)
|
||||||
|
|
||||||
|
type GetNodeByPathResponseInfoWrapper struct {
|
||||||
|
response *grpcService.GetNodeByPathResponse_Info
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
|
||||||
|
return []uint64{n.response.GetNodeId()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
|
||||||
|
return []uint64{n.response.GetParentId()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
|
||||||
|
return []uint64{n.response.GetTimestamp()}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
|
||||||
|
res := make([]tree.Meta, len(n.response.Meta))
|
||||||
|
for i, value := range n.response.Meta {
|
||||||
|
res[i] = value
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
type PoolWrapper struct {
|
||||||
|
p *treepool.Pool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
|
||||||
|
return &PoolWrapper{p: p}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
||||||
|
poolPrm := treepool.GetNodesParams{
|
||||||
|
CID: prm.CnrID,
|
||||||
|
TreeID: prm.TreeID,
|
||||||
|
Path: prm.Path,
|
||||||
|
Meta: prm.Meta,
|
||||||
|
PathAttribute: tree.FileNameKey,
|
||||||
|
LatestOnly: prm.LatestOnly,
|
||||||
|
AllAttrs: prm.AllAttrs,
|
||||||
|
BearerToken: getBearer(ctx),
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes, err := w.p.GetNodes(ctx, poolPrm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
res := make([]tree.NodeResponse, len(nodes))
|
||||||
|
for i, info := range nodes {
|
||||||
|
res[i] = GetNodeByPathResponseInfoWrapper{info}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getBearer(ctx context.Context) []byte {
|
||||||
|
token, err := tokens.LoadBearerToken(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return token.Marshal()
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if errors.Is(err, treepool.ErrNodeNotFound) {
|
||||||
|
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
|
||||||
|
}
|
||||||
|
if errors.Is(err, treepool.ErrNodeAccessDenied) {
|
||||||
|
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
|
||||||
|
order := treepool.NoneOrder
|
||||||
|
if sort {
|
||||||
|
order = treepool.AscendingOrder
|
||||||
|
}
|
||||||
|
poolPrm := treepool.GetSubTreeParams{
|
||||||
|
CID: bktInfo.CID,
|
||||||
|
TreeID: treeID,
|
||||||
|
RootID: rootID,
|
||||||
|
Depth: depth,
|
||||||
|
BearerToken: getBearer(ctx),
|
||||||
|
Order: order,
|
||||||
|
}
|
||||||
|
if len(rootID) == 1 && rootID[0] == 0 {
|
||||||
|
// storage node interprets 'nil' value as []uint64{0}
|
||||||
|
// gate wants to send 'nil' value instead of []uint64{0}, because
|
||||||
|
// it provides compatibility with previous tree service api where
|
||||||
|
// single uint64(0) value is dropped from signature
|
||||||
|
poolPrm.RootID = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, handleError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var subtree []tree.NodeResponse
|
||||||
|
|
||||||
|
node, err := subTreeReader.Next()
|
||||||
|
for err == nil {
|
||||||
|
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
|
||||||
|
node, err = subTreeReader.Next()
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
return nil, handleError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return subtree, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSubTreeResponseBodyWrapper adapts a GetSubTree gRPC response body
// to the tree.NodeResponse interface.
type GetSubTreeResponseBodyWrapper struct {
	response *grpcService.GetSubTreeResponse_Body
}
|
||||||
|
|
||||||
|
func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
|
||||||
|
return n.response.GetNodeId()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
|
||||||
|
resp := n.response.GetParentId()
|
||||||
|
if resp == nil {
|
||||||
|
// storage sends nil that should be interpreted as []uint64{0}
|
||||||
|
// due to protobuf compatibility, see 'GetSubTree' function
|
||||||
|
return []uint64{0}
|
||||||
|
}
|
||||||
|
return resp
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
|
||||||
|
return n.response.GetTimestamp()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
|
||||||
|
res := make([]tree.Meta, len(n.response.Meta))
|
||||||
|
for i, value := range n.response.Meta {
|
||||||
|
res[i] = value
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
157
internal/handler/browse.go
Normal file
157
internal/handler/browse.go
Normal file
|
@ -0,0 +1,157 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"html/template"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"github.com/docker/go-units"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// dateFormat renders object creation times on the index page.
	dateFormat = "02-01-2006 15:04"

	// Attribute keys read from tree node metadata.
	attrOID      = "OID"
	attrCreated  = "Created"
	attrFileName = "FileName"
	attrSize     = "Size"
)

type (
	// BrowsePageData is the template context for the bucket index page.
	BrowsePageData struct {
		BucketName,
		Prefix string
		Objects []ResponseObject
	}
	// ResponseObject is a single listing entry rendered on the index page.
	ResponseObject struct {
		OID      string
		Created  string
		FileName string
		Size     string
		IsDir    bool
	}
)
|
||||||
|
|
||||||
|
// parseTimestamp interprets tstamp as milliseconds since the Unix epoch.
func parseTimestamp(tstamp string) (time.Time, error) {
	ms, err := strconv.ParseInt(tstamp, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.UnixMilli(ms), nil
}
|
||||||
|
|
||||||
|
func NewResponseObject(nodes map[string]string) ResponseObject {
|
||||||
|
return ResponseObject{
|
||||||
|
OID: nodes[attrOID],
|
||||||
|
Created: nodes[attrCreated],
|
||||||
|
FileName: nodes[attrFileName],
|
||||||
|
Size: nodes[attrSize],
|
||||||
|
IsDir: nodes[attrOID] == "",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatTimestamp(strdate string) string {
|
||||||
|
date, err := parseTimestamp(strdate)
|
||||||
|
if err != nil || date.IsZero() {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return date.Format(dateFormat)
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatSize(strsize string) string {
|
||||||
|
size, err := strconv.ParseFloat(strsize, 64)
|
||||||
|
if err != nil {
|
||||||
|
return "0B"
|
||||||
|
}
|
||||||
|
return units.HumanSize(size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// parentDir returns the last path component of prefix (with its leading
// slash), or prefix unchanged when it contains no slash.
func parentDir(prefix string) string {
	if i := strings.LastIndex(prefix, "/"); i != -1 {
		return prefix[i:]
	}
	return prefix
}
|
||||||
|
|
||||||
|
// trimPrefix unescapes encPrefix and strips its last path component,
// returning "" when unescaping fails or no slash is present.
func trimPrefix(encPrefix string) string {
	prefix, err := url.PathUnescape(encPrefix)
	if err != nil {
		return ""
	}
	if i := strings.LastIndex(prefix, "/"); i != -1 {
		return prefix[:i]
	}
	return ""
}
|
||||||
|
|
||||||
|
// urlencode joins prefix and filename and escapes every path segment,
// additionally escaping "." and ".." segments so generated links cannot
// be used for path traversal.
func urlencode(prefix, filename string) string {
	fullPath := filename
	if prefix != "" {
		fullPath = prefix + "/" + filename
	}

	var b strings.Builder
	for _, segment := range strings.Split(fullPath, "/") {
		escaped := "/" + url.PathEscape(segment)
		if escaped == "/." || escaped == "/.." {
			escaped = url.PathEscape(escaped)
		}
		b.WriteString(escaped)
	}
	return b.String()
}
|
||||||
|
|
||||||
|
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, bucketInfo *data.BucketInfo, prefix string) {
|
||||||
|
log := h.log.With(zap.String("bucket", bucketInfo.Name))
|
||||||
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
nodes, err := h.listObjects(ctx, bucketInfo, prefix)
|
||||||
|
if err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
respObjects := make([]ResponseObject, len(nodes))
|
||||||
|
|
||||||
|
for i, node := range nodes {
|
||||||
|
respObjects[i] = NewResponseObject(node)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Slice(respObjects, func(i, j int) bool {
|
||||||
|
if respObjects[i].IsDir == respObjects[j].IsDir {
|
||||||
|
return respObjects[i].FileName < respObjects[j].FileName
|
||||||
|
}
|
||||||
|
return respObjects[i].IsDir
|
||||||
|
})
|
||||||
|
indexTemplate := h.config.IndexPageTemplate()
|
||||||
|
|
||||||
|
tmpl, err := template.New("index").Funcs(template.FuncMap{
|
||||||
|
"formatTimestamp": formatTimestamp,
|
||||||
|
"formatSize": formatSize,
|
||||||
|
"trimPrefix": trimPrefix,
|
||||||
|
"urlencode": urlencode,
|
||||||
|
"parentDir": parentDir,
|
||||||
|
}).Parse(indexTemplate)
|
||||||
|
if err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = tmpl.Execute(c, &BrowsePageData{
|
||||||
|
BucketName: bucketInfo.Name,
|
||||||
|
Prefix: prefix,
|
||||||
|
Objects: respObjects,
|
||||||
|
}); err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
196
internal/handler/download.go
Normal file
196
internal/handler/download.go
Normal file
|
@ -0,0 +1,196 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
|
||||||
|
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
||||||
|
test, _ := c.UserValue("oid").(string)
|
||||||
|
var id oid.ID
|
||||||
|
err := id.DecodeString(test)
|
||||||
|
if err != nil {
|
||||||
|
h.byObjectName(c, h.receiveFile)
|
||||||
|
} else {
|
||||||
|
h.byAddress(c, h.receiveFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
|
||||||
|
return &request{
|
||||||
|
RequestCtx: ctx,
|
||||||
|
log: log,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadByAttribute handles attribute-based download requests by
// delegating to the generic attribute lookup with the file-receiving
// callback.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
	h.byAttribute(c, h.receiveFile)
}
|
||||||
|
|
||||||
|
func (h *Handler) search(ctx context.Context, cnrID *cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
|
||||||
|
filters := object.NewSearchFilters()
|
||||||
|
filters.AddRootFilter()
|
||||||
|
filters.AddFilter(key, val, op)
|
||||||
|
|
||||||
|
prm := PrmObjectSearch{
|
||||||
|
PrmAuth: PrmAuth{
|
||||||
|
BearerToken: bearerToken(ctx),
|
||||||
|
},
|
||||||
|
Container: *cnrID,
|
||||||
|
Filters: filters,
|
||||||
|
}
|
||||||
|
|
||||||
|
return h.frostfs.SearchObjects(ctx, prm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
|
||||||
|
method := zip.Store
|
||||||
|
if h.config.ZipCompression() {
|
||||||
|
method = zip.Deflate
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath := getZipFilePath(obj)
|
||||||
|
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
|
||||||
|
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
return zw.CreateHeader(&zip.FileHeader{
|
||||||
|
Name: filePath,
|
||||||
|
Method: method,
|
||||||
|
Modified: time.Now(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadZipped handles zip by prefix requests: it searches the bucket
// for root objects whose FilePath starts with the prefix and streams
// them as a zip archive in the response body.
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := c.UserValue("prefix").(string)

	prefix, err := url.QueryUnescape(prefix)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()), zap.Error(err))
		response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()))

	ctx := utils.GetContextFromRequest(c)

	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	// Select all root objects whose FilePath shares the requested prefix.
	resSearch, err := h.search(ctx, &bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
	if err != nil {
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	// Headers and status are committed before the archive is built, so any
	// error during streaming can only be logged, not reported to the client.
	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
	c.Response.SetStatusCode(http.StatusOK)

	c.SetBodyStreamWriter(func(w *bufio.Writer) {
		defer resSearch.Close()

		zipWriter := zip.NewWriter(w)

		var bufZip []byte
		var addr oid.Address

		empty := true
		called := false
		btoken := bearerToken(ctx)
		addr.SetContainer(bktInfo.CID)

		errIter := resSearch.Iterate(func(id oid.ID) bool {
			called = true

			// Allocate the copy buffer lazily, on the first object only.
			if empty {
				bufZip = make([]byte, 3<<20) // the same as for upload
			}
			empty = false

			addr.SetObject(id)
			if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
				log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
			}

			// Always continue iterating; a failed object is logged and skipped.
			return false
		})
		if errIter != nil {
			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
		} else if !called {
			log.Error(logs.ObjectsNotFound)
		}

		if err = zipWriter.Close(); err != nil {
			log.Error(logs.CloseZipWriter, zap.Error(err))
		}
	})
}
|
||||||
|
|
||||||
|
func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
|
||||||
|
prm := PrmObjectGet{
|
||||||
|
PrmAuth: PrmAuth{
|
||||||
|
BearerToken: btoken,
|
||||||
|
},
|
||||||
|
Address: addr,
|
||||||
|
}
|
||||||
|
|
||||||
|
resGet, err := h.frostfs.GetObject(ctx, prm)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("get FrostFS object: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("zip create header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
|
||||||
|
return fmt.Errorf("copy object payload to zip file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = resGet.Payload.Close(); err != nil {
|
||||||
|
return fmt.Errorf("object body close error: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = zipWriter.Flush(); err != nil {
|
||||||
|
return fmt.Errorf("flush zip writer: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getZipFilePath(obj *object.Object) string {
|
||||||
|
for _, attr := range obj.Attributes() {
|
||||||
|
if attr.Key() == object.AttributeFilePath {
|
||||||
|
return attr.Value()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
57
internal/handler/filter.go
Normal file
57
internal/handler/filter.go
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) {
|
||||||
|
var err error
|
||||||
|
result := make(map[string]string)
|
||||||
|
prefix := []byte(utils.UserAttributeHeaderPrefix)
|
||||||
|
|
||||||
|
header.VisitAll(func(key, val []byte) {
|
||||||
|
// checks that the key and the val not empty
|
||||||
|
if len(key) == 0 || len(val) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// checks that the key has attribute prefix
|
||||||
|
if !bytes.HasPrefix(key, prefix) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// removing attribute prefix
|
||||||
|
clearKey := bytes.TrimPrefix(key, prefix)
|
||||||
|
|
||||||
|
clearKey = utils.TransformIfSystem(clearKey)
|
||||||
|
|
||||||
|
// checks that the attribute key is not empty
|
||||||
|
if len(clearKey) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if key gets duplicated
|
||||||
|
// return error containing full key name (with prefix)
|
||||||
|
if _, ok := result[string(clearKey)]; ok {
|
||||||
|
err = fmt.Errorf("key duplication error: %s", string(key))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// make string representation of key / val
|
||||||
|
k, v := string(clearKey), string(val)
|
||||||
|
|
||||||
|
result[k] = v
|
||||||
|
|
||||||
|
l.Debug(logs.AddAttributeToResultObject,
|
||||||
|
zap.String("key", k),
|
||||||
|
zap.String("val", v))
|
||||||
|
})
|
||||||
|
|
||||||
|
return result, err
|
||||||
|
}
|
53
internal/handler/filter_test.go
Normal file
53
internal/handler/filter_test.go
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
//go:build !integration
|
||||||
|
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFilter(t *testing.T) {
|
||||||
|
log := zap.NewNop()
|
||||||
|
|
||||||
|
t.Run("duplicate keys error", func(t *testing.T) {
|
||||||
|
req := &fasthttp.RequestHeader{}
|
||||||
|
req.DisableNormalizing()
|
||||||
|
req.Add("X-Attribute-DupKey", "first-value")
|
||||||
|
req.Add("X-Attribute-DupKey", "second-value")
|
||||||
|
_, err := filterHeaders(log, req)
|
||||||
|
require.Error(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("duplicate system keys error", func(t *testing.T) {
|
||||||
|
req := &fasthttp.RequestHeader{}
|
||||||
|
req.DisableNormalizing()
|
||||||
|
req.Add("X-Attribute-System-DupKey", "first-value")
|
||||||
|
req.Add("X-Attribute-System-DupKey", "second-value")
|
||||||
|
_, err := filterHeaders(log, req)
|
||||||
|
require.Error(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
req := &fasthttp.RequestHeader{}
|
||||||
|
req.DisableNormalizing()
|
||||||
|
|
||||||
|
req.Set("X-Attribute-System-Expiration-Epoch1", "101")
|
||||||
|
req.Set("X-Attribute-SYSTEM-Expiration-Epoch2", "102")
|
||||||
|
req.Set("X-Attribute-system-Expiration-Epoch3", "103")
|
||||||
|
req.Set("X-Attribute-MyAttribute", "value")
|
||||||
|
|
||||||
|
expected := map[string]string{
|
||||||
|
"__SYSTEM__EXPIRATION_EPOCH1": "101",
|
||||||
|
"MyAttribute": "value",
|
||||||
|
"__SYSTEM__EXPIRATION_EPOCH3": "103",
|
||||||
|
"__SYSTEM__EXPIRATION_EPOCH2": "102",
|
||||||
|
}
|
||||||
|
|
||||||
|
result, err := filterHeaders(log, req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, expected, result)
|
||||||
|
}
|
278
internal/handler/frostfs_mock.go
Normal file
278
internal/handler/frostfs_mock.go
Normal file
|
@ -0,0 +1,278 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
||||||
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestFrostFS is an in-memory FrostFS implementation for unit tests.
type TestFrostFS struct {
	objects    map[string]*object.Object       // keyed by address string
	containers map[string]*container.Container // keyed by container ID string
	accessList map[string]bool                 // "cid/user/op/oid" -> allowed
	key        *keys.PrivateKey                // default request owner key
}
|
||||||
|
|
||||||
|
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
|
||||||
|
return &TestFrostFS{
|
||||||
|
objects: make(map[string]*object.Object),
|
||||||
|
containers: make(map[string]*container.Container),
|
||||||
|
accessList: make(map[string]bool),
|
||||||
|
key: key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) ContainerID(name string) (*cid.ID, error) {
|
||||||
|
for id, cnr := range t.containers {
|
||||||
|
if container.Name(*cnr) == name {
|
||||||
|
var cnrID cid.ID
|
||||||
|
return &cnrID, cnrID.DecodeString(id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContainer registers cnr in the mock under its string-encoded ID.
func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
	t.containers[cnrID.EncodeToString()] = cnr
}
|
||||||
|
|
||||||
|
// AllowUserOperation grants access to object operations.
// Empty userID and objID means any user and object respectively
// (zero values act as wildcards when isAllowed checks the list).
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
	t.accessList[fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID)] = true
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
|
||||||
|
for k, v := range t.containers {
|
||||||
|
if k == prm.ContainerID.EncodeToString() {
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("container not found %s", prm.ContainerID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) requestOwner(btoken *bearer.Token) user.ID {
|
||||||
|
if btoken != nil {
|
||||||
|
return bearer.ResolveIssuer(*btoken)
|
||||||
|
}
|
||||||
|
|
||||||
|
var owner user.ID
|
||||||
|
user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
|
||||||
|
return owner
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) retrieveObject(addr oid.Address, btoken *bearer.Token) (*object.Object, error) {
|
||||||
|
sAddr := addr.EncodeToString()
|
||||||
|
|
||||||
|
if obj, ok := t.objects[sAddr]; ok {
|
||||||
|
owner := t.requestOwner(btoken)
|
||||||
|
|
||||||
|
if !t.isAllowed(addr.Container(), owner, acl.OpObjectGet, addr.Object()) {
|
||||||
|
return nil, ErrAccessDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
return obj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadObject returns the object at prm.Address. The mock stores header
// and payload together, so the full object is returned.
func (t *TestFrostFS) HeadObject(_ context.Context, prm PrmObjectHead) (*object.Object, error) {
	return t.retrieveObject(prm.Address, prm.BearerToken)
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) GetObject(_ context.Context, prm PrmObjectGet) (*Object, error) {
|
||||||
|
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Object{
|
||||||
|
Header: *obj,
|
||||||
|
Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) RangeObject(_ context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
|
||||||
|
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
off := prm.PayloadRange[0]
|
||||||
|
payload := obj.Payload()[off : off+prm.PayloadRange[1]]
|
||||||
|
return io.NopCloser(bytes.NewReader(payload)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateObject stores prm.Object in the mock under a freshly generated
// random ID, materializing the payload (if any) and enforcing PUT access
// for the request owner.
func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
	// Generate a random object ID from 32 random bytes.
	b := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return oid.ID{}, err
	}
	var id oid.ID
	id.SetSHA256(sha256.Sum256(b))
	prm.Object.SetID(id)

	attrs := prm.Object.Attributes()
	if prm.ClientCut {
		// Mark client-cut uploads so tests can assert on the flag.
		a := object.NewAttribute()
		a.SetKey("s3-client-cut")
		a.SetValue("true")
		attrs = append(attrs, *a)
	}

	prm.Object.SetAttributes(attrs...)

	if prm.Payload != nil {
		// Read the full payload and record its size and SHA-256 checksum.
		all, err := io.ReadAll(prm.Payload)
		if err != nil {
			return oid.ID{}, err
		}
		prm.Object.SetPayload(all)
		prm.Object.SetPayloadSize(uint64(len(all)))
		var hash checksum.Checksum
		checksum.Calculate(&hash, checksum.SHA256, all)
		prm.Object.SetPayloadChecksum(hash)
	}

	cnrID, _ := prm.Object.ContainerID()
	objID, _ := prm.Object.ID()

	owner := t.requestOwner(prm.BearerToken)

	if !t.isAllowed(cnrID, owner, acl.OpObjectPut, objID) {
		return oid.ID{}, ErrAccessDenied
	}

	addr := newAddress(cnrID, objID)
	t.objects[addr.EncodeToString()] = prm.Object
	return objID, nil
}
|
||||||
|
|
||||||
|
// resObjectSearchMock is a fixed in-memory search result stream.
type resObjectSearchMock struct {
	res []oid.ID
}
|
||||||
|
|
||||||
|
// Read copies stored IDs into buf. When fewer IDs remain than len(buf),
// it copies all remaining IDs and returns (remaining count, io.EOF)
// without consuming them; otherwise it fills buf, advances past the
// copied prefix, and returns (len(buf), nil).
func (r *resObjectSearchMock) Read(buf []oid.ID) (int, error) {
	for i := range buf {
		if i > len(r.res)-1 {
			return len(r.res), io.EOF
		}
		buf[i] = r.res[i]
	}

	// Reached only when len(buf) <= len(r.res), so the reslice is safe.
	r.res = r.res[len(buf):]

	return len(buf), nil
}
|
||||||
|
|
||||||
|
func (r *resObjectSearchMock) Iterate(f func(oid.ID) bool) error {
|
||||||
|
for _, id := range r.res {
|
||||||
|
if f(id) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *resObjectSearchMock) Close() {}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (ResObjectSearch, error) {
|
||||||
|
if !t.isAllowed(prm.Container, t.requestOwner(prm.BearerToken), acl.OpObjectSearch, oid.ID{}) {
|
||||||
|
return nil, ErrAccessDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
cidStr := prm.Container.EncodeToString()
|
||||||
|
var res []oid.ID
|
||||||
|
|
||||||
|
if len(prm.Filters) == 1 { // match root filter
|
||||||
|
for k, v := range t.objects {
|
||||||
|
if strings.Contains(k, cidStr) {
|
||||||
|
id, _ := v.ID()
|
||||||
|
res = append(res, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &resObjectSearchMock{res: res}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
filter := prm.Filters[1]
|
||||||
|
if len(prm.Filters) != 2 ||
|
||||||
|
filter.Operation() != object.MatchCommonPrefix && filter.Operation() != object.MatchStringEqual {
|
||||||
|
return nil, fmt.Errorf("usupported filters")
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range t.objects {
|
||||||
|
if strings.Contains(k, cidStr) && isMatched(v.Attributes(), filter) {
|
||||||
|
id, _ := v.ID()
|
||||||
|
res = append(res, id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &resObjectSearchMock{res: res}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
|
||||||
|
for _, attr := range attributes {
|
||||||
|
if attr.Key() == filter.Header() {
|
||||||
|
switch filter.Operation() {
|
||||||
|
case object.MatchStringEqual:
|
||||||
|
return attr.Value() == filter.Value()
|
||||||
|
case object.MatchCommonPrefix:
|
||||||
|
return strings.HasPrefix(attr.Value(), filter.Value())
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEpochDurations returns fixed epoch timing values for tests.
func (t *TestFrostFS) GetEpochDurations(context.Context) (*utils.EpochDurations, error) {
	return &utils.EpochDurations{
		CurrentEpoch:  10,
		MsPerBlock:    1000,
		BlockPerEpoch: 100,
	}, nil
}
|
||||||
|
|
||||||
|
func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) bool {
|
||||||
|
keysToCheck := []string{
|
||||||
|
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID),
|
||||||
|
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, oid.ID{}),
|
||||||
|
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, objID),
|
||||||
|
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, oid.ID{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, key := range keysToCheck {
|
||||||
|
if t.accessList[key] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
|
||||||
|
var addr oid.Address
|
||||||
|
addr.SetContainer(cnr)
|
||||||
|
addr.SetObject(obj)
|
||||||
|
return addr
|
||||||
|
}
|
414
internal/handler/handler.go
Normal file
414
internal/handler/handler.go
Normal file
|
@ -0,0 +1,414 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config provides the runtime settings consumed by the handler.
type Config interface {
	DefaultTimestamp() bool
	ZipCompression() bool
	ClientCut() bool
	IndexPageEnabled() bool
	IndexPageTemplate() string
	BufferMaxSizeForPut() uint64
	NamespaceHeader() string
}
|
||||||
|
|
||||||
|
// PrmContainer groups parameters of FrostFS.Container operation.
type PrmContainer struct {
	// Container identifier.
	ContainerID cid.ID
}

// PrmAuth groups authentication parameters for the FrostFS operation.
type PrmAuth struct {
	// Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
	BearerToken *bearer.Token
}

// PrmObjectHead groups parameters of FrostFS.HeadObject operation.
type PrmObjectHead struct {
	// Authentication parameters.
	PrmAuth

	// Address to read the object header from.
	Address oid.Address
}

// PrmObjectGet groups parameters of FrostFS.GetObject operation.
type PrmObjectGet struct {
	// Authentication parameters.
	PrmAuth

	// Address of the object to read.
	Address oid.Address
}

// PrmObjectRange groups parameters of FrostFS.RangeObject operation.
type PrmObjectRange struct {
	// Authentication parameters.
	PrmAuth

	// Address of the object whose payload range is read.
	Address oid.Address

	// Offset-length range of the object payload to be read.
	PayloadRange [2]uint64
}

// Object represents FrostFS object.
type Object struct {
	// Object header (doesn't contain payload).
	Header object.Object

	// Object payload part encapsulated in io.Reader primitive.
	// Returns ErrAccessDenied on read access violation.
	Payload io.ReadCloser
}

// PrmObjectCreate groups parameters of FrostFS.CreateObject operation.
type PrmObjectCreate struct {
	// Authentication parameters.
	PrmAuth

	// Object to store (header fields set by the caller).
	Object *object.Object

	// Object payload encapsulated in io.Reader primitive.
	Payload io.Reader

	// Enables client side object preparing.
	ClientCut bool

	// Disables using Tillich-Zémor hash for payload.
	WithoutHomomorphicHash bool

	// Sets max buffer size to read payload.
	BufferMaxSize uint64
}

// PrmObjectSearch groups parameters of FrostFS.SearchObjects operation.
type PrmObjectSearch struct {
	// Authentication parameters.
	PrmAuth

	// Container to select the objects from.
	Container cid.ID

	// Filters applied to the search query.
	Filters object.SearchFilters
}

// ResObjectSearch is the result stream of FrostFS.SearchObjects.
type ResObjectSearch interface {
	Read(buf []oid.ID) (int, error)
	Iterate(f func(oid.ID) bool) error
	Close()
}
|
||||||
|
|
||||||
|
var (
	// ErrAccessDenied is returned from FrostFS in case of access violation.
	ErrAccessDenied = errors.New("access denied")
	// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
	ErrGatewayTimeout = errors.New("gateway timeout")
)

// FrostFS represents virtual connection to FrostFS network.
type FrostFS interface {
	Container(context.Context, PrmContainer) (*container.Container, error)
	HeadObject(context.Context, PrmObjectHead) (*object.Object, error)
	GetObject(context.Context, PrmObjectGet) (*Object, error)
	RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
	SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
	utils.EpochInfoFetcher
}

// ContainerResolver resolves a human-readable container name into a container ID.
type ContainerResolver interface {
	Resolve(ctx context.Context, name string) (*cid.ID, error)
}
|
||||||
|
|
||||||
|
// Handler serves HTTP gateway requests by translating them into FrostFS
// operations.
type Handler struct {
	log               *zap.Logger
	frostfs           FrostFS
	ownerID           *user.ID
	config            Config
	containerResolver ContainerResolver
	tree              *tree.Tree
	cache             *cache.BucketCache
}

// AppParams groups the application-level dependencies needed to construct a Handler.
type AppParams struct {
	Logger   *zap.Logger
	FrostFS  FrostFS
	Owner    *user.ID
	Resolver ContainerResolver
	Cache    *cache.BucketCache
}
|
||||||
|
|
||||||
|
func New(params *AppParams, config Config, tree *tree.Tree) *Handler {
|
||||||
|
return &Handler{
|
||||||
|
log: params.Logger,
|
||||||
|
frostfs: params.FrostFS,
|
||||||
|
ownerID: params.Owner,
|
||||||
|
config: config,
|
||||||
|
containerResolver: params.Resolver,
|
||||||
|
tree: tree,
|
||||||
|
cache: params.Cache,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
// prepares request and object address to it.
func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
	var (
		// Route values; the comma-ok assertion degrades missing/non-string values to "".
		idCnr, _ = c.UserValue("cid").(string)
		idObj, _ = c.UserValue("oid").(string)
		log      = h.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
	)

	ctx := utils.GetContextFromRequest(c)

	// Resolve the container (by ID or name) using the bucket cache when possible.
	bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	objID := new(oid.ID)
	if err = objID.DecodeString(idObj); err != nil {
		log.Error(logs.WrongObjectID, zap.Error(err))
		response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
		return
	}

	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(*objID)

	f(ctx, *h.newRequest(c, log), addr)
}
|
||||||
|
|
||||||
|
// byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
||||||
|
// prepares request and object address to it.
|
||||||
|
func (h *Handler) byObjectName(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
||||||
|
var (
|
||||||
|
bucketname = c.UserValue("cid").(string)
|
||||||
|
key = c.UserValue("oid").(string)
|
||||||
|
log = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
|
||||||
|
download = c.QueryArgs().GetBool("download")
|
||||||
|
)
|
||||||
|
|
||||||
|
unescapedKey, err := url.QueryUnescape(key)
|
||||||
|
if err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
|
||||||
|
if err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
|
||||||
|
if h.config.IndexPageEnabled() && !download && string(c.Method()) != fasthttp.MethodHead {
|
||||||
|
if isDir(unescapedKey) || isContainerRoot(unescapedKey) {
|
||||||
|
if code := checkErrorType(err); code == fasthttp.StatusNotFound || code == fasthttp.StatusOK {
|
||||||
|
c.SetStatusCode(code)
|
||||||
|
h.browseObjects(c, bktInfo, unescapedKey)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, tree.ErrNodeAccessDenied) {
|
||||||
|
response.Error(c, "Access Denied", fasthttp.StatusForbidden)
|
||||||
|
} else {
|
||||||
|
response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
|
||||||
|
log.Error(logs.GetLatestObjectVersion, zap.Error(err))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if foundOid.DeleteMarker {
|
||||||
|
log.Error(logs.ObjectWasDeleted)
|
||||||
|
response.Error(c, "object deleted", fasthttp.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var addr oid.Address
|
||||||
|
addr.SetContainer(bktInfo.CID)
|
||||||
|
addr.SetObject(foundOid.OID)
|
||||||
|
|
||||||
|
f(ctx, *h.newRequest(c, log), addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// byAttribute is a wrapper similar to byAddress: it searches the container for
// the first object whose attribute attr_key equals attr_val and passes that
// object's address to f.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
	scid, _ := c.UserValue("cid").(string)
	key, _ := c.UserValue("attr_key").(string)
	val, _ := c.UserValue("attr_val").(string)

	key, err := url.QueryUnescape(key)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Uint64("id", c.ID()), zap.Error(err))
		response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	val, err = url.QueryUnescape(val)
	if err != nil {
		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Uint64("id", c.ID()), zap.Error(err))
		response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	log := h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))

	ctx := utils.GetContextFromRequest(c)

	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	res, err := h.search(ctx, &bktInfo.CID, key, val, object.MatchStringEqual)
	if err != nil {
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	defer res.Close()

	// Only the first match is used.
	buf := make([]oid.ID, 1)

	n, err := res.Read(buf)
	if n == 0 {
		// EOF with zero results means no object matched the attribute filter.
		if errors.Is(err, io.EOF) {
			log.Error(logs.ObjectNotFound, zap.Error(err))
			response.Error(c, "object not found", fasthttp.StatusNotFound)
			return
		}

		log.Error(logs.ReadObjectListFailed, zap.Error(err))
		response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	var addrObj oid.Address
	addrObj.SetContainer(bktInfo.CID)
	addrObj.SetObject(buf[0])

	f(ctx, *h.newRequest(c, log), addrObj)
}
|
||||||
|
|
||||||
|
// resolveContainer decodes the container id; if it's not a valid container id
// then it tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
	cnrID := new(cid.ID)
	err := cnrID.DecodeString(containerID)
	if err != nil {
		cnrID, err = h.containerResolver.Resolve(ctx, containerID)
		// Normalize "not found" resolver failures to the API status error so
		// upper layers can map them consistently.
		if err != nil && strings.Contains(err.Error(), "not found") {
			err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
		}
	}
	return cnrID, err
}
|
||||||
|
|
||||||
|
// getBucketInfo returns bucket info for the given container name (or encoded
// container ID), consulting the per-namespace cache before resolving and
// reading the container from FrostFS.
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
	ns, err := middleware.GetNamespace(ctx)
	if err != nil {
		return nil, err
	}

	if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil {
		return bktInfo, nil
	}

	cnrID, err := h.resolveContainer(ctx, containerName)
	if err != nil {
		return nil, err
	}

	bktInfo, err := h.readContainer(ctx, *cnrID)
	if err != nil {
		return nil, err
	}

	// Cache failures are non-fatal: log a warning and serve the fresh info.
	if err = h.cache.Put(bktInfo); err != nil {
		log.Warn(logs.CouldntPutBucketIntoCache,
			zap.String("bucket name", bktInfo.Name),
			zap.Stringer("bucket cid", bktInfo.CID),
			zap.Error(err))
	}

	return bktInfo, nil
}
|
||||||
|
|
||||||
|
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
|
||||||
|
prm := PrmContainer{ContainerID: cnrID}
|
||||||
|
res, err := h.frostfs.Container(ctx, prm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bktInfo := &data.BucketInfo{
|
||||||
|
CID: cnrID,
|
||||||
|
Name: cnrID.EncodeToString(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if domain := container.ReadDomain(*res); domain.Name() != "" {
|
||||||
|
bktInfo.Name = domain.Name()
|
||||||
|
bktInfo.Zone = domain.Zone()
|
||||||
|
}
|
||||||
|
|
||||||
|
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
|
||||||
|
|
||||||
|
return bktInfo, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) listObjects(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) ([]map[string]string, error) {
|
||||||
|
nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var objects = make([]map[string]string, 0, len(nodes))
|
||||||
|
for _, node := range nodes {
|
||||||
|
meta := node.GetMeta()
|
||||||
|
if meta == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var obj = make(map[string]string, len(meta))
|
||||||
|
for _, m := range meta {
|
||||||
|
obj[m.GetKey()] = string(m.GetValue())
|
||||||
|
}
|
||||||
|
objects = append(objects, obj)
|
||||||
|
}
|
||||||
|
|
||||||
|
return objects, nil
|
||||||
|
}
|
580
internal/handler/handler_fuzz_test.go
Normal file
580
internal/handler/handler_fuzz_test.go
Normal file
|
@ -0,0 +1,580 @@
|
||||||
|
//go:build gofuzz
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Exit codes returned by the Do* fuzz drivers to the fuzzing engine.
const (
	fuzzSuccessExitCode = 0
	fuzzFailExitCode    = -1
)
|
||||||
|
|
||||||
|
func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) {
|
||||||
|
array := make([]string, count)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
err = tp.Reset()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
array[i], err = tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return array, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) {
|
||||||
|
array := make([]bool, count)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
err = tp.Reset()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
array[i], err = tp.GetBool()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return array, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) {
|
||||||
|
count, err := tp.GetInt()
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
count = count % max
|
||||||
|
if count < 0 {
|
||||||
|
count += max
|
||||||
|
}
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateHeaders sets a fuzzer-derived number of request headers; each header
// name is picked from params and each value comes from the provider.
func generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error {
	count, err := tp.GetInt()
	if err != nil {
		return err
	}
	// Map the raw int into [0, len(params)); % may be negative in Go.
	count = count % len(params)
	if count < 0 {
		count += len(params)
	}

	for i := 0; i < count; i++ {
		position, err := tp.GetInt()
		if err != nil {
			return err
		}
		position = position % len(params)
		if position < 0 {
			position += len(params)
		}

		v, err := tp.GetString()
		if err != nil {
			return err
		}

		// Duplicate positions overwrite: fewer than count distinct headers may result.
		r.Header.Set(params[position], v)

	}

	return nil
}
|
||||||
|
|
||||||
|
func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) {
|
||||||
|
rnd, err := tp.GetBool()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if rnd == true {
|
||||||
|
initValue, err = tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return initValue, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// upload prepares a handler context with a container under a fuzzer-chosen ACL
// and performs a multipart Upload of fuzzer-derived content. On success it
// returns the request context, the handler context, the container ID, the
// upload response ctx, the object file name, and the custom attribute
// key/value that were set. NOTE: the order of provider calls is part of the
// fuzz corpus semantics — do not reorder.
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
	hc, err := prepareHandlerContext()
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	aclList := []acl.Basic{
		acl.Private,
		acl.PrivateExtended,
		acl.PublicRO,
		acl.PublicROExtended,
		acl.PublicRW,
		acl.PublicRWExtended,
		acl.PublicAppend,
		acl.PublicAppendExtended,
	}

	// Pick one of the predefined ACLs deterministically from the input.
	pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList))
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}
	acl := aclList[pos]

	// strings[4] and strings[5] are consumed later as random header suffixes.
	strings, err := prepareStrings(tp, 6)
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}
	bktName := strings[0]
	objFileName := strings[1]
	valAttr := strings[2]
	keyAttr := strings[3]

	if len(bktName) == 0 {
		return nil, nil, cid.ID{}, nil, "", "", "", errors.New("not enought buckets")
	}

	cnrID, cnr, err := hc.prepareContainer(bktName, acl)
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cnrID.EncodeToString())

	attributes := map[string]string{
		object.AttributeFileName: objFileName,
		keyAttr:                  valAttr,
	}

	// Build a multipart body containing the fuzzer-provided file content.
	var buff bytes.Buffer
	w := multipart.NewWriter(&buff)
	fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	content, err := tp.GetBytes()
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	if err = w.Close(); err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	r.Request.SetBodyStream(&buff, buff.Len())
	r.Request.Header.Set("Content-Type", w.FormDataContentType())
	r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)

	// Add a fuzzer-chosen mix of well-known and randomized attribute headers,
	// including deliberately colliding/odd casings to exercise header parsing.
	err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]})
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	hc.Handler().Upload(r)

	if r.Response.StatusCode() != http.StatusOK {
		return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload")
	}

	return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil
}
|
||||||
|
|
||||||
|
func InitFuzzUpload() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzUpload(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, _, _, _, _, _, err = upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzUpload(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzUpload(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// downloadOrHead builds a request ctx for fetching the object just uploaded,
// with the cid/oid/filename route values optionally replaced by random
// provider strings to exercise error paths.
func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) {

	var putRes putResponse

	// On any panic, re-panic with the raw upload response so the fuzzer
	// report carries the data that triggered it.
	defer func() {
		if r := recover(); r != nil {
			panic(resp)
		}
	}()

	data := resp.Response.Body()
	err := json.Unmarshal(data, &putRes)

	if err != nil {
		return nil, err
	}

	// Attach a (possibly randomized) FilePath attribute to the stored object.
	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
	attr := object.NewAttribute()
	attr.SetKey(object.AttributeFilePath)

	filename, err = maybeFillRandom(tp, filename)
	if err != nil {
		return nil, err
	}

	attr.SetValue(filename)
	obj.SetAttributes(append(obj.Attributes(), *attr)...)

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)

	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return nil, err
	}
	oid := putRes.ObjectID
	oid, err = maybeFillRandom(tp, oid)
	if err != nil {
		return nil, err
	}
	r.SetUserValue("cid", cid)
	r.SetUserValue("oid", oid)

	// Randomly enable the download query flag.
	rnd, err := tp.GetBool()
	if err != nil {
		return nil, err
	}
	if rnd == true {
		r.SetUserValue("download", "true")
	}

	return r, nil
}
|
||||||
|
|
||||||
|
func InitFuzzGet() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzGet(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
hc.Handler().DownloadByAddressOrBucketName(r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzGet(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzUpload(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzHead() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzHead(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
hc.Handler().HeadByAddressOrBucketName(r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzHead(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzHead(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzDownloadByAttribute() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzDownloadByAttribute(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := cnrID.EncodeToString()
|
||||||
|
cid, err = maybeFillRandom(tp, cid)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
attrKey, err = maybeFillRandom(tp, attrKey)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
attrVal, err = maybeFillRandom(tp, attrVal)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", cid)
|
||||||
|
r.SetUserValue("attr_key", attrKey)
|
||||||
|
r.SetUserValue("attr_val", attrVal)
|
||||||
|
|
||||||
|
hc.Handler().DownloadByAttribute(r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzDownloadByAttribute(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzDownloadByAttribute(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzHeadByAttribute() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzHeadByAttribute(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := cnrID.EncodeToString()
|
||||||
|
cid, err = maybeFillRandom(tp, cid)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
attrKey, err = maybeFillRandom(tp, attrKey)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
attrVal, err = maybeFillRandom(tp, attrVal)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", cid)
|
||||||
|
r.SetUserValue("attr_key", attrKey)
|
||||||
|
r.SetUserValue("attr_val", attrVal)
|
||||||
|
|
||||||
|
hc.Handler().HeadByAttribute(r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzHeadByAttribute(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzHeadByAttribute(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzDownloadZipped() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzDownloadZipped(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, hc, cnrID, _, _, _, _, err := upload(tp)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
cid := cnrID.EncodeToString()
|
||||||
|
cid, err = maybeFillRandom(tp, cid)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
prefix := ""
|
||||||
|
prefix, err = maybeFillRandom(tp, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", cid)
|
||||||
|
r.SetUserValue("prefix", prefix)
|
||||||
|
|
||||||
|
hc.Handler().DownloadZipped(r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzDownloadZipped(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzDownloadZipped(data)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzStoreBearerTokenAppCtx() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzStoreBearerTokenAppCtx(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
prefix := ""
|
||||||
|
prefix, err = maybeFillRandom(tp, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
ctx = middleware.SetNamespace(ctx, "")
|
||||||
|
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
|
||||||
|
strings, err := prepareStrings(tp, 3)
|
||||||
|
|
||||||
|
rand, err := prepareBools(tp, 2)
|
||||||
|
|
||||||
|
if rand[0] == true {
|
||||||
|
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
|
||||||
|
} else if rand[1] == true {
|
||||||
|
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
|
||||||
|
} else {
|
||||||
|
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
|
||||||
|
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
tokens.StoreBearerTokenAppCtx(ctx, r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzStoreBearerTokenAppCtx(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzStoreBearerTokenAppCtx(data)
|
||||||
|
})
|
||||||
|
}
|
312
internal/handler/handler_test.go
Normal file
312
internal/handler/handler_test.go
Normal file
|
@ -0,0 +1,312 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/zip"
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// treeClientMock is a no-op tree service client used by handler tests; every
// lookup returns an empty result.
type treeClientMock struct {
}

// GetNodes always reports no nodes.
func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	return nil, nil
}

// GetSubTree always reports an empty subtree.
func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
	return nil, nil
}
|
||||||
|
|
||||||
|
// configMock implements the handler Config interface with zero-value answers,
// disabling all optional features for tests.
type configMock struct {
}

// DefaultTimestamp disables automatic Timestamp attributes on upload.
func (c *configMock) DefaultTimestamp() bool {
	return false
}

// ZipCompression disables compression of zip archives.
func (c *configMock) ZipCompression() bool {
	return false
}

// IndexPageEnabled disables the index page.
func (c *configMock) IndexPageEnabled() bool {
	return false
}

// IndexPageTemplatePath reports no custom index template path.
func (c *configMock) IndexPageTemplatePath() string {
	return ""
}

// IndexPageTemplate reports no inline index template.
func (c *configMock) IndexPageTemplate() string {
	return ""
}

// ClientCut disables client-side object cutting.
func (c *configMock) ClientCut() bool {
	return false
}

// BufferMaxSizeForPut reports no buffering limit override for puts.
func (c *configMock) BufferMaxSizeForPut() uint64 {
	return 0
}

// NamespaceHeader reports no namespace header name.
func (c *configMock) NamespaceHeader() string {
	return ""
}
|
||||||
|
|
||||||
|
// handlerContext bundles a Handler under test together with the key, owner
// identity and mocks it was built from, so tests can both drive the handler
// and inspect/seed the in-memory FrostFS state.
type handlerContext struct {
	key   *keys.PrivateKey // signing key of the test user
	owner user.ID          // user ID derived from key

	h       *Handler
	frostfs *TestFrostFS    // in-memory FrostFS backing store
	tree    *treeClientMock // no-op tree service
	cfg     *configMock     // all-defaults handler config
}

// Handler returns the Handler under test.
func (hc *handlerContext) Handler() *Handler {
	return hc.h
}
|
||||||
|
|
||||||
|
// prepareHandlerContext wires a Handler with an in-memory FrostFS, a no-op
// tree client and an all-defaults config so tests can exercise HTTP handlers
// without any network dependencies.
func prepareHandlerContext() (*handlerContext, error) {
	logger, err := zap.NewDevelopment()
	if err != nil {
		return nil, err
	}

	key, err := keys.NewPrivateKey()
	if err != nil {
		return nil, err
	}

	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	testFrostFS := NewTestFrostFS(key)

	// Resolve bucket names straight from the in-memory store.
	testResolver := &resolver.Resolver{Name: "test_resolver"}
	testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
		return testFrostFS.ContainerID(name)
	})

	params := &AppParams{
		Logger:   logger,
		FrostFS:  testFrostFS,
		Owner:    &owner,
		Resolver: testResolver,
		// Minimal cache: one entry with the shortest lifetime keeps tests
		// from observing stale bucket data.
		Cache: cache.NewBucketCache(&cache.Config{
			Size:     1,
			Lifetime: 1,
			Logger:   logger,
		}),
	}

	treeMock := &treeClientMock{}
	cfgMock := &configMock{}

	handler := New(params, cfgMock, tree.NewTree(treeMock))

	return &handlerContext{
		key:     key,
		owner:   owner,
		h:       handler,
		frostfs: testFrostFS,
		tree:    treeMock,
		cfg:     cfgMock,
	}, nil
}
|
||||||
|
|
||||||
|
// prepareContainer builds a test container with the given name and basic ACL
// and registers the permitted object operations in the FrostFS mock. The
// container is NOT stored; callers do that via hc.frostfs.SetContainer.
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
	var pp netmap.PlacementPolicy
	err := pp.DecodeString("REP 1")
	if err != nil {
		return cid.ID{}, nil, err
	}

	var cnr container.Container
	cnr.Init()
	cnr.SetOwner(hc.owner)
	cnr.SetPlacementPolicy(pp)
	cnr.SetBasicACL(basicACL)

	var domain container.Domain
	domain.SetName(name)
	container.WriteDomain(&cnr, domain)
	container.SetName(&cnr, name)
	container.SetCreationTime(&cnr, time.Now())

	cnrID := cidtest.ID()

	// Allow the owner every object operation, and mirror the operations the
	// basic ACL grants to others. NOTE(review): the loop bound excludes
	// acl.OpObjectHash itself — confirm that exclusion is intentional.
	for op := acl.OpObjectGet; op < acl.OpObjectHash; op++ {
		hc.frostfs.AllowUserOperation(cnrID, hc.owner, op, oid.ID{})
		if basicACL.IsOpAllowed(op, acl.RoleOthers) {
			hc.frostfs.AllowUserOperation(cnrID, user.ID{}, op, oid.ID{})
		}
	}

	return cnrID, &cnr, nil
}
|
||||||
|
|
||||||
|
// TestBasic uploads an object through the handler and then verifies that all
// read paths (get/head by address, by attribute, and zipped download) serve
// it back correctly.
func TestBasic(t *testing.T) {
	hc, err := prepareHandlerContext()
	require.NoError(t, err)

	bktName := "bucket"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	content := "hello"
	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
	require.NoError(t, err)

	hc.Handler().Upload(r)
	// NOTE(review): testify's require.Equal takes (t, expected, actual); the
	// arguments here are swapped, which only affects failure messages.
	require.Equal(t, r.Response.StatusCode(), http.StatusOK)

	var putRes putResponse
	err = json.Unmarshal(r.Response.Body(), &putRes)
	require.NoError(t, err)

	// Attach a FilePath attribute directly to the stored object so the
	// zip-download path (which walks by file path) can find it.
	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
	attr := object.NewAttribute()
	attr.SetKey(object.AttributeFilePath)
	attr.SetValue(objFileName)
	obj.SetAttributes(append(obj.Attributes(), *attr)...)

	t.Run("get", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().DownloadByAddressOrBucketName(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().HeadByAddressOrBucketName(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("get by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().DownloadByAttribute(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().HeadByAttribute(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("zip", func(t *testing.T) {
		r = prepareGetZipped(ctx, bktName, "")
		hc.Handler().DownloadZipped(r)

		// The response body is a complete zip archive containing exactly
		// the uploaded object under its FilePath name.
		readerAt := bytes.NewReader(r.Response.Body())
		zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
		require.NoError(t, err)
		require.Len(t, zipReader.File, 1)
		require.Equal(t, objFileName, zipReader.File[0].Name)
		f, err := zipReader.File[0].Open()
		require.NoError(t, err)
		defer func() {
			inErr := f.Close()
			require.NoError(t, inErr)
		}()
		data, err := io.ReadAll(f)
		require.NoError(t, err)
		require.Equal(t, content, string(data))
	})
}
|
||||||
|
|
||||||
|
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", bucket)
|
||||||
|
return r, fillMultipartBody(r, content)
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.RequestCtx {
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", bucket)
|
||||||
|
r.SetUserValue("oid", objID)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", bucket)
|
||||||
|
r.SetUserValue("attr_key", attrKey)
|
||||||
|
r.SetUserValue("attr_val", attrVal)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.RequestCtx {
|
||||||
|
r := new(fasthttp.RequestCtx)
|
||||||
|
utils.SetContextToRequest(ctx, r)
|
||||||
|
r.SetUserValue("cid", bucket)
|
||||||
|
r.SetUserValue("prefix", prefix)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fixture values shared by the handler tests.
const (
	keyAttr     = "User-Attribute" // custom user attribute key (sent via X-Attribute-*)
	valAttr     = "user value"     // value stored under keyAttr
	objFileName = "newFile.txt"    // FileName/FilePath attribute of the uploaded object
)
|
||||||
|
|
||||||
|
// fillMultipartBody sets r's body to a multipart/form-data payload containing
// a single file part named "file" with the given content, and additionally
// passes a custom user attribute through the X-Attribute-* request header.
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {
	attributes := map[string]string{
		object.AttributeFileName: objFileName,
		keyAttr:                  valAttr,
	}

	var buff bytes.Buffer
	w := multipart.NewWriter(&buff)
	fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
	if err != nil {
		return err
	}

	if _, err = io.Copy(fw, bytes.NewBufferString(content)); err != nil {
		return err
	}

	// Close finalizes the multipart body (writes the trailing boundary).
	if err = w.Close(); err != nil {
		return err
	}

	r.Request.SetBodyStream(&buff, buff.Len())
	r.Request.Header.Set("Content-Type", w.FormDataContentType())
	r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)

	return nil
}
|
119
internal/handler/head.go
Normal file
119
internal/handler/head.go
Normal file
|
@ -0,0 +1,119 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
// max bytes needed to detect content type according to http.DetectContentType docs.
const sizeToDetectType = 512

// Response headers exposing FrostFS identifiers of the served object.
const (
	hdrObjectID    = "X-Object-Id"
	hdrOwnerID     = "X-Owner-Id"
	hdrContainerID = "X-Container-Id"
)
|
||||||
|
|
||||||
|
// headObject fetches the object header from FrostFS and maps its attributes
// onto HTTP response headers (Content-Length, Last-Modified, user attributes,
// identifier headers). When the object carries no Content-Type attribute, the
// type is sniffed from a payload prefix fetched via a range request.
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
	var start = time.Now()

	btoken := bearerToken(ctx)

	prm := PrmObjectHead{
		PrmAuth: PrmAuth{
			BearerToken: btoken,
		},
		Address: objectAddress,
	}

	obj, err := h.frostfs.HeadObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
	var contentType string
	for _, attr := range obj.Attributes() {
		key := attr.Key()
		val := attr.Value()
		// Skip attributes that cannot be represented as HTTP header
		// tokens/values.
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				// Unparseable timestamp: keep serving, just skip Last-Modified.
				req.log.Info(logs.CouldntParseCreationDate,
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err))
				continue
			}
			req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		}
	}

	idsToResponse(&req.Response, obj)

	if len(contentType) == 0 {
		// No Content-Type attribute: detect it from the payload head,
		// fetched with a bounded range request.
		contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
			prmRange := PrmObjectRange{
				PrmAuth: PrmAuth{
					BearerToken: btoken,
				},
				Address:      objectAddress,
				PayloadRange: [2]uint64{0, sz},
			}

			return h.frostfs.RangeObject(ctx, prmRange)
		})
		// io.EOF only signals the payload was shorter than the probe size.
		if err != nil && err != io.EOF {
			req.handleFrostFSErr(err, start)
			return
		}
	}
	req.SetContentType(contentType)
}
|
||||||
|
|
||||||
|
// idsToResponse exposes the object, owner and container identifiers of obj
// through the X-Object-Id, X-Owner-Id and X-Container-Id response headers.
func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
	objID, _ := obj.ID()
	cnrID, _ := obj.ContainerID()
	resp.Header.Set(hdrObjectID, objID.String())
	resp.Header.Set(hdrOwnerID, obj.OwnerID().String())
	resp.Header.Set(hdrContainerID, cnrID.String())
}
|
||||||
|
|
||||||
|
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
|
||||||
|
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
||||||
|
test, _ := c.UserValue("oid").(string)
|
||||||
|
var id oid.ID
|
||||||
|
|
||||||
|
err := id.DecodeString(test)
|
||||||
|
if err != nil {
|
||||||
|
h.byObjectName(c, h.headObject)
|
||||||
|
} else {
|
||||||
|
h.byAddress(c, h.headObject)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadByAttribute handles attribute-based head requests: the object is
// located via byAttribute and its metadata served by headObject.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
	h.byAttribute(c, h.headObject)
}
|
26
internal/handler/middleware/util.go
Normal file
26
internal/handler/middleware/util.go
Normal file
|
@ -0,0 +1,26 @@
|
||||||
|
package middleware
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// keyWrapper is wrapper for context keys.
|
||||||
|
type keyWrapper string
|
||||||
|
|
||||||
|
const nsKey = keyWrapper("namespace")
|
||||||
|
|
||||||
|
// GetNamespace extract namespace from context.
|
||||||
|
func GetNamespace(ctx context.Context) (string, error) {
|
||||||
|
ns, ok := ctx.Value(nsKey).(string)
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("couldn't get namespace from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ns, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNamespace sets namespace in the context.
|
||||||
|
func SetNamespace(ctx context.Context, ns string) context.Context {
|
||||||
|
return context.WithValue(ctx, nsKey, ns)
|
||||||
|
}
|
|
@ -1,9 +1,10 @@
|
||||||
package uploader
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/uploader/multipart"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -16,7 +17,8 @@ type MultipartFile interface {
|
||||||
|
|
||||||
func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
|
func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
|
||||||
// To have a custom buffer (3mb) the custom multipart reader is used.
|
// To have a custom buffer (3mb) the custom multipart reader is used.
|
||||||
// https://github.com/nspcc-dev/neofs-http-gw/issues/148
|
// Default reader uses 4KiB chunks, which slow down upload speed up to 400%
|
||||||
|
// https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32
|
||||||
reader := multipart.NewReader(r, boundary)
|
reader := multipart.NewReader(r, boundary)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
@ -27,7 +29,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
|
||||||
|
|
||||||
name := part.FormName()
|
name := part.FormName()
|
||||||
if name == "" {
|
if name == "" {
|
||||||
l.Debug("ignore part, empty form name")
|
l.Debug(logs.IgnorePartEmptyFormName)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -35,7 +37,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
|
||||||
|
|
||||||
// ignore multipart/form-data values
|
// ignore multipart/form-data values
|
||||||
if filename == "" {
|
if filename == "" {
|
||||||
l.Debug("ignore part, empty filename", zap.String("form", name))
|
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
|
@ -1,4 +1,6 @@
|
||||||
package uploader
|
//go:build !integration
|
||||||
|
|
||||||
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
|
@ -8,6 +10,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -109,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
|
||||||
|
|
||||||
name := part.FormName()
|
name := part.FormName()
|
||||||
if name == "" {
|
if name == "" {
|
||||||
l.Debug("ignore part, empty form name")
|
l.Debug(logs.IgnorePartEmptyFormName)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -117,7 +120,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul
|
||||||
|
|
||||||
// ignore multipart/form-data values
|
// ignore multipart/form-data values
|
||||||
if filename == "" {
|
if filename == "" {
|
||||||
l.Debug("ignore part, empty filename", zap.String("form", name))
|
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
148
internal/handler/reader.go
Normal file
148
internal/handler/reader.go
Normal file
|
@ -0,0 +1,148 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
type readCloser struct {
|
||||||
|
io.Reader
|
||||||
|
io.Closer
|
||||||
|
}
|
||||||
|
|
||||||
|
// initializes io.Reader with the limited size and detects Content-Type from it.
|
||||||
|
// Returns r's error directly. Also returns the processed data.
|
||||||
|
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
|
||||||
|
if maxSize > sizeToDetectType {
|
||||||
|
maxSize = sizeToDetectType
|
||||||
|
}
|
||||||
|
|
||||||
|
buf := make([]byte, maxSize) // maybe sync-pool the slice?
|
||||||
|
|
||||||
|
r, err := rInit(maxSize)
|
||||||
|
if err != nil {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := r.Read(buf)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return "", nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
buf = buf[:n]
|
||||||
|
|
||||||
|
return http.DetectContentType(buf), buf, err // to not lose io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// receiveFile streams an object from FrostFS into the HTTP response: it maps
// object attributes to headers, sniffs Content-Type from the payload head
// when the attribute is absent, and sets Content-Disposition (attachment when
// the "download" query flag is set, inline otherwise).
func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oid.Address) {
	var (
		err      error
		dis      = "inline"
		start    = time.Now()
		filename string
		filepath string
	)

	prm := PrmObjectGet{
		PrmAuth: PrmAuth{
			BearerToken: bearerToken(ctx),
		},
		Address: objectAddress,
	}

	rObj, err := h.frostfs.GetObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	// we can't close reader in this function, so how to do it?
	// (the payload stream is handed to SetBodyStream below, which closes it)

	if req.Request.URI().QueryArgs().GetBool("download") {
		dis = "attachment"
	}

	payloadSize := rObj.Header.PayloadSize()

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
	var contentType string
	for _, attr := range rObj.Header.Attributes() {
		key := attr.Key()
		val := attr.Value()
		// Skip attributes that cannot be represented as HTTP header
		// tokens/values.
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				// Unparseable timestamp: keep serving, just skip Last-Modified.
				req.log.Info(logs.CouldntParseCreationDate,
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err))
				continue
			}
			req.Response.Header.Set(fasthttp.HeaderLastModified,
				time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		case object.AttributeFilePath:
			filepath = val
		}
	}

	idsToResponse(&req.Response, &rObj.Header)

	if len(contentType) == 0 {
		// determine the Content-Type from the payload head
		var payloadHead []byte

		contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
			return rObj.Payload, nil
		})
		if err != nil && err != io.EOF {
			req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
			response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		// reset payload reader since a part of the data has been read
		var headReader io.Reader = bytes.NewReader(payloadHead)

		if err != io.EOF { // otherwise, we've already read full payload
			headReader = io.MultiReader(headReader, rObj.Payload)
		}

		// note: we could do with io.Reader, but SetBodyStream below closes body stream
		// if it implements io.Closer and that's useful for us.
		rObj.Payload = readCloser{headReader, rObj.Payload}
	}
	req.SetContentType(contentType)

	// Fall back to the FilePath attribute when FileName is absent.
	if filename == "" {
		filename = filepath
	}

	req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))

	req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
}
|
|
@ -1,4 +1,6 @@
|
||||||
package downloader
|
//go:build !integration
|
||||||
|
|
||||||
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
|
@ -33,7 +35,7 @@ func TestDetector(t *testing.T) {
|
||||||
} {
|
} {
|
||||||
t.Run(tc.Name, func(t *testing.T) {
|
t.Run(tc.Name, func(t *testing.T) {
|
||||||
contentType, data, err := readContentType(uint64(len(tc.Expected)),
|
contentType, data, err := readContentType(uint64(len(tc.Expected)),
|
||||||
func(sz uint64) (io.Reader, error) {
|
func(uint64) (io.Reader, error) {
|
||||||
return strings.NewReader(tc.Expected), nil
|
return strings.NewReader(tc.Expected), nil
|
||||||
},
|
},
|
||||||
)
|
)
|
190
internal/handler/upload.go
Normal file
190
internal/handler/upload.go
Normal file
|
@ -0,0 +1,190 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	jsonHeader   = "application/json; charset=UTF-8" // Content-Type of upload responses
	drainBufSize = 4096                              // chunk size used to drain the leftover request body
)

// putResponse is the JSON body returned to the client on a successful upload.
type putResponse struct {
	ObjectID    string `json:"object_id"`
	ContainerID string `json:"container_id"`
}

// newPutResponse builds a putResponse from the address of the stored object.
func newPutResponse(addr oid.Address) *putResponse {
	return &putResponse{
		ObjectID:    addr.Object().EncodeToString(),
		ContainerID: addr.Container().EncodeToString(),
	}
}

// encode writes pr to w as tab-indented JSON.
func (pr *putResponse) encode(w io.Writer) error {
	enc := json.NewEncoder(w)
	enc.SetIndent("", "\t")
	return enc.Encode(pr)
}
|
||||||
|
|
||||||
|
// Upload handles multipart upload request.
|
||||||
|
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
|
||||||
|
var (
|
||||||
|
file MultipartFile
|
||||||
|
idObj oid.ID
|
||||||
|
addr oid.Address
|
||||||
|
scid, _ = c.UserValue("cid").(string)
|
||||||
|
log = h.log.With(zap.String("cid", scid))
|
||||||
|
bodyStream = c.RequestBodyStream()
|
||||||
|
drainBuf = make([]byte, drainBufSize)
|
||||||
|
)
|
||||||
|
|
||||||
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
||||||
|
if err != nil {
|
||||||
|
logAndSendBucketError(c, log, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
// If the temporary reader can be closed - let's close it.
|
||||||
|
if file == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err := file.Close()
|
||||||
|
log.Debug(
|
||||||
|
logs.CloseTemporaryMultipartFormFile,
|
||||||
|
zap.Stringer("address", addr),
|
||||||
|
zap.String("filename", file.FileName()),
|
||||||
|
zap.Error(err),
|
||||||
|
)
|
||||||
|
}()
|
||||||
|
boundary := string(c.Request.Header.MultipartFormBoundary())
|
||||||
|
if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
|
||||||
|
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
|
||||||
|
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
filtered, err := filterHeaders(h.log, &c.Request.Header)
|
||||||
|
if err != nil {
|
||||||
|
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
|
||||||
|
response.Error(c, err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
|
||||||
|
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
|
||||||
|
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
|
||||||
|
} else {
|
||||||
|
now = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
|
||||||
|
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
|
||||||
|
response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes := make([]object.Attribute, 0, len(filtered))
|
||||||
|
// prepares attributes from filtered headers
|
||||||
|
for key, val := range filtered {
|
||||||
|
attribute := object.NewAttribute()
|
||||||
|
attribute.SetKey(key)
|
||||||
|
attribute.SetValue(val)
|
||||||
|
attributes = append(attributes, *attribute)
|
||||||
|
}
|
||||||
|
// sets FileName attribute if it wasn't set from header
|
||||||
|
if _, ok := filtered[object.AttributeFileName]; !ok {
|
||||||
|
filename := object.NewAttribute()
|
||||||
|
filename.SetKey(object.AttributeFileName)
|
||||||
|
filename.SetValue(file.FileName())
|
||||||
|
attributes = append(attributes, *filename)
|
||||||
|
}
|
||||||
|
// sets Timestamp attribute if it wasn't set from header and enabled by settings
|
||||||
|
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
|
||||||
|
timestamp := object.NewAttribute()
|
||||||
|
timestamp.SetKey(object.AttributeTimestamp)
|
||||||
|
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
|
||||||
|
attributes = append(attributes, *timestamp)
|
||||||
|
}
|
||||||
|
|
||||||
|
obj := object.New()
|
||||||
|
obj.SetContainerID(bktInfo.CID)
|
||||||
|
obj.SetOwnerID(*h.ownerID)
|
||||||
|
obj.SetAttributes(attributes...)
|
||||||
|
|
||||||
|
prm := PrmObjectCreate{
|
||||||
|
PrmAuth: PrmAuth{
|
||||||
|
BearerToken: h.fetchBearerToken(ctx),
|
||||||
|
},
|
||||||
|
Object: obj,
|
||||||
|
Payload: file,
|
||||||
|
ClientCut: h.config.ClientCut(),
|
||||||
|
WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
|
||||||
|
BufferMaxSize: h.config.BufferMaxSizeForPut(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
|
||||||
|
h.handlePutFrostFSErr(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
addr.SetObject(idObj)
|
||||||
|
addr.SetContainer(bktInfo.CID)
|
||||||
|
|
||||||
|
// Try to return the response, otherwise, if something went wrong, throw an error.
|
||||||
|
if err = newPutResponse(addr).encode(c); err != nil {
|
||||||
|
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
|
||||||
|
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Multipart is multipart and thus can contain more than one part which
|
||||||
|
// we ignore at the moment. Also, when dealing with chunked encoding
|
||||||
|
// the last zero-length chunk might be left unread (because multipart
|
||||||
|
// reader only cares about its boundary and doesn't look further) and
|
||||||
|
// it will be (erroneously) interpreted as the start of the next
|
||||||
|
// pipelined header. Thus we need to drain the body buffer.
|
||||||
|
for {
|
||||||
|
_, err = bodyStream.Read(drainBuf)
|
||||||
|
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Report status code and content type.
|
||||||
|
c.Response.SetStatusCode(fasthttp.StatusOK)
|
||||||
|
c.Response.Header.SetContentType(jsonHeader)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handlePutFrostFSErr logs a failed object store attempt and sends the
// corresponding mapped HTTP error response to the client.
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
	statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)

	h.log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
	response.Error(r, msg, statusCode)
}
|
||||||
|
|
||||||
|
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
|
||||||
|
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
|
||||||
|
return tkn
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
92
internal/handler/utils.go
Normal file
92
internal/handler/utils.go
Normal file
|
@ -0,0 +1,92 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
type request struct {
|
||||||
|
*fasthttp.RequestCtx
|
||||||
|
log *zap.Logger
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *request) handleFrostFSErr(err error, start time.Time) {
|
||||||
|
logFields := []zap.Field{
|
||||||
|
zap.Stringer("elapsed", time.Since(start)),
|
||||||
|
zap.Error(err),
|
||||||
|
}
|
||||||
|
statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
|
||||||
|
logFields = append(logFields, additionalFields...)
|
||||||
|
|
||||||
|
r.log.Error(logs.CouldNotReceiveObject, logFields...)
|
||||||
|
response.Error(r.RequestCtx, msg, statusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func bearerToken(ctx context.Context) *bearer.Token {
|
||||||
|
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
|
||||||
|
return tkn
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isDir(name string) bool {
|
||||||
|
return strings.HasSuffix(name, "/")
|
||||||
|
}
|
||||||
|
|
||||||
|
func isContainerRoot(key string) bool {
|
||||||
|
return key == ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkErrorType(err error) int {
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
return fasthttp.StatusOK
|
||||||
|
case errors.Is(err, tree.ErrNodeAccessDenied):
|
||||||
|
return fasthttp.StatusForbidden
|
||||||
|
default:
|
||||||
|
return fasthttp.StatusNotFound
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidToken(s string) bool {
|
||||||
|
for _, c := range s {
|
||||||
|
if c <= ' ' || c > 127 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidValue(s string) bool {
|
||||||
|
for _, c := range s {
|
||||||
|
// HTTP specification allows for more technically, but we don't want to escape things.
|
||||||
|
if c < ' ' || c > 127 || c == '"' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
|
||||||
|
log.Error(logs.CouldntGetBucket, zap.Error(err))
|
||||||
|
|
||||||
|
if client.IsErrContainerNotFound(err) {
|
||||||
|
response.Error(c, "Not Found", fasthttp.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
}
|
82
internal/logs/logs.go
Normal file
82
internal/logs/logs.go
Normal file
|
@ -0,0 +1,82 @@
|
||||||
|
package logs
|
||||||
|
|
||||||
|
const (
|
||||||
|
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
|
||||||
|
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
|
||||||
|
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
|
||||||
|
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
|
||||||
|
GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
|
||||||
|
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
|
||||||
|
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
|
||||||
|
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
|
||||||
|
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
|
||||||
|
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
|
||||||
|
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
|
||||||
|
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
|
||||||
|
CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
|
||||||
|
ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
|
||||||
|
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
|
||||||
|
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
|
||||||
|
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
|
||||||
|
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
|
||||||
|
CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
|
||||||
|
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
|
||||||
|
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
|
||||||
|
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
|
||||||
|
CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
|
||||||
|
CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
|
||||||
|
CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
|
||||||
|
CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
|
||||||
|
CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
|
||||||
|
CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
|
||||||
|
AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
|
||||||
|
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
|
||||||
|
FailedToReadIndexPageTemplate = "failed to read index page template, set default" // Warn in ../../app.go
|
||||||
|
SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
|
||||||
|
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
|
||||||
|
MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
|
||||||
|
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
|
||||||
|
StartingApplication = "starting application" // Info in ../../app.go
|
||||||
|
StartingServer = "starting server" // Info in ../../app.go
|
||||||
|
ListenAndServe = "listen and serve" // Fatal in ../../app.go
|
||||||
|
ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
|
||||||
|
FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
|
||||||
|
SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
|
||||||
|
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
|
||||||
|
FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
|
||||||
|
LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
|
||||||
|
FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
|
||||||
|
FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
|
||||||
|
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
|
||||||
|
AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
|
||||||
|
AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
|
||||||
|
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
|
||||||
|
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
|
||||||
|
Request = "request" // Info in ../../app.go
|
||||||
|
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
|
||||||
|
FailedToAddServer = "failed to add server" // Warn in ../../app.go
|
||||||
|
AddServer = "add server" // Info in ../../app.go
|
||||||
|
NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
|
||||||
|
FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
|
||||||
|
TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
|
||||||
|
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
|
||||||
|
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
|
||||||
|
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
|
||||||
|
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
|
||||||
|
UsingCredentials = "using credentials" // Info in ../../settings.go
|
||||||
|
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
|
||||||
|
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
|
||||||
|
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
|
||||||
|
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
|
||||||
|
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
|
||||||
|
CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
|
||||||
|
CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
|
||||||
|
InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
|
||||||
|
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
|
||||||
|
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
|
||||||
|
FailedToUnescapeQuery = "failed to unescape query"
|
||||||
|
ServerReconnecting = "reconnecting server..."
|
||||||
|
ServerReconnectedSuccessfully = "server reconnected successfully"
|
||||||
|
ServerReconnectFailed = "failed to reconnect server"
|
||||||
|
WarnDuplicateAddress = "duplicate address"
|
||||||
|
)
|
90
internal/templates/index.gotmpl
Normal file
90
internal/templates/index.gotmpl
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
{{$bucketName := .BucketName}}
|
||||||
|
{{ $prefix := trimPrefix .Prefix }}
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8"/>
|
||||||
|
<title>Index of s3://{{$bucketName}}/{{if $prefix}}/{{$prefix}}/{{end}}</title>
|
||||||
|
<style>
|
||||||
|
table {
|
||||||
|
width: 80%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
}
|
||||||
|
body {
|
||||||
|
background: #f2f2f2;
|
||||||
|
}
|
||||||
|
table, th, td {
|
||||||
|
border: 0 solid transparent;
|
||||||
|
}
|
||||||
|
th, td {
|
||||||
|
padding: 10px;
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
th {
|
||||||
|
background-color: #c3bcbc;
|
||||||
|
}
|
||||||
|
tr:nth-child(even) {background-color: #ebe7e7;}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<h1>Index of s3://{{$bucketName}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
|
||||||
|
<table>
|
||||||
|
<thead>
|
||||||
|
<tr>
|
||||||
|
<th>Filename</th>
|
||||||
|
<th>Size</th>
|
||||||
|
<th>Created</th>
|
||||||
|
<th>Download</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
{{ $trimmedPrefix := trimPrefix $prefix }}
|
||||||
|
{{if $trimmedPrefix }}
|
||||||
|
<tr>
|
||||||
|
<td>
|
||||||
|
⮐<a href="/get/{{$bucketName}}{{ urlencode $trimmedPrefix "" }}">..</a>
|
||||||
|
</td>
|
||||||
|
<td></td>
|
||||||
|
<td></td>
|
||||||
|
<td></td>
|
||||||
|
</tr>
|
||||||
|
{{else}}
|
||||||
|
<tr>
|
||||||
|
<td>
|
||||||
|
⮐<a href="/get/{{ $bucketName }}/">..</a>
|
||||||
|
</td>
|
||||||
|
<td></td>
|
||||||
|
<td></td>
|
||||||
|
<td></td>
|
||||||
|
</tr>
|
||||||
|
{{end}}
|
||||||
|
{{range .Objects}}
|
||||||
|
<tr>
|
||||||
|
<td>
|
||||||
|
{{if .IsDir}}
|
||||||
|
🗀
|
||||||
|
<a href="/get/{{ $bucketName }}{{ urlencode $prefix .FileName }}/">
|
||||||
|
{{.FileName}}/
|
||||||
|
</a>
|
||||||
|
{{else}}
|
||||||
|
🗎
|
||||||
|
<a href="/get/{{ $bucketName }}{{ urlencode $prefix .FileName }}">
|
||||||
|
{{.FileName}}
|
||||||
|
</a>
|
||||||
|
{{end}}
|
||||||
|
</td>
|
||||||
|
<td>{{if not .IsDir}}{{ formatSize .Size }}{{end}}</td>
|
||||||
|
<td>{{if not .IsDir}}{{ formatTimestamp .Created }}{{end}}</td>
|
||||||
|
<td>
|
||||||
|
{{ if not .IsDir }}
|
||||||
|
<a href="/get/{{ $bucketName}}{{ urlencode $prefix .FileName }}?download=true">
|
||||||
|
Link
|
||||||
|
</a>
|
||||||
|
{{ end }}
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
{{end}}
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</body>
|
||||||
|
</html>
|
6
internal/templates/template.go
Normal file
6
internal/templates/template.go
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
package templates
|
||||||
|
|
||||||
|
import _ "embed"
|
||||||
|
|
||||||
|
//go:embed index.gotmpl
|
||||||
|
var DefaultIndexTemplate string
|
150
metrics/desc.go
Normal file
150
metrics/desc.go
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
)
|
||||||
|
|
||||||
|
var appMetricsDesc = map[string]map[string]Description{
|
||||||
|
poolSubsystem: {
|
||||||
|
overallErrorsMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: poolSubsystem,
|
||||||
|
Name: overallErrorsMetric,
|
||||||
|
Help: "Total number of errors in pool",
|
||||||
|
},
|
||||||
|
overallNodeErrorsMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: poolSubsystem,
|
||||||
|
Name: overallNodeErrorsMetric,
|
||||||
|
Help: "Total number of errors for connection in pool",
|
||||||
|
VariableLabels: []string{"node"},
|
||||||
|
},
|
||||||
|
overallNodeRequestsMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: poolSubsystem,
|
||||||
|
Name: overallNodeRequestsMetric,
|
||||||
|
Help: "Total number of requests to specific node in pool",
|
||||||
|
VariableLabels: []string{"node"},
|
||||||
|
},
|
||||||
|
currentErrorMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: poolSubsystem,
|
||||||
|
Name: currentErrorMetric,
|
||||||
|
Help: "Number of errors on current connections that will be reset after the threshold",
|
||||||
|
VariableLabels: []string{"node"},
|
||||||
|
},
|
||||||
|
avgRequestDurationMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: poolSubsystem,
|
||||||
|
Name: avgRequestDurationMetric,
|
||||||
|
Help: "Average request duration (in milliseconds) for specific method on node in pool",
|
||||||
|
VariableLabels: []string{"node", "method"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
stateSubsystem: {
|
||||||
|
healthMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: stateSubsystem,
|
||||||
|
Name: healthMetric,
|
||||||
|
Help: "Current HTTP gateway state",
|
||||||
|
},
|
||||||
|
versionInfoMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: stateSubsystem,
|
||||||
|
Name: versionInfoMetric,
|
||||||
|
Help: "Version of current FrostFS HTTP Gate instance",
|
||||||
|
VariableLabels: []string{"version"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
serverSubsystem: {
|
||||||
|
healthMetric: Description{
|
||||||
|
Type: dto.MetricType_GAUGE,
|
||||||
|
Namespace: namespace,
|
||||||
|
Subsystem: serverSubsystem,
|
||||||
|
Name: healthMetric,
|
||||||
|
Help: "HTTP Server endpoint health",
|
||||||
|
VariableLabels: []string{"endpoint"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
type Description struct {
|
||||||
|
Type dto.MetricType
|
||||||
|
Namespace string
|
||||||
|
Subsystem string
|
||||||
|
Name string
|
||||||
|
Help string
|
||||||
|
ConstantLabels prometheus.Labels
|
||||||
|
VariableLabels []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Description) MarshalJSON() ([]byte, error) {
|
||||||
|
return json.Marshal(&struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
FQName string `json:"name"`
|
||||||
|
Help string `json:"help"`
|
||||||
|
ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
|
||||||
|
VariableLabels []string `json:"variable_labels,omitempty"`
|
||||||
|
}{
|
||||||
|
Type: d.Type.String(),
|
||||||
|
FQName: d.BuildFQName(),
|
||||||
|
Help: d.Help,
|
||||||
|
ConstantLabels: d.ConstantLabels,
|
||||||
|
VariableLabels: d.VariableLabels,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Description) BuildFQName() string {
|
||||||
|
return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DescribeAll returns descriptions for metrics.
|
||||||
|
func DescribeAll() []Description {
|
||||||
|
var list []Description
|
||||||
|
for _, m := range appMetricsDesc {
|
||||||
|
for _, description := range m {
|
||||||
|
list = append(list, description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOpts(description Description) prometheus.Opts {
|
||||||
|
return prometheus.Opts{
|
||||||
|
Namespace: description.Namespace,
|
||||||
|
Subsystem: description.Subsystem,
|
||||||
|
Name: description.Name,
|
||||||
|
Help: description.Help,
|
||||||
|
ConstLabels: description.ConstantLabels,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustNewGauge(description Description) prometheus.Gauge {
|
||||||
|
if description.Type != dto.MetricType_GAUGE {
|
||||||
|
panic("invalid metric type")
|
||||||
|
}
|
||||||
|
return prometheus.NewGauge(
|
||||||
|
prometheus.GaugeOpts(newOpts(description)),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
|
||||||
|
if description.Type != dto.MetricType_GAUGE {
|
||||||
|
panic("invalid metric type")
|
||||||
|
}
|
||||||
|
return prometheus.NewGaugeVec(
|
||||||
|
prometheus.GaugeOpts(newOpts(description)),
|
||||||
|
description.VariableLabels,
|
||||||
|
)
|
||||||
|
}
|
37
metrics/desc_test.go
Normal file
37
metrics/desc_test.go
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
//go:build dump_metrics
|
||||||
|
|
||||||
|
package metrics
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mock struct{}
|
||||||
|
|
||||||
|
func (m mock) Statistic() pool.Statistic {
|
||||||
|
return pool.Statistic{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var metricsPath = flag.String("out", "", "File to export http gateway metrics to.")
|
||||||
|
|
||||||
|
func TestDescribeAll(t *testing.T) {
|
||||||
|
// to check correct metrics type mapping
|
||||||
|
_ = NewGateMetrics(mock{})
|
||||||
|
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")
|
||||||
|
|
||||||
|
desc := DescribeAll()
|
||||||
|
data, err := json.Marshal(desc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = os.WriteFile(*metricsPath, data, 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
|
@ -3,17 +3,33 @@ package metrics
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
namespace = "neofs_http_gw"
|
namespace = "frostfs_http_gw"
|
||||||
stateSubsystem = "state"
|
stateSubsystem = "state"
|
||||||
poolSubsystem = "pool"
|
poolSubsystem = "pool"
|
||||||
|
serverSubsystem = "server"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
healthMetric = "health"
|
||||||
|
versionInfoMetric = "version_info"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
overallErrorsMetric = "overall_errors"
|
||||||
|
overallNodeErrorsMetric = "overall_node_errors"
|
||||||
|
overallNodeRequestsMetric = "overall_node_requests"
|
||||||
|
currentErrorMetric = "current_errors"
|
||||||
|
avgRequestDurationMetric = "avg_request_duration"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
methodGetBalance = "get_balance"
|
methodGetBalance = "get_balance"
|
||||||
methodPutContainer = "put_container"
|
methodPutContainer = "put_container"
|
||||||
methodGetContainer = "get_container"
|
methodGetContainer = "get_container"
|
||||||
|
@ -31,17 +47,37 @@ const (
|
||||||
methodCreateSession = "create_session"
|
methodCreateSession = "create_session"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// HealthStatus of the gate application.
|
||||||
|
type HealthStatus int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
HealthStatusUndefined HealthStatus = 0
|
||||||
|
HealthStatusStarting HealthStatus = 1
|
||||||
|
HealthStatusReady HealthStatus = 2
|
||||||
|
HealthStatusShuttingDown HealthStatus = 3
|
||||||
|
)
|
||||||
|
|
||||||
|
type StatisticScraper interface {
|
||||||
|
Statistic() pool.Statistic
|
||||||
|
}
|
||||||
|
|
||||||
|
type serverMetrics struct {
|
||||||
|
endpointHealth *prometheus.GaugeVec
|
||||||
|
}
|
||||||
|
|
||||||
type GateMetrics struct {
|
type GateMetrics struct {
|
||||||
stateMetrics
|
stateMetrics
|
||||||
poolMetricsCollector
|
poolMetricsCollector
|
||||||
|
serverMetrics
|
||||||
}
|
}
|
||||||
|
|
||||||
type stateMetrics struct {
|
type stateMetrics struct {
|
||||||
healthCheck prometheus.Gauge
|
healthCheck prometheus.Gauge
|
||||||
|
versionInfo *prometheus.GaugeVec
|
||||||
}
|
}
|
||||||
|
|
||||||
type poolMetricsCollector struct {
|
type poolMetricsCollector struct {
|
||||||
pool *pool.Pool
|
scraper StatisticScraper
|
||||||
overallErrors prometheus.Gauge
|
overallErrors prometheus.Gauge
|
||||||
overallNodeErrors *prometheus.GaugeVec
|
overallNodeErrors *prometheus.GaugeVec
|
||||||
overallNodeRequests *prometheus.GaugeVec
|
overallNodeRequests *prometheus.GaugeVec
|
||||||
|
@ -50,113 +86,62 @@ type poolMetricsCollector struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGateMetrics creates new metrics for http gate.
|
// NewGateMetrics creates new metrics for http gate.
|
||||||
func NewGateMetrics(p *pool.Pool) *GateMetrics {
|
func NewGateMetrics(p StatisticScraper) *GateMetrics {
|
||||||
stateMetric := newStateMetrics()
|
stateMetric := newStateMetrics()
|
||||||
stateMetric.register()
|
stateMetric.register()
|
||||||
|
|
||||||
poolMetric := newPoolMetricsCollector(p)
|
poolMetric := newPoolMetricsCollector(p)
|
||||||
poolMetric.register()
|
poolMetric.register()
|
||||||
|
|
||||||
|
serverMetric := newServerMetrics()
|
||||||
|
serverMetric.register()
|
||||||
|
|
||||||
return &GateMetrics{
|
return &GateMetrics{
|
||||||
stateMetrics: *stateMetric,
|
stateMetrics: *stateMetric,
|
||||||
poolMetricsCollector: *poolMetric,
|
poolMetricsCollector: *poolMetric,
|
||||||
|
serverMetrics: *serverMetric,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *GateMetrics) Unregister() {
|
func (g *GateMetrics) Unregister() {
|
||||||
g.stateMetrics.unregister()
|
g.stateMetrics.unregister()
|
||||||
prometheus.Unregister(&g.poolMetricsCollector)
|
prometheus.Unregister(&g.poolMetricsCollector)
|
||||||
|
g.serverMetrics.unregister()
|
||||||
}
|
}
|
||||||
|
|
||||||
func newStateMetrics() *stateMetrics {
|
func newStateMetrics() *stateMetrics {
|
||||||
return &stateMetrics{
|
return &stateMetrics{
|
||||||
healthCheck: prometheus.NewGauge(prometheus.GaugeOpts{
|
healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
|
||||||
Namespace: namespace,
|
versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
|
||||||
Subsystem: stateSubsystem,
|
|
||||||
Name: "health",
|
|
||||||
Help: "Current HTTP gateway state",
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m stateMetrics) register() {
|
func (m stateMetrics) register() {
|
||||||
prometheus.MustRegister(m.healthCheck)
|
prometheus.MustRegister(m.healthCheck)
|
||||||
|
prometheus.MustRegister(m.versionInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m stateMetrics) unregister() {
|
func (m stateMetrics) unregister() {
|
||||||
prometheus.Unregister(m.healthCheck)
|
prometheus.Unregister(m.healthCheck)
|
||||||
|
prometheus.Unregister(m.versionInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m stateMetrics) SetHealth(s int32) {
|
func (m stateMetrics) SetHealth(s HealthStatus) {
|
||||||
m.healthCheck.Set(float64(s))
|
m.healthCheck.Set(float64(s))
|
||||||
}
|
}
|
||||||
|
|
||||||
func newPoolMetricsCollector(p *pool.Pool) *poolMetricsCollector {
|
func (m stateMetrics) SetVersion(ver string) {
|
||||||
overallErrors := prometheus.NewGauge(
|
m.versionInfo.WithLabelValues(ver).Set(1)
|
||||||
prometheus.GaugeOpts{
|
}
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: "overall_errors",
|
|
||||||
Help: "Total number of errors in pool",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
overallNodeErrors := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: "overall_node_errors",
|
|
||||||
Help: "Total number of errors for connection in pool",
|
|
||||||
},
|
|
||||||
[]string{
|
|
||||||
"node",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
overallNodeRequests := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: "overall_node_requests",
|
|
||||||
Help: "Total number of requests to specific node in pool",
|
|
||||||
},
|
|
||||||
[]string{
|
|
||||||
"node",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
currentErrors := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: "current_errors",
|
|
||||||
Help: "Number of errors on current connections that will be reset after the threshold",
|
|
||||||
},
|
|
||||||
[]string{
|
|
||||||
"node",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
requestsDuration := prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts{
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: "avg_request_duration",
|
|
||||||
Help: "Average request duration (in milliseconds) for specific method on node in pool",
|
|
||||||
},
|
|
||||||
[]string{
|
|
||||||
"node",
|
|
||||||
"method",
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
|
func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
|
||||||
return &poolMetricsCollector{
|
return &poolMetricsCollector{
|
||||||
pool: p,
|
scraper: p,
|
||||||
overallErrors: overallErrors,
|
overallErrors: mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]),
|
||||||
overallNodeErrors: overallNodeErrors,
|
overallNodeErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]),
|
||||||
overallNodeRequests: overallNodeRequests,
|
overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
|
||||||
currentErrors: currentErrors,
|
currentErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
|
||||||
requestDuration: requestsDuration,
|
requestDuration: mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -182,7 +167,7 @@ func (m *poolMetricsCollector) register() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *poolMetricsCollector) updateStatistic() {
|
func (m *poolMetricsCollector) updateStatistic() {
|
||||||
stat := m.pool.Statistic()
|
stat := m.scraper.Statistic()
|
||||||
|
|
||||||
m.overallNodeErrors.Reset()
|
m.overallNodeErrors.Reset()
|
||||||
m.overallNodeRequests.Reset()
|
m.overallNodeRequests.Reset()
|
||||||
|
@ -206,8 +191,6 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodGetContainerEacl).Set(float64(node.AverageGetContainerEACL().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodSetContainerEacl).Set(float64(node.AverageSetContainerEACL().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
|
||||||
|
@ -218,6 +201,28 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
|
m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newServerMetrics() *serverMetrics {
|
||||||
|
return &serverMetrics{
|
||||||
|
endpointHealth: mustNewGaugeVec(appMetricsDesc[serverSubsystem][healthMetric]),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m serverMetrics) register() {
|
||||||
|
prometheus.MustRegister(m.endpointHealth)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m serverMetrics) unregister() {
|
||||||
|
prometheus.Unregister(m.endpointHealth)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m serverMetrics) MarkHealthy(endpoint string) {
|
||||||
|
m.endpointHealth.WithLabelValues(endpoint).Set(float64(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m serverMetrics) MarkUnhealthy(endpoint string) {
|
||||||
|
m.endpointHealth.WithLabelValues(endpoint).Set(float64(0))
|
||||||
|
}
|
||||||
|
|
||||||
// NewPrometheusService creates a new service for gathering prometheus metrics.
|
// NewPrometheusService creates a new service for gathering prometheus metrics.
|
||||||
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
|
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
|
||||||
if log == nil {
|
if log == nil {
|
||||||
|
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -24,21 +25,24 @@ type Config struct {
|
||||||
// Start runs http service with the exposed endpoint on the configured port.
|
// Start runs http service with the exposed endpoint on the configured port.
|
||||||
func (ms *Service) Start() {
|
func (ms *Service) Start() {
|
||||||
if ms.enabled {
|
if ms.enabled {
|
||||||
ms.log.Info("service is running", zap.String("endpoint", ms.Addr))
|
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
|
||||||
err := ms.ListenAndServe()
|
err := ms.ListenAndServe()
|
||||||
if err != nil && err != http.ErrServerClosed {
|
if err != nil && err != http.ErrServerClosed {
|
||||||
ms.log.Warn("service couldn't start on configured port")
|
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ms.log.Info("service hasn't started since it's disabled")
|
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ShutDown stops the service.
|
// ShutDown stops the service.
|
||||||
func (ms *Service) ShutDown(ctx context.Context) {
|
func (ms *Service) ShutDown(ctx context.Context) {
|
||||||
ms.log.Info("shutting down service", zap.String("endpoint", ms.Addr))
|
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
|
||||||
err := ms.Shutdown(ctx)
|
err := ms.Shutdown(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ms.log.Panic("can't shut down service")
|
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
|
||||||
|
if err = ms.Close(); err != nil {
|
||||||
|
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,35 +0,0 @@
|
||||||
package resolver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NeoFSResolver represents virtual connection to the NeoFS network.
|
|
||||||
// It implements resolver.NeoFS.
|
|
||||||
type NeoFSResolver struct {
|
|
||||||
pool *pool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNeoFSResolver creates new NeoFSResolver using provided pool.Pool.
|
|
||||||
func NewNeoFSResolver(p *pool.Pool) *NeoFSResolver {
|
|
||||||
return &NeoFSResolver{pool: p}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SystemDNS implements resolver.NeoFS interface method.
|
|
||||||
func (x *NeoFSResolver) SystemDNS(ctx context.Context) (string, error) {
|
|
||||||
networkInfo, err := x.pool.NetworkInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("read network info via client: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
domain := networkInfo.RawNetworkParameter("SystemDNS")
|
|
||||||
if domain == nil {
|
|
||||||
return "", errors.New("system DNS parameter not found or empty")
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(domain), nil
|
|
||||||
}
|
|
|
@ -6,8 +6,10 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/ns"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -18,18 +20,23 @@ const (
|
||||||
// ErrNoResolvers returns when trying to resolve container without any resolver.
|
// ErrNoResolvers returns when trying to resolve container without any resolver.
|
||||||
var ErrNoResolvers = errors.New("no resolvers")
|
var ErrNoResolvers = errors.New("no resolvers")
|
||||||
|
|
||||||
// NeoFS represents virtual connection to the NeoFS network.
|
// FrostFS represents virtual connection to the FrostFS network.
|
||||||
type NeoFS interface {
|
type FrostFS interface {
|
||||||
// SystemDNS reads system DNS network parameters of the NeoFS.
|
// SystemDNS reads system DNS network parameters of the FrostFS.
|
||||||
//
|
//
|
||||||
// Returns exactly on non-zero value. Returns any error encountered
|
// Returns exactly on non-zero value. Returns any error encountered
|
||||||
// which prevented the parameter to be read.
|
// which prevented the parameter to be read.
|
||||||
SystemDNS(context.Context) (string, error)
|
SystemDNS(context.Context) (string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Settings interface {
|
||||||
|
FormContainerZone(ns string) (zone string, isDefault bool)
|
||||||
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
NeoFS NeoFS
|
FrostFS FrostFS
|
||||||
RPCAddress string
|
RPCAddress string
|
||||||
|
Settings Settings
|
||||||
}
|
}
|
||||||
|
|
||||||
type ContainerResolver struct {
|
type ContainerResolver struct {
|
||||||
|
@ -134,29 +141,43 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
|
||||||
func newResolver(name string, cfg *Config) (*Resolver, error) {
|
func newResolver(name string, cfg *Config) (*Resolver, error) {
|
||||||
switch name {
|
switch name {
|
||||||
case DNSResolver:
|
case DNSResolver:
|
||||||
return NewDNSResolver(cfg.NeoFS)
|
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
|
||||||
case NNSResolver:
|
case NNSResolver:
|
||||||
return NewNNSResolver(cfg.RPCAddress)
|
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown resolver: %s", name)
|
return nil, fmt.Errorf("unknown resolver: %s", name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDNSResolver(neoFS NeoFS) (*Resolver, error) {
|
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
|
||||||
if neoFS == nil {
|
if frostFS == nil {
|
||||||
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
|
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
|
||||||
}
|
}
|
||||||
|
if settings == nil {
|
||||||
|
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
|
||||||
|
}
|
||||||
|
|
||||||
var dns ns.DNS
|
var dns ns.DNS
|
||||||
|
|
||||||
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
||||||
domain, err := neoFS.SystemDNS(ctx)
|
var err error
|
||||||
|
|
||||||
|
namespace, err := middleware.GetNamespace(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("read system DNS parameter of the NeoFS: %w", err)
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
domain = name + "." + domain
|
zone, isDefault := settings.FormContainerZone(namespace)
|
||||||
|
if isDefault {
|
||||||
|
zone, err = frostFS.SystemDNS(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
domain := name + "." + zone
|
||||||
cnrID, err := dns.ResolveContainerName(domain)
|
cnrID, err := dns.ResolveContainerName(domain)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
|
return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
|
||||||
}
|
}
|
||||||
|
@ -169,15 +190,33 @@ func NewDNSResolver(neoFS NeoFS) (*Resolver, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewNNSResolver(rpcAddress string) (*Resolver, error) {
|
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
|
||||||
|
if rpcAddress == "" {
|
||||||
|
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
|
||||||
|
}
|
||||||
|
if settings == nil {
|
||||||
|
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
|
||||||
|
}
|
||||||
|
|
||||||
var nns ns.NNS
|
var nns ns.NNS
|
||||||
|
|
||||||
if err := nns.Dial(rpcAddress); err != nil {
|
if err := nns.Dial(rpcAddress); err != nil {
|
||||||
return nil, fmt.Errorf("could not dial nns: %w", err)
|
return nil, fmt.Errorf("could not dial nns: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
resolveFunc := func(_ context.Context, name string) (*cid.ID, error) {
|
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
||||||
cnrID, err := nns.ResolveContainerName(name)
|
var d container.Domain
|
||||||
|
d.SetName(name)
|
||||||
|
|
||||||
|
namespace, err := middleware.GetNamespace(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone, _ := settings.FormContainerZone(namespace)
|
||||||
|
d.SetZone(zone)
|
||||||
|
|
||||||
|
cnrID, err := nns.ResolveContainerDomain(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
|
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,7 +1,41 @@
|
||||||
package response
|
package response
|
||||||
|
|
||||||
import "github.com/valyala/fasthttp"
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
|
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
||||||
|
"github.com/valyala/fasthttp"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
func Error(r *fasthttp.RequestCtx, msg string, code int) {
|
func Error(r *fasthttp.RequestCtx, msg string, code int) {
|
||||||
r.Error(msg+"\n", code)
|
r.Error(msg+"\n", code)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
|
||||||
|
var (
|
||||||
|
msg string
|
||||||
|
statusCode int
|
||||||
|
logFields []zap.Field
|
||||||
|
)
|
||||||
|
|
||||||
|
st := new(sdkstatus.ObjectAccessDenied)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case errors.As(err, &st):
|
||||||
|
statusCode = fasthttp.StatusForbidden
|
||||||
|
reason := st.Reason()
|
||||||
|
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
|
||||||
|
logFields = append(logFields, zap.String("error_detail", reason))
|
||||||
|
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
|
||||||
|
statusCode = fasthttp.StatusNotFound
|
||||||
|
msg = "Not Found"
|
||||||
|
default:
|
||||||
|
statusCode = fasthttp.StatusBadRequest
|
||||||
|
msg = fmt.Sprintf("%s: %v", message, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return statusCode, msg, logFields
|
||||||
|
}
|
||||||
|
|
296
settings.go
296
settings.go
|
@ -1,296 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/resolver"
|
|
||||||
"github.com/spf13/pflag"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"go.uber.org/zap/zapcore"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultRebalanceTimer = 60 * time.Second
|
|
||||||
defaultRequestTimeout = 15 * time.Second
|
|
||||||
defaultConnectTimeout = 10 * time.Second
|
|
||||||
|
|
||||||
defaultShutdownTimeout = 15 * time.Second
|
|
||||||
|
|
||||||
defaultPoolErrorThreshold uint32 = 100
|
|
||||||
|
|
||||||
cfgListenAddress = "listen_address"
|
|
||||||
cfgTLSCertificate = "tls_certificate"
|
|
||||||
cfgTLSKey = "tls_key"
|
|
||||||
|
|
||||||
// Web.
|
|
||||||
cfgWebReadBufferSize = "web.read_buffer_size"
|
|
||||||
cfgWebWriteBufferSize = "web.write_buffer_size"
|
|
||||||
cfgWebReadTimeout = "web.read_timeout"
|
|
||||||
cfgWebWriteTimeout = "web.write_timeout"
|
|
||||||
cfgWebStreamRequestBody = "web.stream_request_body"
|
|
||||||
cfgWebMaxRequestBodySize = "web.max_request_body_size"
|
|
||||||
|
|
||||||
// Metrics / Profiler.
|
|
||||||
cfgPrometheusEnabled = "prometheus.enabled"
|
|
||||||
cfgPrometheusAddress = "prometheus.address"
|
|
||||||
cfgPprofEnabled = "pprof.enabled"
|
|
||||||
cfgPprofAddress = "pprof.address"
|
|
||||||
|
|
||||||
// Pool config.
|
|
||||||
cfgConTimeout = "connect_timeout"
|
|
||||||
cfgReqTimeout = "request_timeout"
|
|
||||||
cfgRebalance = "rebalance_timer"
|
|
||||||
cfgPoolErrorThreshold = "pool_error_threshold"
|
|
||||||
|
|
||||||
// Logger.
|
|
||||||
cfgLoggerLevel = "logger.level"
|
|
||||||
|
|
||||||
// Wallet.
|
|
||||||
cfgWalletPassphrase = "wallet.passphrase"
|
|
||||||
cfgWalletPath = "wallet.path"
|
|
||||||
cfgWalletAddress = "wallet.address"
|
|
||||||
|
|
||||||
// Uploader Header.
|
|
||||||
cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp"
|
|
||||||
|
|
||||||
// Peers.
|
|
||||||
cfgPeers = "peers"
|
|
||||||
|
|
||||||
// NeoGo.
|
|
||||||
cfgRPCEndpoint = "rpc_endpoint"
|
|
||||||
|
|
||||||
// Resolving.
|
|
||||||
cfgResolveOrder = "resolve_order"
|
|
||||||
|
|
||||||
// Zip compression.
|
|
||||||
cfgZipCompression = "zip.compression"
|
|
||||||
|
|
||||||
// Command line args.
|
|
||||||
cmdHelp = "help"
|
|
||||||
cmdVersion = "version"
|
|
||||||
cmdPprof = "pprof"
|
|
||||||
cmdMetrics = "metrics"
|
|
||||||
cmdWallet = "wallet"
|
|
||||||
cmdAddress = "address"
|
|
||||||
cmdConfig = "config"
|
|
||||||
)
|
|
||||||
|
|
||||||
var ignore = map[string]struct{}{
|
|
||||||
cfgPeers: {},
|
|
||||||
cmdHelp: {},
|
|
||||||
cmdVersion: {},
|
|
||||||
}
|
|
||||||
|
|
||||||
func settings() *viper.Viper {
|
|
||||||
v := viper.New()
|
|
||||||
v.AutomaticEnv()
|
|
||||||
v.SetEnvPrefix(Prefix)
|
|
||||||
v.AllowEmptyEnv(true)
|
|
||||||
v.SetConfigType("yaml")
|
|
||||||
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
|
||||||
|
|
||||||
// flags setup:
|
|
||||||
flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
|
|
||||||
flags.SetOutput(os.Stdout)
|
|
||||||
flags.SortFlags = false
|
|
||||||
|
|
||||||
flags.Bool(cmdPprof, false, "enable pprof")
|
|
||||||
flags.Bool(cmdMetrics, false, "enable prometheus")
|
|
||||||
|
|
||||||
help := flags.BoolP(cmdHelp, "h", false, "show help")
|
|
||||||
version := flags.BoolP(cmdVersion, "v", false, "show version")
|
|
||||||
|
|
||||||
flags.StringP(cmdWallet, "w", "", `path to the wallet`)
|
|
||||||
flags.String(cmdAddress, "", `address of wallet account`)
|
|
||||||
flags.String(cmdConfig, "", "config path")
|
|
||||||
flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
|
|
||||||
flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
|
|
||||||
flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
|
|
||||||
|
|
||||||
flags.String(cfgListenAddress, "0.0.0.0:8082", "address to listen")
|
|
||||||
flags.String(cfgTLSCertificate, "", "TLS certificate path")
|
|
||||||
flags.String(cfgTLSKey, "", "TLS key path")
|
|
||||||
peers := flags.StringArrayP(cfgPeers, "p", nil, "NeoFS nodes")
|
|
||||||
|
|
||||||
resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
|
|
||||||
|
|
||||||
// set defaults:
|
|
||||||
|
|
||||||
// logger:
|
|
||||||
v.SetDefault(cfgLoggerLevel, "debug")
|
|
||||||
|
|
||||||
// pool:
|
|
||||||
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
|
|
||||||
|
|
||||||
// web-server:
|
|
||||||
v.SetDefault(cfgWebReadBufferSize, 4096)
|
|
||||||
v.SetDefault(cfgWebWriteBufferSize, 4096)
|
|
||||||
v.SetDefault(cfgWebReadTimeout, time.Minute*10)
|
|
||||||
v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
|
|
||||||
v.SetDefault(cfgWebStreamRequestBody, true)
|
|
||||||
v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
|
|
||||||
|
|
||||||
// upload header
|
|
||||||
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
|
|
||||||
|
|
||||||
// zip:
|
|
||||||
v.SetDefault(cfgZipCompression, false)
|
|
||||||
|
|
||||||
// metrics
|
|
||||||
v.SetDefault(cfgPprofAddress, "localhost:8083")
|
|
||||||
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
|
|
||||||
|
|
||||||
// Binding flags
|
|
||||||
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlags(flags); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := flags.Parse(os.Args); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resolveMethods != nil {
|
|
||||||
v.SetDefault(cfgResolveOrder, *resolveMethods)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case help != nil && *help:
|
|
||||||
fmt.Printf("NeoFS HTTP Gateway %s\n", Version)
|
|
||||||
flags.PrintDefaults()
|
|
||||||
|
|
||||||
fmt.Println()
|
|
||||||
fmt.Println("Default environments:")
|
|
||||||
fmt.Println()
|
|
||||||
keys := v.AllKeys()
|
|
||||||
sort.Strings(keys)
|
|
||||||
|
|
||||||
for i := range keys {
|
|
||||||
if _, ok := ignore[keys[i]]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultValue := v.GetString(keys[i])
|
|
||||||
if len(defaultValue) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
k := strings.Replace(keys[i], ".", "_", -1)
|
|
||||||
fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Println()
|
|
||||||
fmt.Println("Peers preset:")
|
|
||||||
fmt.Println()
|
|
||||||
|
|
||||||
fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers))
|
|
||||||
fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers))
|
|
||||||
|
|
||||||
os.Exit(0)
|
|
||||||
case version != nil && *version:
|
|
||||||
fmt.Printf("NeoFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.IsSet(cmdConfig) {
|
|
||||||
if err := readConfig(v); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if peers != nil && len(*peers) > 0 {
|
|
||||||
for i := range *peers {
|
|
||||||
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
|
|
||||||
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
|
|
||||||
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func readConfig(v *viper.Viper) error {
|
|
||||||
cfgFileName := v.GetString(cmdConfig)
|
|
||||||
cfgFile, err := os.Open(cfgFileName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = v.ReadConfig(cfgFile); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfgFile.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// newLogger constructs a zap.Logger instance for current application.
|
|
||||||
// Panics on failure.
|
|
||||||
//
|
|
||||||
// Logger is built from zap's production logging configuration with:
|
|
||||||
// - parameterized level (debug by default)
|
|
||||||
// - console encoding
|
|
||||||
// - ISO8601 time encoding
|
|
||||||
//
|
|
||||||
// Logger records a stack trace for all messages at or above fatal level.
|
|
||||||
//
|
|
||||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
|
||||||
func newLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
|
|
||||||
lvl, err := getLogLevel(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
c := zap.NewProductionConfig()
|
|
||||||
c.Level = zap.NewAtomicLevelAt(lvl)
|
|
||||||
c.Encoding = "console"
|
|
||||||
c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
|
|
||||||
|
|
||||||
l, err := c.Build(
|
|
||||||
zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("build zap logger instance: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
return l, c.Level
|
|
||||||
}
|
|
||||||
|
|
||||||
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
|
|
||||||
var lvl zapcore.Level
|
|
||||||
lvlStr := v.GetString(cfgLoggerLevel)
|
|
||||||
err := lvl.UnmarshalText([]byte(lvlStr))
|
|
||||||
if err != nil {
|
|
||||||
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
|
|
||||||
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
|
|
||||||
zapcore.DebugLevel,
|
|
||||||
zapcore.InfoLevel,
|
|
||||||
zapcore.WarnLevel,
|
|
||||||
zapcore.ErrorLevel,
|
|
||||||
zapcore.DPanicLevel,
|
|
||||||
zapcore.PanicLevel,
|
|
||||||
zapcore.FatalLevel,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return lvl, nil
|
|
||||||
}
|
|
|
@ -7,15 +7,17 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
)
|
)
|
||||||
|
|
||||||
type fromHandler = func(h *fasthttp.RequestHeader) []byte
|
type fromHandler = func(h *fasthttp.RequestHeader) []byte
|
||||||
|
|
||||||
|
type ctxKey string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
bearerTokenHdr = "Bearer"
|
bearerTokenHdr = "Bearer"
|
||||||
bearerTokenKey = "__context_bearer_token_key"
|
bearerTokenKey ctxKey = "__context_bearer_token_key"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BearerToken usage:
|
// BearerToken usage:
|
||||||
|
@ -48,16 +50,15 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {
|
||||||
return auth
|
return auth
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreBearerToken extracts a bearer token from the header or cookie and stores
|
// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
|
||||||
// it in the request context.
|
// it in the application context.
|
||||||
func StoreBearerToken(ctx *fasthttp.RequestCtx) error {
|
func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) {
|
||||||
tkn, err := fetchBearerToken(ctx)
|
tkn, err := fetchBearerToken(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
// This is an analog of context.WithValue.
|
newCtx := context.WithValue(ctx, bearerTokenKey, tkn)
|
||||||
ctx.SetUserValue(bearerTokenKey, tkn)
|
return newCtx, nil
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadBearerToken returns a bearer token stored in the context given (if it's
|
// LoadBearerToken returns a bearer token stored in the context given (if it's
|
||||||
|
|
|
@ -1,12 +1,15 @@
|
||||||
|
//go:build !integration
|
||||||
|
|
||||||
package tokens
|
package tokens
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
)
|
)
|
||||||
|
@ -20,19 +23,29 @@ func makeTestCookie(value []byte) *fasthttp.RequestHeader {
|
||||||
func makeTestHeader(value []byte) *fasthttp.RequestHeader {
|
func makeTestHeader(value []byte) *fasthttp.RequestHeader {
|
||||||
header := new(fasthttp.RequestHeader)
|
header := new(fasthttp.RequestHeader)
|
||||||
if value != nil {
|
if value != nil {
|
||||||
header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+string(value))
|
header.Set(fasthttp.HeaderAuthorization, string(value))
|
||||||
}
|
}
|
||||||
return header
|
return header
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_fromCookie(t *testing.T) {
|
func makeBearer(value string) string {
|
||||||
|
return bearerTokenHdr + " " + value
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBearerTokenFromCookie(t *testing.T) {
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
actual []byte
|
actual []byte
|
||||||
expect []byte
|
expect []byte
|
||||||
}{
|
}{
|
||||||
{name: "empty"},
|
{
|
||||||
{name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
|
name: "empty",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "normal",
|
||||||
|
actual: []byte("TOKEN"),
|
||||||
|
expect: []byte("TOKEN"),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
|
@ -42,14 +55,31 @@ func Test_fromCookie(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_fromHeader(t *testing.T) {
|
func TestBearerTokenFromHeader(t *testing.T) {
|
||||||
|
validToken := "token"
|
||||||
|
tokenWithoutPrefix := "invalid-token"
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
actual []byte
|
actual []byte
|
||||||
expect []byte
|
expect []byte
|
||||||
}{
|
}{
|
||||||
{name: "empty"},
|
{
|
||||||
{name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
|
name: "empty",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "token without the bearer prefix",
|
||||||
|
actual: []byte(tokenWithoutPrefix),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "token without payload",
|
||||||
|
actual: []byte(makeBearer("")),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "normal",
|
||||||
|
actual: []byte(makeBearer(validToken)),
|
||||||
|
expect: []byte(validToken),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
|
@ -59,7 +89,7 @@ func Test_fromHeader(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_fetchBearerToken(t *testing.T) {
|
func TestFetchBearerToken(t *testing.T) {
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var uid user.ID
|
var uid user.ID
|
||||||
|
@ -73,42 +103,76 @@ func Test_fetchBearerToken(t *testing.T) {
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
|
|
||||||
cookie string
|
cookie string
|
||||||
header string
|
header string
|
||||||
|
|
||||||
error string
|
error string
|
||||||
|
nilCtx bool
|
||||||
expect *bearer.Token
|
expect *bearer.Token
|
||||||
}{
|
}{
|
||||||
{name: "empty"},
|
{
|
||||||
|
name: "empty",
|
||||||
{name: "bad base64 header", header: "WRONG BASE64", error: "can't base64-decode bearer token"},
|
},
|
||||||
{name: "bad base64 cookie", cookie: "WRONG BASE64", error: "can't base64-decode bearer token"},
|
{
|
||||||
|
name: "nil context",
|
||||||
{name: "header token unmarshal error", header: "dGVzdAo=", error: "can't unmarshal bearer token"},
|
nilCtx: true,
|
||||||
{name: "cookie token unmarshal error", cookie: "dGVzdAo=", error: "can't unmarshal bearer token"},
|
},
|
||||||
|
{
|
||||||
|
name: "bad base64 header",
|
||||||
|
header: "WRONG BASE64",
|
||||||
|
error: "can't base64-decode bearer token",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "bad base64 cookie",
|
||||||
|
cookie: "WRONG BASE64",
|
||||||
|
error: "can't base64-decode bearer token",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "header token unmarshal error",
|
||||||
|
header: "dGVzdAo=",
|
||||||
|
error: "can't unmarshal bearer token",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "cookie token unmarshal error",
|
||||||
|
cookie: "dGVzdAo=",
|
||||||
|
error: "can't unmarshal bearer token",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "bad header and cookie",
|
name: "bad header and cookie",
|
||||||
header: "WRONG BASE64",
|
header: "WRONG BASE64",
|
||||||
cookie: "dGVzdAo=",
|
cookie: "dGVzdAo=",
|
||||||
error: "can't unmarshal bearer token",
|
error: "can't unmarshal bearer token",
|
||||||
},
|
},
|
||||||
|
|
||||||
{
|
{
|
||||||
name: "bad header, but good cookie",
|
name: "bad header, but good cookie",
|
||||||
header: "dGVzdAo=",
|
header: "dGVzdAo=",
|
||||||
cookie: t64,
|
cookie: t64,
|
||||||
expect: tkn,
|
expect: tkn,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
{name: "ok for header", header: t64, expect: tkn},
|
name: "bad cookie, but good header",
|
||||||
{name: "ok for cookie", cookie: t64, expect: tkn},
|
header: t64,
|
||||||
|
cookie: "dGVzdAo=",
|
||||||
|
expect: tkn,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ok for header",
|
||||||
|
header: t64,
|
||||||
|
expect: tkn,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ok for cookie",
|
||||||
|
cookie: t64,
|
||||||
|
expect: tkn,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
ctx := makeTestRequest(tt.cookie, tt.header)
|
var ctx *fasthttp.RequestCtx
|
||||||
|
if !tt.nilCtx {
|
||||||
|
ctx = makeTestRequest(tt.cookie, tt.header)
|
||||||
|
}
|
||||||
|
|
||||||
actual, err := fetchBearerToken(ctx)
|
actual, err := fetchBearerToken(ctx)
|
||||||
|
|
||||||
if tt.error == "" {
|
if tt.error == "" {
|
||||||
|
@ -136,7 +200,7 @@ func makeTestRequest(cookie, header string) *fasthttp.RequestCtx {
|
||||||
return ctx
|
return ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
func Test_checkAndPropagateBearerToken(t *testing.T) {
|
func TestCheckAndPropagateBearerToken(t *testing.T) {
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var uid user.ID
|
var uid user.ID
|
||||||
|
@ -148,13 +212,96 @@ func Test_checkAndPropagateBearerToken(t *testing.T) {
|
||||||
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
||||||
require.NotEmpty(t, t64)
|
require.NotEmpty(t, t64)
|
||||||
|
|
||||||
ctx := makeTestRequest(t64, "")
|
req := makeTestRequest(t64, "")
|
||||||
|
|
||||||
// Expect to see the token within the context.
|
// Expect to see the token within the context.
|
||||||
require.NoError(t, StoreBearerToken(ctx))
|
appCtx, err := StoreBearerTokenAppCtx(context.Background(), req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Expect to see the same token without errors.
|
// Expect to see the same token without errors.
|
||||||
actual, err := LoadBearerToken(ctx)
|
actual, err := LoadBearerToken(appCtx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, tkn, actual)
|
require.Equal(t, tkn, actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestLoadBearerToken(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
token := new(bearer.Token)
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
appCtx context.Context
|
||||||
|
error string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "token is missing in the context",
|
||||||
|
appCtx: ctx,
|
||||||
|
error: "found empty bearer token",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "normal",
|
||||||
|
appCtx: context.WithValue(ctx, bearerTokenKey, token),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range cases {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
tkn, err := LoadBearerToken(tt.appCtx)
|
||||||
|
|
||||||
|
if tt.error == "" {
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, token, tkn)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Contains(t, err.Error(), tt.error)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreBearerTokenAppCtx(t *testing.T) {
|
||||||
|
key, err := keys.NewPrivateKey()
|
||||||
|
require.NoError(t, err)
|
||||||
|
var uid user.ID
|
||||||
|
user.IDFromKey(&uid, key.PrivateKey.PublicKey)
|
||||||
|
|
||||||
|
tkn := new(bearer.Token)
|
||||||
|
tkn.ForUser(uid)
|
||||||
|
|
||||||
|
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
||||||
|
require.NotEmpty(t, t64)
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
req *fasthttp.RequestCtx
|
||||||
|
error string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "invalid token",
|
||||||
|
req: makeTestRequest("dGVzdAo=", ""),
|
||||||
|
error: "can't unmarshal bearer token",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "normal",
|
||||||
|
req: makeTestRequest(t64, ""),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range cases {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
ctx, err := StoreBearerTokenAppCtx(context.Background(), tt.req)
|
||||||
|
|
||||||
|
if tt.error == "" {
|
||||||
|
require.NoError(t, err)
|
||||||
|
actualToken, ok := ctx.Value(bearerTokenKey).(*bearer.Token)
|
||||||
|
require.True(t, ok)
|
||||||
|
require.Equal(t, tkn, actualToken)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Contains(t, err.Error(), tt.error)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
347
tree/tree.go
Normal file
347
tree/tree.go
Normal file
|
@ -0,0 +1,347 @@
|
||||||
|
package tree
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
||||||
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
Tree struct {
|
||||||
|
service ServiceClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceClient is a client to interact with tree service.
|
||||||
|
// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
|
||||||
|
ServiceClient interface {
|
||||||
|
GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
|
||||||
|
GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
treeNode struct {
|
||||||
|
ObjID oid.ID
|
||||||
|
Meta map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
GetNodesParams struct {
|
||||||
|
CnrID cid.ID
|
||||||
|
BktInfo *data.BucketInfo
|
||||||
|
TreeID string
|
||||||
|
Path []string
|
||||||
|
Meta []string
|
||||||
|
LatestOnly bool
|
||||||
|
AllAttrs bool
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrNodeNotFound is returned from ServiceClient in case of not found error.
|
||||||
|
ErrNodeNotFound = layer.ErrNodeNotFound
|
||||||
|
|
||||||
|
// ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error.
|
||||||
|
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
FileNameKey = "FileName"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
oidKV = "OID"
|
||||||
|
|
||||||
|
// keys for delete marker nodes.
|
||||||
|
isDeleteMarkerKV = "IsDeleteMarker"
|
||||||
|
sizeKV = "Size"
|
||||||
|
|
||||||
|
// versionTree -- ID of a tree with object versions.
|
||||||
|
versionTree = "version"
|
||||||
|
|
||||||
|
separator = "/"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewTree creates instance of Tree using provided address and create grpc connection.
|
||||||
|
func NewTree(service ServiceClient) *Tree {
|
||||||
|
return &Tree{service: service}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Meta interface {
|
||||||
|
GetKey() string
|
||||||
|
GetValue() []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type NodeResponse interface {
|
||||||
|
GetMeta() []Meta
|
||||||
|
GetTimestamp() []uint64
|
||||||
|
GetNodeID() []uint64
|
||||||
|
GetParentID() []uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
|
||||||
|
tNode := &treeNode{
|
||||||
|
Meta: make(map[string]string, len(nodeInfo.GetMeta())),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kv := range nodeInfo.GetMeta() {
|
||||||
|
switch kv.GetKey() {
|
||||||
|
case oidKV:
|
||||||
|
if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
tNode.Meta[kv.GetKey()] = string(kv.GetValue())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return tNode, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *treeNode) Get(key string) (string, bool) {
|
||||||
|
value, ok := n.Meta[key]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *treeNode) FileName() (string, bool) {
|
||||||
|
value, ok := n.Meta[FileNameKey]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
|
||||||
|
tNode, err := newTreeNode(node)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid tree node: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return newNodeVersionFromTreeNode(tNode), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
|
||||||
|
_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
|
||||||
|
size, _ := treeNode.Get(sizeKV)
|
||||||
|
version := &api.NodeVersion{
|
||||||
|
BaseNodeVersion: api.BaseNodeVersion{
|
||||||
|
OID: treeNode.ObjID,
|
||||||
|
},
|
||||||
|
DeleteMarker: isDeleteMarker,
|
||||||
|
IsPrefixNode: size == "",
|
||||||
|
}
|
||||||
|
|
||||||
|
return version
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
|
||||||
|
nodes, err := c.GetVersions(ctx, cnrID, objectName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
latestNode, err := getLatestVersionNode(nodes)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return newNodeVersion(latestNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
|
||||||
|
meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
|
||||||
|
path := pathFromName(objectName)
|
||||||
|
|
||||||
|
p := &GetNodesParams{
|
||||||
|
CnrID: *cnrID,
|
||||||
|
TreeID: versionTree,
|
||||||
|
Path: path,
|
||||||
|
Meta: meta,
|
||||||
|
LatestOnly: false,
|
||||||
|
AllAttrs: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
return c.service.GetNodes(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
|
||||||
|
var (
|
||||||
|
maxCreationTime uint64
|
||||||
|
targetIndexNode = -1
|
||||||
|
)
|
||||||
|
|
||||||
|
for i, node := range nodes {
|
||||||
|
if !checkExistOID(node.GetMeta()) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
|
||||||
|
targetIndexNode = i
|
||||||
|
maxCreationTime = currentCreationTime
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if targetIndexNode == -1 {
|
||||||
|
return nil, layer.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodes[targetIndexNode], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkExistOID(meta []Meta) bool {
|
||||||
|
for _, kv := range meta {
|
||||||
|
if kv.GetKey() == "OID" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// pathFromName splits name by '/'.
|
||||||
|
func pathFromName(objectName string) []string {
|
||||||
|
return strings.Split(objectName, separator)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
|
||||||
|
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, layer.ErrNodeNotFound) {
|
||||||
|
return nil, "", nil
|
||||||
|
}
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
nodesMap := make(map[string][]NodeResponse, len(subTree))
|
||||||
|
for _, node := range subTree {
|
||||||
|
if MultiID(rootID).Equal(node.GetNodeID()) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fileName := GetFilename(node)
|
||||||
|
if !strings.HasPrefix(fileName, tailPrefix) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
nodes := nodesMap[fileName]
|
||||||
|
|
||||||
|
// Add all nodes if flag latestOnly is false.
|
||||||
|
// Add all intermediate nodes
|
||||||
|
// and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0]
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
nodes = []NodeResponse{node}
|
||||||
|
} else if !latestOnly || isIntermediate(node) {
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
} else if isIntermediate(nodes[0]) {
|
||||||
|
nodes = append([]NodeResponse{node}, nodes...)
|
||||||
|
} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
|
||||||
|
nodes[0] = node
|
||||||
|
}
|
||||||
|
|
||||||
|
nodesMap[fileName] = nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
result := make([]NodeResponse, 0, len(subTree))
|
||||||
|
for _, nodes := range nodesMap {
|
||||||
|
result = append(result, nodes...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, strings.TrimSuffix(prefix, tailPrefix), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
|
||||||
|
rootID := []uint64{0}
|
||||||
|
path := strings.Split(prefix, separator)
|
||||||
|
tailPrefix := path[len(path)-1]
|
||||||
|
|
||||||
|
if len(path) > 1 {
|
||||||
|
var err error
|
||||||
|
rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return rootID, tailPrefix, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
|
||||||
|
p := &GetNodesParams{
|
||||||
|
CnrID: bktInfo.CID,
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
TreeID: treeID,
|
||||||
|
Path: prefixPath,
|
||||||
|
LatestOnly: false,
|
||||||
|
AllAttrs: true,
|
||||||
|
}
|
||||||
|
nodes, err := c.service.GetNodes(ctx, p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var intermediateNodes []uint64
|
||||||
|
for _, node := range nodes {
|
||||||
|
if isIntermediate(node) {
|
||||||
|
intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(intermediateNodes) == 0 {
|
||||||
|
return nil, layer.ErrNodeNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
return intermediateNodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetFilename(node NodeResponse) string {
|
||||||
|
for _, kv := range node.GetMeta() {
|
||||||
|
if kv.GetKey() == FileNameKey {
|
||||||
|
return string(kv.GetValue())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func isIntermediate(node NodeResponse) bool {
|
||||||
|
if len(node.GetMeta()) != 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return node.GetMeta()[0].GetKey() == FileNameKey
|
||||||
|
}
|
||||||
|
|
||||||
|
func getMaxTimestamp(node NodeResponse) uint64 {
|
||||||
|
var maxTimestamp uint64
|
||||||
|
|
||||||
|
for _, timestamp := range node.GetTimestamp() {
|
||||||
|
if timestamp > maxTimestamp {
|
||||||
|
maxTimestamp = timestamp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return maxTimestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
type MultiID []uint64
|
||||||
|
|
||||||
|
func (m MultiID) Equal(id MultiID) bool {
|
||||||
|
seen := make(map[uint64]struct{}, len(m))
|
||||||
|
|
||||||
|
for i := range m {
|
||||||
|
seen[m[i]] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range id {
|
||||||
|
if _, ok := seen[id[i]]; !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
150
tree/tree_test.go
Normal file
150
tree/tree_test.go
Normal file
|
@ -0,0 +1,150 @@
|
||||||
|
package tree
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type nodeMeta struct {
|
||||||
|
key string
|
||||||
|
value []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m nodeMeta) GetKey() string {
|
||||||
|
return m.key
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m nodeMeta) GetValue() []byte {
|
||||||
|
return m.value
|
||||||
|
}
|
||||||
|
|
||||||
|
type nodeResponse struct {
|
||||||
|
meta []nodeMeta
|
||||||
|
timestamp uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetTimestamp() []uint64 {
|
||||||
|
return []uint64{n.timestamp}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetMeta() []Meta {
|
||||||
|
res := make([]Meta, len(n.meta))
|
||||||
|
for i, value := range n.meta {
|
||||||
|
res[i] = value
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n nodeResponse) GetNodeID() []uint64 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (n nodeResponse) GetParentID() []uint64 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetLatestNode(t *testing.T) {
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
nodes []NodeResponse
|
||||||
|
exceptedOID string
|
||||||
|
error bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty",
|
||||||
|
nodes: []NodeResponse{},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "one node of the object version",
|
||||||
|
nodes: []NodeResponse{
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 1,
|
||||||
|
meta: []nodeMeta{
|
||||||
|
{
|
||||||
|
key: oidKV,
|
||||||
|
value: []byte("oid1"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
exceptedOID: "oid1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "one node of the object version and one node of the secondary object",
|
||||||
|
nodes: []NodeResponse{
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 3,
|
||||||
|
meta: []nodeMeta{},
|
||||||
|
},
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 1,
|
||||||
|
meta: []nodeMeta{
|
||||||
|
{
|
||||||
|
key: oidKV,
|
||||||
|
value: []byte("oid1"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
exceptedOID: "oid1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "all nodes represent a secondary object",
|
||||||
|
nodes: []NodeResponse{
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 3,
|
||||||
|
meta: []nodeMeta{},
|
||||||
|
},
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 5,
|
||||||
|
meta: []nodeMeta{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "several nodes of different types and with different timestamp",
|
||||||
|
nodes: []NodeResponse{
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 1,
|
||||||
|
meta: []nodeMeta{
|
||||||
|
{
|
||||||
|
key: oidKV,
|
||||||
|
value: []byte("oid1"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 3,
|
||||||
|
meta: []nodeMeta{},
|
||||||
|
},
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 4,
|
||||||
|
meta: []nodeMeta{
|
||||||
|
{
|
||||||
|
key: oidKV,
|
||||||
|
value: []byte("oid2"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
nodeResponse{
|
||||||
|
timestamp: 6,
|
||||||
|
meta: []nodeMeta{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
exceptedOID: "oid2",
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
actualNode, err := getLatestVersionNode(tc.nodes)
|
||||||
|
if tc.error {
|
||||||
|
require.Error(t, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, tc.exceptedOID, string(actualNode.GetMeta()[0].GetValue()))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,139 +0,0 @@
|
||||||
package uploader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-api-go/v2/object"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
var neofsAttributeHeaderPrefixes = [...][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")}
|
|
||||||
|
|
||||||
func systemTranslator(key, prefix []byte) []byte {
|
|
||||||
// replace the specified prefix with `__NEOFS__`
|
|
||||||
key = bytes.Replace(key, prefix, []byte(utils.SystemAttributePrefix), 1)
|
|
||||||
|
|
||||||
// replace `-` with `_`
|
|
||||||
key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
|
|
||||||
|
|
||||||
// replace with uppercase
|
|
||||||
return bytes.ToUpper(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) map[string]string {
|
|
||||||
result := make(map[string]string)
|
|
||||||
prefix := []byte(utils.UserAttributeHeaderPrefix)
|
|
||||||
|
|
||||||
header.VisitAll(func(key, val []byte) {
|
|
||||||
// checks that the key and the val not empty
|
|
||||||
if len(key) == 0 || len(val) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// checks that the key has attribute prefix
|
|
||||||
if !bytes.HasPrefix(key, prefix) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// removing attribute prefix
|
|
||||||
key = bytes.TrimPrefix(key, prefix)
|
|
||||||
|
|
||||||
// checks that it's a system NeoFS header
|
|
||||||
for _, system := range neofsAttributeHeaderPrefixes {
|
|
||||||
if bytes.HasPrefix(key, system) {
|
|
||||||
key = systemTranslator(key, system)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// checks that the attribute key is not empty
|
|
||||||
if len(key) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// make string representation of key / val
|
|
||||||
k, v := string(key), string(val)
|
|
||||||
|
|
||||||
result[k] = v
|
|
||||||
|
|
||||||
l.Debug("add attribute to result object",
|
|
||||||
zap.String("key", k),
|
|
||||||
zap.String("val", v))
|
|
||||||
})
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareExpirationHeader(headers map[string]string, epochDurations *epochDurations) error {
|
|
||||||
expirationInEpoch := headers[object.SysAttributeExpEpoch]
|
|
||||||
|
|
||||||
if timeRFC3339, ok := headers[utils.ExpirationRFC3339Attr]; ok {
|
|
||||||
expTime, err := time.Parse(time.RFC3339, timeRFC3339)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, utils.ExpirationRFC3339Attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
if expTime.Before(now) {
|
|
||||||
return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, utils.ExpirationRFC3339Attr)
|
|
||||||
}
|
|
||||||
updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
|
|
||||||
delete(headers, utils.ExpirationRFC3339Attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if timestamp, ok := headers[utils.ExpirationTimestampAttr]; ok {
|
|
||||||
value, err := strconv.ParseInt(timestamp, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't parse value %s of header %s", timestamp, utils.ExpirationTimestampAttr)
|
|
||||||
}
|
|
||||||
expTime := time.Unix(value, 0)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
if expTime.Before(now) {
|
|
||||||
return fmt.Errorf("value %s of header %s must be in the future", timestamp, utils.ExpirationTimestampAttr)
|
|
||||||
}
|
|
||||||
updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
|
|
||||||
delete(headers, utils.ExpirationTimestampAttr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if duration, ok := headers[utils.ExpirationDurationAttr]; ok {
|
|
||||||
expDuration, err := time.ParseDuration(duration)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't parse value %s of header %s", duration, utils.ExpirationDurationAttr)
|
|
||||||
}
|
|
||||||
if expDuration <= 0 {
|
|
||||||
return fmt.Errorf("value %s of header %s must be positive", expDuration, utils.ExpirationDurationAttr)
|
|
||||||
}
|
|
||||||
updateExpirationHeader(headers, epochDurations, expDuration)
|
|
||||||
delete(headers, utils.ExpirationDurationAttr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if expirationInEpoch != "" {
|
|
||||||
headers[object.SysAttributeExpEpoch] = expirationInEpoch
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateExpirationHeader(headers map[string]string, durations *epochDurations, expDuration time.Duration) {
|
|
||||||
epochDuration := uint64(durations.msPerBlock) * durations.blockPerEpoch
|
|
||||||
currentEpoch := durations.currentEpoch
|
|
||||||
numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
|
|
||||||
|
|
||||||
if uint64(expDuration.Milliseconds())%epochDuration != 0 {
|
|
||||||
numEpoch++
|
|
||||||
}
|
|
||||||
|
|
||||||
expirationEpoch := uint64(math.MaxUint64)
|
|
||||||
if numEpoch < math.MaxUint64-currentEpoch {
|
|
||||||
expirationEpoch = currentEpoch + numEpoch
|
|
||||||
}
|
|
||||||
|
|
||||||
headers[object.SysAttributeExpEpoch] = strconv.FormatUint(expirationEpoch, 10)
|
|
||||||
}
|
|
|
@ -1,190 +0,0 @@
|
||||||
package uploader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strconv"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-api-go/v2/object"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestFilter(t *testing.T) {
|
|
||||||
log := zap.NewNop()
|
|
||||||
|
|
||||||
req := &fasthttp.RequestHeader{}
|
|
||||||
req.DisableNormalizing()
|
|
||||||
req.Set("X-Attribute-Neofs-Expiration-Epoch1", "101")
|
|
||||||
req.Set("X-Attribute-NEOFS-Expiration-Epoch2", "102")
|
|
||||||
req.Set("X-Attribute-neofs-Expiration-Epoch3", "103")
|
|
||||||
req.Set("X-Attribute-MyAttribute", "value")
|
|
||||||
|
|
||||||
expected := map[string]string{
|
|
||||||
"__NEOFS__EXPIRATION_EPOCH1": "101",
|
|
||||||
"MyAttribute": "value",
|
|
||||||
"__NEOFS__EXPIRATION_EPOCH3": "103",
|
|
||||||
"__NEOFS__EXPIRATION_EPOCH2": "102",
|
|
||||||
}
|
|
||||||
|
|
||||||
result := filterHeaders(log, req)
|
|
||||||
|
|
||||||
require.Equal(t, expected, result)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPrepareExpirationHeader(t *testing.T) {
|
|
||||||
tomorrow := time.Now().Add(24 * time.Hour)
|
|
||||||
tomorrowUnix := tomorrow.Unix()
|
|
||||||
tomorrowUnixNano := tomorrow.UnixNano()
|
|
||||||
tomorrowUnixMilli := tomorrowUnixNano / 1e6
|
|
||||||
|
|
||||||
epoch := "100"
|
|
||||||
duration := "24h"
|
|
||||||
timestampSec := strconv.FormatInt(tomorrowUnix, 10)
|
|
||||||
timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10)
|
|
||||||
timestampNano := strconv.FormatInt(tomorrowUnixNano, 10)
|
|
||||||
|
|
||||||
defaultDurations := &epochDurations{
|
|
||||||
currentEpoch: 10,
|
|
||||||
msPerBlock: 1000,
|
|
||||||
blockPerEpoch: 101,
|
|
||||||
}
|
|
||||||
|
|
||||||
msPerBlock := defaultDurations.blockPerEpoch * uint64(defaultDurations.msPerBlock)
|
|
||||||
epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerBlock
|
|
||||||
if uint64((24*time.Hour).Milliseconds())%msPerBlock != 0 {
|
|
||||||
epochPerDay++
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultExpEpoch := strconv.FormatUint(defaultDurations.currentEpoch+epochPerDay, 10)
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
headers map[string]string
|
|
||||||
durations *epochDurations
|
|
||||||
err bool
|
|
||||||
expected map[string]string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "valid epoch",
|
|
||||||
headers: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid epoch, valid duration",
|
|
||||||
headers: map[string]string{
|
|
||||||
object.SysAttributeExpEpoch: epoch,
|
|
||||||
utils.ExpirationDurationAttr: duration,
|
|
||||||
},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid epoch, valid rfc3339",
|
|
||||||
headers: map[string]string{
|
|
||||||
object.SysAttributeExpEpoch: epoch,
|
|
||||||
utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339),
|
|
||||||
},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid epoch, valid timestamp sec",
|
|
||||||
headers: map[string]string{
|
|
||||||
object.SysAttributeExpEpoch: epoch,
|
|
||||||
utils.ExpirationTimestampAttr: timestampSec,
|
|
||||||
},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid epoch, valid timestamp milli",
|
|
||||||
headers: map[string]string{
|
|
||||||
object.SysAttributeExpEpoch: epoch,
|
|
||||||
utils.ExpirationTimestampAttr: timestampMilli,
|
|
||||||
},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid epoch, valid timestamp nano",
|
|
||||||
headers: map[string]string{
|
|
||||||
object.SysAttributeExpEpoch: epoch,
|
|
||||||
utils.ExpirationTimestampAttr: timestampNano,
|
|
||||||
},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: epoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid timestamp sec",
|
|
||||||
headers: map[string]string{utils.ExpirationTimestampAttr: timestampSec},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid duration",
|
|
||||||
headers: map[string]string{utils.ExpirationDurationAttr: duration},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid rfc3339",
|
|
||||||
headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
|
|
||||||
durations: defaultDurations,
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: defaultExpEpoch},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid max uint 64",
|
|
||||||
headers: map[string]string{utils.ExpirationRFC3339Attr: tomorrow.Format(time.RFC3339)},
|
|
||||||
durations: &epochDurations{
|
|
||||||
currentEpoch: math.MaxUint64 - 1,
|
|
||||||
msPerBlock: defaultDurations.msPerBlock,
|
|
||||||
blockPerEpoch: defaultDurations.blockPerEpoch,
|
|
||||||
},
|
|
||||||
expected: map[string]string{object.SysAttributeExpEpoch: strconv.FormatUint(uint64(math.MaxUint64), 10)},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid timestamp sec",
|
|
||||||
headers: map[string]string{utils.ExpirationTimestampAttr: "abc"},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid timestamp sec zero",
|
|
||||||
headers: map[string]string{utils.ExpirationTimestampAttr: "0"},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid duration",
|
|
||||||
headers: map[string]string{utils.ExpirationDurationAttr: "1d"},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid duration negative",
|
|
||||||
headers: map[string]string{utils.ExpirationDurationAttr: "-5h"},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid rfc3339",
|
|
||||||
headers: map[string]string{utils.ExpirationRFC3339Attr: "abc"},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid rfc3339 zero",
|
|
||||||
headers: map[string]string{utils.ExpirationRFC3339Attr: time.RFC3339},
|
|
||||||
err: true,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
err := prepareExpirationHeader(tc.headers, tc.durations)
|
|
||||||
if tc.err {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, tc.expected, tc.headers)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,250 +0,0 @@
|
||||||
package uploader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/resolver"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/response"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/tokens"
|
|
||||||
"github.com/nspcc-dev/neofs-http-gw/utils"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/atomic"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Response and buffering parameters shared by the upload handler.
const (
	// jsonHeader is the Content-Type value set on successful upload responses.
	jsonHeader = "application/json; charset=UTF-8"

	// drainBufSize is the scratch-buffer size used to drain the request body.
	drainBufSize = 4096
)
|
|
||||||
|
|
||||||
// Uploader is an upload request handler.
|
|
||||||
type Uploader struct {
|
|
||||||
appCtx context.Context
|
|
||||||
log *zap.Logger
|
|
||||||
pool *pool.Pool
|
|
||||||
ownerID *user.ID
|
|
||||||
settings *Settings
|
|
||||||
containerResolver *resolver.ContainerResolver
|
|
||||||
}
|
|
||||||
|
|
||||||
// epochDurations is a snapshot of network timing parameters used to convert
// wall-clock expiration values into NeoFS epochs.
type epochDurations struct {
	currentEpoch  uint64 // epoch number at the time of the snapshot
	msPerBlock    int64  // milliseconds per block
	blockPerEpoch uint64 // blocks per epoch
}
|
|
||||||
|
|
||||||
// Settings stores reloading parameters, so it has to provide atomic getters and setters.
|
|
||||||
type Settings struct {
|
|
||||||
defaultTimestamp atomic.Bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Settings) DefaultTimestamp() bool {
|
|
||||||
return s.defaultTimestamp.Load()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Settings) SetDefaultTimestamp(val bool) {
|
|
||||||
s.defaultTimestamp.Store(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Uploader using specified logger, connection pool and
|
|
||||||
// other options.
|
|
||||||
func New(ctx context.Context, params *utils.AppParams, settings *Settings) *Uploader {
|
|
||||||
return &Uploader{
|
|
||||||
appCtx: ctx,
|
|
||||||
log: params.Logger,
|
|
||||||
pool: params.Pool,
|
|
||||||
ownerID: params.Owner,
|
|
||||||
settings: settings,
|
|
||||||
containerResolver: params.Resolver,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Upload handles multipart upload request.
|
|
||||||
func (u *Uploader) Upload(c *fasthttp.RequestCtx) {
|
|
||||||
var (
|
|
||||||
file MultipartFile
|
|
||||||
idObj oid.ID
|
|
||||||
addr oid.Address
|
|
||||||
scid, _ = c.UserValue("cid").(string)
|
|
||||||
log = u.log.With(zap.String("cid", scid))
|
|
||||||
bodyStream = c.RequestBodyStream()
|
|
||||||
drainBuf = make([]byte, drainBufSize)
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := tokens.StoreBearerToken(c); err != nil {
|
|
||||||
log.Error("could not fetch bearer token", zap.Error(err))
|
|
||||||
response.Error(c, "could not fetch bearer token", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
idCnr, err := utils.GetContainerID(u.appCtx, scid, u.containerResolver)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("wrong container id", zap.Error(err))
|
|
||||||
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
// If the temporary reader can be closed - let's close it.
|
|
||||||
if file == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err := file.Close()
|
|
||||||
log.Debug(
|
|
||||||
"close temporary multipart/form file",
|
|
||||||
zap.Stringer("address", addr),
|
|
||||||
zap.String("filename", file.FileName()),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
}()
|
|
||||||
boundary := string(c.Request.Header.MultipartFormBoundary())
|
|
||||||
if file, err = fetchMultipartFile(u.log, bodyStream, boundary); err != nil {
|
|
||||||
log.Error("could not receive multipart/form", zap.Error(err))
|
|
||||||
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
filtered := filterHeaders(u.log, &c.Request.Header)
|
|
||||||
if needParseExpiration(filtered) {
|
|
||||||
epochDuration, err := getEpochDurations(c, u.pool)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("could not get epoch durations from network info", zap.Error(err))
|
|
||||||
response.Error(c, "could not get epoch durations from network info: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = prepareExpirationHeader(filtered, epochDuration); err != nil {
|
|
||||||
log.Error("could not parse expiration header", zap.Error(err))
|
|
||||||
response.Error(c, "could not parse expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
attributes := make([]object.Attribute, 0, len(filtered))
|
|
||||||
// prepares attributes from filtered headers
|
|
||||||
for key, val := range filtered {
|
|
||||||
attribute := object.NewAttribute()
|
|
||||||
attribute.SetKey(key)
|
|
||||||
attribute.SetValue(val)
|
|
||||||
attributes = append(attributes, *attribute)
|
|
||||||
}
|
|
||||||
// sets FileName attribute if it wasn't set from header
|
|
||||||
if _, ok := filtered[object.AttributeFileName]; !ok {
|
|
||||||
filename := object.NewAttribute()
|
|
||||||
filename.SetKey(object.AttributeFileName)
|
|
||||||
filename.SetValue(file.FileName())
|
|
||||||
attributes = append(attributes, *filename)
|
|
||||||
}
|
|
||||||
// sets Timestamp attribute if it wasn't set from header and enabled by settings
|
|
||||||
if _, ok := filtered[object.AttributeTimestamp]; !ok && u.settings.DefaultTimestamp() {
|
|
||||||
timestamp := object.NewAttribute()
|
|
||||||
timestamp.SetKey(object.AttributeTimestamp)
|
|
||||||
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
|
|
||||||
attributes = append(attributes, *timestamp)
|
|
||||||
}
|
|
||||||
id, bt := u.fetchOwnerAndBearerToken(c)
|
|
||||||
|
|
||||||
obj := object.New()
|
|
||||||
obj.SetContainerID(*idCnr)
|
|
||||||
obj.SetOwnerID(id)
|
|
||||||
obj.SetAttributes(attributes...)
|
|
||||||
|
|
||||||
var prm pool.PrmObjectPut
|
|
||||||
prm.SetHeader(*obj)
|
|
||||||
prm.SetPayload(file)
|
|
||||||
|
|
||||||
if bt != nil {
|
|
||||||
prm.UseBearer(*bt)
|
|
||||||
}
|
|
||||||
|
|
||||||
if idObj, err = u.pool.PutObject(u.appCtx, prm); err != nil {
|
|
||||||
log.Error("could not store file in neofs", zap.Error(err))
|
|
||||||
response.Error(c, "could not store file in neofs: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
addr.SetObject(idObj)
|
|
||||||
addr.SetContainer(*idCnr)
|
|
||||||
|
|
||||||
// Try to return the response, otherwise, if something went wrong, throw an error.
|
|
||||||
if err = newPutResponse(addr).encode(c); err != nil {
|
|
||||||
log.Error("could not encode response", zap.Error(err))
|
|
||||||
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Multipart is multipart and thus can contain more than one part which
|
|
||||||
// we ignore at the moment. Also, when dealing with chunked encoding
|
|
||||||
// the last zero-length chunk might be left unread (because multipart
|
|
||||||
// reader only cares about its boundary and doesn't look further) and
|
|
||||||
// it will be (erroneously) interpreted as the start of the next
|
|
||||||
// pipelined header. Thus we need to drain the body buffer.
|
|
||||||
for {
|
|
||||||
_, err = bodyStream.Read(drainBuf)
|
|
||||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Report status code and content type.
|
|
||||||
c.Response.SetStatusCode(fasthttp.StatusOK)
|
|
||||||
c.Response.Header.SetContentType(jsonHeader)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *Uploader) fetchOwnerAndBearerToken(ctx context.Context) (*user.ID, *bearer.Token) {
|
|
||||||
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
|
|
||||||
issuer := bearer.ResolveIssuer(*tkn)
|
|
||||||
return &issuer, tkn
|
|
||||||
}
|
|
||||||
return u.ownerID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type putResponse struct {
|
|
||||||
ObjectID string `json:"object_id"`
|
|
||||||
ContainerID string `json:"container_id"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPutResponse(addr oid.Address) *putResponse {
|
|
||||||
return &putResponse{
|
|
||||||
ObjectID: addr.Object().EncodeToString(),
|
|
||||||
ContainerID: addr.Container().EncodeToString(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pr *putResponse) encode(w io.Writer) error {
|
|
||||||
enc := json.NewEncoder(w)
|
|
||||||
enc.SetIndent("", "\t")
|
|
||||||
return enc.Encode(pr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getEpochDurations(ctx context.Context, p *pool.Pool) (*epochDurations, error) {
|
|
||||||
networkInfo, err := p.NetworkInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
res := &epochDurations{
|
|
||||||
currentEpoch: networkInfo.CurrentEpoch(),
|
|
||||||
msPerBlock: networkInfo.MsPerBlock(),
|
|
||||||
blockPerEpoch: networkInfo.EpochDuration(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if res.blockPerEpoch == 0 {
|
|
||||||
return nil, fmt.Errorf("EpochDuration is empty")
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func needParseExpiration(headers map[string]string) bool {
|
|
||||||
_, ok1 := headers[utils.ExpirationDurationAttr]
|
|
||||||
_, ok2 := headers[utils.ExpirationRFC3339Attr]
|
|
||||||
_, ok3 := headers[utils.ExpirationTimestampAttr]
|
|
||||||
return ok1 || ok2 || ok3
|
|
||||||
}
|
|
|
@ -1,10 +1,258 @@
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// EpochDurations is a snapshot of network timing parameters used to convert
// wall-clock expiration values into epochs.
type EpochDurations struct {
	CurrentEpoch  uint64 // epoch number at the time of the snapshot
	MsPerBlock    int64  // milliseconds per block
	BlockPerEpoch uint64 // blocks per epoch
}

// EpochInfoFetcher provides the network timing snapshot needed to translate
// expiration headers into an expiration epoch.
type EpochInfoFetcher interface {
	GetEpochDurations(context.Context) (*EpochDurations, error)
}
|
||||||
|
|
||||||
const (
	// UserAttributeHeaderPrefix marks HTTP headers that carry user object attributes.
	UserAttributeHeaderPrefix = "X-Attribute-"

	// SystemAttributePrefix marks system object attributes in the legacy scheme.
	SystemAttributePrefix = "__NEOFS__"

	// Expiration attribute names of the legacy scheme.
	ExpirationDurationAttr  = SystemAttributePrefix + "EXPIRATION_DURATION"
	ExpirationTimestampAttr = SystemAttributePrefix + "EXPIRATION_TIMESTAMP"
	ExpirationRFC3339Attr   = SystemAttributePrefix + "EXPIRATION_RFC3339"
)

const (
	// systemAttributePrefix marks system object attributes.
	systemAttributePrefix = "__SYSTEM__"

	// Deprecated: use systemAttributePrefix.
	systemAttributePrefixNeoFS = "__NEOFS__"
)
|
||||||
|
|
||||||
|
type systemTransformer struct {
|
||||||
|
prefix string
|
||||||
|
backwardPrefix string
|
||||||
|
xAttrPrefixes [][]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
var transformers = []systemTransformer{
|
||||||
|
{
|
||||||
|
prefix: systemAttributePrefix,
|
||||||
|
backwardPrefix: "System-",
|
||||||
|
xAttrPrefixes: [][]byte{[]byte("System-"), []byte("SYSTEM-"), []byte("system-")},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
prefix: systemAttributePrefixNeoFS,
|
||||||
|
backwardPrefix: "Neofs-",
|
||||||
|
xAttrPrefixes: [][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) existsExpirationAttributes(headers map[string]string) bool {
|
||||||
|
_, ok0 := headers[t.expirationEpochAttr()]
|
||||||
|
_, ok1 := headers[t.expirationDurationAttr()]
|
||||||
|
_, ok2 := headers[t.expirationTimestampAttr()]
|
||||||
|
_, ok3 := headers[t.expirationRFC3339Attr()]
|
||||||
|
return ok0 || ok1 || ok2 || ok3
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) expirationEpochAttr() string {
|
||||||
|
return t.prefix + "EXPIRATION_EPOCH"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) expirationDurationAttr() string {
|
||||||
|
return t.prefix + "EXPIRATION_DURATION"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) expirationTimestampAttr() string {
|
||||||
|
return t.prefix + "EXPIRATION_TIMESTAMP"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) expirationRFC3339Attr() string {
|
||||||
|
return t.prefix + "EXPIRATION_RFC3339"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) systemTranslator(key, prefix []byte) []byte {
|
||||||
|
// replace the specified prefix with system prefix
|
||||||
|
key = bytes.Replace(key, prefix, []byte(t.prefix), 1)
|
||||||
|
|
||||||
|
// replace `-` with `_`
|
||||||
|
key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
|
||||||
|
|
||||||
|
// replace with uppercase
|
||||||
|
return bytes.ToUpper(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) transformIfSystem(key []byte) ([]byte, bool) {
|
||||||
|
// checks that it's a system FrostFS header
|
||||||
|
for _, system := range t.xAttrPrefixes {
|
||||||
|
if bytes.HasPrefix(key, system) {
|
||||||
|
return t.systemTranslator(key, system), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemBackwardTranslator is used to convert headers looking like '__PREFIX__ATTR_NAME' to 'Prefix-Attr-Name'.
|
||||||
|
func (t systemTransformer) systemBackwardTranslator(key string) string {
|
||||||
|
// trim specified prefix '__PREFIX__'
|
||||||
|
key = strings.TrimPrefix(key, t.prefix)
|
||||||
|
|
||||||
|
var res strings.Builder
|
||||||
|
res.WriteString(t.backwardPrefix)
|
||||||
|
|
||||||
|
strs := strings.Split(key, "_")
|
||||||
|
for i, s := range strs {
|
||||||
|
s = title(strings.ToLower(s))
|
||||||
|
res.WriteString(s)
|
||||||
|
if i != len(strs)-1 {
|
||||||
|
res.WriteString("-")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) backwardTransformIfSystem(key string) (string, bool) {
|
||||||
|
if strings.HasPrefix(key, t.prefix) {
|
||||||
|
return t.systemBackwardTranslator(key), true
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func TransformIfSystem(key []byte) []byte {
|
||||||
|
for _, transformer := range transformers {
|
||||||
|
key, transformed := transformer.transformIfSystem(key)
|
||||||
|
if transformed {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
func BackwardTransformIfSystem(key string) string {
|
||||||
|
for _, transformer := range transformers {
|
||||||
|
key, transformed := transformer.backwardTransformIfSystem(key)
|
||||||
|
if transformed {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
|
||||||
|
// title upper-cases (title-cases) the first rune of str and leaves the rest
// of the string untouched.
func title(str string) string {
	if len(str) == 0 {
		return ""
	}

	first, width := utf8.DecodeRuneInString(str)
	return string(unicode.ToTitle(first)) + str[width:]
}
|
||||||
|
|
||||||
|
func PrepareExpirationHeader(ctx context.Context, epochFetcher EpochInfoFetcher, headers map[string]string, now time.Time) error {
|
||||||
|
formatsNum := 0
|
||||||
|
index := -1
|
||||||
|
for i, transformer := range transformers {
|
||||||
|
if transformer.existsExpirationAttributes(headers) {
|
||||||
|
formatsNum++
|
||||||
|
index = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
switch formatsNum {
|
||||||
|
case 0:
|
||||||
|
return nil
|
||||||
|
case 1:
|
||||||
|
epochDuration, err := epochFetcher.GetEpochDurations(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't get epoch durations from network info: %w", err)
|
||||||
|
}
|
||||||
|
return transformers[index].prepareExpirationHeader(headers, epochDuration, now)
|
||||||
|
default:
|
||||||
|
return errors.New("both deprecated and new system attributes formats are used, please use only one")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) prepareExpirationHeader(headers map[string]string, epochDurations *EpochDurations, now time.Time) error {
|
||||||
|
expirationInEpoch := headers[t.expirationEpochAttr()]
|
||||||
|
|
||||||
|
if timeRFC3339, ok := headers[t.expirationRFC3339Attr()]; ok {
|
||||||
|
expTime, err := time.Parse(time.RFC3339, timeRFC3339)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, t.expirationRFC3339Attr())
|
||||||
|
}
|
||||||
|
|
||||||
|
if expTime.Before(now) {
|
||||||
|
return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, t.expirationRFC3339Attr())
|
||||||
|
}
|
||||||
|
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
|
||||||
|
delete(headers, t.expirationRFC3339Attr())
|
||||||
|
}
|
||||||
|
|
||||||
|
if timestamp, ok := headers[t.expirationTimestampAttr()]; ok {
|
||||||
|
value, err := strconv.ParseInt(timestamp, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't parse value %s of header %s", timestamp, t.expirationTimestampAttr())
|
||||||
|
}
|
||||||
|
expTime := time.Unix(value, 0)
|
||||||
|
|
||||||
|
if expTime.Before(now) {
|
||||||
|
return fmt.Errorf("value %s of header %s must be in the future", timestamp, t.expirationTimestampAttr())
|
||||||
|
}
|
||||||
|
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
|
||||||
|
delete(headers, t.expirationTimestampAttr())
|
||||||
|
}
|
||||||
|
|
||||||
|
if duration, ok := headers[t.expirationDurationAttr()]; ok {
|
||||||
|
expDuration, err := time.ParseDuration(duration)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't parse value %s of header %s", duration, t.expirationDurationAttr())
|
||||||
|
}
|
||||||
|
if expDuration <= 0 {
|
||||||
|
return fmt.Errorf("value %s of header %s must be positive", expDuration, t.expirationDurationAttr())
|
||||||
|
}
|
||||||
|
t.updateExpirationHeader(headers, epochDurations, expDuration)
|
||||||
|
delete(headers, t.expirationDurationAttr())
|
||||||
|
}
|
||||||
|
|
||||||
|
if expirationInEpoch != "" {
|
||||||
|
expEpoch, err := strconv.ParseUint(expirationInEpoch, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("parse expiration epoch '%s': %w", expirationInEpoch, err)
|
||||||
|
}
|
||||||
|
if expEpoch < epochDurations.CurrentEpoch {
|
||||||
|
return fmt.Errorf("expiration epoch '%d' must be greater than current epoch '%d'", expEpoch, epochDurations.CurrentEpoch)
|
||||||
|
}
|
||||||
|
|
||||||
|
headers[t.expirationEpochAttr()] = expirationInEpoch
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t systemTransformer) updateExpirationHeader(headers map[string]string, durations *EpochDurations, expDuration time.Duration) {
|
||||||
|
epochDuration := uint64(durations.MsPerBlock) * durations.BlockPerEpoch
|
||||||
|
currentEpoch := durations.CurrentEpoch
|
||||||
|
numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
|
||||||
|
|
||||||
|
if uint64(expDuration.Milliseconds())%epochDuration != 0 {
|
||||||
|
numEpoch++
|
||||||
|
}
|
||||||
|
|
||||||
|
expirationEpoch := uint64(math.MaxUint64)
|
||||||
|
if numEpoch < math.MaxUint64-currentEpoch {
|
||||||
|
expirationEpoch = currentEpoch + numEpoch
|
||||||
|
}
|
||||||
|
|
||||||
|
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
|
||||||
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue