Compare commits

...

No commits in common. "empty" and "master" have entirely different histories.

102 changed files with 15313 additions and 2 deletions

.docker/Dockerfile Normal file

@@ -0,0 +1,23 @@
FROM golang:1.22-alpine AS basebuilder
RUN apk add --update make bash ca-certificates
FROM basebuilder AS builder
ENV GOGC=off
ENV CGO_ENABLED=0
ARG BUILD=now
ARG VERSION=dev
ARG REPO=repository
WORKDIR /src
COPY . /src
RUN make
# Executable image
FROM scratch
WORKDIR /
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw
ENTRYPOINT ["/bin/frostfs-http-gw"]

.docker/Dockerfile.dirty Normal file

@@ -0,0 +1,8 @@
FROM alpine
RUN apk add --update --no-cache bash ca-certificates
WORKDIR /
COPY bin/frostfs-http-gw /bin/frostfs-http-gw
CMD ["frostfs-http-gw"]

@@ -0,0 +1,45 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: community, triage, bug
assignees: ''
---
<!--- Provide a general summary of the issue in the Title above -->
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!-- Not obligatory
If no reason/fix/additions for the bug can be suggested,
uncomment the following phrase:
<-- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. -->
1.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Regression
<!-- Is this issue a regression? (Yes / No) -->
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Version used:
* Server setup and configuration:
* Operating System and version (`uname -a`):

@@ -0,0 +1 @@
blank_issues_enabled: false

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: community, triage
assignees: ''
---
## Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
## Describe the solution you'd like
<!--- A clear and concise description of what you want to happen. -->
## Describe alternatives you've considered
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
## Additional context
<!--- Add any other context or screenshots about the feature request here. -->

.forgejo/logo.svg Normal file

@@ -0,0 +1,70 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
<style type="text/css">
.st0{display:none;}
.st1{display:inline;}
.st2{fill:#01E397;}
.st3{display:inline;fill:#010032;}
.st4{display:inline;fill:#00E599;}
.st5{display:inline;fill:#00AF92;}
.st6{fill:#00C3E5;}
</style>
<g id="Layer_2">
<g id="Layer_1-2" class="st0">
<g class="st1">
<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
</g>
<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
C119.9,17.2,117.7,18.2,116.2,19.9z"/>
<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
</g>
<g>
<g>
<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
</g>
</g>
</g>
</svg>

@@ -0,0 +1,27 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  builds:
    name: Builds
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'
      - name: Build binary
        run: make
      - name: Check dirty suffix
        run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
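
The last step fails the build when `make version` reports a `-dirty` suffix, i.e. when building modified the checked-out tree. The same guard can be run locally; the sample output is illustrative, inferred from the `git describe ... --dirty` invocation in the Makefile at the end of this diff.

```sh
$ make version
v0.31.0-8-gabcdef12        # clean tree; a locally modified tree ends in -dirty
$ if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
```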

@@ -0,0 +1,20 @@
on: [pull_request]

jobs:
  dco:
    name: DCO
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
      - name: Run commit format checker
        uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
        with:
          from: 'origin/${{ github.event.pull_request.base.ref }}'
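
`dco-go` verifies that every commit between the PR base and head carries a `Signed-off-by` trailer (see the DCO section of CONTRIBUTING.md below). If a branch is missing sign-offs, one standard way to add them retroactively, assuming the PR targets `master`, is:

```sh
# Rewrite the branch's commits with Signed-off-by trailers, then update the PR.
git rebase --signoff origin/master
git push --force-with-lease
```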

@@ -0,0 +1,45 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.23'
          cache: true
      - name: Install linters
        run: make lint-install
      - name: Run linters
        run: make lint

  tests:
    name: Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go_versions: [ '1.22', '1.23' ]
      fail-fast: false
    steps:
      - uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: '${{ matrix.go_versions }}'
      - name: Update Go modules
        run: make dep
      - name: Run tests
        run: make test

@@ -0,0 +1,25 @@
on:
  pull_request:
  push:
    branches:
      - master

jobs:
  vulncheck:
    name: Vulncheck
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
          go-version: '1.22'
      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
      - name: Run govulncheck
        run: govulncheck ./...

.gitignore vendored Normal file

@@ -0,0 +1,21 @@
.idea
bin
temp
/plugins/
/vendor/
.test.env
*~
*.log
test.sh
testfile
.blast.yml
.frostfs-cli.yml
.cache
coverage.txt
coverage.html
# debhelpers
**/.debhelper

.gitlint Normal file

@@ -0,0 +1,11 @@
[general]
fail-without-commits=True
regex-style-search=True
contrib=CC1
[title-match-regex]
regex=^\[\#[0-9Xx]+\]\s
[ignore-by-title]
regex=^Release(.*)
ignore=title-match-regex
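
Given `title-match-regex`, commit titles must start with an issue reference such as `[#123] ` (with `X`/`x` allowed as a placeholder number), while titles starting with `Release` are exempt. A few illustrative titles (component names are placeholders):

```sh
# Accepted by the title regex:
git commit -m '[#123] uploader: Fix request handling'
git commit -m '[#XX] docs: Update usage section'

# Rejected (no [#N] prefix) unless the title starts with "Release":
git commit -m 'Fix request handling'
```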

.golangci.yml Normal file

@@ -0,0 +1,68 @@
# This file contains all available configuration options
# with their default values.

# options for analysis running
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 15m
  # include test files or not, default is true
  tests: true

# output configuration options
output:
  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
  formats:
    - format: tab

# all available settings of specific linters
linters-settings:
  exhaustive:
    # indicates that switch statements are to be considered exhaustive if a
    # 'default' case is present, even if all enum members aren't listed in the
    # switch
    default-signifies-exhaustive: true
  govet:
    # report about shadowed variables
    check-shadowing: false
  custom:
    truecloudlab-linters:
      path: bin/external_linters.so
      original-url: git.frostfs.info/TrueCloudLab/linters.git
      settings:
        noliteral:
          enable: true
          target-methods: ["Fatal"]
          disable-packages: ["req", "r"]
          constants-package: "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"

linters:
  enable:
    # mandatory linters
    - govet
    - revive
    # some default golangci-lint linters
    - errcheck
    - gosimple
    - ineffassign
    - staticcheck
    - typecheck
    - unused
    # extra linters
    - exhaustive
    - godot
    - gofmt
    - whitespace
    - goimports
    - truecloudlab-linters
  disable-all: true
  fast: false

issues:
  include:
    - EXC0002 # should have a comment
    - EXC0003 # test/Test ... consider calling this
    - EXC0004 # govet
    - EXC0005 # C-style breaks
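
The `truecloudlab-linters` entry is a custom plugin loaded from `bin/external_linters.so`, so the plugin must be built before `golangci-lint` can run. The lint job in the CI workflow above uses the same two-step flow:

```sh
make lint-install   # fetch and build the external linter plugin into bin/
make lint           # run golangci-lint with this configuration
```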

.pre-commit-config.yaml Normal file

@@ -0,0 +1,52 @@
ci:
  autofix_prs: false

repos:
  - repo: https://github.com/jorisroovers/gitlint
    rev: v0.19.1
    hooks:
      - id: gitlint
        stages: [commit-msg]
      - id: gitlint-ci

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: check-added-large-files
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-shebang-scripts-are-executable
      - id: check-merge-conflict
      - id: check-json
      - id: check-xml
      - id: check-yaml
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: end-of-file-fixer
        exclude: ".key$"

  - repo: https://github.com/shellcheck-py/shellcheck-py
    rev: v0.9.0.2
    hooks:
      - id: shellcheck

  - repo: local
    hooks:
      - id: make-lint-install
        name: install linters
        entry: make lint-install
        language: system
        pass_filenames: false
      - id: make-lint
        name: run linters
        entry: make lint
        language: system
        pass_filenames: false
      - id: go-unit-tests
        name: go unit tests
        entry: make test
        pass_filenames: false
        types: [go]
        language: system
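
To activate these hooks locally, one would typically install pre-commit and register both hook stages (gitlint runs at `commit-msg`). The commands below are standard pre-commit usage rather than anything this diff adds; the Makefile's `pre-commit`/`unpre-commit` targets presumably wrap the same thing.

```sh
pip install pre-commit
pre-commit install                          # regular pre-commit stage hooks
pre-commit install --hook-type commit-msg   # needed for the gitlint hook
pre-commit run --all-files                  # one-off check of the whole tree
```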

CHANGELOG.md Normal file

@@ -0,0 +1,173 @@
# Changelog
This document outlines major changes between releases.
## [Unreleased]
## [0.31.0] - Rongbuk - 2024-11-20
### Fixed
- Docker warnings during image build (#126)
- `trace_id` parameter in logs (#148)
- SIGHUP support for `tracing.enabled` config parameter (#157)
### Added
- Vulnerability report document (#123)
- Root CA configuration for tracing (#139)
- Log sampling policy configuration (#147)
- Index page support for buckets and containers (#137, #151)
- CORS support (#158)
- Source IP binding configuration for FrostFS requests (#160)
- Tracing attributes (#164)
### Changed
- Updated Go version to 1.22 (#132)
### Removed
- Duplicated NNS Resolver code (#129)
## [0.30.3] - 2024-10-18
### Fixed
- Get response on S3 multipart object (#142)
### Added
- Support percent-encoding for GET queries (#134)
### Changed
- Split `FrostFS` interface into separate read methods (#127)
## [0.30.2] - 2024-09-03
### Added
- Fuzzing tests (#135)
## [0.30.1] - 2024-08-20
### Fixed
- Error counting in pool component before connection switch (#131)
### Added
- Log of endpoint address during tree pool errors (#131)
## [0.30.0] - Kangshung - 2024-07-22
### Fixed
- Handle query unescape and invalid bearer token errors (#107)
- Fix HTTP/2 requests (#110)
### Added
- Add new `reconnect_interval` config param (#100)
- Erasure coding support in placement policy (#114)
- HTTP Header canonicalizer for well-known headers (#121)
### Changed
- Improve test coverage (#112, #117)
- Bumped vulnerable dependencies (#115)
- Replace extended ACL examples with policies in README (#118)
### Removed
## [0.29.0] - Zemu - 2024-05-27
### Fixed
- Fix possibility of panic during SIGHUP (#99)
- Handle query unescape and invalid bearer token errors (#108)
- Fix log-level change on SIGHUP (#105)
### Added
- Support client side object cut (#70)
- Add `frostfs.client_cut` config param
- Add `frostfs.buffer_max_size_for_put` config param
- Add bucket/container caching
- Disable homomorphic hash for PUT if it's disabled in container itself
- Add new `logger.destination` config param with journald support (#89, #104)
- Add support for namespaces (#91)
### Changed
- Replace atomics with mutex for reloadable params (#74)
## [0.28.1] - 2024-01-24
### Added
- Tree pool traversal limit (#92)
### Update from 0.28.0
See the new `frostfs.tree_pool_max_attempts` config parameter.
## [0.28.0] - Academy of Sciences - 2023-12-07
### Fixed
- `grpc` schemas in tree configuration (#62)
- `GetSubTree` failures (#67)
- Debian packaging (#69, #90)
- Get latest version of tree node (#85)
### Added
- Support dumping metrics descriptions (#29)
- Support impersonate bearer token (#40, #45)
- Tracing support (#20, #44, #60)
- Object name resolving with tree service (#30)
- Metrics for current endpoint status (#77)
- Soft memory limit with `runtime.soft_memory_limit` (#72)
- Add selection of the node with the latest version of the object (#85)
### Changed
- Update prometheus to v1.15.0 (#35)
- Update go version to 1.19 (#50)
- Finish rebranding (#2)
- Use gate key to form object owner (#66)
- Move log messages to constants (#36)
- Uploader and downloader refactor (#73)
### Removed
- Drop `tree.service` param (now endpoints from `peers` section are used) (#59)
## [0.27.0] - Karpinsky - 2023-07-12
This is the first FrostFS HTTP Gateway release, named after
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
### Fixed
- Require only one healthy storage server to start (#7)
- Enable gate metrics (#38)
- `Too many pings` error (#61)
### Added
- Multiple configs support (#12)
### Changed
- Repository rebranding (#1)
- Update neo-go to v0.101.0 (#8)
- Update viper to v1.15.0 (#8)
- Update go version to 1.18 (#9)
- Errors have become more detailed (#18)
- Update system attribute names (#22)
- Separate integration tests with build tags (#24)
- Changed values for `frostfs_http_gw_state_health` metric (#32)
### Updating from neofs-http-gw v0.26.0
To set system attributes, use the updated headers
(you can use the old ones for now, but their support will be dropped in future releases):
* `X-Attribute-Neofs-*` -> `X-Attribute-System-*`
* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*`
* `X-Attribute-neofs-*` -> `X-Attribute-system-*`
## Older versions
This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0.
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...v0.30.1
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...master

CODEOWNERS Normal file

@@ -0,0 +1 @@
.* @alexvanin @dkirillov

CONTRIBUTING.md Normal file

@@ -0,0 +1,156 @@
# Contribution guide
First, thank you for contributing! We love and encourage pull requests from
everyone. Please follow the guidelines:
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing
discussions.
- Open an issue first, to discuss a new feature or enhancement.
- Write tests and make sure the test suite passes locally and on CI.
- Open a pull request and reference the relevant issue(s).
- Make sure your commits are logically separated and have good comments
explaining the details of your change.
- After receiving feedback, amend your commits or add new ones as
appropriate.
- **Have fun!**
## Development Workflow
Start by forking the `frostfs-http-gw` repository, make changes in a branch and then
send a pull request. We encourage pull requests to discuss code changes. Here
are the steps in detail:
### Set up your git repository
Fork [FrostFS HTTP Gateway
upstream](https://git.frostfs.info/repo/fork/8) source repository
to your personal repository. Copy the URL of your fork (you will need it for
the `git clone` command below).
```sh
$ git clone https://git.frostfs.info/<username>/frostfs-http-gw.git
```
### Set up git remote as ``upstream``
```sh
$ cd frostfs-http-gw
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
$ git fetch upstream
$ git merge upstream/master
...
```
### Create your feature branch
Before making code changes, make sure you create a separate branch for these
changes. You may find it convenient to name the branch in the
`<type>/<Issue>-<changes_topic>` format.
```
$ git checkout -b feature/123-something_awesome
```
### Test your changes
After your code changes, make sure
- To add test cases for the new code.
- To run `make lint` and fix any reported issues.
- To squash your commits into a single commit or a series of logically
separated commits with `git rebase -i`. It's okay to force-push updates to
your pull request.
- To check that `make test` and `make all` complete successfully.
### Commit changes
After verification, commit your changes. This is a [great
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
messages. Try following this template:
```
[#Issue] <component> Summary
Description
<Macros>
<Sign-Off>
```
```
$ git commit -am '[#123] Add some feature'
```
### Push to the branch
Push your locally committed changes to the remote origin (your fork)
```
$ git push origin feature/123-something_awesome
```
### Create a Pull Request
Pull requests can be created via Forgejo. Refer to [this
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
detailed steps on how to create a pull request. After a Pull Request gets peer
reviewed and approved, it will be merged.
## DCO Sign off
All authors of the project retain copyright to their work. However, to ensure
that they only submit work they have the rights to, we require
everyone to acknowledge this by signing their work.
Any copyright notices in this repository should specify the authors as "the
contributors".
To sign your work, just add a line like this at the end of your commit message:
```
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
```
This can be easily done with the `--signoff` option to `git commit`.
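
Combined with the commit template above, a signed commit might look like this (issue number and component are placeholders):

```sh
$ git commit --signoff -m '[#123] uploader: Add some feature'
```
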
By doing this you state that you can certify the following (from [The Developer
Certificate of Origin](https://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```

CREDITS.md Normal file

@@ -0,0 +1,20 @@
# Credits
In alphabetical order:
- Alexey Vanin
- Angira Kekteeva
- Denis Kirillov
- Evgeniy Kulikov
- Pavel Korotkov
- Roman Khimov
# Contributors
In chronological order:
- Anatoly Bogatyrev
- Stanislav Bogatyrev
- Anastasia Prasolova
- Leonard Liubich
- Elizaveta Chichindaeva

LICENSE Normal file

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

206
Makefile Executable file
View file

@ -0,0 +1,206 @@
#!/usr/bin/make -f
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.60.3
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BUILD ?= $(shell date -u --iso=seconds)
HUB_IMAGE ?= truecloudlab/frostfs-http-gw
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
METRICS_DUMP_OUT ?= ./metrics-dump.json
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# List of binaries to build. For now just one.
BINDIR = bin
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
BINS = $(addprefix $(BINDIR)/, $(CMDS))
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean
# .deb package versioning
OS_RELEASE = $(shell lsb_release -cs)
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
sed "s/-/~/")-${OS_RELEASE}
.PHONY: debpackage debclean
FUZZ_NGFUZZ_DIR ?= ""
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= "all"
FUZZ_AUX ?= ""
# Make all binaries
all: $(BINS)
$(BINS): $(DIRS) dep
@echo "⇒ Build $@"
CGO_ENABLED=0 \
go build -v -trimpath \
-ldflags "-X main.Version=$(VERSION)" \
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
$(DIRS):
@echo "⇒ Ensure dir: $@"
@mkdir -p $@
# Pull go dependencies
dep:
@printf "⇒ Download requirements: "
@CGO_ENABLED=0 \
go mod download && echo OK
@printf "⇒ Tidy requirements: "
@CGO_ENABLED=0 \
go mod tidy -v && echo OK
# Run `make %` in Golang container, for more information run `make help.docker/%`
docker/%:
$(if $(filter $*,all $(BINS)), \
@echo "=> Running 'make $*' in clean Docker environment" && \
docker run --rm -t \
-v `pwd`:/src \
-w /src \
-u `stat -c "%u:%g" .` \
--env HOME=/src \
golang:$(GO_VERSION) make $*,\
@echo "supported docker targets: all $(BINS) lint")
# Run tests
test:
@go test ./... -cover
# Run integration tests
.PHONY: integration-test
integration-test:
@go test ./... -cover --tags=integration
# Run tests with race detection and produce coverage output
cover:
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
@go tool cover -html=coverage.txt -o coverage.html
# Run fuzzing
CLANG := $(shell which clang-17 2>/dev/null)
.PHONY: check-clang all
check-clang:
ifeq ($(CLANG),)
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
@exit 1
endif
.PHONY: check-ngfuzz all
check-ngfuzz:
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
exit 1; \
fi
.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz
.PHONY: fuzz
fuzz: install-fuzzing-deps
@START_PATH=$$(pwd); \
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
cd $(FUZZ_NGFUZZ_DIR) && \
./ngfuzz -clean && \
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./ngfuzz -report
# Reformat code
fmt:
@echo "⇒ Processing gofmt check"
@gofmt -s -w ./
# Build clean Docker image
image:
@echo "⇒ Build FrostFS HTTP Gateway docker image "
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
--rm \
-f .docker/Dockerfile \
-t $(HUB_IMAGE):$(HUB_TAG) .
# Push Docker image to the hub
image-push:
@echo "⇒ Publish image"
@docker push $(HUB_IMAGE):$(HUB_TAG)
# Build dirty Docker image
dirty-image:
@echo "⇒ Build FrostFS HTTP Gateway dirty docker image "
@docker build \
--build-arg REPO=$(REPO) \
--build-arg VERSION=$(VERSION) \
--rm \
-f .docker/Dockerfile.dirty \
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
# Install linters
lint-install:
@mkdir -p $(TMP_DIR)
@rm -rf $(TMP_DIR)/linters
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
@rm -rf $(TMP_DIR)/linters
@rmdir $(TMP_DIR) 2>/dev/null || true
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
# Run linters
lint:
@if [ ! -d "$(LINT_DIR)" ]; then \
echo "Run make lint-install"; \
exit 1; \
fi
$(LINT_DIR)/golangci-lint --timeout=5m run
# Run linters in Docker
docker/lint:
docker run --rm -it \
-v `pwd`:/src \
-u `stat -c "%u:%g" .` \
--env HOME=/src \
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
# Activate pre-commit hooks
pre-commit:
pre-commit install -t pre-commit -t commit-msg
# Deactivate pre-commit hooks
unpre-commit:
pre-commit uninstall -t pre-commit -t commit-msg
# Print version
version:
@echo $(VERSION)
# Clean up
clean:
rm -rf vendor
rm -rf $(BINDIR)
# Package for Debian
debpackage:
dch --package frostfs-http-gw \
--controlmaint \
--newversion $(PKG_VERSION) \
--distribution $(OS_RELEASE) \
"Please see CHANGELOG.md for code changes for $(VERSION)"
dpkg-buildpackage --no-sign -b
debclean:
dh clean
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
.PHONY: dump-metrics
dump-metrics:
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
include help.mk

601
README.md
View file

@ -1,3 +1,600 @@
# WIP area: this repo is just a fork!
<p align="center">
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
</p>
<p align="center">
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
</p>
Useful things may be published only in [other branches](../../../branches)
---
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw)
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange)
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
# FrostFS HTTP Gateway
FrostFS HTTP Gateway bridges the FrostFS internal protocol and the HTTP standard.
- you can download one file per request from the FrostFS Network
- you can upload one file per request into the FrostFS Network
See available routes in [specification](./docs/api.md).
## Installation
```go install git.frostfs.info/TrueCloudLab/frostfs-http-gw```
Or you can call `make` to build it from the cloned repository (the binary will
end up in `bin/frostfs-http-gw`). To build the frostfs-http-gw binary in a clean Docker
environment, call `make docker/bin/frostfs-http-gw`.
Other notable make targets:
```
dep Check and ensure dependencies
image Build clean docker image
dirty-image Build dirty docker image with host-built binaries
fmt Format the code
lint Run linters
version Show current version
```
You can also use a [Docker
image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for released
(and occasionally unreleased) versions of the gateway (`:latest` points to the
latest stable release).
## Execution
The HTTP gateway itself is not a FrostFS node, so to access FrostFS it uses a node's
gRPC interface, and you need to provide a node for it to connect to. This
can be done either via the `-p` parameter or via the `HTTP_GW_PEERS_<N>_ADDRESS` and
`HTTP_GW_PEERS_<N>_WEIGHT` environment variables (the gateway supports multiple
FrostFS nodes with weighted load balancing).
If you launch HTTP gateway in bundle with [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can get the IP address of the node in the output of `make hosts` command
(with s0*.frostfs.devenv name).
These two commands are functionally equivalent; they run the gateway with one
backend node (and otherwise default settings):
```
$ frostfs-http-gw -p 192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 frostfs-http-gw
```
It's also possible to specify the URI scheme (grpc or grpcs) when using `-p`:
```
$ frostfs-http-gw -p grpc://192.168.130.72:8080
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 frostfs-http-gw
```
## Configuration
In general, everything available as a CLI parameter can also be specified via
environment variables (see [example](./config/config.env)), so they're not specifically mentioned in most cases
(see `--help` as well). If you prefer a config file, you can use one in YAML format.
### Nodes: weights and priorities
You can specify multiple `-p` options to add more FrostFS nodes; this will make the
gateway spread requests equally among them (using weight 1 and priority 1 for every node):
```
$ frostfs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
```
If you want some specific load distribution proportions, use weights and priorities:
```
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \
HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \
HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \
frostfs-http-gw
```
This command will make the gateway use 192.168.130.71 while it is healthy. Otherwise, it will use
192.168.130.72 for 90% of requests and 192.168.130.73 for the remaining 10%.
### Keys
You can provide a wallet via the `--wallet` or `-w` flag. You can also specify the account address using `--address`
(if no address is provided, the default one will be used). If a wallet is used, you need to set the `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt it.
If no wallet is provided, the gateway autogenerates a key pair to use for FrostFS requests.
```
$ frostfs-http-gw -p $FROSTFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
```
Example:
```
$ frostfs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
```
### Binding and TLS
You can make the gateway listen on a specific address using the `--listen_address` option.
It can also provide a TLS interface for its users; just specify paths to the key and
certificate files via the `--tls_key` and `--tls_certificate` parameters. Note
that using these options makes the gateway TLS-only. If you need to serve both TLS
and plain-text HTTP, you either have to run two gateway instances or use some
external redirecting solution.
Example to bind to `192.168.130.130:443` and serve TLS there:
```
$ frostfs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
--tls_key=key.pem --tls_certificate=cert.pem
```
### HTTP parameters
You can tune HTTP read and write buffer sizes as well as timeouts with
`HTTP_GW_WEB_READ_BUFFER_SIZE`, `HTTP_GW_WEB_READ_TIMEOUT`,
`HTTP_GW_WEB_WRITE_BUFFER_SIZE` and `HTTP_GW_WEB_WRITE_TIMEOUT` environment
variables.
**Note:** to allow upload and download of big data streams, disable the read
and write timeouts respectively. To do that, set `HTTP_GW_WEB_READ_TIMEOUT=0`
and `HTTP_GW_WEB_WRITE_TIMEOUT=0`. Otherwise, the HTTP Gateway will terminate
requests with long data streams after the timeout.
The `HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable
request body streaming (effectively making the gateway accept the file completely
first and only then try sending it to FrostFS).
`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls the maximum request body size,
limiting uploads to files slightly smaller than this limit.
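For example, to disable both timeouts for large streams and cap the request body at 4 MiB (the values here are illustrative):
```
$ HTTP_GW_WEB_READ_TIMEOUT=0 HTTP_GW_WEB_WRITE_TIMEOUT=0 \
  HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE=4194304 frostfs-http-gw -p 192.168.130.72:8080
```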
### FrostFS parameters
The gateway can automatically set timestamps for uploaded files based on a local
time source; use the `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment
variable to control this behavior.
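For example (a minimal invocation, assuming a node at the usual dev-env address):
```
$ HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=true frostfs-http-gw -p 192.168.130.72:8080
```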
### Monitoring and metrics
Pprof and Prometheus are integrated into the gateway. To enable them, use the `--pprof` and `--metrics` flags or the
`HTTP_GW_PPROF`/`HTTP_GW_METRICS` environment variables.
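For example:
```
$ frostfs-http-gw -p 192.168.130.72:8080 --pprof --metrics
```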
### Timeouts
You can tune gRPC interface parameters with the `--connect_timeout` (for
connection to a node) and `--request_timeout` (for request processing over an
established connection) options.
gRPC-level checks allow the gateway to detect dead peers, but it only declares them
unhealthy at the pool level once per `--rebalance_timer` interval, so take that into
account if needed.
All timing options accept values with suffixes, so "15s" is 15 seconds and
"2m" is 2 minutes.
### Zip streaming
The gateway supports downloading files by a common prefix (like a directory) in zip format. You can enable compression
using the config or the `HTTP_GW_ZIP_COMPRESSION=true` environment variable.
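In YAML form this corresponds to (a minimal sketch, assuming the `zip.compression` key used by the gateway settings):
```yaml
zip:
  compression: true
```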
### Logging
You can specify the logging level using the variable:
```
HTTP_GW_LOGGER_LEVEL=debug
```
### Yaml file
A configuration file is optional and can be used instead of environment variables and other parameters.
It can be specified with the `--config` parameter:
```
$ frostfs-http-gw --config your-config.yaml
```
See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for examples.
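A minimal config sketch based on options mentioned in this document (the pprof and prometheus addresses are the defaults described in the Metrics and Pprof section; adjust as needed):
```yaml
logger:
  level: debug

pprof:
  enabled: true
  address: localhost:8083

prometheus:
  enabled: true
  address: localhost:8084
```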
#### Multiple configs
You can use several config files when running the application. This allows you to split the configuration into parts.
For example, you can use separate YAML files for the pprof and prometheus sections of the config (see [config examples](./config)).
You can either provide several files with a repeating `--config` flag or provide the path to a directory that contains all configs using the `--config-dir` flag.
You can also combine these flags:
```shell
$ frostfs-http-gw --config ./config/config.yaml --config /your/partial/config.yaml --config-dir ./config/dir
```
**Note:** each subsequent file in the `--config` flag overwrites values from the previous one.
Files from the `--config-dir` directory overwrite values from `--config` files.
So the command above runs `frostfs-http-gw` listening on the `0.0.0.0:8080` address (value from `./config/config.yaml`),
applies parameters from `/your/partial/config.yaml`,
and enables pprof (value from `./config/dir/pprof.yaml`) and prometheus (value from `./config/dir/prometheus.yaml`).
## HTTP API provided
This gateway intentionally provides a limited feature set and doesn't try to
substitute (or completely wrap) the regular gRPC FrostFS interface. You can download
and upload objects with it, but deleting, searching, managing ACLs, creating
containers and other activities are not supported and are not planned to be
supported.
### Preparation
Before uploading or downloading a file, make sure you have a prepared container.
You can create it with the instructions below.
Also, in case of downloading, you need to have a file inside the container.
### NNS
In all download/upload routes you can use the container name instead of its ID (`$CID`).
Steps to start using name resolution:
1. Enable NNS resolving in the config (`rpc_endpoint` must be a valid Neo RPC node; see [configs](./config) for other examples):
```yaml
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
```
2. Make sure your container is registered in the NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
you can check whether your container (e.g., named `container-name`) is registered in NNS:
```shell
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
$ docker exec -it morph_chain neo-go \
contract testinvokefunction \
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
resolve string:container-name.container int:16 \
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
| base64 -d && echo
7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
```
3. Use the container name instead of its `$CID`. For example:
```shell
$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
```
#### Create a container
You can create a container via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases):
```
$ frostfs-cli -r $FROSTFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
```
where `$WALLET` is a path to the user wallet,
`$ACL` -- a hex-encoded basic ACL value or one of the keywords 'private', 'public-read', 'public-read-write', and
`$POLICY` -- a QL-encoded or JSON-encoded placement policy, or a path to a file containing it
For example:
```
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
```
If you have launched nodes via [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
you can take the key value from `wallets/wallet.json` or use the path to
the `wallets/wallet.key` file.
#### Prepare a file in a container
To create a file via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases), run the command below:
```
$ frostfs-cli -r $FROSTFS_NODE -k $KEY object put --file $FILENAME --cid $CID
```
where
`$KEY` -- the key (please read the information [above](#create-a-container)),
`$CID` -- the container ID.
For example:
```
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
```
### Downloading
#### Requests
The following requests support GET/HEAD methods.
##### By IDs
Basic downloading involves a container ID and an object ID and is done via GET
requests to the `/get/$CID/$OID` path, where `$CID` is a container ID or its name if NNS is enabled,
and `$OID` is an object's (i.e., your file's) ID.
For example:
```shell
$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY
```
or, if the container has a name:
```shell
$ wget http://localhost:8082/get/container-name/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY
```
##### By attributes
There is also a more complex interface provided for attribute-based downloads;
it's usually used to retrieve files by their names, but any other attribute
can be used as well. The generic syntax for it looks like this:
```/get_by_attribute/$CID/$ATTRIBUTE_NAME/$ATTRIBUTE_VALUE```
where
`$CID` is a container ID or its name if NNS is enabled,
`$ATTRIBUTE_NAME` is the name of the attribute we want to use,
`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have.
**NB!** The attribute key and value should be URL-encoded, i.e., if you want to download an object with the attribute value
`a cat`, the value in the request must be `a+cat`. The same applies to the attribute key. If you don't escape such values,
things may still work (for example, you can use `d@ta` without encoding), but it's HIGHLY RECOMMENDED to encode all your attributes.
If multiple objects have the specified attribute with the specified value, then the
first one of them is returned (and you can't get the others via this interface).
Example for file name attribute:
```
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat.jpeg
```
Or when the filename includes special symbols:
```
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat+jpeg # means 'cat jpeg'
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat%25jpeg # means 'cat%jpeg'
```
Some other user-defined attributes:
```
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Ololo/100500
```
Or when the attribute includes special symbols:
```
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Olo%2Blo/100500 # means Olo+lo
```
An optional `download=true` argument for `Content-Disposition` management is
also supported (more on that below):
```
$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY?download=true
```
##### Zip
You can download a directory (files with a common prefix) as a zip archive (it will be compressed if the config contains the appropriate parameter):
```
$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix
```
**Note:** the objects must have a valid `FilePath` attribute (it should not contain a trailing `/`),
otherwise they will not be included in the zip archive. You can upload a file with this attribute using `curl`:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
```
#### Replies
You get the object contents in the reply body (if the GET method was used), but at the same time you also get a
set of reply headers generated using the following rules:
* `Content-Length` is set to the length of the object
* `Content-Type` is autodetected dynamically by the gateway
* `Content-Disposition` is `inline` for regular requests and `attachment` for
  requests with the `download=true` argument; `filename` is also added if there
  is a `FileName` attribute set for this object
* the `Last-Modified` header is set to the `Timestamp` attribute value if it's
  present for the object
* `x-container-id` contains the container ID
* `x-object-id` contains the object ID
* `x-owner-id` contains the owner address
* all the other FrostFS attributes are converted to `X-Attribute-*` headers (but only
  if they can be safely represented in an HTTP header); for example, the `FileName`
  attribute becomes the `X-Attribute-FileName` header
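Putting it together, a GET reply for an object with a `FileName` attribute might carry headers like these (the values are illustrative):
```
HTTP/1.1 200 OK
Content-Length: 32768
Content-Type: image/jpeg
Content-Disposition: inline; filename=cat.jpeg
Last-Modified: Mon, 22 Nov 2021 09:55:49 GMT
x-container-id: Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
x-object-id: 2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY
x-owner-id: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
X-Attribute-FileName: cat.jpeg
```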
##### Caching strategy
The HTTP Gateway doesn't control caching (it does nothing with the `Cache-Control` header). The caching strategy strictly
depends on the application use case, so it should be handled carefully by a proxy server.
### Uploading
You can POST files to the `/upload/$CID` path, where `$CID` is a container ID or its name if NNS is enabled. The
request must contain a multipart form with a mandatory `filename` parameter. Only
one part of the multipart form will be processed, so to upload another file just
issue a new POST request.
Example request:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
```
Chunked encoding is supported by the server (but check the request read
timeouts if you're planning some streaming). You can try streaming support
with a large file piped through a named FIFO pipe:
```
$ mkfifo pipe
$ cat video.mp4 > pipe &
$ curl --no-buffer -F 'file=@pipe;filename=catvideo.mp4' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
```
You can also add some attributes to your file using the following rules (see the example after this list):
* all "X-Attribute-*" headers get converted to object attributes with the
  "X-Attribute-" prefix stripped; that is, if you add an "X-Attribute-Ololo:
  100500" header to your request, the resulting object will get an "Ololo:
  100500" attribute
* "X-Attribute-SYSTEM-*" headers are special
  (the `-SYSTEM-` part can also be `-system-` or `-System-` (and even the legacy `-Neofs-` for a few more releases)); they're used to set internal
  FrostFS attributes starting with the `__SYSTEM__` prefix; for these attributes all
  dashes get converted to underscores and all letters are capitalized. For
  example, you can use the "X-Attribute-SYSTEM-Expiration-Epoch" header to set the
  `__SYSTEM__EXPIRATION_EPOCH` attribute
* the `FileName` attribute is set from the multipart's `filename` if not set
  explicitly via the `X-Attribute-FileName` header
* the `Timestamp` attribute can be set using the gateway's local time if the
  HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP option is enabled and the request doesn't
  provide an `X-Attribute-Timestamp` header of its own
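For example, the following request sets both a user attribute and a system expiration attribute on the uploaded object:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' \
    -H 'X-Attribute-Ololo: 100500' \
    -H 'X-Attribute-SYSTEM-Expiration-Epoch: 100' \
    http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
```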
---
**NOTE**
There are some reserved headers of the `X-Attribute-SYSTEM-*` type (listed in descending order of priority):
1. `X-Attribute-System-Expiration-Epoch: 100`
2. `X-Attribute-System-Expiration-Duration: 24h30m`
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
all of which are transformed into `X-Attribute-System-Expiration-Epoch`. So you can provide the expiration in any convenient way.
---
For successful uploads you get JSON data in the reply body with the container and
object ID, like this:
```
{
"object_id": "9ANhbry2ryjJY1NZbcjryJMRXG5uGNKd73kD3V1sVFsX",
"container_id": "Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ"
}
```
#### Authentication
You can always upload files to public containers (open for anyone to put
objects into), but for restricted containers you need to explicitly allow PUT
operations for a request signed with your HTTP Gateway keys.
If you don't want to manage the gateway's secret keys and adjust policies when the
gateway configuration changes (new gate, key rotation, etc.), or you plan to use
public services, there is an option to let your application backend (or you)
issue Bearer Tokens and pass them from the client via the gate down to the FrostFS level
to grant access.
A FrostFS Bearer Token is basically a container owner-signed policy (refer to the FrostFS
documentation for more details). There are two options to pass it to the gateway:
* "Authorization" header with "Bearer" type and base64-encoded token in
credentials field
* "Bearer" cookie with base64-encoded token contents
For example, you have a mobile application frontend with a backend part storing
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
Bearer token and provides it to the frontend. Then, the mobile app may generate
some data and upload it via any available FrostFS HTTP Gateway by adding
the corresponding header to the upload request. Accessing policy protected data
works the same way.
##### Example
In order to generate a bearer token, you need to have a wallet (which will be used to sign the token).
1. Suppose you have a container with a private policy for a wallet key
```
$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
--target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
--rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
--chain-id <chainID>
```
2. Form a Bearer token (10000 is the expiration epoch) to impersonate the
HTTP Gateway request as a wallet-signed request, and save it to **bearer.json**:
```
{
"body": {
"allowImpersonate": true,
"lifetime": {
"exp": "10000",
"nbf": "0",
"iat": "0"
}
},
"signature": null
}
```
3. Sign it with the wallet:
```
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
```
4. Encode it to base64 for use in the header:
```
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
```
After that, the Bearer token can be used:
```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
# output:
# {
# "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
# "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
# }
```
##### Note: Bearer Token owner
You can specify the exact key that can use the Bearer Token (the gateway wallet address).
To do this, encode the wallet address in base64 format:
```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```
Then specify this value in the Bearer Token JSON:
```
{
"body": {
"ownerID": {
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
},
...
```
##### Note: Policy override
Instead of impersonation, you can define the set of policies that will be applied
to the request sender. This allows restricting access to specific operations and
specific objects without giving full impersonation control to the token user.
### Metrics and Pprof
If enabled, Prometheus metrics are available at the `localhost:8084` endpoint
and pprof at `localhost:8083/debug/pprof` by default. The host and port can be configured.
See [configuration](./docs/gate-configuration.md).
## Fuzzing
To run fuzzing tests use the following command:
```shell
$ make fuzz
```
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
You can also use the following arguments:
```
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
```
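For example, to fuzz only a specific function for 60 seconds (the function name here is hypothetical):
```shell
$ make fuzz FUZZ_NGFUZZ_DIR=/path/to/ngfuzz FUZZ_TIMEOUT=60 FUZZ_FUNCTIONS="FuzzUpload"
```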
## Credits
Please see [CREDITS](CREDITS.md) for details.

26
SECURITY.md Normal file
View file

@ -0,0 +1,26 @@
# Security Policy
## How To Report a Vulnerability
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
Instead, you can report it using one of the following ways:
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
* The type of issue (e.g., buffer overflow, or cross-site scripting)
* Affected version(s)
* Impact of the issue, including how an attacker might exploit the issue
* Step-by-step instructions to reproduce the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Full paths of source file(s) related to the manifestation of the issue
* Any special configuration required to reproduce the issue
* Any log files that are related to this issue (if possible)
* Proof-of-concept or exploit code (if possible)
This information will help us triage your report more quickly.

1
VERSION Normal file
View file

@ -0,0 +1 @@
v0.31.0

1033
cmd/http-gw/app.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,563 @@
//go:build integration
package main
import (
"archive/zip"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"mime/multipart"
"net/http"
"os"
"sort"
"testing"
"time"
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neo-go/pkg/wallet"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"go.uber.org/zap/zapcore"
)
type putResponse struct {
CID string `json:"container_id"`
OID string `json:"object_id"`
}
const (
testContainerName = "friendly"
testListenAddress = "localhost:8082"
testHost = "http://" + testListenAddress
)
func TestIntegration(t *testing.T) {
rootCtx := context.Background()
aioImage := "truecloudlab/frostfs-aio:"
versions := []string{
"1.2.7",
"1.3.0",
"1.5.0",
}
key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
require.NoError(t, err)
file, err := os.CreateTemp("", "wallet")
require.NoError(t, err)
defer os.Remove(file.Name())
makeTempWallet(t, key, file.Name())
var ownerID user.ID
user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)
for _, version := range versions {
ctx, cancel2 := context.WithCancel(rootCtx)
aioContainer := createDockerContainer(ctx, t, aioImage+version)
server, cancel := runServer(file.Name())
clientPool := getPool(ctx, t, key)
CID, err := createContainer(ctx, t, clientPool, ownerID, version)
require.NoError(t, err, version)
token := makeBearerToken(t, key, ownerID, version)
t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })
cancel()
server.Wait()
err = aioContainer.Terminate(ctx)
require.NoError(t, err)
cancel2()
}
}
func runServer(pathToWallet string) (App, context.CancelFunc) {
cancelCtx, cancel := context.WithCancel(context.Background())
v := getDefaultConfig()
v.Set(cfgWalletPath, pathToWallet)
v.Set(cfgWalletPassphrase, "")
l, lvl := newStdoutLogger(v, zapcore.DebugLevel)
application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
go application.Serve()
return application, cancel
}
func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, version string) {
url := testHost + "/upload/" + CID.String()
makePutRequestAndCheck(ctx, t, p, CID, url)
url = testHost + "/upload/" + testContainerName
makePutRequestAndCheck(ctx, t, p, CID, url)
}
func putWithBearerTokenInHeader(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
url := testHost + "/upload/" + CID.String()
request, content, attributes := makePutRequest(t, url)
request.Header.Set("Authorization", "Bearer "+token)
resp, err := http.DefaultClient.Do(request)
require.NoError(t, err)
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
}
func putWithBearerTokenInCookie(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
url := testHost + "/upload/" + CID.String()
request, content, attributes := makePutRequest(t, url)
request.AddCookie(&http.Cookie{Name: "Bearer", Value: token})
resp, err := http.DefaultClient.Do(request)
require.NoError(t, err)
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
}
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
request, content, attributes := makePutRequest(t, url)
resp, err := http.DefaultClient.Do(request)
require.NoError(t, err)
checkPutResponse(ctx, t, p, cnrID, resp, content, attributes)
}
func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string]string) {
content := "content of file"
keyAttr, valAttr := "User-Attribute", "user value"
attributes := map[string]string{
object.AttributeFileName: "newFile.txt",
keyAttr: valAttr,
}
var buff bytes.Buffer
w := multipart.NewWriter(&buff)
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
require.NoError(t, err)
_, err = io.Copy(fw, bytes.NewBufferString(content))
require.NoError(t, err)
err = w.Close()
require.NoError(t, err)
request, err := http.NewRequest(http.MethodPost, url, &buff)
require.NoError(t, err)
request.Header.Set("Content-Type", w.FormDataContentType())
request.Header.Set("X-Attribute-"+keyAttr, valAttr)
return request, content, attributes
}
func checkPutResponse(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, resp *http.Response, content string, attributes map[string]string) {
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
if resp.StatusCode != http.StatusOK {
fmt.Println(string(body))
}
require.Equal(t, http.StatusOK, resp.StatusCode)
addr := &putResponse{}
err = json.Unmarshal(body, addr)
require.NoError(t, err)
err = cnrID.DecodeString(addr.CID)
require.NoError(t, err)
var id oid.ID
err = id.DecodeString(addr.OID)
require.NoError(t, err)
var objectAddress oid.Address
objectAddress.SetContainer(cnrID)
objectAddress.SetObject(id)
payload := bytes.NewBuffer(nil)
var prm pool.PrmObjectGet
prm.SetAddress(objectAddress)
res, err := p.GetObject(ctx, prm)
require.NoError(t, err)
_, err = io.Copy(payload, res.Payload)
require.NoError(t, err)
require.Equal(t, content, payload.String())
for _, attribute := range res.Header.Attributes() {
require.Equal(t, attributes[attribute.Key()], attribute.Value())
}
}
func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
url := testHost + "/upload/" + CID.String()
attr := "X-Attribute-User-Attribute"
content := "content of file"
valOne, valTwo := "first_value", "second_value"
fileName := "newFile.txt"
var buff bytes.Buffer
w := multipart.NewWriter(&buff)
fw, err := w.CreateFormFile("file", fileName)
require.NoError(t, err)
_, err = io.Copy(fw, bytes.NewBufferString(content))
require.NoError(t, err)
err = w.Close()
require.NoError(t, err)
request, err := http.NewRequest(http.MethodPost, url, &buff)
require.NoError(t, err)
request.Header.Set("Content-Type", w.FormDataContentType())
request.Header.Add(attr, valOne)
request.Header.Add(attr, valTwo)
resp, err := http.DefaultClient.Do(request)
require.NoError(t, err)
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
content := "content of file"
attributes := map[string]string{
"some-attr": "some-get-value",
}
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
resp, err := http.Get(testHost + "/get/" + CID.String() + "/" + id.String())
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
}
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
data, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, content, string(data))
for k, v := range attributes {
require.Equal(t, v, resp.Header.Get("X-Attribute-"+k))
}
}
func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
data, err := io.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, content, string(data))
for k, v := range attributes {
require.Equal(t, v, resp.Header.Get(k))
}
}
func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
content := "content of file"
attributes := map[string]string{keyAttr: valAttr}
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
expectedAttr := map[string]string{
"X-Attribute-" + keyAttr: valAttr,
"x-object-id": id.String(),
"x-container-id": CID.String(),
}
resp, err := http.Get(testHost + "/get_by_attribute/" + CID.String() + "/" + keyAttr + "/" + valAttr)
require.NoError(t, err)
checkGetByAttrResponse(t, resp, content, expectedAttr)
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
require.NoError(t, err)
checkGetByAttrResponse(t, resp, content, expectedAttr)
}
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
contents := []string{"content of file1", "content of file2"}
attributes1 := map[string]string{object.AttributeFilePath: names[0]}
attributes2 := map[string]string{object.AttributeFilePath: names[1]}
putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
baseURL := testHost + "/zip/" + CID.String()
makeZipTest(t, baseURL, names, contents)
baseURL = testHost + "/zip/" + testContainerName
makeZipTest(t, baseURL, names, contents)
}
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
url := baseURL + "/zipfolder"
makeZipRequest(t, url, names, contents)
// check nested folder
url = baseURL + "/zipfolder/dir"
makeZipRequest(t, url, names[:1], contents[:1])
}
func makeZipRequest(t *testing.T, url string, names, contents []string) {
resp, err := http.Get(url)
require.NoError(t, err)
defer func() {
err := resp.Body.Close()
require.NoError(t, err)
}()
data, err := io.ReadAll(resp.Body)
require.NoError(t, err)
checkZip(t, data, int64(len(data)), names, contents)
}
func checkZip(t *testing.T, data []byte, length int64, names, contents []string) {
readerAt := bytes.NewReader(data)
zipReader, err := zip.NewReader(readerAt, length)
require.NoError(t, err)
require.Equal(t, len(names), len(zipReader.File))
sort.Slice(zipReader.File, func(i, j int) bool {
return zipReader.File[i].FileHeader.Name < zipReader.File[j].FileHeader.Name
})
for i, f := range zipReader.File {
require.Equal(t, names[i], f.FileHeader.Name)
rc, err := f.Open()
require.NoError(t, err)
all, err := io.ReadAll(rc)
require.NoError(t, err)
require.Equal(t, contents[i], string(all))
err = rc.Close()
require.NoError(t, err)
}
}
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
content := "content of file"
attributes := map[string]string{
"some-attr": "some-get-value",
}
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
require.NoError(t, err)
req.Header.Set(defaultNamespaceHeader, "")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
require.NoError(t, err)
req.Header.Set(defaultNamespaceHeader, "root")
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
checkGetResponse(t, resp, content, attributes)
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
require.NoError(t, err)
req.Header.Set(defaultNamespaceHeader, "root2")
resp, err = http.DefaultClient.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
}
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
req := testcontainers.ContainerRequest{
Image: image,
WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(30 * time.Second),
Name: "aio",
Hostname: "aio",
NetworkMode: "host",
}
aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
require.NoError(t, err)
return aioC
}
func getDefaultConfig() *viper.Viper {
v := settings()
v.SetDefault(cfgPeers+".0.address", "localhost:8080")
v.SetDefault(cfgPeers+".0.weight", 1)
v.SetDefault(cfgPeers+".0.priority", 1)
v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
v.SetDefault("server.0.address", testListenAddress)
return v
}
func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool {
var prm pool.InitParameters
prm.SetKey(&key.PrivateKey)
prm.SetNodeDialTimeout(5 * time.Second)
prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1))
clientPool, err := pool.NewPool(prm)
require.NoError(t, err)
err = clientPool.Dial(ctx)
require.NoError(t, err)
return clientPool
}
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) (cid.ID, error) {
var policy netmap.PlacementPolicy
err := policy.DecodeString("REP 1")
require.NoError(t, err)
var cnr container.Container
cnr.Init()
cnr.SetPlacementPolicy(policy)
cnr.SetBasicACL(acl.PublicRWExtended)
cnr.SetOwner(ownerID)
container.SetCreationTime(&cnr, time.Now())
var domain container.Domain
domain.SetName(testContainerName)
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
var waitPrm pool.WaitParams
waitPrm.SetTimeout(15 * time.Second)
waitPrm.SetPollInterval(3 * time.Second)
var prm pool.PrmContainerPut
prm.SetContainer(cnr)
prm.SetWaitParams(waitPrm)
CID, err := clientPool.PutContainer(ctx, prm)
if err != nil {
return cid.ID{}, err
}
fmt.Println(CID.String())
return CID, err
}
func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
obj := object.New()
obj.SetContainerID(CID)
obj.SetOwnerID(ownerID)
var attrs []object.Attribute
for key, val := range attributes {
attr := object.NewAttribute()
attr.SetKey(key)
attr.SetValue(val)
attrs = append(attrs, *attr)
}
obj.SetAttributes(attrs...)
var prm pool.PrmObjectPut
prm.SetHeader(*obj)
prm.SetPayload(bytes.NewBufferString(content))
id, err := clientPool.PutObject(ctx, prm)
require.NoError(t, err)
return id.ObjectID
}
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
tkn := new(bearer.Token)
tkn.ForUser(ownerID)
tkn.SetExp(10000)
if version == "1.2.7" {
tkn.SetEACLTable(*eacl.NewTable())
} else {
tkn.SetImpersonate(true)
}
err := tkn.Sign(key.PrivateKey)
require.NoError(t, err)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
return t64
}
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
w, err := wallet.NewWallet(path)
require.NoError(t, err)
acc := wallet.NewAccountFromPrivateKey(key)
err = acc.Encrypt("", w.Scrypt)
require.NoError(t, err)
w.AddAccount(acc)
err = w.Save()
require.NoError(t, err)
}

17
cmd/http-gw/main.go Normal file
View file

@ -0,0 +1,17 @@
package main
import (
"context"
"os/signal"
"syscall"
)
func main() {
globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
v := settings()
logger, atomicLevel := pickLogger(v)
application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
go application.Serve()
application.Wait()
}

10
cmd/http-gw/misc.go Normal file
View file

@ -0,0 +1,10 @@
package main
// Prefix is a prefix used for environment variables containing gateway
// configuration.
const Prefix = "HTTP_GW"
var (
// Version is the gateway version.
Version = "dev"
)

124
cmd/http-gw/server.go Normal file
View file

@ -0,0 +1,124 @@
package main
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"sync"
)
type (
ServerInfo struct {
Address string
TLS ServerTLSInfo
}
ServerTLSInfo struct {
Enabled bool
CertFile string
KeyFile string
}
Server interface {
Address() string
Listener() net.Listener
UpdateCert(certFile, keyFile string) error
}
server struct {
address string
listener net.Listener
tlsProvider *certProvider
}
certProvider struct {
Enabled bool
mu sync.RWMutex
certPath string
keyPath string
cert *tls.Certificate
}
)
func (s *server) Address() string {
return s.address
}
func (s *server) Listener() net.Listener {
return s.listener
}
func (s *server) UpdateCert(certFile, keyFile string) error {
return s.tlsProvider.UpdateCert(certFile, keyFile)
}
func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
var lic net.ListenConfig
ln, err := lic.Listen(ctx, "tcp", serverInfo.Address)
if err != nil {
return nil, fmt.Errorf("could not prepare listener: %w", err)
}
tlsProvider := &certProvider{
Enabled: serverInfo.TLS.Enabled,
}
if serverInfo.TLS.Enabled {
if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
lnErr := ln.Close()
return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
}
ln = tls.NewListener(ln, &tls.Config{
GetCertificate: tlsProvider.GetCertificate,
NextProtos: []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
})
}
return &server{
address: serverInfo.Address,
listener: ln,
tlsProvider: tlsProvider,
}, nil
}
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
if !p.Enabled {
return nil, errors.New("cert provider: disabled")
}
p.mu.RLock()
defer p.mu.RUnlock()
return p.cert, nil
}
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
if !p.Enabled {
return fmt.Errorf("tls disabled")
}
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
}
p.mu.Lock()
p.certPath = certPath
p.keyPath = keyPath
p.cert = &cert
p.mu.Unlock()
return nil
}
func (p *certProvider) FilePaths() (string, string) {
if !p.Enabled {
return "", ""
}
p.mu.RLock()
defer p.mu.RUnlock()
return p.certPath, p.keyPath
}

119
cmd/http-gw/server_test.go Normal file
View file

@ -0,0 +1,119 @@
package main
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
"net"
"net/http"
"os"
"path"
"testing"
"time"
"github.com/stretchr/testify/require"
"golang.org/x/net/http2"
)
const (
expHeaderKey = "Foo"
expHeaderValue = "Bar"
)
func TestHTTP2TLS(t *testing.T) {
ctx := context.Background()
certPath, keyPath := prepareTestCerts(t)
srv := &http.Server{
Handler: http.HandlerFunc(testHandler),
}
tlsListener, err := newServer(ctx, ServerInfo{
Address: ":0",
TLS: ServerTLSInfo{
Enabled: true,
CertFile: certPath,
KeyFile: keyPath,
},
})
require.NoError(t, err)
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
addr := fmt.Sprintf("https://localhost:%d", port)
go func() {
_ = srv.Serve(tlsListener.Listener())
}()
// Server is running, now send HTTP/2 request
tlsClientConfig := &tls.Config{
InsecureSkipVerify: true,
}
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
req, err := http.NewRequest("GET", addr, nil)
require.NoError(t, err)
req.Header[expHeaderKey] = []string{expHeaderValue}
resp, err := cliHTTP1.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
resp, err = cliHTTP2.Do(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
}
func testHandler(resp http.ResponseWriter, req *http.Request) {
hdr, ok := req.Header[expHeaderKey]
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
resp.WriteHeader(http.StatusBadRequest)
} else {
resp.WriteHeader(http.StatusOK)
}
}
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err)
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{CommonName: "localhost"},
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour * 24 * 365),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
require.NoError(t, err)
dir := t.TempDir()
certPath = path.Join(dir, "cert.pem")
keyPath = path.Join(dir, "key.pem")
certFile, err := os.Create(certPath)
require.NoError(t, err)
defer certFile.Close()
keyFile, err := os.Create(keyPath)
require.NoError(t, err)
defer keyFile.Close()
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
require.NoError(t, err)
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
require.NoError(t, err)
return certPath, keyPath
}

817
cmd/http-gw/settings.go Normal file
View file

@ -0,0 +1,817 @@
package main
import (
"context"
"encoding/hex"
"fmt"
"io"
"math"
"os"
"path"
"runtime"
"sort"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"git.frostfs.info/TrueCloudLab/zapjournald"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"github.com/ssgreg/journald"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
)
const (
destinationStdout = "stdout"
destinationJournald = "journald"
)
const (
defaultRebalanceTimer = 60 * time.Second
defaultRequestTimeout = 15 * time.Second
defaultConnectTimeout = 10 * time.Second
defaultStreamTimeout = 10 * time.Second
defaultLoggerSamplerInterval = 1 * time.Second
defaultShutdownTimeout = 15 * time.Second
defaultPoolErrorThreshold uint32 = 100
defaultSoftMemoryLimit = math.MaxInt64
defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb
defaultNamespaceHeader = "X-Frostfs-Namespace"
defaultReconnectInterval = time.Minute
defaultCORSMaxAge = 600 // seconds
defaultMultinetFallbackDelay = 300 * time.Millisecond
cfgServer = "server"
cfgTLSEnabled = "tls.enabled"
cfgTLSCertFile = "tls.cert_file"
cfgTLSKeyFile = "tls.key_file"
cfgReconnectInterval = "reconnect_interval"
cfgIndexPageEnabled = "index_page.enabled"
cfgIndexPageTemplatePath = "index_page.template_path"
cfgWorkerPoolSize = "worker_pool_size"
// Web.
cfgWebReadBufferSize = "web.read_buffer_size"
cfgWebWriteBufferSize = "web.write_buffer_size"
cfgWebReadTimeout = "web.read_timeout"
cfgWebWriteTimeout = "web.write_timeout"
cfgWebStreamRequestBody = "web.stream_request_body"
cfgWebMaxRequestBodySize = "web.max_request_body_size"
// Metrics / Profiler.
cfgPrometheusEnabled = "prometheus.enabled"
cfgPrometheusAddress = "prometheus.address"
cfgPprofEnabled = "pprof.enabled"
cfgPprofAddress = "pprof.address"
// Tracing ...
cfgTracingEnabled = "tracing.enabled"
cfgTracingExporter = "tracing.exporter"
cfgTracingEndpoint = "tracing.endpoint"
cfgTracingTrustedCa = "tracing.trusted_ca"
cfgTracingAttributes = "tracing.attributes"
// Pool config.
cfgConTimeout = "connect_timeout"
cfgStreamTimeout = "stream_timeout"
cfgReqTimeout = "request_timeout"
cfgRebalance = "rebalance_timer"
cfgPoolErrorThreshold = "pool_error_threshold"
// Logger.
cfgLoggerLevel = "logger.level"
cfgLoggerDestination = "logger.destination"
cfgLoggerSamplingEnabled = "logger.sampling.enabled"
cfgLoggerSamplingInitial = "logger.sampling.initial"
cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
cfgLoggerSamplingInterval = "logger.sampling.interval"
// Wallet.
cfgWalletPassphrase = "wallet.passphrase"
cfgWalletPath = "wallet.path"
cfgWalletAddress = "wallet.address"
// Uploader Header.
cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp"
// Peers.
cfgPeers = "peers"
// NeoGo.
cfgRPCEndpoint = "rpc_endpoint"
// Resolving.
cfgResolveOrder = "resolve_order"
// Zip compression.
cfgZipCompression = "zip.compression"
// Runtime.
cfgSoftMemoryLimit = "runtime.soft_memory_limit"
// Enabling client side object preparing for PUT operations.
cfgClientCut = "frostfs.client_cut"
// Sets max buffer size for read payload in put operations.
cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
// Configuration of parameters of requests to FrostFS.
// Sets max attempt to make successful tree request.
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
// Caching.
cfgBucketsCacheLifetime = "cache.buckets.lifetime"
cfgBucketsCacheSize = "cache.buckets.size"
// Bucket resolving options.
cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
// CORS.
cfgCORSAllowOrigin = "cors.allow_origin"
cfgCORSAllowMethods = "cors.allow_methods"
cfgCORSAllowHeaders = "cors.allow_headers"
cfgCORSExposeHeaders = "cors.expose_headers"
cfgCORSAllowCredentials = "cors.allow_credentials"
cfgCORSMaxAge = "cors.max_age"
// Multinet.
cfgMultinetEnabled = "multinet.enabled"
cfgMultinetBalancer = "multinet.balancer"
cfgMultinetRestrict = "multinet.restrict"
cfgMultinetFallbackDelay = "multinet.fallback_delay"
cfgMultinetSubnets = "multinet.subnets"
// Command line args.
cmdHelp = "help"
cmdVersion = "version"
cmdPprof = "pprof"
cmdMetrics = "metrics"
cmdWallet = "wallet"
cmdAddress = "address"
cmdConfig = "config"
cmdConfigDir = "config-dir"
cmdListenAddress = "listen_address"
)
var ignore = map[string]struct{}{
cfgPeers: {},
cmdHelp: {},
cmdVersion: {},
}
func settings() *viper.Viper {
v := viper.New()
v.AutomaticEnv()
v.SetEnvPrefix(Prefix)
v.AllowEmptyEnv(true)
v.SetConfigType("yaml")
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
// flags setup:
flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
flags.SetOutput(os.Stdout)
flags.SortFlags = false
flags.Bool(cmdPprof, false, "enable pprof")
flags.Bool(cmdMetrics, false, "enable prometheus")
help := flags.BoolP(cmdHelp, "h", false, "show help")
version := flags.BoolP(cmdVersion, "v", false, "show version")
flags.StringP(cmdWallet, "w", "", `path to the wallet`)
flags.String(cmdAddress, "", `address of wallet account`)
flags.StringArray(cmdConfig, nil, "config paths")
flags.String(cmdConfigDir, "", "config dir path")
flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout")
flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")
flags.String(cmdListenAddress, "0.0.0.0:8080", "address to listen on")
flags.String(cfgTLSCertFile, "", "TLS certificate path")
flags.String(cfgTLSKeyFile, "", "TLS key path")
peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
// set defaults:
// logger:
v.SetDefault(cfgLoggerLevel, "debug")
v.SetDefault(cfgLoggerDestination, "stdout")
v.SetDefault(cfgLoggerSamplingEnabled, false)
v.SetDefault(cfgLoggerSamplingThereafter, 100)
v.SetDefault(cfgLoggerSamplingInitial, 100)
v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
// pool:
v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
// frostfs:
v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
// web-server:
v.SetDefault(cfgWebReadBufferSize, 4096)
v.SetDefault(cfgWebWriteBufferSize, 4096)
v.SetDefault(cfgWebReadTimeout, time.Minute*10)
v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
v.SetDefault(cfgWebStreamRequestBody, true)
v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
v.SetDefault(cfgWorkerPoolSize, 1000)
// upload header
v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
// zip:
v.SetDefault(cfgZipCompression, false)
// metrics
v.SetDefault(cfgPprofAddress, "localhost:8083")
v.SetDefault(cfgPrometheusAddress, "localhost:8084")
// resolve bucket
v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
// multinet
v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
// Binding flags
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
panic(err)
}
if err := v.BindPFlags(flags); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
panic(err)
}
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
panic(err)
}
if err := flags.Parse(os.Args); err != nil {
panic(err)
}
if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
v.Set(cfgServer+".0."+cfgTLSEnabled, true)
}
if resolveMethods != nil {
v.SetDefault(cfgResolveOrder, *resolveMethods)
}
switch {
case help != nil && *help:
fmt.Printf("FrostFS HTTP Gateway %s\n", Version)
flags.PrintDefaults()
fmt.Println()
fmt.Println("Default environments:")
fmt.Println()
keys := v.AllKeys()
sort.Strings(keys)
for i := range keys {
if _, ok := ignore[keys[i]]; ok {
continue
}
defaultValue := v.GetString(keys[i])
if len(defaultValue) == 0 {
continue
}
k := strings.Replace(keys[i], ".", "_", -1)
fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue)
}
fmt.Println()
fmt.Println("Peers preset:")
fmt.Println()
fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers))
fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers))
os.Exit(0)
case version != nil && *version:
fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
os.Exit(0)
}
if err := readInConfig(v); err != nil {
panic(err)
}
if peers != nil && len(*peers) > 0 {
for i := range *peers {
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
}
}
return v
}
func readInConfig(v *viper.Viper) error {
if v.IsSet(cmdConfig) {
if err := readConfig(v); err != nil {
return err
}
}
if v.IsSet(cmdConfigDir) {
if err := readConfigDir(v); err != nil {
return err
}
}
return nil
}
func readConfigDir(v *viper.Viper) error {
cfgSubConfigDir := v.GetString(cmdConfigDir)
entries, err := os.ReadDir(cfgSubConfigDir)
if err != nil {
return err
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
ext := path.Ext(entry.Name())
if ext != ".yaml" && ext != ".yml" {
continue
}
if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
return err
}
}
return nil
}
func readConfig(v *viper.Viper) error {
for _, fileName := range v.GetStringSlice(cmdConfig) {
if err := mergeConfig(v, fileName); err != nil {
return err
}
}
return nil
}
func mergeConfig(v *viper.Viper, fileName string) error {
cfgFile, err := os.Open(fileName)
if err != nil {
return err
}
defer func() {
if errClose := cfgFile.Close(); errClose != nil {
panic(errClose)
}
}()
return v.MergeConfig(cfgFile)
}
func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
lvl, err := getLogLevel(v)
if err != nil {
panic(err)
}
dest := v.GetString(cfgLoggerDestination)
switch dest {
case destinationStdout:
return newStdoutLogger(v, lvl)
case destinationJournald:
return newJournaldLogger(v, lvl)
default:
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
}
}
// newStdoutLogger constructs a zap.Logger instance for the current application.
// Panics on failure.
//
// Logger is built from zap's production logging configuration with:
// - parameterized level (debug by default)
// - console encoding
// - ISO8601 time encoding
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
stdout := zapcore.AddSync(os.Stdout)
level := zap.NewAtomicLevelAt(lvl)
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
consoleOutCore = samplingEnabling(v, consoleOutCore)
l := zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return l, level
}
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
level := zap.NewAtomicLevelAt(lvl)
encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
coreWithContext := core.With([]zapcore.Field{
zapjournald.SyslogFacility(zapjournald.LogDaemon),
zapjournald.SyslogIdentifier(),
zapjournald.SyslogPid(),
})
coreWithContext = samplingEnabling(v, coreWithContext)
l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
return l, level
}
func newLogEncoder() zapcore.Encoder {
c := zap.NewProductionEncoderConfig()
c.EncodeTime = zapcore.ISO8601TimeEncoder
return zapcore.NewConsoleEncoder(c)
}
func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
// Zap samples by logging the first cfgLoggerSamplingInitial entries with a given level
// and message within the specified cfgLoggerSamplingInterval.
// After the first cfgLoggerSamplingInitial entries with the same level and message,
// only every cfgLoggerSamplingThereafter-th entry is recorded within the interval;
// all other matching entries are dropped until the interval resets.
if v.GetBool(cfgLoggerSamplingEnabled) {
core = zapcore.NewSamplerWithOptions(
core,
v.GetDuration(cfgLoggerSamplingInterval),
v.GetInt(cfgLoggerSamplingInitial),
v.GetInt(cfgLoggerSamplingThereafter),
)
}
return core
}
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
var lvl zapcore.Level
lvlStr := v.GetString(cfgLoggerLevel)
err := lvl.UnmarshalText([]byte(lvlStr))
if err != nil {
return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
"value should be one of %v", lvlStr, err, [...]zapcore.Level{
zapcore.DebugLevel,
zapcore.InfoLevel,
zapcore.WarnLevel,
zapcore.ErrorLevel,
zapcore.DPanicLevel,
zapcore.PanicLevel,
zapcore.FatalLevel,
})
}
return lvl, nil
}
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
reconnect := cfg.GetDuration(cfgReconnectInterval)
if reconnect <= 0 {
reconnect = defaultReconnectInterval
}
return reconnect
}
func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
if !v.GetBool(cfgIndexPageEnabled) {
return "", false
}
reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
if err != nil {
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
return "", true
}
defer reader.Close()
tmpl, err := io.ReadAll(reader)
if err != nil {
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
return "", true
}
l.Info(logs.SetCustomIndexPageTemplate)
return string(tmpl), true
}
func fetchDefaultNamespaces(v *viper.Viper) []string {
namespaces := v.GetStringSlice(cfgResolveDefaultNamespaces)
for i := range namespaces { // trim quotes so namespaces can be set in env as HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
namespaces[i] = strings.Trim(namespaces[i], "\"")
}
return namespaces
}
func fetchCORSMaxAge(v *viper.Viper) int {
maxAge := v.GetInt(cfgCORSMaxAge)
if maxAge <= 0 {
maxAge = defaultCORSMaxAge
}
return maxAge
}
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
var servers []ServerInfo
seen := make(map[string]struct{})
for i := 0; ; i++ {
key := cfgServer + "." + strconv.Itoa(i) + "."
var serverInfo ServerInfo
serverInfo.Address = v.GetString(key + "address")
serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled)
serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile)
serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile)
if serverInfo.Address == "" {
break
}
if _, ok := seen[serverInfo.Address]; ok {
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
continue
}
seen[serverInfo.Address] = struct{}{}
servers = append(servers, serverInfo)
}
return servers
}
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper, dialSource *internalnet.DialerSource) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
key, err := getFrostFSKey(cfg, logger)
if err != nil {
logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
}
var prm pool.InitParameters
var prmTree treepool.InitParameters
prm.SetKey(&key.PrivateKey)
prmTree.SetKey(key)
logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
for _, peer := range fetchPeers(logger, cfg) {
prm.AddNode(peer)
prmTree.AddNode(peer)
}
connTimeout := cfg.GetDuration(cfgConTimeout)
if connTimeout <= 0 {
connTimeout = defaultConnectTimeout
}
prm.SetNodeDialTimeout(connTimeout)
prmTree.SetNodeDialTimeout(connTimeout)
streamTimeout := cfg.GetDuration(cfgStreamTimeout)
if streamTimeout <= 0 {
streamTimeout = defaultStreamTimeout
}
prm.SetNodeStreamTimeout(streamTimeout)
prmTree.SetNodeStreamTimeout(streamTimeout)
healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
if healthCheckTimeout <= 0 {
healthCheckTimeout = defaultRequestTimeout
}
prm.SetHealthcheckTimeout(healthCheckTimeout)
prmTree.SetHealthcheckTimeout(healthCheckTimeout)
rebalanceInterval := cfg.GetDuration(cfgRebalance)
if rebalanceInterval <= 0 {
rebalanceInterval = defaultRebalanceTimer
}
prm.SetClientRebalanceInterval(rebalanceInterval)
prmTree.SetClientRebalanceInterval(rebalanceInterval)
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
if errorThreshold <= 0 {
errorThreshold = defaultPoolErrorThreshold
}
prm.SetErrorThreshold(errorThreshold)
prm.SetLogger(logger)
prmTree.SetLogger(logger)
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
interceptors := []grpc.DialOption{
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
grpc.WithContextDialer(dialSource.GrpcContextDialer()),
}
prm.SetGRPCDialOptions(interceptors...)
prmTree.SetGRPCDialOptions(interceptors...)
p, err := pool.NewPool(prm)
if err != nil {
logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
}
if err = p.Dial(ctx); err != nil {
logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
}
treePool, err := treepool.NewPool(prmTree)
if err != nil {
logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
}
if err = treePool.Dial(ctx); err != nil {
logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
}
return p, treePool, key
}
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
var nodes []pool.NodeParam
for i := 0; ; i++ {
key := cfgPeers + "." + strconv.Itoa(i) + "."
address := v.GetString(key + "address")
weight := v.GetFloat64(key + "weight")
priority := v.GetInt(key + "priority")
if address == "" {
break
}
if weight <= 0 { // unspecified or wrong
weight = 1
}
if priority <= 0 { // unspecified or wrong
priority = 1
}
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
l.Info(logs.AddedStoragePeer,
zap.Int("priority", priority),
zap.String("address", address),
zap.Float64("weight", weight))
}
return nodes
}
func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
softMemoryLimit := cfg.GetSizeInBytes(cfgSoftMemoryLimit)
if softMemoryLimit <= 0 {
softMemoryLimit = defaultSoftMemoryLimit
}
return int64(softMemoryLimit)
}
func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
cacheCfg := cache.DefaultBucketConfig(l)
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size)
return cacheCfg
}
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
if v.IsSet(cfgEntry) {
lifetime := v.GetDuration(cfgEntry)
if lifetime <= 0 {
l.Error(logs.InvalidLifetimeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Duration("value in config", lifetime),
zap.Duration("default", defaultValue))
} else {
return lifetime
}
}
return defaultValue
}
func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int {
if v.IsSet(cfgEntry) {
size := v.GetInt(cfgEntry)
if size <= 0 {
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
zap.String("parameter", cfgEntry),
zap.Int("value in config", size),
zap.Int("default", defaultValue))
} else {
return size
}
}
return defaultValue
}
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
if err != nil {
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
}
return source
}
func fetchMultinetConfig(v *viper.Viper, l *zap.Logger) (cfg internalnet.Config) {
cfg.Enabled = v.GetBool(cfgMultinetEnabled)
cfg.Balancer = v.GetString(cfgMultinetBalancer)
cfg.Restrict = v.GetBool(cfgMultinetRestrict)
cfg.FallbackDelay = v.GetDuration(cfgMultinetFallbackDelay)
cfg.Subnets = make([]internalnet.Subnet, 0, 5)
cfg.EventHandler = internalnet.NewLogEventHandler(l)
for i := 0; ; i++ {
key := cfgMultinetSubnets + "." + strconv.Itoa(i) + "."
subnet := internalnet.Subnet{}
subnet.Prefix = v.GetString(key + "mask")
if subnet.Prefix == "" {
break
}
subnet.SourceIPs = v.GetStringSlice(key + "source_ips")
cfg.Subnets = append(cfg.Subnets, subnet)
}
return
}
func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
attributes := make(map[string]string)
for i := 0; ; i++ {
key := cfgTracingAttributes + "." + strconv.Itoa(i) + "."
attrKey := v.GetString(key + "key")
attrValue := v.GetString(key + "value")
if attrKey == "" {
break
}
if _, ok := attributes[attrKey]; ok {
return nil, fmt.Errorf("tracing attribute key %s defined more than once", attrKey)
}
if attrValue == "" {
return nil, fmt.Errorf("empty tracing attribute value for key %s", attrKey)
}
attributes[attrKey] = attrValue
}
return attributes, nil
}
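
As a side note on the viper wiring above: with `SetEnvPrefix`, `AutomaticEnv` and the `.`→`_` key replacer, every config key maps one-to-one to an environment variable, and indexed keys such as `peers.0.address` map to `HTTP_GW_PEERS_0_ADDRESS`. A minimal standalone sketch, assuming the `Prefix` constant equals `"HTTP_GW"` (as the config examples below suggest):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetEnvPrefix("HTTP_GW") // assumed value of the Prefix constant
	v.AutomaticEnv()
	v.AllowEmptyEnv(true)
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

	// "cache.buckets.lifetime" is looked up as HTTP_GW_CACHE_BUCKETS_LIFETIME.
	os.Setenv("HTTP_GW_CACHE_BUCKETS_LIFETIME", "2m")
	fmt.Println(v.GetDuration("cache.buckets.lifetime")) // prints 2m0s
}
```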

161
config/config.env Normal file
View file

@ -0,0 +1,161 @@
# Wallet section.
# Path to wallet.
HTTP_GW_WALLET_PATH=/path/to/wallet.json
# Account address. If omitted, the default one will be used.
HTTP_GW_WALLET_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
# Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
HTTP_GW_WALLET_PASSPHRASE=pwd
# Enable metrics.
HTTP_GW_PPROF_ENABLED=true
HTTP_GW_PPROF_ADDRESS=localhost:8083
HTTP_GW_PROMETHEUS_ENABLED=true
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
# Logger.
HTTP_GW_LOGGER_LEVEL=debug
HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false
HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert
HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key
HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444
HTTP_GW_SERVER_1_TLS_ENABLED=true
HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
# How often to reconnect to the servers
HTTP_GW_RECONNECT_INTERVAL=1m
# Nodes configuration.
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
# for 10% of requests and the third node for 90% of requests.
# Peer 1.
# Endpoint.
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
# The lower the value, the higher the priority.
HTTP_GW_PEERS_0_PRIORITY=1
# Load distribution proportion for nodes with the same priority.
HTTP_GW_PEERS_0_WEIGHT=1
# Peer 2.
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080
HTTP_GW_PEERS_1_PRIORITY=2
HTTP_GW_PEERS_1_WEIGHT=1
# Peer 3.
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080
HTTP_GW_PEERS_2_PRIORITY=2
HTTP_GW_PEERS_2_WEIGHT=9
# Per-connection buffer size for requests' reading.
# This also limits the maximum header size.
HTTP_GW_WEB_READ_BUFFER_SIZE=4096
# Per-connection buffer size for responses' writing.
HTTP_GW_WEB_WRITE_BUFFER_SIZE=4096
# ReadTimeout is the amount of time allowed to read
# the full request including body. The connection's read
# deadline is reset when the connection opens, or for
# keep-alive connections after the first byte has been read.
HTTP_GW_WEB_READ_TIMEOUT=10m
# WriteTimeout is the maximum duration before timing out
# writes of the response. It is reset after the request handler
# has returned.
HTTP_GW_WEB_WRITE_TIMEOUT=5m
# StreamRequestBody enables request body streaming,
# and calls the handler sooner when the given body is
# larger than the current limit.
HTTP_GW_WEB_STREAM_REQUEST_BODY=true
# Maximum request body size.
# The server rejects requests with bodies exceeding this limit.
HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE=4194304
# RPC endpoint to be able to use nns container resolving.
HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333
# The order in which resolvers are used to find a container ID by name.
HTTP_GW_RESOLVE_ORDER="nns dns"
# Create timestamp for object if it isn't provided by header.
HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false
# Timeout to dial node.
HTTP_GW_CONNECT_TIMEOUT=5s
# Timeout for individual operations in streaming RPC.
HTTP_GW_STREAM_TIMEOUT=10s
# Timeout to check node health during rebalance.
HTTP_GW_REQUEST_TIMEOUT=5s
# Interval to check nodes health.
HTTP_GW_REBALANCE_TIMER=30s
# The number of errors on connection after which the node is considered unhealthy
HTTP_GW_POOL_ERROR_THRESHOLD=100
# Enable zip compression for downloading files by a common prefix.
HTTP_GW_ZIP_COMPRESSION=false
HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
HTTP_GW_TRACING_TRUSTED_CA=""
HTTP_GW_TRACING_ATTRIBUTES_0_KEY=key0
HTTP_GW_TRACING_ATTRIBUTES_0_VALUE=value
HTTP_GW_TRACING_ATTRIBUTES_1_KEY=key1
HTTP_GW_TRACING_ATTRIBUTES_1_VALUE=value
HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
# Parameters of requests to FrostFS
# This flag enables client side object preparing.
HTTP_GW_FROSTFS_CLIENT_CUT=false
# Sets max buffer size for read payload in put operations.
HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
# Caching
# Cache which contains mapping of bucket name to bucket info
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
HTTP_GW_CACHE_BUCKETS_SIZE=1000
# Header to determine zone to resolve bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
# Namespaces that should be handled as default
HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
# Max attempts to make a successful tree request.
# The default value is 0, which means the number of attempts equals the number of nodes in the pool.
HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
HTTP_GW_CORS_ALLOW_ORIGIN="*"
HTTP_GW_CORS_ALLOW_METHODS="GET" "POST"
HTTP_GW_CORS_ALLOW_HEADERS="*"
HTTP_GW_CORS_EXPOSE_HEADERS="*"
HTTP_GW_CORS_ALLOW_CREDENTIALS=false
HTTP_GW_CORS_MAX_AGE=600
# Multinet properties
# Enable multinet support
HTTP_GW_MULTINET_ENABLED=false
# Strategy to pick source IP address
HTTP_GW_MULTINET_BALANCER=roundrobin
# Restrict requests with unknown destination subnet
HTTP_GW_MULTINET_RESTRICT=false
# Delay before falling back from IPv6 to IPv4
HTTP_GW_MULTINET_FALLBACK_DELAY=300ms
# List of subnets and IP addresses to use as source for those subnets
HTTP_GW_MULTINET_SUBNETS_0_MASK=1.2.3.4/24
HTTP_GW_MULTINET_SUBNETS_0_SOURCE_IPS=1.2.3.4 1.2.3.5
# Number of workers in handler's worker pool
HTTP_GW_WORKER_POOL_SIZE=1000
# Index page
# Enable index page support
HTTP_GW_INDEX_PAGE_ENABLED=false
# Index page template path
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl

174
config/config.yaml Normal file
View file

@ -0,0 +1,174 @@
wallet:
path: /path/to/wallet.json # Path to wallet.
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP # Account address. If omitted, the default one will be used.
passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
pprof:
enabled: false # Enable pprof.
address: localhost:8083
prometheus:
enabled: false # Enable metrics.
address: localhost:8084
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost:4317"
trusted_ca: ""
attributes:
- key: key0
value: value
- key: key1
value: value
logger:
level: debug # Log level.
destination: stdout
sampling:
enabled: false
initial: 100
thereafter: 100
interval: 1s
server:
- address: 0.0.0.0:8080
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
- address: 0.0.0.0:8081
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
# Nodes configuration.
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
# for 10% of requests and the third node for 90% of requests.
peers:
0:
# Endpoint.
address: grpc://s01.frostfs.devenv:8080
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
# The lower the value, the higher the priority.
priority: 1
# Load distribution proportion for nodes with the same priority.
weight: 1
1:
address: grpc://s02.frostfs.devenv:8080
priority: 2
weight: 1
2:
address: grpc://s03.frostfs.devenv:8080
priority: 2
weight: 9
reconnect_interval: 1m
web:
# Per-connection buffer size for requests' reading.
# This also limits the maximum header size.
read_buffer_size: 4096
# Per-connection buffer size for responses' writing.
write_buffer_size: 4096
# ReadTimeout is the amount of time allowed to read
# the full request including body. The connection's read
# deadline is reset when the connection opens, or for
# keep-alive connections after the first byte has been read.
read_timeout: 10m
# WriteTimeout is the maximum duration before timing out
# writes of the response. It is reset after the request handler
# has returned.
write_timeout: 5m
# StreamRequestBody enables request body streaming,
# and calls the handler sooner when the given body is
# larger than the current limit.
stream_request_body: true
# Maximum request body size.
# The server rejects requests with bodies exceeding this limit.
max_request_body_size: 4194304
# RPC endpoint to be able to use nns container resolving.
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
# The order in which resolvers are used to find a container ID by name.
resolve_order:
- nns
- dns
upload_header:
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
connect_timeout: 5s # Timeout to dial node.
stream_timeout: 10s # Timeout for individual operations in streaming RPC.
request_timeout: 5s # Timeout to check node health during rebalance.
rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which the node is considered unhealthy.
# Number of workers in handler's worker pool
worker_pool_size: 1000
# Enable index page to see the object list for a specified container and prefix
index_page:
enabled: false
template_path: internal/handler/templates/index.gotmpl
zip:
compression: false # Enable zip compression for downloading files by a common prefix.
runtime:
soft_memory_limit: 1gb
# Parameters of requests to FrostFS
frostfs:
# This flag enables client side object preparing.
client_cut: false
# Sets max buffer size for read payload in put operations.
buffer_max_size_for_put: 1048576
# Max attempts to make a successful tree request.
# The default value is 0, which means the number of attempts equals the number of nodes in the pool.
tree_pool_max_attempts: 0
# Caching
cache:
# Cache which contains mapping of bucket name to bucket info
buckets:
lifetime: 1m
size: 1000
resolve_bucket:
namespace_header: X-Frostfs-Namespace
default_namespaces: [ "", "root" ]
cors:
allow_origin: ""
allow_methods: []
allow_headers: []
expose_headers: []
allow_credentials: false
max_age: 600
# Multinet properties
multinet:
# Enable multinet support
enabled: false
# Strategy to pick source IP address
balancer: roundrobin
# Restrict requests with unknown destination subnet
restrict: false
# Delay before falling back from IPv6 to IPv4
fallback_delay: 300ms
# List of subnets and IP addresses to use as source for those subnets
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5

3
config/dir/pprof.yaml Normal file
View file

@ -0,0 +1,3 @@
pprof:
enabled: true
address: localhost:8083

3
config/dir/prometheus.yaml Normal file
View file

@ -0,0 +1,3 @@
prometheus:
enabled: true
address: localhost:8084

5
debian/changelog vendored Normal file
View file

@ -0,0 +1,5 @@
frostfs-http-gw (0.0.0) stable; urgency=medium
* Please see CHANGELOG.md
-- TrueCloudLab <tech@frostfs.info> Wed, 24 Aug 2022 18:29:49 +0300

14
debian/control vendored Normal file
View file

@ -0,0 +1,14 @@
Source: frostfs-http-gw
Section: frostfs
Priority: optional
Maintainer: TrueCloudLab <tech@frostfs.info>
Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts
Standards-Version: 4.5.1
Homepage: https://frostfs.info/
Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
Package: frostfs-http-gw
Architecture: any
Depends: ${misc:Depends}
Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.

25
debian/copyright vendored Normal file
View file

@ -0,0 +1,25 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: frostfs-http-gw
Upstream-Contact: tech@frostfs.info
Source: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
Files: *
Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project
(https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md)
2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project
(https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/src/branch/master/CREDITS.md)
License: GPL-3
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; version 3.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program or at /usr/share/common-licenses/GPL-3.
If not, see <http://www.gnu.org/licenses/>.

2
debian/frostfs-http-gw.dirs vendored Normal file
View file

@ -0,0 +1,2 @@
etc/frostfs
srv/frostfs_cache

4
debian/frostfs-http-gw.docs vendored Normal file
View file

@ -0,0 +1,4 @@
docs/gate-configuration.md
README.md
CREDITS.md
CONTRIBUTING.md

1
debian/frostfs-http-gw.examples vendored Normal file
View file

@ -0,0 +1 @@
config/*

2
debian/frostfs-http-gw.install vendored Normal file
View file

@ -0,0 +1,2 @@
bin/frostfs-http-gw usr/bin
config/config.yaml etc/frostfs/http

51
debian/frostfs-http-gw.postinst vendored Executable file
View file

@ -0,0 +1,51 @@
#!/bin/sh
# postinst script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <postinst> `abort-remove'
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
configure)
USERNAME=http
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -m -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
chmod -f 0750 /etc/frostfs/$USERNAME
chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
fi
USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
chown -f frostfs-$USERNAME: "$USERDIR"
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

41
debian/frostfs-http-gw.postrm vendored Executable file
View file

@ -0,0 +1,41 @@
#!/bin/sh
# postrm script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postrm> `remove'
# * <postrm> `purge'
# * <old-postrm> `upgrade' <new-version>
# * <new-postrm> `failed-upgrade' <old-version>
# * <new-postrm> `abort-install'
# * <new-postrm> `abort-install' <old-version>
# * <new-postrm> `abort-upgrade' <old-version>
# * <disappearer's-postrm> `disappear' <overwriter>
# <overwriter-version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
purge)
rm -rf /srv/frostfs_cache
;;
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

35
debian/frostfs-http-gw.preinst vendored Executable file
View file

@ -0,0 +1,35 @@
#!/bin/sh
# preinst script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <new-preinst> `install'
# * <new-preinst> `install' <old-version>
# * <new-preinst> `upgrade' <old-version>
# * <old-preinst> `abort-upgrade' <new-version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
install|upgrade)
;;
abort-upgrade)
;;
*)
echo "preinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

38
debian/frostfs-http-gw.prerm vendored Executable file
View file

@ -0,0 +1,38 @@
#!/bin/sh
# prerm script for frostfs-http-gw
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <prerm> `remove'
# * <old-prerm> `upgrade' <new-version>
# * <new-prerm> `failed-upgrade' <old-version>
# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
# * <deconfigured's-prerm> `deconfigure' `in-favour'
# <package-being-installed> <version> `removing'
# <conflicting-package> <version>
# for details, see https://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
remove|upgrade|deconfigure)
;;
failed-upgrade)
;;
*)
echo "prerm called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
exit 0

16
debian/frostfs-http-gw.service vendored Normal file
View file

@ -0,0 +1,16 @@
[Unit]
Description=FrostFS HTTP Gateway
Requires=network.target
[Service]
Type=simple
ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml
User=frostfs-http
Group=frostfs-http
WorkingDirectory=/srv/frostfs_cache
Restart=always
RestartSec=5
PrivateTmp=true
[Install]
WantedBy=multi-user.target

14
debian/rules vendored Executable file
View file

@ -0,0 +1,14 @@
#!/usr/bin/make -f
# Do not try to strip Go binaries and do not run test
export DEB_BUILD_OPTIONS := nostrip nocheck
SERVICE = frostfs-http-gw
%:
dh $@
override_dh_installsystemd:
dh_installsystemd --no-enable --no-start $(SERVICE).service
override_dh_installchangelogs:
dh_installchangelogs -k CHANGELOG.md

1
debian/source/format vendored Normal file
View file

@ -0,0 +1 @@
3.0 (quilt)

316
docs/api.md Normal file
View file

@ -0,0 +1,316 @@
# HTTP Gateway Specification
| Route | Description |
|-------------------------------------------------|----------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |
**Note:** the `cid` parameter can be a base58-encoded container ID or a container name
(the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).
Route parameters can be:
* `Single` - matches a single path segment (cannot contain `/` or be empty)
* `Catch-All` - matches everything (such a parameter is usually the last one in a route)
* `Query` - a regular query parameter
### Bearer token
All routes can accept [bearer token](../README.md#authentication) from:
* `Authorization` header with `Bearer` type and base64-encoded token in
credentials field
* `Bearer` cookie with base64-encoded token contents
Example:
Header:
```
Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
```
Cookie:
```
cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
```
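
For illustration, a minimal Go sketch of passing the token both ways (the gateway address, container/object names and the token value are placeholders):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	token := "ChA5..." // base64-encoded bearer token (placeholder)

	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/get/mycontainer/myobject", nil)
	if err != nil {
		panic(err)
	}
	// Option 1: Authorization header.
	req.Header.Set("Authorization", "Bearer "+token)
	// Option 2: Bearer cookie (either one alone is sufficient).
	req.AddCookie(&http.Cookie{Name: "Bearer", Value: token})

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```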
## Put object
Route: `/upload/{cid}`
| Route parameter | Type | Description |
|-----------------|--------|---------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
### Methods
#### POST
Upload file as object with attributes to FrostFS.
##### Request
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for the object. If the header is missing, the current server time is used. |
There are some reserved headers of the form `X-Attribute-System-*` (listed in descending order of priority):
1. `X-Attribute-System-Expiration-Epoch: 100`
2. `X-Attribute-System-Expiration-Duration: 24h30m`
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
Each of these is transformed into `X-Attribute-System-Expiration-Epoch`, so you can provide the expiration in whichever way is most convenient.
If you don't specify the `X-Attribute-Timestamp` header, the `Timestamp` attribute can still be set
(see http-gw [configuration](gate-configuration.md#upload_header-section)).
The `X-Attribute-*` headers must be unique. If you provide several identical headers, only one will be used.
Attribute keys and values must be valid UTF-8 strings. The total size of all attributes must not exceed 3 MB.
###### Body
The body must contain a multipart form with a file field.
The `filename` field of the multipart form will be set as the `FileName` attribute of the object
(it can be overridden by the `X-Attribute-FileName` header).
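
A minimal upload sketch in Go, assuming the gateway listens on `http://localhost:8080` and `mycontainer` resolves to an existing container (both are placeholders):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// The form file's filename becomes the FileName attribute of the object.
	part, err := w.CreateFormFile("file", "cat.jpg")
	if err != nil {
		panic(err)
	}
	f, err := os.Open("cat.jpg")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if _, err = io.Copy(part, f); err != nil {
		panic(err)
	}
	if err = w.Close(); err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/upload/mycontainer", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("X-Attribute-My-Tag", "cats")                       // regular attribute
	req.Header.Set("X-Attribute-System-Expiration-Duration", "24h30m") // expiration via duration

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```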
##### Response
###### Status codes
| Status | Description |
|--------|----------------------------------------------|
| 200 | Object created successfully. |
| 400 | Some error occurred during object uploading. |
## Get object
Route: `/get/{cid}/{oid}?[download=false]`
| Route parameter | Type | Description |
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
| `oid` | Single | Base58 encoded `object ID`. Can also be an `S3 object name` if `cid` is specified as a bucket name. |
| `download` | Query | Set the `Content-Disposition` header to `attachment` in the response.<br/> This makes the browser download the object as a file instead of showing it on the page. |
### Methods
#### GET
Get an object (payload and attributes) by an address.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> `filename` is set to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
| `Content-Length`       | Size of the object payload. |
| `Last-Modified`        | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC7231, RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|------------------------------------------------|
| 200 | Object retrieved successfully. |
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |
###### Body
Returns the object data. If the request is performed from a browser, the data is either displayed inline or
downloaded as an attachment when the `download` query parameter is set to `true`.
If `index_page.enabled` is set to `true`, an HTML index page is returned when no object with the specified
S3 name is found.
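
A minimal download sketch in Go (the gateway address and identifiers are placeholders; `download=true` makes the gateway add `Content-Disposition: attachment`):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	resp, err := http.Get("http://localhost:8080/get/mycontainer/myobject?download=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A few of the response headers described above.
	fmt.Println("object ID:   ", resp.Header.Get("X-Object-Id"))
	fmt.Println("content type:", resp.Header.Get("Content-Type"))

	out, err := os.Create("object.bin")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err = io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```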
#### HEAD
Get an object attributes by an address.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
| `Content-Length`       | Size of the object payload. |
| `Last-Modified`        | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC7231, RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|---------------------------------------------------|
| 200 | Object attributes retrieved successfully. |
| 400 | Some error occurred during object HEAD operation. |
| 404 | Container or object not found. |
## Search object
Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]`
| Route parameter | Type | Description |
|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `attr_key` | Single | Object attribute key to search. |
| `attr_val` | Catch-All | Object attribute value to match. |
| `download` | Query | Set the `Content-Disposition` header to `attachment` in the response. This makes the browser download the object as a file instead of showing it on the page. |
### Methods
#### GET
Find and get an object (payload and attributes) by a specific attribute.
If more than one object is found, an arbitrary one will be returned.
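
A minimal search sketch in Go (placeholders as before; note that `attr_val` is a catch-all segment, so it may contain slashes):

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	attrKey := url.PathEscape("FilePath") // escape the single-segment key
	attrVal := "pictures/cat.jpg"         // slashes are allowed in the catch-all value

	u := fmt.Sprintf("http://localhost:8080/get_by_attribute/mycontainer/%s/%s", attrKey, attrVal)
	resp, err := http.Get(u)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```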
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Disposition`  | Indicates how browsers should treat the file. <br/> `filename` is set to the base part of the `FileName` object attribute (if it's set, empty otherwise). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
| `Content-Length`       | Size of the object payload. |
| `Last-Modified`        | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC7231, RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|------------------------------------------------|
| 200 | Object retrieved successfully. |
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |
#### HEAD
Get object attributes by a specific attribute.
If more than one object is found, an arbitrary one will be used to get attributes.
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
| `Content-Type`         | Indicates the content type of the object. Set from the `Content-Type` attribute or detected from the payload. |
| `Content-Length`       | Size of the object payload. |
| `Last-Modified`        | Contains the `Timestamp` attribute (if it exists) formatted as HTTP time (RFC7231, RFC1123). |
| `X-Owner-Id` | Base58 encoded owner ID. |
| `X-Container-Id` | Base58 encoded container ID. |
| `X-Object-Id` | Base58 encoded object ID. |
###### Status codes
| Status | Description |
|--------|---------------------------------------|
| 200 | Object attributes retrieved successfully. |
| 400 | Some error occurred during operation. |
| 404 | Container or object not found. |
## Download zip
Route: `/zip/{cid}/{prefix}`
| Route parameter | Type | Description |
|-----------------|-----------|---------------------------------------------------------|
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. |
### Methods
#### GET
Finds objects by the prefix of their `FilePath` attribute and returns them in a zip archive.
The name of each file in the archive is set to the object's `FilePath` attribute.
The modification time of each file is set to the time when its download started.
You can download all files in a container that have a `FilePath` attribute via the `/zip/{cid}/` route.
The archive can be compressed (see the http-gw [configuration](gate-configuration.md#zip-section)).
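
A minimal archive-download sketch in Go (placeholders as in the previous examples):

```go
package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// Download every object whose FilePath starts with "pictures" as a zip archive.
	resp, err := http.Get("http://localhost:8080/zip/mycontainer/pictures")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("archive.zip")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err = io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```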
##### Request
###### Headers
| Header | Description |
|----------------|------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
##### Response
###### Headers
| Header | Description |
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
| `Content-Disposition` | Indicates how browsers should treat the file (`attachment`). `filename` is set to `archive.zip`. |
| `Content-Type`        | Indicates the content type. Set to `application/zip`. |
###### Status codes
| Status | Description |
|--------|-----------------------------------------------------|
| 200 | Objects retrieved successfully. |
| 400 | Some error occurred during object downloading. |
| 404 | Container or objects not found. |
| 500 | Some internal error (e.g. an error while streaming objects). |

46
docs/building-deb-package.md Normal file
View file

@ -0,0 +1,46 @@
# Building Debian package on host
## Prerequisites
For now, we're assuming building for Debian 11 (stable) x86_64.
Go version 1.18.4 or later should already be installed, i.e. this runs
successfully:
* `make all`
## Installing packaging dependencies
```shell
$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
```
Warning: the number of packages installed is pretty large considering dependencies.
## Package building
```shell
$ make debpackage
```
## Leftovers cleaning
```shell
$ make debclean
```
or
```shell
$ dh clean
```
# Package versioning
By default, the package version is based on the product version and may also contain git
tags and hashes.
The package version can be overridden by setting the `PKG_VERSION` variable before the
build; Debian package versioning rules should be respected.
```shell
$ PKG_VERSION=0.32.0 make debpackage
```

459
docs/gate-configuration.md Normal file
View file

@ -0,0 +1,459 @@
# FrostFS HTTP Gateway configuration file
This section contains detailed FrostFS HTTP Gateway configuration file description
including default config values and some tips to set up configurable values.
There are some custom types used for brevity:
* `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
milliseconds).
# Reload on SIGHUP
Some config values can be reloaded on SIGHUP signal.
Such parameters have special mark in tables below.
You can send SIGHUP signal to app using the following command:
```shell
$ kill -s SIGHUP <app_pid>
```
Example:
```shell
$ ./bin/frostfs-http-gw --config config.yaml &> http.log &
[1] 998346
$ cat http.log
# ...
2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"}
# ...
$ kill -s SIGHUP 998346
$ cat http.log
# ...
2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed
```
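
For context, a minimal sketch of how SIGHUP-driven reload is typically wired in a Go service (this is an illustration, not the gateway's actual code):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sighup := make(chan os.Signal, 1)
	signal.Notify(sighup, syscall.SIGHUP)
	for range sighup {
		// Re-read config files and apply the reloadable values
		// (log level, listeners, etc.), keeping the process running.
		fmt.Println("SIGHUP received: config reload")
	}
}
```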
# Structure
| Section | Description |
|------------------|----------------------------------------------------------------|
| no section | [General parameters](#general-section) |
| `wallet` | [Wallet configuration](#wallet-section) |
| `peers` | [Nodes configuration](#peers-section) |
| `logger` | [Logger configuration](#logger-section) |
| `web` | [Web configuration](#web-section) |
| `server` | [Server configuration](#server-section) |
| `upload_header`  | [Upload header configuration](#upload_header-section)          |
| `zip` | [ZIP configuration](#zip-section) |
| `pprof` | [Pprof configuration](#pprof-section) |
| `prometheus` | [Prometheus configuration](#prometheus-section) |
| `tracing` | [Tracing configuration](#tracing-section) |
| `runtime` | [Runtime configuration](#runtime-section) |
| `frostfs` | [Frostfs configuration](#frostfs-section) |
| `cache` | [Cache configuration](#cache-section) |
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
# General section
```yaml
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
- nns
- dns
connect_timeout: 5s
stream_timeout: 10s
request_timeout: 5s
rebalance_timer: 30s
pool_error_threshold: 100
reconnect_interval: 1m
worker_pool_size: 1000
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------------|------------|---------------|---------------|------------------------------------------------------------------------------------|
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
| `pool_error_threshold` | `uint32`   |               | `100`         | The number of errors on connection after which the node is considered unhealthy.    |
| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
| `worker_pool_size` | `int` | no | `1000` | Maximum worker count in handler's worker pool. |
# `wallet` section
```yaml
wallet:
path: /path/to/wallet.json
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
passphrase: pwd
```
| Parameter | Type | Default value | Description |
|--------------|----------|---------------|--------------------------------------------------------------------------|
| `path` | `string` | | Path to the wallet. |
| `address`    | `string` |               | Account address to get from the wallet. If omitted, the default one will be used. |
| `passphrase` | `string` | | Passphrase to decrypt wallet. |
# `peers` section
```yaml
# Nodes configuration
# This configuration makes the gateway use the first node (node1.frostfs:8080)
# while it's healthy. Otherwise, the gateway uses the second node (node2.frostfs:8080)
# for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests.
# Until nodes with the same priority level are healthy
# nodes with other priority are not used.
# The lower the value, the higher the priority.
peers:
0:
address: node1.frostfs:8080
priority: 1
weight: 1
1:
address: node2.frostfs:8080
priority: 2
weight: 0.1
2:
address: node3.frostfs:8080
priority: 2
weight: 0.9
```
| Parameter | Type | Default value | Description |
|------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
| `address` | `string` | | Address of storage node. |
| `priority` | `int`    | `1`           | Allows grouping nodes: the group is not switched until all nodes with the same priority become unhealthy. The lower the value, the higher the priority. |
| `weight`   | `float`  | `1`           | Weight of the node within its priority group. Requests are distributed to nodes proportionally to these values.                                         |
# `server` section
You can specify several listeners for server. For example, for `http` and `https`.
```yaml
server:
- address: 0.0.0.0:8080
tls:
enabled: false
cert_file: /path/to/cert
key_file: /path/to/key
- address: 0.0.0.0:8081
tls:
enabled: true
cert_file: /path/to/another/cert
key_file: /path/to/another/key
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------|----------|---------------|----------------|-----------------------------------------------|
| `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. |
| `tls.enabled`   | `bool`   |               | `false`        | Enable TLS or not.                             |
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
| `tls.key_file` | `string` | yes | | Path to the key. |
# `logger` section
```yaml
logger:
level: debug
destination: stdout
sampling:
enabled: false
initial: 100
thereafter: 100
interval: 1s
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------|
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald`. |
| `sampling.enabled` | `bool` | no | `false` | Sampling enabling flag. |
| `sampling.initial` | `int` | no | `100` | Sampling count of first log entries. |
| `sampling.thereafter` | `int` | no | `100` | Sampling count of entries after an `interval`. |
| `sampling.interval` | `duration` | no | `1s` | Sampling interval for logging similar entries. |
# `web` section
```yaml
web:
read_buffer_size: 4096
write_buffer_size: 4096
read_timeout: 10m
write_timeout: 5m
stream_request_body: true
max_request_body_size: 4194304
```
| Parameter | Type | Default value | Description |
|-------------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `read_buffer_size` | `int` | `4096` | Per-connection buffer size for reading requests. This also limits the maximum header size. |
| `write_buffer_size` | `int` | `4096` | Per-connection buffer size for writing responses. |
| `read_timeout` | `duration` | `10m` | The amount of time allowed to read the full request, including the body. The connection's read deadline is reset when the connection opens or, for keep-alive connections, after the first byte has been read. |
| `write_timeout` | `duration` | `5m` | The maximum duration before timing out writes of the response. It is reset after the request handler has returned. |
| `stream_request_body` | `bool` | `true` | Enables request body streaming; the handler is called sooner when the given body is larger than the current limit. |
| `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |
# `upload-header` section
```yaml
upload_header:
use_default_timestamp: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
| `use_default_timestamp` | `bool` | yes | `false` | Create a timestamp for the object if it isn't provided in the headers. |
# `zip` section
```yaml
zip:
compression: false
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------|--------|---------------|---------------|--------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable zip compression when downloading files by a common prefix. |
# `pprof` section
Contains configuration for the `pprof` profiler.
```yaml
pprof:
enabled: true
address: localhost:8083
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|------------------|-----------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
| `address` | `string` | yes | `localhost:8083` | Address that service listener binds to. |
# `prometheus` section
Contains configuration for the `prometheus` metrics service.
```yaml
prometheus:
enabled: true
address: localhost:8084
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|------------------|-----------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
# `tracing` section
Contains configuration for the `tracing` service.
```yaml
tracing:
enabled: true
exporter: "otlp_grpc"
endpoint: "localhost:4317"
trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem"
attributes:
- key: key0
value: value
- key: key1
value: value
```
| Parameter | Type | SIGHUP reload | Default value | Description |
| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
| `trusted_ca` | `string` | yes | | Path to the PEM-encoded certificate of the certification authority that issued the TLS certificate of the remote telemetry server. |
| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |
#### `attributes` subsection
```yaml
attributes:
- key: key0
value: value
- key: key1
value: value
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |
# `runtime` section
Contains runtime parameters.
```yaml
runtime:
soft_memory_limit: 1gb
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
# `frostfs` section
Contains parameters of requests to FrostFS.
```yaml
frostfs:
client_cut: false
buffer_max_size_for_put: 1048576 # 1mb
tree_pool_max_attempts: 0
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------------------|----------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------|
| `client_cut` | `bool` | yes | `false` | This flag enables client-side object preparation. |
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets the max buffer size for reading the payload in put operations. |
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets the max number of attempts to make a successful tree request. Value `0` means the number of attempts equals the number of nodes in the pool. |
# `cache` section
```yaml
cache:
buckets:
lifetime: 1m
size: 1000
```
| Parameter | Type | Default value | Description |
|-----------------|-----------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache that maps bucket names to bucket info. |
#### `cache` subsection
```yaml
lifetime: 1m
size: 1000
```
| Parameter | Type | Default value | Description |
|------------|------------|------------------|-------------------------------|
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
| `size` | `int` | depends on cache | LRU cache size. |
# `resolve_bucket` section
Bucket name resolving parameters from and to container ID.
```yaml
resolve_bucket:
namespace_header: X-Frostfs-Namespace
default_namespaces: [ "", "root" ]
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
# `index_page` section
Parameters for index HTML-page output. Activated if a `GetObject` request returns `not found`. Two
index page modes are available:
* `s3` mode uses the tree service for listing objects,
* `native` mode sends requests to nodes via the native protocol.
If the request passes an S3 bucket name instead of a CID, `s3` mode is used; otherwise, `native` (see the illustration below).
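For illustration (hypothetical names; `<container-id>` stands for a real CID):
```
GET /get/my-bucket/docs/        -> s3 mode (bucket name, listing via tree service)
GET /get/<container-id>/docs/   -> native mode (CID, listing via native object search)
```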
```yaml
index_page:
enabled: false
template_path: ""
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------|----------|---------------|---------------|---------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable returning the index page if no object with the specified S3 name was found. |
| `template_path` | `string` | yes | `""` | Path to the `.gotmpl` file with the HTML template for the index page. |
# `cors` section
Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, headers will not be included in the response.
```yaml
cors:
allow_origin: "*"
allow_methods: ["GET", "HEAD"]
allow_headers: ["Authorization"]
expose_headers: ["*"]
allow_credentials: false
max_age: 600
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------------|------------|---------------|---------------|--------------------------------------------------------|
| `allow_origin` | `string` | yes | | Values for `Access-Control-Allow-Origin` headers. |
| `allow_methods` | `[]string` | yes | | Values for `Access-Control-Allow-Methods` headers. |
| `allow_headers` | `[]string` | yes | | Values for `Access-Control-Allow-Headers` headers. |
| `expose_headers` | `[]string` | yes | | Values for `Access-Control-Expose-Headers` headers. |
| `allow_credentials` | `bool` | yes | `false` | Values for `Access-Control-Allow-Credentials` headers. |
| `max_age` | `int` | yes | `600` | Values for `Access-Control-Max-Age` headers. |
# `multinet` section
Configuration of multinet support.
```yaml
multinet:
enabled: false
balancer: roundrobin
restrict: false
fallback_delay: 300ms
subnets:
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|--------------------------------|---------------|---------------|--------------------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Enables the multinet setting to manage the source IP of outgoing requests. |
| `balancer` | `string` | yes | `""` | Strategy to pick the source IP. By default, the first address is picked. The `roundrobin` setting is supported. |
| `restrict` | `bool` | yes | `false` | Restricts requests to undefined subnets. |
| `fallback_delay` | `duration` | yes | `300ms` | Delay between IPv6 and IPv4 fallback stack switch. |
| `subnets` | [[]Subnet](#subnet-subsection) | yes | | Set of subnets to apply multinet dial settings. |
#### `subnet` subsection
```yaml
- mask: 1.2.3.4/24
source_ips:
- 1.2.3.4
- 1.2.3.5
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------|------------|---------------|---------------|----------------------------------------------------------------------|
| `mask` | `string` | yes | | Destination subnet. |
| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. |

120
go.mod Normal file
View file

@ -0,0 +1,120 @@
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241022124111-5361f0ecebd3
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/bluele/gcache v0.0.2
github.com/docker/go-units v0.4.0
github.com/fasthttp/router v1.4.1
github.com/nspcc-dev/neo-go v0.106.2
github.com/panjf2000/ants/v2 v2.5.0
github.com/prometheus/client_golang v1.19.0
github.com/prometheus/client_model v0.5.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.13.0
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
github.com/valyala/fasthttp v1.34.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
golang.org/x/net v0.26.0
golang.org/x/sys v0.22.0
google.golang.org/grpc v1.66.2
)
require (
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v20.10.14+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.16.4 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/sys/mount v0.3.2 // indirect
github.com/moby/sys/mountinfo v0.6.1 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/twmb/murmur3 v1.1.8 // indirect
github.com/urfave/cli v1.22.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
go.etcd.io/bbolt v1.3.9 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/term v0.21.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

1423
go.sum Normal file

File diff suppressed because it is too large

22
help.mk Normal file
View file

@ -0,0 +1,22 @@
.PHONY: help
# Show this help prompt
help:
@echo ' Usage:'
@echo ''
@echo ' make <target>'
@echo ''
@echo ' Targets:'
@echo ''
@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq
# Show help for docker/% IGNORE
help.docker/%:
$(eval TARGETS:=$(notdir all lint) ${BINS})
@echo ' Usage:'
@echo ''
@echo ' make docker/% -- Run `make %` in Golang container'
@echo ''
@echo ' Supported docker targets:'
@echo ''
@$(foreach bin, $(TARGETS), echo ' ' $(bin);)

View file

@ -0,0 +1,22 @@
package layer
import (
"context"
"errors"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
// TreeService provides an interface to interact with the tree service using S3 data models.
type TreeService interface {
GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
}
var (
// ErrNodeNotFound is returned from Tree service in case of not found error.
ErrNodeNotFound = errors.New("not found")
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
ErrNodeAccessDenied = errors.New("access denied")
)
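// A minimal usage sketch (hypothetical caller) showing how the sentinel
// errors above are expected to be matched with errors.Is:
//
//	version, err := svc.GetLatestVersion(ctx, &cnrID, "docs/readme.md")
//	switch {
//	case errors.Is(err, ErrNodeNotFound):
//		// no such key in the tree -> respond 404
//	case errors.Is(err, ErrNodeAccessDenied):
//		// bearer token lacks rights -> respond 403
//	case err != nil:
//		// transport or internal error
//	default:
//		_ = version.OID // address of the latest version
//	}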

18
internal/api/tree.go Normal file
View file

@ -0,0 +1,18 @@
package api
import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// NodeVersion represents a node from the tree service.
type NodeVersion struct {
BaseNodeVersion
DeleteMarker bool
IsPrefixNode bool
}
// BaseNodeVersion is the minimal node info from the tree service.
// Mainly used for "system" objects.
type BaseNodeVersion struct {
OID oid.ID
}

72
internal/cache/buckets.go vendored Normal file
View file

@ -0,0 +1,72 @@
package cache
import (
"fmt"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"github.com/bluele/gcache"
"go.uber.org/zap"
)
// BucketCache contains a cache of bucket info entries and their lifetime.
type BucketCache struct {
cache gcache.Cache
logger *zap.Logger
}
// Config stores expiration params for cache.
type Config struct {
Size int
Lifetime time.Duration
Logger *zap.Logger
}
const (
// DefaultBucketCacheSize is a default maximum number of entries in cache.
DefaultBucketCacheSize = 1e3
// DefaultBucketCacheLifetime is a default lifetime of entries in cache.
DefaultBucketCacheLifetime = time.Minute
)
// DefaultBucketConfig returns new default cache expiration values.
func DefaultBucketConfig(logger *zap.Logger) *Config {
return &Config{
Size: DefaultBucketCacheSize,
Lifetime: DefaultBucketCacheLifetime,
Logger: logger,
}
}
// NewBucketCache creates an object of BucketCache.
func NewBucketCache(config *Config) *BucketCache {
gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
return &BucketCache{cache: gc, logger: config.Logger}
}
// Get returns a cached object.
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
entry, err := o.cache.Get(formKey(ns, bktName))
if err != nil {
return nil
}
result, ok := entry.(*data.BucketInfo)
if !ok {
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
zap.String("expected", fmt.Sprintf("%T", result)))
return nil
}
return result
}
// Put puts an object to cache.
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
}
func formKey(ns, name string) string {
return name + "." + ns
}
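// Usage sketch (hypothetical wiring, assuming a configured *zap.Logger named logger):
//
//	cache := NewBucketCache(DefaultBucketConfig(logger))
//	_ = cache.Put(&data.BucketInfo{Name: "my-bucket", Zone: "root"})
//	if bkt := cache.Get("root", "my-bucket"); bkt != nil {
//		_ = bkt.CID // cache hit: the container ID is reused without resolving the name again
//	}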

12
internal/data/bucket.go Normal file
View file

@ -0,0 +1,12 @@
package data
import (
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)
type BucketInfo struct {
Name string // container name from system attribute
Zone string // container zone from system attribute
CID cid.ID
HomomorphicHashDisabled bool
}

375
internal/handler/browse.go Normal file
View file

@ -0,0 +1,375 @@
package handler
import (
"context"
"html/template"
"net/url"
"sort"
"strconv"
"strings"
"sync"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/docker/go-units"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
dateFormat = "02-01-2006 15:04"
attrOID = "OID"
attrCreated = "Created"
attrFileName = "FileName"
attrSize = "Size"
)
type (
BrowsePageData struct {
HasErrors bool
Container string
Prefix string
Protocol string
Objects []ResponseObject
}
ResponseObject struct {
OID string
Created string
FileName string
FilePath string
Size string
IsDir bool
GetURL string
}
)
func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
return ResponseObject{
Created: formatTimestamp(attrs[attrCreated]),
OID: attrs[attrOID],
FileName: attrs[attrFileName],
Size: attrs[attrSize],
IsDir: attrs[attrOID] == "",
}
}
func newListObjectsResponseNative(attrs map[string]string) ResponseObject {
filename := lastPathElement(attrs[object.AttributeFilePath])
if filename == "" {
filename = attrs[attrFileName]
}
return ResponseObject{
OID: attrs[attrOID],
Created: formatTimestamp(attrs[object.AttributeTimestamp] + "000"),
FileName: filename,
FilePath: attrs[object.AttributeFilePath],
Size: attrs[attrSize],
IsDir: false,
}
}
func getNextDir(filepath, prefix string) string {
restPath := strings.Replace(filepath, prefix, "", 1)
index := strings.Index(restPath, "/")
if index == -1 {
return ""
}
return restPath[:index]
}
func lastPathElement(path string) string {
if path == "" {
return path
}
index := strings.LastIndex(path, "/")
if index == len(path)-1 {
index = strings.LastIndex(path[:index], "/")
}
return path[index+1:]
}
func parseTimestamp(tstamp string) (time.Time, error) {
millis, err := strconv.ParseInt(tstamp, 10, 64)
if err != nil {
return time.Time{}, err
}
return time.UnixMilli(millis), nil
}
func formatTimestamp(strdate string) string {
date, err := parseTimestamp(strdate)
if err != nil || date.IsZero() {
return ""
}
return date.Format(dateFormat)
}
func formatSize(strsize string) string {
size, err := strconv.ParseFloat(strsize, 64)
if err != nil {
return "0B"
}
return units.HumanSize(size)
}
func parentDir(prefix string) string {
index := strings.LastIndex(prefix, "/")
if index == -1 {
return prefix
}
return prefix[index:]
}
func trimPrefix(encPrefix string) string {
prefix, err := url.PathUnescape(encPrefix)
if err != nil {
return ""
}
slashIndex := strings.LastIndex(prefix, "/")
if slashIndex == -1 {
return ""
}
return prefix[:slashIndex]
}
func urlencode(path string) string {
var res strings.Builder
prefixParts := strings.Split(path, "/")
for _, prefixPart := range prefixParts {
prefixPart = "/" + url.PathEscape(prefixPart)
if prefixPart == "/." || prefixPart == "/.." {
prefixPart = url.PathEscape(prefixPart)
}
res.WriteString(prefixPart)
}
return res.String()
}
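// Behavior sketch (hypothetical inputs, for illustration):
//
//	urlencode("docs/my file.txt") // "/docs/my%20file.txt": each segment is escaped and a leading slash is added
//	urlencode("a/../b")           // "/a%2F../b": "." and ".." segments are escaped whole, so they cannot traverse paths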
type GetObjectsResponse struct {
objects []ResponseObject
hasErrors bool
}
func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
if err != nil {
return nil, err
}
result := &GetObjectsResponse{
objects: make([]ResponseObject, 0, len(nodes)),
}
for _, node := range nodes {
meta := node.GetMeta()
if meta == nil {
continue
}
var attrs = make(map[string]string, len(meta))
for _, m := range meta {
attrs[m.GetKey()] = string(m.GetValue())
}
obj := newListObjectsResponseS3(attrs)
obj.FilePath = prefix + obj.FileName
obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
result.objects = append(result.objects, obj)
}
return result, nil
}
func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
var basePath string
if ind := strings.LastIndex(prefix, "/"); ind != -1 {
basePath = prefix[:ind+1]
}
filters := object.NewSearchFilters()
filters.AddRootFilter()
if prefix != "" {
filters.AddFilter(object.AttributeFilePath, prefix, object.MatchCommonPrefix)
}
prm := PrmObjectSearch{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Container: bucketInfo.CID,
Filters: filters,
}
objectIDs, err := h.frostfs.SearchObjects(ctx, prm)
if err != nil {
return nil, err
}
defer objectIDs.Close()
resp, err := h.headDirObjects(ctx, bucketInfo.CID, objectIDs, basePath)
if err != nil {
return nil, err
}
log := utils.GetReqLogOrDefault(ctx, h.log)
dirs := make(map[string]struct{})
result := &GetObjectsResponse{
objects: make([]ResponseObject, 0, 100),
}
for objExt := range resp {
if objExt.Error != nil {
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
result.hasErrors = true
continue
}
if objExt.Object.IsDir {
if _, ok := dirs[objExt.Object.FileName]; ok {
continue
}
objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + urlencode(objExt.Object.FilePath)
dirs[objExt.Object.FileName] = struct{}{}
} else {
objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + "/" + objExt.Object.OID
}
result.objects = append(result.objects, objExt.Object)
}
return result, nil
}
type ResponseObjectExtended struct {
Object ResponseObject
Error error
}
func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs ResObjectSearch, basePath string) (<-chan ResponseObjectExtended, error) {
res := make(chan ResponseObjectExtended)
go func() {
defer close(res)
log := utils.GetReqLogOrDefault(ctx, h.log).With(
zap.String("cid", cnrID.EncodeToString()),
zap.String("path", basePath),
)
var wg sync.WaitGroup
err := objectIDs.Iterate(func(id oid.ID) bool {
wg.Add(1)
err := h.workerPool.Submit(func() {
defer wg.Done()
var obj ResponseObjectExtended
obj.Object, obj.Error = h.headDirObject(ctx, cnrID, id, basePath)
res <- obj
})
if err != nil {
wg.Done()
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
}
select {
case <-ctx.Done():
return true
default:
return false
}
})
if err != nil {
log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
}
wg.Wait()
}()
return res, nil
}
func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID, basePath string) (ResponseObject, error) {
addr := newAddress(cnrID, objID)
obj, err := h.frostfs.HeadObject(ctx, PrmObjectHead{
PrmAuth: PrmAuth{BearerToken: bearerToken(ctx)},
Address: addr,
})
if err != nil {
return ResponseObject{}, err
}
attrs := loadAttributes(obj.Attributes())
attrs[attrOID] = objID.EncodeToString()
if multipartSize, ok := attrs[attributeMultipartObjectSize]; ok {
attrs[attrSize] = multipartSize
} else {
attrs[attrSize] = strconv.FormatUint(obj.PayloadSize(), 10)
}
dirname := getNextDir(attrs[object.AttributeFilePath], basePath)
if dirname == "" {
return newListObjectsResponseNative(attrs), nil
}
return ResponseObject{
FileName: dirname,
FilePath: basePath + dirname,
IsDir: true,
}, nil
}
type browseParams struct {
bucketInfo *data.BucketInfo
prefix string
isNative bool
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
}
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
const S3Protocol = "s3"
const FrostfsProtocol = "frostfs"
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(
zap.String("bucket", p.bucketInfo.Name),
zap.String("container", p.bucketInfo.CID.EncodeToString()),
zap.String("prefix", p.prefix),
)
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objects := resp.objects
sort.Slice(objects, func(i, j int) bool {
if objects[i].IsDir == objects[j].IsDir {
return objects[i].FileName < objects[j].FileName
}
return objects[i].IsDir
})
tmpl, err := template.New("index").Funcs(template.FuncMap{
"formatSize": formatSize,
"trimPrefix": trimPrefix,
"urlencode": urlencode,
"parentDir": parentDir,
}).Parse(h.config.IndexPageTemplate())
if err != nil {
logAndSendBucketError(c, log, err)
return
}
bucketName := p.bucketInfo.Name
protocol := S3Protocol
if p.isNative {
bucketName = p.bucketInfo.CID.EncodeToString()
protocol = FrostfsProtocol
}
if err = tmpl.Execute(c, &BrowsePageData{
Container: bucketName,
Prefix: p.prefix,
Objects: objects,
Protocol: protocol,
HasErrors: resp.hasErrors,
}); err != nil {
logAndSendBucketError(c, log, err)
return
}
}

View file

@ -0,0 +1,200 @@
package handler
import (
"archive/zip"
"bufio"
"context"
"fmt"
"io"
"net/http"
"net/url"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
oidURLParam := c.UserValue("oid").(string)
downloadQueryParam := c.QueryArgs().GetBool("download")
switch {
case isObjectID(oidURLParam):
h.byNativeAddress(c, h.receiveFile)
case !isContainerRoot(oidURLParam) && (downloadQueryParam || !isDir(oidURLParam)):
h.byS3Path(c, h.receiveFile)
default:
h.browseIndex(c)
}
}
func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
return &request{
RequestCtx: ctx,
log: log,
}
}
// DownloadByAttribute handles attribute-based download requests.
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
h.byAttribute(c, h.receiveFile)
}
func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
filters := object.NewSearchFilters()
filters.AddRootFilter()
filters.AddFilter(key, val, op)
prm := PrmObjectSearch{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Container: cnrID,
Filters: filters,
}
return h.frostfs.SearchObjects(ctx, prm)
}
func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
method := zip.Store
if h.config.ZipCompression() {
method = zip.Deflate
}
filePath := getZipFilePath(obj)
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
}
return zw.CreateHeader(&zip.FileHeader{
Name: filePath,
Method: method,
Modified: time.Now(),
})
}
// DownloadZipped handles zip by prefix requests.
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
scid, _ := c.UserValue("cid").(string)
prefix, _ := c.UserValue("prefix").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
prefix, err := url.QueryUnescape(prefix)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
return
}
log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
resSearch, err := h.search(ctx, bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return
}
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
c.Response.SetStatusCode(http.StatusOK)
c.SetBodyStreamWriter(func(w *bufio.Writer) {
defer resSearch.Close()
zipWriter := zip.NewWriter(w)
var bufZip []byte
var addr oid.Address
empty := true
called := false
btoken := bearerToken(ctx)
addr.SetContainer(bktInfo.CID)
errIter := resSearch.Iterate(func(id oid.ID) bool {
called = true
if empty {
bufZip = make([]byte, 3<<20) // the same as for upload
}
empty = false
addr.SetObject(id)
if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
}
return false
})
if errIter != nil {
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
} else if !called {
log.Error(logs.ObjectsNotFound)
}
if err = zipWriter.Close(); err != nil {
log.Error(logs.CloseZipWriter, zap.Error(err))
}
})
}
func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: btoken,
},
Address: addr,
}
resGet, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
return fmt.Errorf("get FrostFS object: %v", err)
}
objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
if err != nil {
return fmt.Errorf("zip create header: %v", err)
}
if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
return fmt.Errorf("copy object payload to zip file: %v", err)
}
if err = resGet.Payload.Close(); err != nil {
return fmt.Errorf("object body close error: %w", err)
}
if err = zipWriter.Flush(); err != nil {
return fmt.Errorf("flush zip writer: %v", err)
}
return nil
}
func getZipFilePath(obj *object.Object) string {
for _, attr := range obj.Attributes() {
if attr.Key() == object.AttributeFilePath {
return attr.Value()
}
}
return ""
}

View file

@ -0,0 +1,57 @@
package handler
import (
"bytes"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) {
var err error
result := make(map[string]string)
prefix := []byte(utils.UserAttributeHeaderPrefix)
header.VisitAll(func(key, val []byte) {
// checks that the key and the val not empty
if len(key) == 0 || len(val) == 0 {
return
}
// checks that the key has attribute prefix
if !bytes.HasPrefix(key, prefix) {
return
}
// removing attribute prefix
clearKey := bytes.TrimPrefix(key, prefix)
clearKey = utils.TransformIfSystem(clearKey)
// checks that the attribute key is not empty
if len(clearKey) == 0 {
return
}
// check if key gets duplicated
// return error containing full key name (with prefix)
if _, ok := result[string(clearKey)]; ok {
err = fmt.Errorf("key duplication error: %s", string(key))
return
}
// make string representation of key / val
k, v := string(clearKey), string(val)
result[k] = v
l.Debug(logs.AddAttributeToResultObject,
zap.String("key", k),
zap.String("val", v))
})
return result, err
}

View file

@ -0,0 +1,53 @@
//go:build !integration
package handler
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
func TestFilter(t *testing.T) {
log := zap.NewNop()
t.Run("duplicate keys error", func(t *testing.T) {
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Add("X-Attribute-DupKey", "first-value")
req.Add("X-Attribute-DupKey", "second-value")
_, err := filterHeaders(log, req)
require.Error(t, err)
})
t.Run("duplicate system keys error", func(t *testing.T) {
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Add("X-Attribute-System-DupKey", "first-value")
req.Add("X-Attribute-System-DupKey", "second-value")
_, err := filterHeaders(log, req)
require.Error(t, err)
})
req := &fasthttp.RequestHeader{}
req.DisableNormalizing()
req.Set("X-Attribute-System-Expiration-Epoch1", "101")
req.Set("X-Attribute-SYSTEM-Expiration-Epoch2", "102")
req.Set("X-Attribute-system-Expiration-Epoch3", "103")
req.Set("X-Attribute-MyAttribute", "value")
expected := map[string]string{
"__SYSTEM__EXPIRATION_EPOCH1": "101",
"MyAttribute": "value",
"__SYSTEM__EXPIRATION_EPOCH3": "103",
"__SYSTEM__EXPIRATION_EPOCH2": "102",
}
result, err := filterHeaders(log, req)
require.NoError(t, err)
require.Equal(t, expected, result)
}

View file

@ -0,0 +1,275 @@
package handler
import (
"bytes"
"context"
"crypto/rand"
"crypto/sha256"
"fmt"
"io"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
)
type TestFrostFS struct {
objects map[string]*object.Object
containers map[string]*container.Container
accessList map[string]bool
key *keys.PrivateKey
}
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
return &TestFrostFS{
objects: make(map[string]*object.Object),
containers: make(map[string]*container.Container),
accessList: make(map[string]bool),
key: key,
}
}
func (t *TestFrostFS) ContainerID(name string) (*cid.ID, error) {
for id, cnr := range t.containers {
if container.Name(*cnr) == name {
var cnrID cid.ID
return &cnrID, cnrID.DecodeString(id)
}
}
return nil, fmt.Errorf("not found")
}
func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
t.containers[cnrID.EncodeToString()] = cnr
}
// AllowUserOperation grants access to object operations.
// Empty userID and objID means any user and object respectively.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
t.accessList[fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID)] = true
}
func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
for k, v := range t.containers {
if k == prm.ContainerID.EncodeToString() {
return v, nil
}
}
return nil, fmt.Errorf("container not found %s", prm.ContainerID)
}
func (t *TestFrostFS) requestOwner(btoken *bearer.Token) user.ID {
if btoken != nil {
return bearer.ResolveIssuer(*btoken)
}
var owner user.ID
user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
return owner
}
func (t *TestFrostFS) retrieveObject(addr oid.Address, btoken *bearer.Token) (*object.Object, error) {
sAddr := addr.EncodeToString()
if obj, ok := t.objects[sAddr]; ok {
owner := t.requestOwner(btoken)
if !t.isAllowed(addr.Container(), owner, acl.OpObjectGet, addr.Object()) {
return nil, ErrAccessDenied
}
return obj, nil
}
return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
}
func (t *TestFrostFS) HeadObject(_ context.Context, prm PrmObjectHead) (*object.Object, error) {
return t.retrieveObject(prm.Address, prm.BearerToken)
}
func (t *TestFrostFS) GetObject(_ context.Context, prm PrmObjectGet) (*Object, error) {
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
if err != nil {
return nil, err
}
return &Object{
Header: *obj,
Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
}, nil
}
func (t *TestFrostFS) RangeObject(_ context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
if err != nil {
return nil, err
}
off := prm.PayloadRange[0]
payload := obj.Payload()[off : off+prm.PayloadRange[1]]
return io.NopCloser(bytes.NewReader(payload)), nil
}
func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
b := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, b); err != nil {
return oid.ID{}, err
}
var id oid.ID
id.SetSHA256(sha256.Sum256(b))
prm.Object.SetID(id)
attrs := prm.Object.Attributes()
if prm.ClientCut {
a := object.NewAttribute()
a.SetKey("s3-client-cut")
a.SetValue("true")
attrs = append(attrs, *a)
}
prm.Object.SetAttributes(attrs...)
if prm.Payload != nil {
all, err := io.ReadAll(prm.Payload)
if err != nil {
return oid.ID{}, err
}
prm.Object.SetPayload(all)
prm.Object.SetPayloadSize(uint64(len(all)))
var hash checksum.Checksum
checksum.Calculate(&hash, checksum.SHA256, all)
prm.Object.SetPayloadChecksum(hash)
}
cnrID, _ := prm.Object.ContainerID()
objID, _ := prm.Object.ID()
owner := t.requestOwner(prm.BearerToken)
if !t.isAllowed(cnrID, owner, acl.OpObjectPut, objID) {
return oid.ID{}, ErrAccessDenied
}
addr := newAddress(cnrID, objID)
t.objects[addr.EncodeToString()] = prm.Object
return objID, nil
}
type resObjectSearchMock struct {
res []oid.ID
}
func (r *resObjectSearchMock) Read(buf []oid.ID) (int, error) {
for i := range buf {
if i > len(r.res)-1 {
return len(r.res), io.EOF
}
buf[i] = r.res[i]
}
r.res = r.res[len(buf):]
return len(buf), nil
}
func (r *resObjectSearchMock) Iterate(f func(oid.ID) bool) error {
for _, id := range r.res {
if f(id) {
return nil
}
}
return nil
}
func (r *resObjectSearchMock) Close() {}
func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (ResObjectSearch, error) {
if !t.isAllowed(prm.Container, t.requestOwner(prm.BearerToken), acl.OpObjectSearch, oid.ID{}) {
return nil, ErrAccessDenied
}
cidStr := prm.Container.EncodeToString()
var res []oid.ID
if len(prm.Filters) == 1 { // match root filter
for k, v := range t.objects {
if strings.Contains(k, cidStr) {
id, _ := v.ID()
res = append(res, id)
}
}
return &resObjectSearchMock{res: res}, nil
}
filter := prm.Filters[1]
if len(prm.Filters) != 2 ||
filter.Operation() != object.MatchCommonPrefix && filter.Operation() != object.MatchStringEqual {
return nil, fmt.Errorf("usupported filters")
}
for k, v := range t.objects {
if strings.Contains(k, cidStr) && isMatched(v.Attributes(), filter) {
id, _ := v.ID()
res = append(res, id)
}
}
return &resObjectSearchMock{res: res}, nil
}
func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
return nil, nil
}
func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
for _, attr := range attributes {
if attr.Key() == filter.Header() {
switch filter.Operation() {
case object.MatchStringEqual:
return attr.Value() == filter.Value()
case object.MatchCommonPrefix:
return strings.HasPrefix(attr.Value(), filter.Value())
default:
return false
}
}
}
return false
}
func (t *TestFrostFS) GetEpochDurations(context.Context) (*utils.EpochDurations, error) {
return &utils.EpochDurations{
CurrentEpoch: 10,
MsPerBlock: 1000,
BlockPerEpoch: 100,
}, nil
}
func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) bool {
keysToCheck := []string{
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID),
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, oid.ID{}),
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, objID),
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, oid.ID{}),
}
for _, key := range keysToCheck {
if t.accessList[key] {
return true
}
}
return false
}

437
internal/handler/handler.go Normal file
View file

@ -0,0 +1,437 @@
package handler
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/panjf2000/ants/v2"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
type Config interface {
DefaultTimestamp() bool
ZipCompression() bool
ClientCut() bool
IndexPageEnabled() bool
IndexPageTemplate() string
BufferMaxSizeForPut() uint64
NamespaceHeader() string
}
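// A minimal static implementation sketch of Config (hypothetical, e.g. for tests or manual wiring):
//
//	type fixedConfig struct{}
//
//	func (fixedConfig) DefaultTimestamp() bool      { return false }
//	func (fixedConfig) ZipCompression() bool        { return false }
//	func (fixedConfig) ClientCut() bool             { return false }
//	func (fixedConfig) IndexPageEnabled() bool      { return true }
//	func (fixedConfig) IndexPageTemplate() string   { return "" }
//	func (fixedConfig) BufferMaxSizeForPut() uint64 { return 1 << 20 }
//	func (fixedConfig) NamespaceHeader() string     { return "X-Frostfs-Namespace" }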
// PrmContainer groups parameters of FrostFS.Container operation.
type PrmContainer struct {
// Container identifier.
ContainerID cid.ID
}
// PrmAuth groups authentication parameters for the FrostFS operation.
type PrmAuth struct {
// Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
BearerToken *bearer.Token
}
// PrmObjectHead groups parameters of FrostFS.HeadObject operation.
type PrmObjectHead struct {
// Authentication parameters.
PrmAuth
// Address to read the object header from.
Address oid.Address
}
// PrmObjectGet groups parameters of FrostFS.GetObject operation.
type PrmObjectGet struct {
// Authentication parameters.
PrmAuth
// Address to read the object header from.
Address oid.Address
}
// PrmObjectRange groups parameters of FrostFS.RangeObject operation.
type PrmObjectRange struct {
// Authentication parameters.
PrmAuth
// Address to read the object header from.
Address oid.Address
// Offset-length range of the object payload to be read.
PayloadRange [2]uint64
}
// Object represents FrostFS object.
type Object struct {
// Object header (doesn't contain payload).
Header object.Object
// Object payload part encapsulated in io.Reader primitive.
// Returns ErrAccessDenied on read access violation.
Payload io.ReadCloser
}
// PrmObjectCreate groups parameters of FrostFS.CreateObject operation.
type PrmObjectCreate struct {
// Authentication parameters.
PrmAuth
Object *object.Object
// Object payload encapsulated in io.Reader primitive.
Payload io.Reader
// Enables client side object preparing.
ClientCut bool
// Disables using Tillich-Zémor hash for payload.
WithoutHomomorphicHash bool
// Sets max buffer size to read payload.
BufferMaxSize uint64
}
// PrmObjectSearch groups parameters of FrostFS.SearchObjects operation.
type PrmObjectSearch struct {
// Authentication parameters.
PrmAuth
// Container to select the objects from.
Container cid.ID
Filters object.SearchFilters
}
type PrmInitMultiObjectReader struct {
// payload range
Off, Ln uint64
Addr oid.Address
Bearer *bearer.Token
}
type ResObjectSearch interface {
Read(buf []oid.ID) (int, error)
Iterate(f func(oid.ID) bool) error
Close()
}
var (
// ErrAccessDenied is returned from FrostFS in case of access violation.
ErrAccessDenied = errors.New("access denied")
// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
ErrGatewayTimeout = errors.New("gateway timeout")
)
// FrostFS represents virtual connection to FrostFS network.
type FrostFS interface {
Container(context.Context, PrmContainer) (*container.Container, error)
HeadObject(context.Context, PrmObjectHead) (*object.Object, error)
GetObject(context.Context, PrmObjectGet) (*Object, error)
RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
InitMultiObjectReader(ctx context.Context, p PrmInitMultiObjectReader) (io.Reader, error)
utils.EpochInfoFetcher
}
type ContainerResolver interface {
Resolve(ctx context.Context, name string) (*cid.ID, error)
}
type Handler struct {
log *zap.Logger
frostfs FrostFS
ownerID *user.ID
config Config
containerResolver ContainerResolver
tree *tree.Tree
cache *cache.BucketCache
workerPool *ants.Pool
}
type AppParams struct {
Logger *zap.Logger
FrostFS FrostFS
Owner *user.ID
Resolver ContainerResolver
Cache *cache.BucketCache
}
func New(params *AppParams, config Config, tree *tree.Tree, workerPool *ants.Pool) *Handler {
return &Handler{
log: params.Logger,
frostfs: params.FrostFS,
ownerID: params.Owner,
config: config,
containerResolver: params.Resolver,
tree: tree,
cache: params.Cache,
workerPool: workerPool,
}
}
// byNativeAddress is a wrapper for a function (e.g. request.headObject, request.receiveFile) that
// prepares the request and the object address for it.
func (h *Handler) byNativeAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
idCnr, _ := c.UserValue("cid").(string)
idObj, _ := url.PathUnescape(c.UserValue("oid").(string))
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", idCnr), zap.String("oid", idObj))
bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
objID := new(oid.ID)
if err = objID.DecodeString(idObj); err != nil {
log.Error(logs.WrongObjectID, zap.Error(err))
response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
return
}
addr := newAddress(bktInfo.CID, *objID)
f(ctx, *h.newRequest(c, log), addr)
}
// byS3Path is a wrapper for a function (e.g. request.headObject, request.receiveFile) that
// resolves the object address from an S3-like path <bucket name>/<object key>.
func (h *Handler) byS3Path(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
bucketname := c.UserValue("cid").(string)
key := c.UserValue("oid").(string)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("bucketname", bucketname), zap.String("key", key))
unescapedKey, err := url.QueryUnescape(key)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
if err != nil {
if errors.Is(err, tree.ErrNodeAccessDenied) {
response.Error(c, "Access Denied", fasthttp.StatusForbidden)
} else {
response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
log.Error(logs.GetLatestObjectVersion, zap.Error(err))
}
return
}
if foundOid.DeleteMarker {
log.Error(logs.ObjectWasDeleted)
response.Error(c, "object deleted", fasthttp.StatusNotFound)
return
}
addr := newAddress(bktInfo.CID, foundOid.OID)
f(ctx, *h.newRequest(c, log), addr)
}
// byAttribute is a wrapper similar to byNativeAddress.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
scid, _ := c.UserValue("cid").(string)
key, _ := c.UserValue("attr_key").(string)
val, _ := c.UserValue("attr_val").(string)
ctx := utils.GetContextFromRequest(c)
log := utils.GetReqLogOrDefault(ctx, h.log)
key, err := url.QueryUnescape(key)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Error(err))
response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
return
}
val, err = url.QueryUnescape(val)
if err != nil {
log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Error(err))
response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
return
}
log = log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
res, err := h.search(ctx, bktInfo.CID, key, val, object.MatchStringEqual)
if err != nil {
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
return
}
defer res.Close()
buf := make([]oid.ID, 1)
n, err := res.Read(buf)
if n == 0 {
if errors.Is(err, io.EOF) {
log.Error(logs.ObjectNotFound, zap.Error(err))
response.Error(c, "object not found", fasthttp.StatusNotFound)
return
}
log.Error(logs.ReadObjectListFailed, zap.Error(err))
response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
return
}
var addrObj oid.Address
addrObj.SetContainer(bktInfo.CID)
addrObj.SetObject(buf[0])
f(ctx, *h.newRequest(c, log), addrObj)
}
// resolveContainer decodes the container ID; if it's not a valid container ID,
// it then tries to resolve the name using the provided resolver.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
cnrID := new(cid.ID)
err := cnrID.DecodeString(containerID)
if err != nil {
cnrID, err = h.containerResolver.Resolve(ctx, containerID)
if err != nil && strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
}
}
return cnrID, err
}
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
ns, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil {
return bktInfo, nil
}
cnrID, err := h.resolveContainer(ctx, containerName)
if err != nil {
return nil, err
}
bktInfo, err := h.readContainer(ctx, *cnrID)
if err != nil {
return nil, err
}
if err = h.cache.Put(bktInfo); err != nil {
log.Warn(logs.CouldntPutBucketIntoCache,
zap.String("bucket name", bktInfo.Name),
zap.Stringer("bucket cid", bktInfo.CID),
zap.Error(err))
}
return bktInfo, nil
}
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
prm := PrmContainer{ContainerID: cnrID}
res, err := h.frostfs.Container(ctx, prm)
if err != nil {
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
}
bktInfo := &data.BucketInfo{
CID: cnrID,
Name: cnrID.EncodeToString(),
}
if domain := container.ReadDomain(*res); domain.Name() != "" {
bktInfo.Name = domain.Name()
bktInfo.Zone = domain.Zone()
}
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
return bktInfo, err
}
func (h *Handler) browseIndex(c *fasthttp.RequestCtx) {
if !h.config.IndexPageEnabled() {
c.SetStatusCode(fasthttp.StatusNotFound)
return
}
cidURLParam := c.UserValue("cid").(string)
oidURLParam := c.UserValue("oid").(string)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
unescapedKey, err := url.QueryUnescape(oidURLParam)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
listFunc := h.getDirObjectsS3
isNativeList := false
err = h.tree.CheckSettingsNodeExist(ctx, bktInfo)
if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) {
// tree probe failed, try to use native
listFunc = h.getDirObjectsNative
isNativeList = true
} else {
logAndSendBucketError(c, log, err)
return
}
}
h.browseObjects(c, browseParams{
bucketInfo: bktInfo,
prefix: unescapedKey,
listObjects: listFunc,
isNative: isNativeList,
})
}

View file

@ -0,0 +1,580 @@
//go:build gofuzz
// +build gofuzz
package handler
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"mime/multipart"
"net/http"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
"github.com/valyala/fasthttp"
)
const (
fuzzSuccessExitCode = 0
fuzzFailExitCode = -1
)
func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) {
array := make([]string, count)
var err error
for i := 0; i < count; i++ {
err = tp.Reset()
if err != nil {
return nil, err
}
array[i], err = tp.GetString()
if err != nil {
return nil, err
}
}
return array, nil
}
func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) {
array := make([]bool, count)
var err error
for i := 0; i < count; i++ {
err = tp.Reset()
if err != nil {
return nil, err
}
array[i], err = tp.GetBool()
if err != nil {
return nil, err
}
}
return array, nil
}
func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) {
count, err := tp.GetInt()
if err != nil {
return -1, err
}
count = count % max
if count < 0 {
count += max
}
return count, nil
}
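// Worked example of the wrap-around above (illustrative, not from the original
// source): Go's % operator keeps the sign of the dividend, so for count = -7
// and max = 5 we get -7 % 5 == -2, and adding max yields 3, i.e. a
// deterministic value in [0, max).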
func generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error {
count, err := tp.GetInt()
if err != nil {
return err
}
count = count % len(params)
if count < 0 {
count += len(params)
}
for i := 0; i < count; i++ {
position, err := tp.GetInt()
if err != nil {
return err
}
position = position % len(params)
if position < 0 {
position += len(params)
}
v, err := tp.GetString()
if err != nil {
return err
}
r.Header.Set(params[position], v)
}
return nil
}
func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) {
rnd, err := tp.GetBool()
if err != nil {
return "", err
}
if rnd {
initValue, err = tp.GetString()
if err != nil {
return "", err
}
}
return initValue, nil
}
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
hc, err := prepareHandlerContext()
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
aclList := []acl.Basic{
acl.Private,
acl.PrivateExtended,
acl.PublicRO,
acl.PublicROExtended,
acl.PublicRW,
acl.PublicRWExtended,
acl.PublicAppend,
acl.PublicAppendExtended,
}
pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList))
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
basicACL := aclList[pos]
strings, err := prepareStrings(tp, 6)
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
bktName := strings[0]
objFileName := strings[1]
valAttr := strings[2]
keyAttr := strings[3]
if len(bktName) == 0 {
return nil, nil, cid.ID{}, nil, "", "", "", errors.New("empty bucket name")
}
cnrID, cnr, err := hc.prepareContainer(bktName, basicACL)
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
hc.frostfs.SetContainer(cnrID, cnr)
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", cnrID.EncodeToString())
attributes := map[string]string{
object.AttributeFileName: objFileName,
keyAttr: valAttr,
}
var buff bytes.Buffer
w := multipart.NewWriter(&buff)
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
content, err := tp.GetBytes()
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
if err = w.Close(); err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
r.Request.SetBodyStream(&buff, buff.Len())
r.Request.Header.Set("Content-Type", w.FormDataContentType())
r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)
err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]})
if err != nil {
return nil, nil, cid.ID{}, nil, "", "", "", err
}
hc.Handler().Upload(r)
if r.Response.StatusCode() != http.StatusOK {
return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload")
}
return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil
}
func InitFuzzUpload() {
}
func DoFuzzUpload(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, _, _, _, _, _, _, err = upload(tp)
if err != nil {
return fuzzFailExitCode
}
return fuzzSuccessExitCode
}
func FuzzUpload(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzUpload(data)
})
}
func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) {
var putRes putResponse
defer func() {
if r := recover(); r != nil {
panic(resp)
}
}()
data := resp.Response.Body()
err := json.Unmarshal(data, &putRes)
if err != nil {
return nil, err
}
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
attr := object.NewAttribute()
attr.SetKey(object.AttributeFilePath)
filename, err = maybeFillRandom(tp, filename)
if err != nil {
return nil, err
}
attr.SetValue(filename)
obj.SetAttributes(append(obj.Attributes(), *attr)...)
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
cid := cnrID.EncodeToString()
cid, err = maybeFillRandom(tp, cid)
if err != nil {
return nil, err
}
oid := putRes.ObjectID
oid, err = maybeFillRandom(tp, oid)
if err != nil {
return nil, err
}
r.SetUserValue("cid", cid)
r.SetUserValue("oid", oid)
rnd, err := tp.GetBool()
if err != nil {
return nil, err
}
if rnd {
r.SetUserValue("download", "true")
}
return r, nil
}
func InitFuzzGet() {
}
func DoFuzzGet(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
if err != nil {
return fuzzFailExitCode
}
r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
if err != nil {
return fuzzFailExitCode
}
hc.Handler().DownloadByAddressOrBucketName(r)
return fuzzSuccessExitCode
}
func FuzzGet(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzGet(data)
})
}
func InitFuzzHead() {
}
func DoFuzzHead(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
if err != nil {
return fuzzFailExitCode
}
r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
if err != nil {
return fuzzFailExitCode
}
hc.Handler().HeadByAddressOrBucketName(r)
return fuzzSuccessExitCode
}
func FuzzHead(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzHead(data)
})
}
func InitFuzzDownloadByAttribute() {
}
func DoFuzzDownloadByAttribute(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
if err != nil {
return fuzzFailExitCode
}
cid := cnrID.EncodeToString()
cid, err = maybeFillRandom(tp, cid)
if err != nil {
return fuzzFailExitCode
}
attrKey, err = maybeFillRandom(tp, attrKey)
if err != nil {
return fuzzFailExitCode
}
attrVal, err = maybeFillRandom(tp, attrVal)
if err != nil {
return fuzzFailExitCode
}
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", cid)
r.SetUserValue("attr_key", attrKey)
r.SetUserValue("attr_val", attrVal)
hc.Handler().DownloadByAttribute(r)
return fuzzSuccessExitCode
}
func FuzzDownloadByAttribute(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDownloadByAttribute(data)
})
}
func InitFuzzHeadByAttribute() {
}
func DoFuzzHeadByAttribute(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
if err != nil {
return fuzzFailExitCode
}
cid := cnrID.EncodeToString()
cid, err = maybeFillRandom(tp, cid)
if err != nil {
return fuzzFailExitCode
}
attrKey, err = maybeFillRandom(tp, attrKey)
if err != nil {
return fuzzFailExitCode
}
attrVal, err = maybeFillRandom(tp, attrVal)
if err != nil {
return fuzzFailExitCode
}
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", cid)
r.SetUserValue("attr_key", attrKey)
r.SetUserValue("attr_val", attrVal)
hc.Handler().HeadByAttribute(r)
return fuzzSuccessExitCode
}
func FuzzHeadByAttribute(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzHeadByAttribute(data)
})
}
func InitFuzzDownloadZipped() {
}
func DoFuzzDownloadZipped(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
ctx, hc, cnrID, _, _, _, _, err := upload(tp)
if err != nil {
return fuzzFailExitCode
}
cid := cnrID.EncodeToString()
cid, err = maybeFillRandom(tp, cid)
if err != nil {
return fuzzFailExitCode
}
prefix := ""
prefix, err = maybeFillRandom(tp, prefix)
if err != nil {
return fuzzFailExitCode
}
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", cid)
r.SetUserValue("prefix", prefix)
hc.Handler().DownloadZipped(r)
return fuzzSuccessExitCode
}
func FuzzDownloadZipped(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDownloadZipped(data)
})
}
func InitFuzzStoreBearerTokenAppCtx() {
}
func DoFuzzStoreBearerTokenAppCtx(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := go_fuzz_utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
prefix := ""
prefix, err = maybeFillRandom(tp, prefix)
if err != nil {
return fuzzFailExitCode
}
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
strings, err := prepareStrings(tp, 3)
if err != nil {
return fuzzFailExitCode
}
rand, err := prepareBools(tp, 2)
if err != nil {
return fuzzFailExitCode
}
if rand[0] {
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
} else if rand[1] {
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
} else {
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
}
tokens.StoreBearerTokenAppCtx(ctx, r)
return fuzzSuccessExitCode
}
func FuzzStoreBearerTokenAppCtx(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzStoreBearerTokenAppCtx(data)
})
}

View file

@ -0,0 +1,318 @@
package handler
import (
"archive/zip"
"bytes"
"context"
"encoding/json"
"io"
"mime/multipart"
"net/http"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/panjf2000/ants/v2"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
type treeClientMock struct {
}
func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
return nil, nil
}
func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
return nil, nil
}
type configMock struct {
}
func (c *configMock) DefaultTimestamp() bool {
return false
}
func (c *configMock) ZipCompression() bool {
return false
}
func (c *configMock) IndexPageEnabled() bool {
return false
}
func (c *configMock) IndexPageTemplate() string {
return ""
}
func (c *configMock) IndexPageNativeTemplate() string {
return ""
}
func (c *configMock) ClientCut() bool {
return false
}
func (c *configMock) BufferMaxSizeForPut() uint64 {
return 0
}
func (c *configMock) NamespaceHeader() string {
return ""
}
type handlerContext struct {
key *keys.PrivateKey
owner user.ID
h *Handler
frostfs *TestFrostFS
tree *treeClientMock
cfg *configMock
}
func (hc *handlerContext) Handler() *Handler {
return hc.h
}
func prepareHandlerContext() (*handlerContext, error) {
logger, err := zap.NewDevelopment()
if err != nil {
return nil, err
}
key, err := keys.NewPrivateKey()
if err != nil {
return nil, err
}
var owner user.ID
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
testFrostFS := NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
return testFrostFS.ContainerID(name)
})
params := &AppParams{
Logger: logger,
FrostFS: testFrostFS,
Owner: &owner,
Resolver: testResolver,
Cache: cache.NewBucketCache(&cache.Config{
Size: 1,
Lifetime: 1,
Logger: logger,
}),
}
treeMock := &treeClientMock{}
cfgMock := &configMock{}
workerPool, err := ants.NewPool(1000)
if err != nil {
return nil, err
}
handler := New(params, cfgMock, tree.NewTree(treeMock), workerPool)
return &handlerContext{
key: key,
owner: owner,
h: handler,
frostfs: testFrostFS,
tree: treeMock,
cfg: cfgMock,
}, nil
}
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
var pp netmap.PlacementPolicy
err := pp.DecodeString("REP 1")
if err != nil {
return cid.ID{}, nil, err
}
var cnr container.Container
cnr.Init()
cnr.SetOwner(hc.owner)
cnr.SetPlacementPolicy(pp)
cnr.SetBasicACL(basicACL)
var domain container.Domain
domain.SetName(name)
container.WriteDomain(&cnr, domain)
container.SetName(&cnr, name)
container.SetCreationTime(&cnr, time.Now())
cnrID := cidtest.ID()
for op := acl.OpObjectGet; op < acl.OpObjectHash; op++ {
hc.frostfs.AllowUserOperation(cnrID, hc.owner, op, oid.ID{})
if basicACL.IsOpAllowed(op, acl.RoleOthers) {
hc.frostfs.AllowUserOperation(cnrID, user.ID{}, op, oid.ID{})
}
}
return cnrID, &cnr, nil
}
func TestBasic(t *testing.T) {
hc, err := prepareHandlerContext()
require.NoError(t, err)
bktName := "bucket"
cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
require.NoError(t, err)
hc.frostfs.SetContainer(cnrID, cnr)
ctx := context.Background()
ctx = middleware.SetNamespace(ctx, "")
content := "hello"
r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
require.NoError(t, err)
hc.Handler().Upload(r)
require.Equal(t, r.Response.StatusCode(), http.StatusOK)
var putRes putResponse
err = json.Unmarshal(r.Response.Body(), &putRes)
require.NoError(t, err)
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
attr := object.NewAttribute()
attr.SetKey(object.AttributeFilePath)
attr.SetValue(objFileName)
obj.SetAttributes(append(obj.Attributes(), *attr)...)
t.Run("get", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
hc.Handler().DownloadByAddressOrBucketName(r)
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head", func(t *testing.T) {
r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
hc.Handler().HeadByAddressOrBucketName(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("get by attribute", func(t *testing.T) {
r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
hc.Handler().DownloadByAttribute(r)
require.Equal(t, content, string(r.Response.Body()))
})
t.Run("head by attribute", func(t *testing.T) {
r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
hc.Handler().HeadByAttribute(r)
require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
})
t.Run("zip", func(t *testing.T) {
r = prepareGetZipped(ctx, bktName, "")
hc.Handler().DownloadZipped(r)
readerAt := bytes.NewReader(r.Response.Body())
zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
require.NoError(t, err)
require.Len(t, zipReader.File, 1)
require.Equal(t, objFileName, zipReader.File[0].Name)
f, err := zipReader.File[0].Open()
require.NoError(t, err)
defer func() {
inErr := f.Close()
require.NoError(t, inErr)
}()
data, err := io.ReadAll(f)
require.NoError(t, err)
require.Equal(t, content, string(data))
})
}
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", bucket)
return r, fillMultipartBody(r, content)
}
func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", bucket)
r.SetUserValue("oid", objID)
return r
}
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", bucket)
r.SetUserValue("attr_key", attrKey)
r.SetUserValue("attr_val", attrVal)
return r
}
func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.RequestCtx {
r := new(fasthttp.RequestCtx)
utils.SetContextToRequest(ctx, r)
r.SetUserValue("cid", bucket)
r.SetUserValue("prefix", prefix)
return r
}
const (
keyAttr = "User-Attribute"
valAttr = "user value"
objFileName = "newFile.txt"
)
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {
attributes := map[string]string{
object.AttributeFileName: objFileName,
keyAttr: valAttr,
}
var buff bytes.Buffer
w := multipart.NewWriter(&buff)
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
if err != nil {
return err
}
if _, err = io.Copy(fw, bytes.NewBufferString(content)); err != nil {
return err
}
if err = w.Close(); err != nil {
return err
}
r.Request.SetBodyStream(&buff, buff.Len())
r.Request.Header.Set("Content-Type", w.FormDataContentType())
r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)
return nil
}

119
internal/handler/head.go Normal file
View file

@ -0,0 +1,119 @@
package handler
import (
"context"
"io"
"net/http"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
// max bytes needed to detect content type according to http.DetectContentType docs.
const sizeToDetectType = 512
const (
hdrObjectID = "X-Object-Id"
hdrOwnerID = "X-Owner-Id"
hdrContainerID = "X-Container-Id"
)
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
var start = time.Now()
btoken := bearerToken(ctx)
prm := PrmObjectHead{
PrmAuth: PrmAuth{
BearerToken: btoken,
},
Address: objectAddress,
}
obj, err := h.frostfs.HeadObject(ctx, prm)
if err != nil {
req.handleFrostFSErr(err, start)
return
}
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
var contentType string
for _, attr := range obj.Attributes() {
key := attr.Key()
val := attr.Value()
if !isValidToken(key) || !isValidValue(val) {
continue
}
key = utils.BackwardTransformIfSystem(key)
req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
switch key {
case object.AttributeTimestamp:
value, err := strconv.ParseInt(val, 10, 64)
if err != nil {
req.log.Info(logs.CouldntParseCreationDate,
zap.String("key", key),
zap.String("val", val),
zap.Error(err))
continue
}
req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
case object.AttributeContentType:
contentType = val
}
}
idsToResponse(&req.Response, obj)
if len(contentType) == 0 {
contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
prmRange := PrmObjectRange{
PrmAuth: PrmAuth{
BearerToken: btoken,
},
Address: objectAddress,
PayloadRange: [2]uint64{0, sz},
}
return h.frostfs.RangeObject(ctx, prmRange)
})
if err != nil && err != io.EOF {
req.handleFrostFSErr(err, start)
return
}
}
req.SetContentType(contentType)
}
func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
resp.Header.Set(hdrObjectID, objID.String())
resp.Header.Set(hdrOwnerID, obj.OwnerID().String())
resp.Header.Set(hdrContainerID, cnrID.String())
}
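// For illustration, a successful head response then carries headers like the
// following (the values below are hypothetical, not from the original source):
//
// X-Object-Id: 2sVkoBMnkw...
// X-Owner-Id: NbUgTSFvPm...
// X-Container-Id: BJeErH9MWm...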
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
oidParam, _ := c.UserValue("oid").(string)
var id oid.ID
err := id.DecodeString(oidParam)
if err != nil {
h.byS3Path(c, h.headObject)
} else {
h.byNativeAddress(c, h.headObject)
}
}
// HeadByAttribute handles attribute-based head requests.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
h.byAttribute(c, h.headObject)
}

View file

@ -0,0 +1,26 @@
package middleware
import (
"context"
"fmt"
)
// keyWrapper is wrapper for context keys.
type keyWrapper string
const nsKey = keyWrapper("namespace")
// GetNamespace extracts the namespace from the context.
func GetNamespace(ctx context.Context) (string, error) {
ns, ok := ctx.Value(nsKey).(string)
if !ok {
return "", fmt.Errorf("couldn't get namespace from context")
}
return ns, nil
}
// SetNamespace sets namespace in the context.
func SetNamespace(ctx context.Context, ns string) context.Context {
return context.WithValue(ctx, nsKey, ns)
}
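// A minimal usage sketch (illustrative, not part of the original source):
//
// ctx := SetNamespace(context.Background(), "ns1")
// ns, err := GetNamespace(ctx) // ns == "ns1", err == nil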

View file

@ -0,0 +1,78 @@
package handler
import (
"errors"
"io"
"strconv"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"go.uber.org/zap"
)
const attributeMultipartObjectSize = "S3-Multipart-Object-Size"
// MultipartFile provides the standard io.ReadCloser interface and also allows
// one to get the file name; it's used for multipart uploads.
type MultipartFile interface {
io.ReadCloser
FileName() string
}
func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
// To have a custom buffer (3 MiB), a custom multipart reader is used.
// The default reader uses 4 KiB chunks, which slows down upload speed by up to 400%:
// https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32
reader := multipart.NewReader(r, boundary)
for {
part, err := reader.NextPart()
if err != nil {
return nil, err
}
name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName)
continue
}
filename := part.FileName()
// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
continue
}
return part, nil
}
}
// getPayload returns the initial payload if the object is not multipart; otherwise it composes a new reader from the parts' data.
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
cid, ok := p.obj.Header.ContainerID()
if !ok {
return nil, 0, errors.New("no container id set")
}
oid, ok := p.obj.Header.ID()
if !ok {
return nil, 0, errors.New("no object id set")
}
size, err := strconv.ParseUint(p.strSize, 10, 64)
if err != nil {
return nil, 0, err
}
ctx := p.req.RequestCtx
params := PrmInitMultiObjectReader{
Addr: newAddress(cid, oid),
Bearer: bearerToken(ctx),
}
payload, err := h.frostfs.InitMultiObjectReader(ctx, params)
if err != nil {
return nil, 0, err
}
return io.NopCloser(payload), size, nil
}

View file

@ -0,0 +1,423 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
/*
Package multipart implements MIME multipart parsing, as defined in RFC
2046.
The implementation is sufficient for HTTP (RFC 2388) and the multipart
bodies generated by popular browsers.
*/
package multipart
import (
"bufio"
"bytes"
"fmt"
"io"
"mime"
"mime/quotedprintable"
"net/textproto"
"strings"
)
var emptyParams = make(map[string]string)
// This constant needs to be at least 76 for this package to work correctly.
// This is because \r\n--separator_of_len_70- would fill the buffer and it
// wouldn't be safe to consume a single byte from it.
// This constant is different from the constant in stdlib. The standard value is 4096.
const peekBufferSize = 3 << 20
// A Part represents a single part in a multipart body.
type Part struct {
// The headers of the body, if any, with the keys canonicalized
// in the same fashion that the Go http.Request headers are.
// For example, "foo-bar" changes case to "Foo-Bar"
Header textproto.MIMEHeader
mr *Reader
disposition string
dispositionParams map[string]string
// r is either a reader directly reading from mr, or it's a
// wrapper around such a reader, decoding the
// Content-Transfer-Encoding
r io.Reader
n int // known data bytes waiting in mr.bufReader
total int64 // total data bytes read already
err error // error to return when n == 0
readErr error // read error observed from mr.bufReader
}
// FormName returns the name parameter if p has a Content-Disposition
// of type "form-data". Otherwise it returns the empty string.
func (p *Part) FormName() string {
// See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
// of Content-Disposition value format.
if p.dispositionParams == nil {
p.parseContentDisposition()
}
if p.disposition != "form-data" {
return ""
}
return p.dispositionParams["name"]
}
// FileName returns the filename parameter of the Part's
// Content-Disposition header.
func (p *Part) FileName() string {
if p.dispositionParams == nil {
p.parseContentDisposition()
}
return p.dispositionParams["filename"]
}
func (p *Part) parseContentDisposition() {
v := p.Header.Get("Content-Disposition")
var err error
p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
if err != nil {
p.dispositionParams = emptyParams
}
}
// NewReader creates a new multipart Reader reading from r using the
// given MIME boundary.
//
// The boundary is usually obtained from the "boundary" parameter of
// the message's "Content-Type" header. Use mime.ParseMediaType to
// parse such headers.
func NewReader(r io.Reader, boundary string) *Reader {
b := []byte("\r\n--" + boundary + "--")
return &Reader{
bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
nl: b[:2],
nlDashBoundary: b[:len(b)-2],
dashBoundaryDash: b[2:],
dashBoundary: b[2 : len(b)-2],
}
}
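// For illustration (not part of the original source): with boundary "xyz",
// b is "\r\n--xyz--" and the derived slices are
// nl = "\r\n", nlDashBoundary = "\r\n--xyz",
// dashBoundaryDash = "--xyz--", dashBoundary = "--xyz".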
// stickyErrorReader is an io.Reader which never calls Read on its
// underlying Reader once an error has been seen. (the io.Reader
// interface's contract promises nothing about the return values of
// Read calls after an error, yet this package does do multiple Reads
// after error).
type stickyErrorReader struct {
r io.Reader
err error
}
func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
if r.err != nil {
return 0, r.err
}
n, r.err = r.r.Read(p)
return n, r.err
}
func newPart(mr *Reader, rawPart bool) (*Part, error) {
bp := &Part{
Header: make(map[string][]string),
mr: mr,
}
if err := bp.populateHeaders(); err != nil {
return nil, err
}
bp.r = partReader{bp}
// rawPart is used to switch between Part.NextPart and Part.NextRawPart.
if !rawPart {
const cte = "Content-Transfer-Encoding"
if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") {
bp.Header.Del(cte)
bp.r = quotedprintable.NewReader(bp.r)
}
}
return bp, nil
}
func (p *Part) populateHeaders() error {
r := textproto.NewReader(p.mr.bufReader)
header, err := r.ReadMIMEHeader()
if err == nil {
p.Header = header
}
return err
}
// Read reads the body of a part, after its headers and before the
// next part (if any) begins.
func (p *Part) Read(d []byte) (n int, err error) {
return p.r.Read(d)
}
// partReader implements io.Reader by reading raw bytes directly from the
// wrapped *Part, without doing any Transfer-Encoding decoding.
type partReader struct {
p *Part
}
func (pr partReader) Read(d []byte) (int, error) {
p := pr.p
br := p.mr.bufReader
// Read into buffer until we identify some data to return,
// or we find a reason to stop (boundary or read error).
for p.n == 0 && p.err == nil {
peek, _ := br.Peek(br.Buffered())
p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
if p.n == 0 && p.err == nil {
// Force buffered I/O to read more into buffer.
_, p.readErr = br.Peek(len(peek) + 1)
if p.readErr == io.EOF {
p.readErr = io.ErrUnexpectedEOF
}
}
}
// Read out from "data to return" part of buffer.
if p.n == 0 {
return 0, p.err
}
n := len(d)
if n > p.n {
n = p.n
}
n, _ = br.Read(d[:n])
p.total += int64(n)
p.n -= n
if p.n == 0 {
return n, p.err
}
return n, nil
}
// scanUntilBoundary scans buf to identify how much of it can be safely
// returned as part of the Part body.
// dashBoundary is "--boundary".
// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
// The comments below (and the name) assume "\n--boundary", but either is accepted.
// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
// readErr is the read error, if any, that followed reading the bytes in buf.
// scanUntilBoundary returns the number of data bytes from buf that can be
// returned as part of the Part body and also the error to return (if any)
// once those data bytes are done.
func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
if total == 0 {
// At beginning of body, allow dashBoundary.
if bytes.HasPrefix(buf, dashBoundary) {
switch matchAfterPrefix(buf, dashBoundary, readErr) {
case -1:
return len(dashBoundary), nil
case 0:
return 0, nil
case +1:
return 0, io.EOF
}
}
if bytes.HasPrefix(dashBoundary, buf) {
return 0, readErr
}
}
// Search for "\n--boundary".
if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
case -1:
return i + len(nlDashBoundary), nil
case 0:
return i, nil
case +1:
return i, io.EOF
}
}
if bytes.HasPrefix(nlDashBoundary, buf) {
return 0, readErr
}
// Otherwise, anything up to the final \n is not part of the boundary
// and so must be part of the body.
// Also if the section from the final \n onward is not a prefix of the boundary,
// it too must be part of the body.
i := bytes.LastIndexByte(buf, nlDashBoundary[0])
if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
return i, nil
}
return len(buf), readErr
}
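// A worked example (illustrative, not from the original source): with
// nlDashBoundary = "\r\n--xyz", buf = "hello\r\n--xyz" and readErr == nil,
// bytes.Index finds the boundary candidate at i = 5; matchAfterPrefix sees
// len(buf[i:]) == len(nlDashBoundary) with no read error and reports 0, so
// scanUntilBoundary returns (5, nil): only "hello" may be handed out until
// more input shows whether the boundary match is real.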
// matchAfterPrefix checks whether buf should be considered to match the boundary.
// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
//
// matchAfterPrefix returns +1 if the buffer does match the boundary,
// meaning the prefix is followed by a dash, space, tab, cr, nl, or end of input.
// It returns -1 if the buffer definitely does NOT match the boundary,
// meaning the prefix is followed by some other character.
// For example, "--foobar" does not match "--foo".
// It returns 0 if more input needs to be read to make the decision,
// meaning that len(buf) == len(prefix) and readErr == nil.
func matchAfterPrefix(buf, prefix []byte, readErr error) int {
if len(buf) == len(prefix) {
if readErr != nil {
return +1
}
return 0
}
c := buf[len(prefix)]
if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' {
return +1
}
return -1
}
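// Illustrative cases for prefix "--foo" (not part of the original source):
// buf = "--foo-" gives +1 ('-' follows the prefix, so it matches the boundary);
// buf = "--foobar" gives -1 ('b' follows, so "--foobar" does not match "--foo");
// buf = "--foo" with readErr == nil gives 0 (more input is needed to decide).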
func (p *Part) Close() error {
_, _ = io.Copy(io.Discard, p)
return nil
}
// Reader is an iterator over parts in a MIME multipart body.
// Reader's underlying parser consumes its input as needed. Seeking
// isn't supported.
type Reader struct {
bufReader *bufio.Reader
currentPart *Part
partsRead int
nl []byte // "\r\n" or "\n" (set after seeing first boundary line)
nlDashBoundary []byte // nl + "--boundary"
dashBoundaryDash []byte // "--boundary--"
dashBoundary []byte // "--boundary"
}
// NextPart returns the next part in the multipart or an error.
// When there are no more parts, the error io.EOF is returned.
//
// As a special case, if the "Content-Transfer-Encoding" header
// has a value of "quoted-printable", that header is instead
// hidden and the body is transparently decoded during Read calls.
func (r *Reader) NextPart() (*Part, error) {
return r.nextPart(false)
}
// NextRawPart returns the next part in the multipart or an error.
// When there are no more parts, the error io.EOF is returned.
//
// Unlike NextPart, it does not have special handling for
// "Content-Transfer-Encoding: quoted-printable".
func (r *Reader) NextRawPart() (*Part, error) {
return r.nextPart(true)
}
func (r *Reader) nextPart(rawPart bool) (*Part, error) {
if r.currentPart != nil {
r.currentPart.Close()
}
if string(r.dashBoundary) == "--" {
return nil, fmt.Errorf("multipart: boundary is empty")
}
expectNewPart := false
for {
line, err := r.bufReader.ReadSlice('\n')
if err == io.EOF && r.isFinalBoundary(line) {
// If the buffer ends in "--boundary--" without the
// trailing "\r\n", ReadSlice will return an error
// (since it's missing the '\n'), but this is a valid
// multipart EOF so we need to return io.EOF instead of
// a fmt-wrapped one.
return nil, io.EOF
}
if err != nil {
return nil, fmt.Errorf("multipart: NextPart: %v", err)
}
if r.isBoundaryDelimiterLine(line) {
r.partsRead++
bp, err := newPart(r, rawPart)
if err != nil {
return nil, err
}
r.currentPart = bp
return bp, nil
}
if r.isFinalBoundary(line) {
// Expected EOF
return nil, io.EOF
}
if expectNewPart {
return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
}
if r.partsRead == 0 {
// skip line
continue
}
// Consume the "\n" or "\r\n" separator between the
// body of the previous part and the boundary line we
// now expect will follow. (either a new part or the
// end boundary)
if bytes.Equal(line, r.nl) {
expectNewPart = true
continue
}
return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
}
}
// isFinalBoundary reports whether line is the final boundary line
// indicating that all parts are over.
// It matches `^--boundary--[ \t]*(\r\n)?$`.
func (r *Reader) isFinalBoundary(line []byte) bool {
if !bytes.HasPrefix(line, r.dashBoundaryDash) {
return false
}
rest := line[len(r.dashBoundaryDash):]
rest = skipLWSPChar(rest)
return len(rest) == 0 || bytes.Equal(rest, r.nl)
}
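// Illustrative cases for boundary "xyz", i.e. dashBoundaryDash = "--xyz--"
// (not part of the original source):
// "--xyz--\r\n" -> true; "--xyz-- \t\r\n" -> true (trailing LWSP is skipped);
// "--xyz\r\n" -> false (an ordinary delimiter line, not the final one).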
func (r *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) {
// https://tools.ietf.org/html/rfc2046#section-5.1
// The boundary delimiter line is then defined as a line
// consisting entirely of two hyphen characters ("-",
// decimal value 45) followed by the boundary parameter
// value from the Content-Type header field, optional linear
// whitespace, and a terminating CRLF.
if !bytes.HasPrefix(line, r.dashBoundary) {
return false
}
rest := line[len(r.dashBoundary):]
rest = skipLWSPChar(rest)
// On the first part, check whether our lines end in \n instead of \r\n
// and switch into that mode if so. This is a violation of the spec,
// but occurs in practice.
if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
r.nl = r.nl[1:]
r.nlDashBoundary = r.nlDashBoundary[1:]
}
return bytes.Equal(rest, r.nl)
}
// skipLWSPChar returns b with leading spaces and tabs removed.
// RFC 822 defines:
//
// LWSP-char = SPACE / HTAB
func skipLWSPChar(b []byte) []byte {
for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') {
b = b[1:]
}
return b
}

View file

@ -0,0 +1,164 @@
//go:build !integration
package handler
import (
"crypto/rand"
"fmt"
"io"
"mime/multipart"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func generateRandomFile(size int64) (string, error) {
file, err := os.CreateTemp("", "data")
if err != nil {
return "", err
}
_, err = io.CopyN(file, rand.Reader, size)
if err != nil {
return "", err
}
return file.Name(), file.Close()
}
func BenchmarkAll(b *testing.B) {
fileName, err := generateRandomFile(1024 * 1024 * 256)
require.NoError(b, err)
fmt.Println(fileName)
defer os.Remove(fileName)
b.Run("bare", func(b *testing.B) {
for i := 0; i < b.N; i++ {
err := bareRead(fileName)
require.NoError(b, err)
}
})
b.Run("default", func(b *testing.B) {
for i := 0; i < b.N; i++ {
err := defaultMultipart(fileName)
require.NoError(b, err)
}
})
b.Run("custom", func(b *testing.B) {
for i := 0; i < b.N; i++ {
err := customMultipart(fileName)
require.NoError(b, err)
}
})
}
func defaultMultipart(filename string) error {
r, bound := multipartFile(filename)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFileDefault(logger, r, bound)
if err != nil {
return err
}
_, err = io.Copy(io.Discard, file)
return err
}
func TestName(t *testing.T) {
fileName, err := generateRandomFile(1024 * 1024 * 256)
require.NoError(t, err)
fmt.Println(fileName)
defer os.Remove(fileName)
err = defaultMultipart(fileName)
require.NoError(t, err)
}
func customMultipart(filename string) error {
r, bound := multipartFile(filename)
logger, err := zap.NewProduction()
if err != nil {
return err
}
file, err := fetchMultipartFile(logger, r, bound)
if err != nil {
return err
}
_, err = io.Copy(io.Discard, file)
return err
}
func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
reader := multipart.NewReader(r, boundary)
for {
part, err := reader.NextPart()
if err != nil {
return nil, err
}
name := part.FormName()
if name == "" {
l.Debug(logs.IgnorePartEmptyFormName)
continue
}
filename := part.FileName()
// ignore multipart/form-data values
if filename == "" {
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
continue
}
return part, nil
}
}
func bareRead(filename string) error {
r, _ := multipartFile(filename)
_, err := io.Copy(io.Discard, r)
return err
}
func multipartFile(filename string) (*io.PipeReader, string) {
r, w := io.Pipe()
m := multipart.NewWriter(w)
go func() {
defer w.Close()
defer m.Close()
part, err := m.CreateFormFile("myFile", "foo.txt")
if err != nil {
fmt.Println(err)
return
}
file, err := os.Open(filename)
if err != nil {
fmt.Println(err)
return
}
defer file.Close()
if _, err = io.Copy(part, file); err != nil {
fmt.Println(err)
return
}
}()
return r, m.Boundary()
}

184
internal/handler/reader.go Normal file
View file

@ -0,0 +1,184 @@
package handler
import (
"bytes"
"context"
"io"
"net/http"
"path"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
type readCloser struct {
io.Reader
io.Closer
}
// readContentType initializes an io.Reader limited to maxSize bytes and detects
// the Content-Type from it. It returns r's error directly and also returns the processed data.
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
if maxSize > sizeToDetectType {
maxSize = sizeToDetectType
}
buf := make([]byte, maxSize) // maybe sync-pool the slice?
r, err := rInit(maxSize)
if err != nil {
return "", nil, err
}
n, err := r.Read(buf)
if err != nil && err != io.EOF {
return "", nil, err
}
buf = buf[:n]
return http.DetectContentType(buf), buf, err // to not lose io.EOF
}
type getMultiobjectBodyParams struct {
obj *Object
req request
strSize string
}
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
var (
shouldDownload = req.QueryArgs().GetBool("download")
start = time.Now()
filename string
filepath string
contentType string
)
prm := PrmObjectGet{
PrmAuth: PrmAuth{
BearerToken: bearerToken(ctx),
},
Address: objAddress,
}
rObj, err := h.frostfs.GetObject(ctx, prm)
if err != nil {
req.handleFrostFSErr(err, start)
return
}
// We can't close the reader in this function: it is handed to SetBodyStream
// at the end, and fasthttp closes body streams that implement io.Closer
// (see the note below).
req.setIDs(rObj.Header)
payload := rObj.Payload
payloadSize := rObj.Header.PayloadSize()
for _, attr := range rObj.Header.Attributes() {
key := attr.Key()
val := attr.Value()
if !isValidToken(key) || !isValidValue(val) {
continue
}
key = utils.BackwardTransformIfSystem(key)
req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
switch key {
case object.AttributeFileName:
filename = val
case object.AttributeTimestamp:
if err = req.setTimestamp(val); err != nil {
req.log.Error(logs.CouldntParseCreationDate,
zap.String("val", val),
zap.Error(err))
}
case object.AttributeContentType:
contentType = val
case object.AttributeFilePath:
filepath = val
case attributeMultipartObjectSize:
payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
obj: rObj,
req: req,
strSize: val,
})
if err != nil {
req.handleFrostFSErr(err, start)
return
}
}
}
if filename == "" {
filename = filepath
}
req.setDisposition(shouldDownload, filename)
req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
if len(contentType) == 0 {
// determine the Content-Type from the payload head
var payloadHead []byte
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
return payload, nil
})
if err != nil && err != io.EOF {
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
return
}
// reset payload reader since a part of the data has been read
var headReader io.Reader = bytes.NewReader(payloadHead)
if err != io.EOF { // otherwise, we've already read full payload
headReader = io.MultiReader(headReader, payload)
}
// note: we could do with io.Reader, but SetBodyStream below closes body stream
// if it implements io.Closer and that's useful for us.
payload = readCloser{headReader, payload}
}
req.SetContentType(contentType)
req.Response.SetBodyStream(payload, int(payloadSize))
}
func (r *request) setIDs(obj object.Object) {
objID, _ := obj.ID()
cnrID, _ := obj.ContainerID()
r.Response.Header.Set(hdrObjectID, objID.String())
r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
r.Response.Header.Set(hdrContainerID, cnrID.String())
}
func (r *request) setDisposition(shouldDownload bool, filename string) {
const (
inlineDisposition = "inline"
attachmentDisposition = "attachment"
)
dis := inlineDisposition
if shouldDownload {
dis = attachmentDisposition
}
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
}
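// Illustrative outcomes (not part of the original source): with
// shouldDownload == false and filename "dir/foo.txt" the header becomes
// "Content-Disposition: inline; filename=foo.txt" (path.Base strips the
// directory); with shouldDownload == true it becomes
// "Content-Disposition: attachment; filename=foo.txt".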
func (r *request) setTimestamp(timestamp string) error {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return err
}
r.Response.Header.Set(fasthttp.HeaderLastModified,
time.Unix(value, 0).UTC().Format(http.TimeFormat))
return nil
}

View file

@ -0,0 +1,48 @@
//go:build !integration
package handler
import (
"io"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestDetector(t *testing.T) {
txtContentType := "text/plain; charset=utf-8"
sb := strings.Builder{}
for i := 0; i < 10; i++ {
sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")
}
for _, tc := range []struct {
Name string
ContentType string
Expected string
}{
{
Name: "less than 512b",
ContentType: txtContentType,
Expected: sb.String()[:256],
},
{
Name: "more than 512b",
ContentType: txtContentType,
Expected: sb.String(),
},
} {
t.Run(tc.Name, func(t *testing.T) {
contentType, data, err := readContentType(uint64(len(tc.Expected)),
func(uint64) (io.Reader, error) {
return strings.NewReader(tc.Expected), nil
},
)
require.NoError(t, err)
require.Equal(t, tc.ContentType, contentType)
require.True(t, strings.HasPrefix(tc.Expected, string(data)))
})
}
}

194
internal/handler/upload.go Normal file
View file

@ -0,0 +1,194 @@
package handler
import (
"context"
"encoding/json"
"io"
"net/http"
"strconv"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
const (
jsonHeader = "application/json; charset=UTF-8"
drainBufSize = 4096
)
type putResponse struct {
ObjectID string `json:"object_id"`
ContainerID string `json:"container_id"`
}
func newPutResponse(addr oid.Address) *putResponse {
return &putResponse{
ObjectID: addr.Object().EncodeToString(),
ContainerID: addr.Container().EncodeToString(),
}
}
func (pr *putResponse) encode(w io.Writer) error {
enc := json.NewEncoder(w)
enc.SetIndent("", "\t")
return enc.Encode(pr)
}
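// For illustration, a successful upload response encoded above looks like
// this (both IDs below are hypothetical):
//
// {
// "object_id": "2sVkoBMnkw...",
// "container_id": "BJeErH9MWm..."
// }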
// Upload handles multipart upload request.
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
var (
file MultipartFile
idObj oid.ID
addr oid.Address
)
scid, _ := c.UserValue("cid").(string)
bodyStream := c.RequestBodyStream()
drainBuf := make([]byte, drainBufSize)
ctx := utils.GetContextFromRequest(c)
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
log := reqLog.With(zap.String("cid", scid))
bktInfo, err := h.getBucketInfo(ctx, scid, log)
if err != nil {
logAndSendBucketError(c, log, err)
return
}
defer func() {
// If the temporary reader can be closed - let's close it.
if file == nil {
return
}
err := file.Close()
log.Debug(
logs.CloseTemporaryMultipartFormFile,
zap.Stringer("address", addr),
zap.String("filename", file.FileName()),
zap.Error(err),
)
}()
boundary := string(c.Request.Header.MultipartFormBoundary())
if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
return
}
filtered, err := filterHeaders(log, &c.Request.Header)
if err != nil {
log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
response.Error(c, err.Error(), fasthttp.StatusBadRequest)
return
}
now := time.Now()
if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
} else {
now = parsed
}
}
if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
return
}
attributes := make([]object.Attribute, 0, len(filtered))
// prepares attributes from filtered headers
for key, val := range filtered {
attribute := object.NewAttribute()
attribute.SetKey(key)
attribute.SetValue(val)
attributes = append(attributes, *attribute)
}
// sets FileName attribute if it wasn't set from header
if _, ok := filtered[object.AttributeFileName]; !ok {
filename := object.NewAttribute()
filename.SetKey(object.AttributeFileName)
filename.SetValue(file.FileName())
attributes = append(attributes, *filename)
}
// sets Timestamp attribute if it wasn't set from header and enabled by settings
if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
timestamp := object.NewAttribute()
timestamp.SetKey(object.AttributeTimestamp)
timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
attributes = append(attributes, *timestamp)
}
obj := object.New()
obj.SetContainerID(bktInfo.CID)
obj.SetOwnerID(*h.ownerID)
obj.SetAttributes(attributes...)
prm := PrmObjectCreate{
PrmAuth: PrmAuth{
BearerToken: h.fetchBearerToken(ctx),
},
Object: obj,
Payload: file,
ClientCut: h.config.ClientCut(),
WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
BufferMaxSize: h.config.BufferMaxSizeForPut(),
}
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
h.handlePutFrostFSErr(c, err, log)
return
}
addr.SetObject(idObj)
addr.SetContainer(bktInfo.CID)
// Try to return the response, otherwise, if something went wrong, throw an error.
if err = newPutResponse(addr).encode(c); err != nil {
log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
return
}
// A multipart body can contain more than one part, and we ignore everything
// after the first one at the moment. Also, when dealing with chunked encoding
// the last zero-length chunk might be left unread (because the multipart
// reader only cares about its boundary and doesn't look further) and
// it would be (erroneously) interpreted as the start of the next
// pipelined header. Thus we need to drain the body buffer.
for {
_, err = bodyStream.Read(drainBuf)
if err != nil { // includes io.EOF and io.ErrUnexpectedEOF: the body is drained
break
}
}
// Report status code and content type.
c.Response.SetStatusCode(fasthttp.StatusOK)
c.Response.Header.SetContentType(jsonHeader)
}
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
response.Error(r, msg, statusCode)
}
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
return tkn
}
return nil
}

102
internal/handler/utils.go Normal file
View file

@ -0,0 +1,102 @@
package handler
import (
"context"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
type request struct {
*fasthttp.RequestCtx
log *zap.Logger
}
func (r *request) handleFrostFSErr(err error, start time.Time) {
logFields := []zap.Field{
zap.Stringer("elapsed", time.Since(start)),
zap.Error(err),
}
statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
logFields = append(logFields, additionalFields...)
r.log.Error(logs.CouldNotReceiveObject, logFields...)
response.Error(r.RequestCtx, msg, statusCode)
}
func bearerToken(ctx context.Context) *bearer.Token {
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
return tkn
}
return nil
}
func isDir(name string) bool {
return strings.HasSuffix(name, "/")
}
func isObjectID(s string) bool {
var objID oid.ID
return objID.DecodeString(s) == nil
}
func isContainerRoot(key string) bool {
return key == ""
}
func loadAttributes(attrs []object.Attribute) map[string]string {
result := make(map[string]string)
for _, attr := range attrs {
result[attr.Key()] = attr.Value()
}
return result
}
func isValidToken(s string) bool {
for _, c := range s {
if c <= ' ' || c > 127 {
return false
}
if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) {
return false
}
}
return true
}
func isValidValue(s string) bool {
for _, c := range s {
// The HTTP specification technically allows for more, but we don't want to escape things.
if c < ' ' || c > 127 || c == '"' {
return false
}
}
return true
}
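// Illustrative cases (not part of the original source): isValidToken("FileName")
// is true; isValidToken("bad key") is false (contains a space);
// isValidValue("café") is false (a non-ASCII rune exceeds 127).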
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
log.Error(logs.CouldntGetBucket, zap.Error(err))
if client.IsErrContainerNotFound(err) {
response.Error(c, "Not Found", fasthttp.StatusNotFound)
return
}
response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
var addr oid.Address
addr.SetContainer(cnr)
addr.SetObject(obj)
return addr
}

90
internal/logs/logs.go Normal file
View file

@ -0,0 +1,90 @@
package logs
const (
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
FailedToCreateWorkerPool = "failed to create worker pool" // Fatal in ../../app.go
FailedToReadIndexPageTemplate = "failed to read index page template" // Error in ../../app.go
SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
StartingApplication = "starting application" // Info in ../../app.go
StartingServer = "starting server" // Info in ../../app.go
ListenAndServe = "listen and serve" // Fatal in ../../app.go
ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
Request = "request" // Info in ../../app.go
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
FailedToAddServer = "failed to add server" // Warn in ../../app.go
AddServer = "add server" // Info in ../../app.go
NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
UsingCredentials = "using credentials" // Info in ../../settings.go
FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
FailedToSubmitTaskToPool = "failed to submit task to pool" // Error in ../handler/browse.go
FailedToHeadObject = "failed to head object" // Error in ../handler/browse.go
FailedToIterateOverResponse = "failed to iterate over search response" // Error in ../handler/browse.go
InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
FailedToUnescapeQuery = "failed to unescape query"
ServerReconnecting = "reconnecting server..."
ServerReconnectedSuccessfully = "server reconnected successfully"
ServerReconnectFailed = "failed to reconnect server"
WarnDuplicateAddress = "duplicate address"
MultinetDialSuccess = "multinet dial successful"
MultinetDialFail = "multinet dial failed"
FailedToLoadMultinetConfig = "failed to load multinet config"
MultinetConfigWontBeUpdated = "multinet config won't be updated"
)
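These constants are consumed wherever the gateway emits a structured log entry, keeping every message string greppable in one place. A typical call site (this exact line appears in metrics/service.go further down) looks like:

	ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))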

68
internal/net/config.go Normal file

@ -0,0 +1,68 @@
package net
import (
"errors"
"fmt"
"net/netip"
"slices"
"time"
"git.frostfs.info/TrueCloudLab/multinet"
)
var errEmptySourceIPList = errors.New("empty source IP list")
type Subnet struct {
Prefix string
SourceIPs []string
}
type Config struct {
Enabled bool
Subnets []Subnet
Balancer string
Restrict bool
FallbackDelay time.Duration
EventHandler multinet.EventHandler
}
func (c Config) toMultinetConfig() (multinet.Config, error) {
var subnets []multinet.Subnet
for _, s := range c.Subnets {
var ms multinet.Subnet
p, err := netip.ParsePrefix(s.Prefix)
if err != nil {
return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
}
ms.Prefix = p
for _, ip := range s.SourceIPs {
addr, err := netip.ParseAddr(ip)
if err != nil {
return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
}
ms.SourceIPs = append(ms.SourceIPs, addr)
}
if len(ms.SourceIPs) == 0 {
return multinet.Config{}, errEmptySourceIPList
}
subnets = append(subnets, ms)
}
return multinet.Config{
Subnets: subnets,
Balancer: multinet.BalancerType(c.Balancer),
Restrict: c.Restrict,
FallbackDelay: c.FallbackDelay,
Dialer: newDefaultDialer(),
EventHandler: c.EventHandler,
}, nil
}
func (c Config) equals(other Config) bool {
return c.Enabled == other.Enabled &&
slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
}) &&
c.Balancer == other.Balancer &&
c.Restrict == other.Restrict &&
c.FallbackDelay == other.FallbackDelay
}
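A minimal sketch, not part of the diff, of filling this Config and converting it from inside package net; the prefix, source IP, and balancer name are hypothetical values, and fmt/time imports are assumed:

	func exampleToMultinetConfig() {
		cfg := Config{
			Enabled: true,
			Subnets: []Subnet{{
				Prefix:    "10.0.0.0/24",
				SourceIPs: []string{"10.0.0.1"},
			}},
			Balancer:      "roundrobin", // assumed value; must name a valid multinet.BalancerType
			FallbackDelay: 350 * time.Millisecond,
		}
		if _, err := cfg.toMultinetConfig(); err != nil {
			// a bad prefix, a bad source address, or an empty source IP list lands here
			fmt.Println(err)
		}
	}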


@ -0,0 +1,54 @@
// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package net
import (
"net/url"
"strings"
)
// parseDialTarget returns the network and address to pass to dialer.
func parseDialTarget(target string) (string, string) {
net := "tcp"
m1 := strings.Index(target, ":")
m2 := strings.Index(target, ":/")
// handle unix:addr which will fail with url.Parse
if m1 >= 0 && m2 < 0 {
if n := target[0:m1]; n == "unix" {
return n, target[m1+1:]
}
}
if m2 >= 0 {
t, err := url.Parse(target)
if err != nil {
return net, target
}
scheme := t.Scheme
addr := t.Path
if scheme == "unix" {
if addr == "" {
addr = t.Host
}
return scheme, addr
}
}
return net, target
}
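A few concrete inputs and the (network, address) pairs the logic above yields:

	parseDialTarget("127.0.0.1:8080")        // ("tcp", "127.0.0.1:8080")
	parseDialTarget("unix:relative.sock")    // ("unix", "relative.sock")
	parseDialTarget("unix:/var/run/fs.sock") // ("unix", "/var/run/fs.sock")
	parseDialTarget("dns:///example.com:80") // ("tcp", "dns:///example.com:80")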

36
internal/net/dialer.go Normal file

@ -0,0 +1,36 @@
package net
import (
"net"
"syscall"
"time"
"golang.org/x/sys/unix"
)
func newDefaultDialer() net.Dialer {
// From `grpc.WithContextDialer` comment:
//
// Note: All supported releases of Go (as of December 2023) override the OS
// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
// with OS defaults for keepalive time and interval, use a net.Dialer that sets
// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
// option to true from the Control field. For a concrete example of how to do
// this, see internal.NetDialerWithTCPKeepalive().
//
// https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
//
// From `internal.NetDialerWithTCPKeepalive` comment:
//
// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
// appropriate Go version becomes less than our least supported Go version, we
// should look into using the new API to make things more straightforward.
return net.Dialer{
KeepAlive: time.Duration(-1),
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
})
},
}
}


@ -0,0 +1,69 @@
package net
import (
"context"
"net"
"sync"
"git.frostfs.info/TrueCloudLab/multinet"
)
type DialerSource struct {
guard sync.RWMutex
c Config
md multinet.Dialer
}
func NewDialerSource(c Config) (*DialerSource, error) {
result := &DialerSource{}
if err := result.build(c); err != nil {
return nil, err
}
return result, nil
}
func (s *DialerSource) build(c Config) error {
if c.Enabled {
mc, err := c.toMultinetConfig()
if err != nil {
return err
}
md, err := multinet.NewDialer(mc)
if err != nil {
return err
}
s.md = md
s.c = c
return nil
}
s.md = nil
s.c = c
return nil
}
// GrpcContextDialer returns grpc.WithContextDialer func.
// Returns nil if multinet disabled.
func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
s.guard.RLock()
defer s.guard.RUnlock()
if s.c.Enabled {
return func(ctx context.Context, address string) (net.Conn, error) {
network, address := parseDialTarget(address)
return s.md.DialContext(ctx, network, address)
}
}
return nil
}
func (s *DialerSource) Update(c Config) error {
s.guard.Lock()
defer s.guard.Unlock()
if s.c.equals(c) {
return nil
}
return s.build(c)
}
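A hedged sketch of plugging the source into a gRPC client; the endpoint is hypothetical, and google.golang.org/grpc plus its credentials/insecure package are assumed to be imported:

	ds, err := NewDialerSource(cfg)
	if err != nil {
		return err
	}
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if dialer := ds.GrpcContextDialer(); dialer != nil {
		// set only when multinet is enabled; otherwise gRPC keeps its default dialer
		opts = append(opts, grpc.WithContextDialer(dialer))
	}
	conn, err := grpc.NewClient("storage.node.example:8802", opts...)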


@ -0,0 +1,28 @@
package net
import (
"net"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"go.uber.org/zap"
)
type LogEventHandler struct {
logger *zap.Logger
}
func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err error) {
sourceIPString := "undefined"
if sourceIP != nil {
sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
}
if err == nil {
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
} else {
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
}
}
func NewLogEventHandler(logger *zap.Logger) LogEventHandler {
return LogEventHandler{logger: logger}
}


@ -0,0 +1,245 @@
package frostfs
import (
"context"
"errors"
"fmt"
"io"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// FrostFS represents a virtual connection to the FrostFS network.
// It is used to provide an interface to dependent packages
// which work with FrostFS.
type FrostFS struct {
pool *pool.Pool
}
// NewFrostFS creates new FrostFS using provided pool.Pool.
func NewFrostFS(p *pool.Pool) *FrostFS {
return &FrostFS{
pool: p,
}
}
// Container implements frostfs.FrostFS interface method.
func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
prm := pool.PrmContainerGet{
ContainerID: containerPrm.ContainerID,
}
res, err := x.pool.GetContainer(ctx, prm)
if err != nil {
return nil, handleObjectError("read container via connection pool", err)
}
return &res, nil
}
// CreateObject implements frostfs.FrostFS interface method.
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
var prmPut pool.PrmObjectPut
prmPut.SetHeader(*prm.Object)
prmPut.SetPayload(prm.Payload)
prmPut.SetClientCut(prm.ClientCut)
prmPut.WithoutHomomorphicHash(prm.WithoutHomomorphicHash)
prmPut.SetBufferMaxSize(prm.BufferMaxSize)
if prm.BearerToken != nil {
prmPut.UseBearer(*prm.BearerToken)
}
idObj, err := x.pool.PutObject(ctx, prmPut)
if err != nil {
return oid.ID{}, handleObjectError("save object via connection pool", err)
}
return idObj.ObjectID, nil
}
// payloadReader wraps io.ReadCloser and transforms Read errors related to
// access violations into handler.ErrAccessDenied (via handleObjectError).
type payloadReader struct {
io.ReadCloser
}
func (x payloadReader) Read(p []byte) (int, error) {
n, err := x.ReadCloser.Read(p)
if err != nil && errors.Is(err, io.EOF) {
return n, err
}
return n, handleObjectError("read payload", err)
}
// HeadObject implements frostfs.FrostFS interface method.
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
var prmHead pool.PrmObjectHead
prmHead.SetAddress(prm.Address)
if prm.BearerToken != nil {
prmHead.UseBearer(*prm.BearerToken)
}
res, err := x.pool.HeadObject(ctx, prmHead)
if err != nil {
return nil, handleObjectError("read object header via connection pool", err)
}
return &res, nil
}
// GetObject implements frostfs.FrostFS interface method.
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
var prmGet pool.PrmObjectGet
prmGet.SetAddress(prm.Address)
if prm.BearerToken != nil {
prmGet.UseBearer(*prm.BearerToken)
}
res, err := x.pool.GetObject(ctx, prmGet)
if err != nil {
return nil, handleObjectError("init full object reading via connection pool", err)
}
return &handler.Object{
Header: res.Header,
Payload: res.Payload,
}, nil
}
// RangeObject implements frostfs.FrostFS interface method.
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
var prmRange pool.PrmObjectRange
prmRange.SetAddress(prm.Address)
prmRange.SetOffset(prm.PayloadRange[0])
prmRange.SetLength(prm.PayloadRange[1])
if prm.BearerToken != nil {
prmRange.UseBearer(*prm.BearerToken)
}
res, err := x.pool.ObjectRange(ctx, prmRange)
if err != nil {
return nil, handleObjectError("init payload range reading via connection pool", err)
}
return payloadReader{&res}, nil
}
// SearchObjects implements frostfs.FrostFS interface method.
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
var prmSearch pool.PrmObjectSearch
prmSearch.SetContainerID(prm.Container)
prmSearch.SetFilters(prm.Filters)
if prm.BearerToken != nil {
prmSearch.UseBearer(*prm.BearerToken)
}
res, err := x.pool.SearchObjects(ctx, prmSearch)
if err != nil {
return nil, handleObjectError("init object search via connection pool", err)
}
return &res, nil
}
// GetEpochDurations implements frostfs.FrostFS interface method.
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return nil, err
}
res := &utils.EpochDurations{
CurrentEpoch: networkInfo.CurrentEpoch(),
MsPerBlock: networkInfo.MsPerBlock(),
BlockPerEpoch: networkInfo.EpochDuration(),
}
if res.BlockPerEpoch == 0 {
return nil, fmt.Errorf("EpochDuration is empty")
}
return res, nil
}
// ResolverFrostFS represents a virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type ResolverFrostFS struct {
pool *pool.Pool
}
// NewResolverFrostFS creates new ResolverFrostFS using provided pool.Pool.
func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
return &ResolverFrostFS{pool: p}
}
// SystemDNS implements resolver.FrostFS interface method.
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
networkInfo, err := x.pool.NetworkInfo(ctx)
if err != nil {
return "", handleObjectError("read network info via client", err)
}
domain := networkInfo.RawNetworkParameter("SystemDNS")
if domain == nil {
return "", errors.New("system DNS parameter not found or empty")
}
return string(domain), nil
}
func handleObjectError(msg string, err error) error {
if err == nil {
return nil
}
if reason, ok := IsErrObjectAccessDenied(err); ok {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
}
if IsTimeoutError(err) {
return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
}
return fmt.Errorf("%s: %w", msg, err)
}
func UnwrapErr(err error) error {
unwrappedErr := errors.Unwrap(err)
for unwrappedErr != nil {
err = unwrappedErr
unwrappedErr = errors.Unwrap(err)
}
return err
}
func IsErrObjectAccessDenied(err error) (string, bool) {
err = UnwrapErr(err)
switch err := err.(type) {
default:
return "", false
case *apistatus.ObjectAccessDenied:
return err.Reason(), true
}
}
func IsTimeoutError(err error) bool {
if strings.Contains(err.Error(), "timeout") ||
errors.Is(err, context.DeadlineExceeded) {
return true
}
return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded
}
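Because handleObjectError wraps with %w, callers can branch on the sentinel errors instead of matching strings; a sketch:

	err := handleObjectError("read object header via connection pool", poolErr)
	switch {
	case errors.Is(err, handler.ErrAccessDenied):
		// respond 403; the reason is already appended to the message
	case errors.Is(err, handler.ErrGatewayTimeout):
		// respond 504
	default:
		// generic failure path
	}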


@ -0,0 +1,241 @@
package frostfs
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
// PartInfo is upload information about part.
type PartInfo struct {
Key string `json:"key"`
UploadID string `json:"uploadId"`
Number int `json:"number"`
OID oid.ID `json:"oid"`
Size uint64 `json:"size"`
ETag string `json:"etag"`
MD5 string `json:"md5"`
Created time.Time `json:"created"`
}
type GetFrostFSParams struct {
// payload range
Off, Ln uint64
Addr oid.Address
}
type PartObj struct {
OID oid.ID
Size uint64
}
type readerInitiator interface {
InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error)
}
// MultiObjectReader implements io.Reader of payloads of the object list stored in the FrostFS network.
type MultiObjectReader struct {
ctx context.Context
layer readerInitiator
startPartOffset uint64
endPartLength uint64
prm GetFrostFSParams
curIndex int
curReader io.ReadCloser
parts []PartObj
}
type MultiObjectReaderConfig struct {
Initiator readerInitiator
// Off is the offset within the complete object; Ln is the total number of bytes to read.
Off, Ln uint64
Addr oid.Address
Parts []PartObj
}
var (
errOffsetIsOutOfRange = errors.New("offset is out of payload range")
errLengthIsOutOfRange = errors.New("length is out of payload range")
errEmptyPartsList = errors.New("empty parts list")
errorZeroRangeLength = errors.New("zero range length")
)
func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
Address: p.Addr,
})
if err != nil {
return nil, fmt.Errorf("get combined object '%s': %w", p.Addr.Object().EncodeToString(), err)
}
var parts []*PartInfo
if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
}
objParts := make([]PartObj, len(parts))
for i, part := range parts {
objParts[i] = PartObj{
OID: part.OID,
Size: part.Size,
}
}
return NewMultiObjectReader(ctx, MultiObjectReaderConfig{
Initiator: x,
Off: p.Off,
Ln: p.Ln,
Parts: objParts,
Addr: p.Addr,
})
}
func NewMultiObjectReader(ctx context.Context, cfg MultiObjectReaderConfig) (*MultiObjectReader, error) {
if len(cfg.Parts) == 0 {
return nil, errEmptyPartsList
}
r := &MultiObjectReader{
ctx: ctx,
layer: cfg.Initiator,
prm: GetFrostFSParams{
Addr: cfg.Addr,
},
parts: cfg.Parts,
}
if cfg.Off+cfg.Ln == 0 {
return r, nil
}
if cfg.Off > 0 && cfg.Ln == 0 {
return nil, errorZeroRangeLength
}
startPartIndex, startPartOffset := findStartPart(cfg)
if startPartIndex == -1 {
return nil, errOffsetIsOutOfRange
}
r.startPartOffset = startPartOffset
endPartIndex, endPartLength := findEndPart(cfg)
if endPartIndex == -1 {
return nil, errLengthIsOutOfRange
}
r.endPartLength = endPartLength
r.parts = cfg.Parts[startPartIndex : endPartIndex+1]
return r, nil
}
func findStartPart(cfg MultiObjectReaderConfig) (index int, offset uint64) {
position := cfg.Off
for i, part := range cfg.Parts {
// Strict inequality when searching for start position to avoid reading zero length part.
if position < part.Size {
return i, position
}
position -= part.Size
}
return -1, 0
}
func findEndPart(cfg MultiObjectReaderConfig) (index int, length uint64) {
position := cfg.Off + cfg.Ln
for i, part := range cfg.Parts {
// Non-strict inequality when searching for end position to avoid out of payload range error.
if position <= part.Size {
return i, position
}
position -= part.Size
}
return -1, 0
}
func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
if x.curReader != nil {
n, err = x.curReader.Read(p)
if err != nil {
if closeErr := x.curReader.Close(); closeErr != nil {
return n, fmt.Errorf("%w (close err: %v)", err, closeErr)
}
}
if !errors.Is(err, io.EOF) {
return n, err
}
x.curIndex++
}
if x.curIndex == len(x.parts) {
return n, io.EOF
}
x.prm.Addr.SetObject(x.parts[x.curIndex].OID)
if x.curIndex == 0 {
x.prm.Off = x.startPartOffset
x.prm.Ln = x.parts[x.curIndex].Size - x.startPartOffset
}
if x.curIndex == len(x.parts)-1 {
x.prm.Ln = x.endPartLength - x.prm.Off
}
x.curReader, err = x.layer.InitFrostFSObjectPayloadReader(x.ctx, x.prm)
if err != nil {
return n, fmt.Errorf("init payload reader for the next part: %w", err)
}
x.prm.Off = 0
x.prm.Ln = 0
next, err := x.Read(p[n:])
return n + next, err
}
// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
var prmAuth handler.PrmAuth
if p.Off+p.Ln != 0 {
prm := handler.PrmObjectRange{
PrmAuth: prmAuth,
PayloadRange: [2]uint64{p.Off, p.Ln},
Address: p.Addr,
}
return x.RangeObject(ctx, prm)
}
prm := handler.PrmObjectGet{
PrmAuth: prmAuth,
Address: p.Addr,
}
res, err := x.GetObject(ctx, prm)
if err != nil {
return nil, err
}
return res.Payload, nil
}
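The two branches in use, as a sketch (addr is an assumed, already-filled oid.Address):

	full, err := x.InitFrostFSObjectPayloadReader(ctx, GetFrostFSParams{Addr: addr})                 // Off+Ln == 0: full GET
	part, err := x.InitFrostFSObjectPayloadReader(ctx, GetFrostFSParams{Addr: addr, Off: 5, Ln: 10}) // ranged read: 10 bytes from offset 5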


@ -0,0 +1,137 @@
package frostfs
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"testing"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"github.com/stretchr/testify/require"
)
type readerInitiatorMock struct {
parts map[oid.ID][]byte
}
func (r *readerInitiatorMock) InitFrostFSObjectPayloadReader(_ context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
partPayload, ok := r.parts[p.Addr.Object()]
if !ok {
return nil, errors.New("part not found")
}
if p.Off+p.Ln == 0 {
return io.NopCloser(bytes.NewReader(partPayload)), nil
}
if p.Off > uint64(len(partPayload)-1) {
return nil, fmt.Errorf("invalid offset: %d/%d", p.Off, len(partPayload))
}
if p.Off+p.Ln > uint64(len(partPayload)) {
return nil, fmt.Errorf("invalid range: %d-%d/%d", p.Off, p.Off+p.Ln, len(partPayload))
}
return io.NopCloser(bytes.NewReader(partPayload[p.Off : p.Off+p.Ln])), nil
}
func prepareDataReader() ([]byte, []PartObj, *readerInitiatorMock) {
mockInitReader := &readerInitiatorMock{
parts: map[oid.ID][]byte{
oidtest.ID(): []byte("first part 1"),
oidtest.ID(): []byte("second part 2"),
oidtest.ID(): []byte("third part 3"),
},
}
var fullPayload []byte
parts := make([]PartObj, 0, len(mockInitReader.parts))
for id, payload := range mockInitReader.parts {
parts = append(parts, PartObj{OID: id, Size: uint64(len(payload))})
fullPayload = append(fullPayload, payload...)
}
return fullPayload, parts, mockInitReader
}
func TestMultiReader(t *testing.T) {
ctx := context.Background()
fullPayload, parts, mockInitReader := prepareDataReader()
for _, tc := range []struct {
name string
off uint64
ln uint64
err error
}{
{
name: "simple read all",
},
{
name: "simple read with length",
ln: uint64(len(fullPayload)),
},
{
name: "middle of parts",
off: parts[0].Size + 2,
ln: 4,
},
{
name: "first and second",
off: parts[0].Size - 4,
ln: 8,
},
{
name: "first and third",
off: parts[0].Size - 4,
ln: parts[1].Size + 8,
},
{
name: "second part",
off: parts[0].Size,
ln: parts[1].Size,
},
{
name: "second and third",
off: parts[0].Size,
ln: parts[1].Size + parts[2].Size,
},
{
name: "offset out of range",
off: uint64(len(fullPayload) + 1),
ln: 1,
err: errOffsetIsOutOfRange,
},
{
name: "zero length",
off: parts[1].Size + 1,
ln: 0,
err: errorZeroRangeLength,
},
} {
t.Run(tc.name, func(t *testing.T) {
multiReader, err := NewMultiObjectReader(ctx, MultiObjectReaderConfig{
Initiator: mockInitReader,
Parts: parts,
Off: tc.off,
Ln: tc.ln,
})
require.ErrorIs(t, err, tc.err)
if tc.err == nil {
off := tc.off
ln := tc.ln
if off+ln == 0 {
ln = uint64(len(fullPayload))
}
data, err := io.ReadAll(multiReader)
require.NoError(t, err)
require.Equal(t, fullPayload[off:off+ln], data)
}
})
}
}


@ -0,0 +1,163 @@
package frostfs
import (
"context"
"errors"
"fmt"
"io"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
)
type GetNodeByPathResponseInfoWrapper struct {
response *grpcService.GetNodeByPathResponse_Info
}
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
return []uint64{n.response.GetNodeId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
return []uint64{n.response.GetParentId()}
}
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
return []uint64{n.response.GetTimestamp()}
}
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}
type PoolWrapper struct {
p *treepool.Pool
}
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
return &PoolWrapper{p: p}
}
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
poolPrm := treepool.GetNodesParams{
CID: prm.CnrID,
TreeID: prm.TreeID,
Path: prm.Path,
Meta: prm.Meta,
PathAttribute: tree.FileNameKey,
LatestOnly: prm.LatestOnly,
AllAttrs: prm.AllAttrs,
BearerToken: getBearer(ctx),
}
nodes, err := w.p.GetNodes(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
res := make([]tree.NodeResponse, len(nodes))
for i, info := range nodes {
res[i] = GetNodeByPathResponseInfoWrapper{info}
}
return res, nil
}
func getBearer(ctx context.Context) []byte {
token, err := tokens.LoadBearerToken(ctx)
if err != nil {
return nil
}
return token.Marshal()
}
func handleError(err error) error {
if err == nil {
return nil
}
if errors.Is(err, treepool.ErrNodeNotFound) {
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
}
if errors.Is(err, treepool.ErrNodeAccessDenied) {
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
}
return err
}
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
order := treepool.NoneOrder
if sort {
order = treepool.AscendingOrder
}
poolPrm := treepool.GetSubTreeParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
BearerToken: getBearer(ctx),
Order: order,
}
if len(rootID) == 1 && rootID[0] == 0 {
// The storage node interprets a 'nil' value as []uint64{0}.
// The gate sends 'nil' instead of []uint64{0} because that keeps
// compatibility with the previous tree service API, where a single
// uint64(0) value was dropped from the signature.
poolPrm.RootID = nil
}
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
if err != nil {
return nil, handleError(err)
}
var subtree []tree.NodeResponse
node, err := subTreeReader.Next()
for err == nil {
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
node, err = subTreeReader.Next()
}
if err != io.EOF {
return nil, handleError(err)
}
return subtree, nil
}
type GetSubTreeResponseBodyWrapper struct {
response *grpcService.GetSubTreeResponse_Body
}
func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
return n.response.GetNodeId()
}
func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
resp := n.response.GetParentId()
if resp == nil {
// the storage node sends nil, which should be interpreted as []uint64{0}
// due to protobuf compatibility; see the 'GetSubTree' function
return []uint64{0}
}
return resp
}
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
return n.response.GetTimestamp()
}
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
res := make([]tree.Meta, len(n.response.Meta))
for i, value := range n.response.Meta {
res[i] = value
}
return res
}


@ -0,0 +1,112 @@
{{$container := .Container}}
{{ $prefix := trimPrefix .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8"/>
<title>Index of {{.Protocol}}://{{$container}}
/{{if $prefix}}/{{$prefix}}/{{end}}</title>
<style>
.alert {
width: 80%;
box-sizing: border-box;
padding: 20px;
background-color: #f44336;
color: white;
margin-bottom: 15px;
}
table {
width: 80%;
border-collapse: collapse;
}
body {
background: #f2f2f2;
}
table, th, td {
border: 0 solid transparent;
}
th, td {
padding: 10px;
text-align: left;
}
th {
background-color: #c3bcbc;
}
h1 {
font-size: 1.5em;
}
tr:nth-child(even) {background-color: #ebe7e7;}
</style>
</head>
<body>
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
{{ if .HasErrors }}
<div class="alert">
Errors occurred while processing the request. Perhaps some objects are missing.
</div>
{{ end }}
<table>
<thead>
<tr>
<th>Filename</th>
<th>OID</th>
<th>Size</th>
<th>Created</th>
<th>Download</th>
</tr>
</thead>
<tbody>
{{ $trimmedPrefix := trimPrefix $prefix }}
{{if $trimmedPrefix }}
<tr>
<td>
⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
{{else}}
<tr>
<td>
⮐<a href="/get/{{$container}}/">..</a>
</td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
{{end}}
{{range .Objects}}
<tr>
<td>
{{if .IsDir}}
🗀
<a href="{{.GetURL}}/">
{{.FileName}}/
</a>
{{else}}
🗎
<a href="{{ .GetURL }}">
{{.FileName}}
</a>
{{end}}
</td>
<td>{{.OID}}</td>
<td>{{if not .IsDir}}{{ formatSize .Size }}{{end}}</td>
<td>{{ .Created }}</td>
<td>
{{ if .OID }}
<a href="{{ .GetURL }}?download=true">
Link
</a>
{{ end }}
</td>
</tr>
{{end}}
</tbody>
</table>
</body>
</html>


@ -0,0 +1,6 @@
package templates
import _ "embed"
//go:embed index.gotmpl
var DefaultIndexTemplate string

150
metrics/desc.go Normal file

@ -0,0 +1,150 @@
package metrics
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
var appMetricsDesc = map[string]map[string]Description{
poolSubsystem: {
overallErrorsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallErrorsMetric,
Help: "Total number of errors in pool",
},
overallNodeErrorsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallNodeErrorsMetric,
Help: "Total number of errors for connection in pool",
VariableLabels: []string{"node"},
},
overallNodeRequestsMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: overallNodeRequestsMetric,
Help: "Total number of requests to specific node in pool",
VariableLabels: []string{"node"},
},
currentErrorMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: currentErrorMetric,
Help: "Number of errors on current connections that will be reset after the threshold",
VariableLabels: []string{"node"},
},
avgRequestDurationMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: poolSubsystem,
Name: avgRequestDurationMetric,
Help: "Average request duration (in milliseconds) for specific method on node in pool",
VariableLabels: []string{"node", "method"},
},
},
stateSubsystem: {
healthMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: healthMetric,
Help: "Current HTTP gateway state",
},
versionInfoMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: stateSubsystem,
Name: versionInfoMetric,
Help: "Version of current FrostFS HTTP Gate instance",
VariableLabels: []string{"version"},
},
},
serverSubsystem: {
healthMetric: Description{
Type: dto.MetricType_GAUGE,
Namespace: namespace,
Subsystem: serverSubsystem,
Name: healthMetric,
Help: "HTTP Server endpoint health",
VariableLabels: []string{"endpoint"},
},
},
}
type Description struct {
Type dto.MetricType
Namespace string
Subsystem string
Name string
Help string
ConstantLabels prometheus.Labels
VariableLabels []string
}
func (d *Description) MarshalJSON() ([]byte, error) {
return json.Marshal(&struct {
Type string `json:"type"`
FQName string `json:"name"`
Help string `json:"help"`
ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
VariableLabels []string `json:"variable_labels,omitempty"`
}{
Type: d.Type.String(),
FQName: d.BuildFQName(),
Help: d.Help,
ConstantLabels: d.ConstantLabels,
VariableLabels: d.VariableLabels,
})
}
func (d *Description) BuildFQName() string {
return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
}
// DescribeAll returns descriptions for metrics.
func DescribeAll() []Description {
var list []Description
for _, m := range appMetricsDesc {
for _, description := range m {
list = append(list, description)
}
}
return list
}
func newOpts(description Description) prometheus.Opts {
return prometheus.Opts{
Namespace: description.Namespace,
Subsystem: description.Subsystem,
Name: description.Name,
Help: description.Help,
ConstLabels: description.ConstantLabels,
}
}
func mustNewGauge(description Description) prometheus.Gauge {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGauge(
prometheus.GaugeOpts(newOpts(description)),
)
}
func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
if description.Type != dto.MetricType_GAUGE {
panic("invalid metric type")
}
return prometheus.NewGaugeVec(
prometheus.GaugeOpts(newOpts(description)),
description.VariableLabels,
)
}

37
metrics/desc_test.go Normal file

@ -0,0 +1,37 @@
//go:build dump_metrics
package metrics
import (
"encoding/json"
"flag"
"os"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/stretchr/testify/require"
)
type mock struct{}
func (m mock) Statistic() pool.Statistic {
return pool.Statistic{}
}
var metricsPath = flag.String("out", "", "File to export http gateway metrics to.")
func TestDescribeAll(t *testing.T) {
// to check correct metrics type mapping
_ = NewGateMetrics(mock{})
flag.Parse()
require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")
desc := DescribeAll()
data, err := json.Marshal(desc)
require.NoError(t, err)
err = os.WriteFile(*metricsPath, data, 0644)
require.NoError(t, err)
}

241
metrics/metrics.go Normal file

@ -0,0 +1,241 @@
package metrics
import (
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.uber.org/zap"
)
const (
namespace = "frostfs_http_gw"
stateSubsystem = "state"
poolSubsystem = "pool"
serverSubsystem = "server"
)
const (
healthMetric = "health"
versionInfoMetric = "version_info"
)
const (
overallErrorsMetric = "overall_errors"
overallNodeErrorsMetric = "overall_node_errors"
overallNodeRequestsMetric = "overall_node_requests"
currentErrorMetric = "current_errors"
avgRequestDurationMetric = "avg_request_duration"
)
const (
methodGetBalance = "get_balance"
methodPutContainer = "put_container"
methodGetContainer = "get_container"
methodListContainer = "list_container"
methodDeleteContainer = "delete_container"
methodGetContainerEacl = "get_container_eacl"
methodSetContainerEacl = "set_container_eacl"
methodEndpointInfo = "endpoint_info"
methodNetworkInfo = "network_info"
methodPutObject = "put_object"
methodDeleteObject = "delete_object"
methodGetObject = "get_object"
methodHeadObject = "head_object"
methodRangeObject = "range_object"
methodCreateSession = "create_session"
)
// HealthStatus of the gate application.
type HealthStatus int32
const (
HealthStatusUndefined HealthStatus = 0
HealthStatusStarting HealthStatus = 1
HealthStatusReady HealthStatus = 2
HealthStatusShuttingDown HealthStatus = 3
)
type StatisticScraper interface {
Statistic() pool.Statistic
}
type serverMetrics struct {
endpointHealth *prometheus.GaugeVec
}
type GateMetrics struct {
stateMetrics
poolMetricsCollector
serverMetrics
}
type stateMetrics struct {
healthCheck prometheus.Gauge
versionInfo *prometheus.GaugeVec
}
type poolMetricsCollector struct {
scraper StatisticScraper
overallErrors prometheus.Gauge
overallNodeErrors *prometheus.GaugeVec
overallNodeRequests *prometheus.GaugeVec
currentErrors *prometheus.GaugeVec
requestDuration *prometheus.GaugeVec
}
// NewGateMetrics creates new metrics for http gate.
func NewGateMetrics(p StatisticScraper) *GateMetrics {
stateMetric := newStateMetrics()
stateMetric.register()
poolMetric := newPoolMetricsCollector(p)
poolMetric.register()
serverMetric := newServerMetrics()
serverMetric.register()
return &GateMetrics{
stateMetrics: *stateMetric,
poolMetricsCollector: *poolMetric,
serverMetrics: *serverMetric,
}
}
func (g *GateMetrics) Unregister() {
g.stateMetrics.unregister()
prometheus.Unregister(&g.poolMetricsCollector)
g.serverMetrics.unregister()
}
func newStateMetrics() *stateMetrics {
return &stateMetrics{
healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
}
}
func (m stateMetrics) register() {
prometheus.MustRegister(m.healthCheck)
prometheus.MustRegister(m.versionInfo)
}
func (m stateMetrics) unregister() {
prometheus.Unregister(m.healthCheck)
prometheus.Unregister(m.versionInfo)
}
func (m stateMetrics) SetHealth(s HealthStatus) {
m.healthCheck.Set(float64(s))
}
func (m stateMetrics) SetVersion(ver string) {
m.versionInfo.WithLabelValues(ver).Set(1)
}
func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
return &poolMetricsCollector{
scraper: p,
overallErrors: mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]),
overallNodeErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]),
overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
currentErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
requestDuration: mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
}
}
func (m *poolMetricsCollector) Collect(ch chan<- prometheus.Metric) {
m.updateStatistic()
m.overallErrors.Collect(ch)
m.overallNodeErrors.Collect(ch)
m.overallNodeRequests.Collect(ch)
m.currentErrors.Collect(ch)
m.requestDuration.Collect(ch)
}
func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
m.overallErrors.Describe(descs)
m.overallNodeErrors.Describe(descs)
m.overallNodeRequests.Describe(descs)
m.currentErrors.Describe(descs)
m.requestDuration.Describe(descs)
}
func (m *poolMetricsCollector) register() {
prometheus.MustRegister(m)
}
func (m *poolMetricsCollector) updateStatistic() {
stat := m.scraper.Statistic()
m.overallNodeErrors.Reset()
m.overallNodeRequests.Reset()
m.currentErrors.Reset()
m.requestDuration.Reset()
for _, node := range stat.Nodes() {
m.overallNodeErrors.WithLabelValues(node.Address()).Set(float64(node.OverallErrors()))
m.overallNodeRequests.WithLabelValues(node.Address()).Set(float64(node.Requests()))
m.currentErrors.WithLabelValues(node.Address()).Set(float64(node.CurrentErrors()))
m.updateRequestsDuration(node)
}
m.overallErrors.Set(float64(stat.OverallErrors()))
}
func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
m.requestDuration.WithLabelValues(node.Address(), methodGetBalance).Set(float64(node.AverageGetBalance().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodPutContainer).Set(float64(node.AveragePutContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodDeleteObject).Set(float64(node.AverageDeleteObject().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodGetObject).Set(float64(node.AverageGetObject().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodHeadObject).Set(float64(node.AverageHeadObject().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodRangeObject).Set(float64(node.AverageRangeObject().Milliseconds()))
m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
}
func newServerMetrics() *serverMetrics {
return &serverMetrics{
endpointHealth: mustNewGaugeVec(appMetricsDesc[serverSubsystem][healthMetric]),
}
}
func (m serverMetrics) register() {
prometheus.MustRegister(m.endpointHealth)
}
func (m serverMetrics) unregister() {
prometheus.Unregister(m.endpointHealth)
}
func (m serverMetrics) MarkHealthy(endpoint string) {
m.endpointHealth.WithLabelValues(endpoint).Set(float64(1))
}
func (m serverMetrics) MarkUnhealthy(endpoint string) {
m.endpointHealth.WithLabelValues(endpoint).Set(float64(0))
}
// NewPrometheusService creates a new service for gathering prometheus metrics.
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
if log == nil {
return nil
}
return &Service{
Server: &http.Server{
Addr: cfg.Address,
Handler: promhttp.Handler(),
},
enabled: cfg.Enabled,
serviceType: "Prometheus",
log: log.With(zap.String("service", "Prometheus")),
}
}
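A sketch of typical startup wiring; logger and p (the SDK pool, which satisfies StatisticScraper) are assumed from context, and the address is hypothetical:

	m := NewGateMetrics(p)
	m.SetVersion("dev")
	m.SetHealth(HealthStatusStarting)

	svc := NewPrometheusService(logger, Config{Address: "localhost:8084", Enabled: true})
	go svc.Start()

	// ... start the gateway itself ...
	m.SetHealth(HealthStatusReady)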

33
metrics/pprof.go Normal file

@ -0,0 +1,33 @@
package metrics
import (
"net/http"
"net/http/pprof"
"go.uber.org/zap"
)
// NewPprofService creates a new service for gathering pprof metrics.
func NewPprofService(l *zap.Logger, cfg Config) *Service {
handler := http.NewServeMux()
handler.HandleFunc("/debug/pprof/", pprof.Index)
handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
// Manually add support for paths linked to by index page at /debug/pprof/
for _, item := range []string{"allocs", "block", "heap", "goroutine", "mutex", "threadcreate"} {
handler.Handle("/debug/pprof/"+item, pprof.Handler(item))
}
return &Service{
Server: &http.Server{
Addr: cfg.Address,
Handler: handler,
},
enabled: cfg.Enabled,
serviceType: "Pprof",
log: l.With(zap.String("service", "Pprof")),
}
}

48
metrics/service.go Normal file

@ -0,0 +1,48 @@
package metrics
import (
"context"
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
"go.uber.org/zap"
)
// Service serves metrics.
type Service struct {
*http.Server
enabled bool
log *zap.Logger
serviceType string
}
// Config contains parameters to configure the service.
type Config struct {
Address string
Enabled bool
}
// Start runs http service with the exposed endpoint on the configured port.
func (ms *Service) Start() {
if ms.enabled {
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
err := ms.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
}
} else {
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
}
}
// ShutDown stops the service.
func (ms *Service) ShutDown(ctx context.Context) {
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
err := ms.Shutdown(ctx)
if err != nil {
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
if err = ms.Close(); err != nil {
ms.log.Panic(logs.CantShutDownService, zap.Error(err))
}
}
}

230
resolver/resolver.go Normal file

@ -0,0 +1,230 @@
package resolver
import (
"context"
"errors"
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
)
const (
NNSResolver = "nns"
DNSResolver = "dns"
)
// ErrNoResolvers is returned when trying to resolve a container without any resolvers.
var ErrNoResolvers = errors.New("no resolvers")
// FrostFS represents a virtual connection to the FrostFS network.
type FrostFS interface {
// SystemDNS reads system DNS network parameters of the FrostFS.
//
// Returns exactly one non-zero value. Returns any error encountered
// which prevented the parameter from being read.
SystemDNS(context.Context) (string, error)
}
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct {
FrostFS FrostFS
RPCAddress string
Settings Settings
}
type ContainerResolver struct {
mu sync.RWMutex
resolvers []*Resolver
}
type Resolver struct {
Name string
resolve func(context.Context, string) (*cid.ID, error)
}
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (*cid.ID, error)) {
r.resolve = fn
}
func (r *Resolver) Resolve(ctx context.Context, name string) (*cid.ID, error) {
return r.resolve(ctx, name)
}
func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) {
resolvers, err := createResolvers(resolverNames, cfg)
if err != nil {
return nil, err
}
return &ContainerResolver{
resolvers: resolvers,
}, nil
}
func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
resolvers := make([]*Resolver, len(resolverNames))
for i, name := range resolverNames {
cnrResolver, err := newResolver(name, cfg)
if err != nil {
return nil, err
}
resolvers[i] = cnrResolver
}
return resolvers, nil
}
func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.ID, error) {
r.mu.RLock()
defer r.mu.RUnlock()
var err error
for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, cnrName)
if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil {
err = resolverErr
} else {
err = fmt.Errorf("%s: %w", err.Error(), resolverErr)
}
continue
}
return cnrID, nil
}
if err != nil {
return nil, err
}
return nil, ErrNoResolvers
}
func (r *ContainerResolver) UpdateResolvers(resolverNames []string, cfg *Config) error {
r.mu.Lock()
defer r.mu.Unlock()
if r.equals(resolverNames) {
return nil
}
resolvers, err := createResolvers(resolverNames, cfg)
if err != nil {
return err
}
r.resolvers = resolvers
return nil
}
func (r *ContainerResolver) equals(resolverNames []string) bool {
if len(r.resolvers) != len(resolverNames) {
return false
}
for i := 0; i < len(resolverNames); i++ {
if r.resolvers[i].Name != resolverNames[i] {
return false
}
}
return true
}
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
default:
return nil, fmt.Errorf("unknown resolver: %s", name)
}
}
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var err error
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, isDefault := settings.FormContainerZone(namespace)
if isDefault {
zone, err = frostFS.SystemDNS(ctx)
if err != nil {
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
}
}
domain := name + "." + zone
cnrID, err := dns.ResolveContainerName(domain)
if err != nil {
return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
}
return &cnrID, nil
}
return &Resolver{
Name: DNSResolver,
resolve: resolveFunc,
}, nil
}
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
if rpcAddress == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS
if err := nns.Dial(rpcAddress); err != nil {
return nil, fmt.Errorf("could not dial nns: %w", err)
}
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
var d container.Domain
d.SetName(name)
namespace, err := middleware.GetNamespace(ctx)
if err != nil {
return nil, err
}
zone, _ := settings.FormContainerZone(namespace)
d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d)
if err != nil {
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
}
return &cnrID, nil
}
return &Resolver{
Name: NNSResolver,
resolve: resolveFunc,
}, nil
}
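A sketch of building the chain in the order given by the 'resolver_order' setting; frostFS and settings are assumed from context, and the RPC endpoint is hypothetical:

	res, err := NewContainerResolver([]string{NNSResolver, DNSResolver}, &Config{
		FrostFS:    frostFS,
		RPCAddress: "http://morph-chain.example:30333",
		Settings:   settings,
	})
	if err != nil {
		return err
	}
	// resolvers are tried in order; the first success wins, otherwise their errors are chained
	cnrID, err := res.Resolve(ctx, "my-container")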

41
response/utils.go Normal file

@ -0,0 +1,41 @@
package response
import (
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/valyala/fasthttp"
"go.uber.org/zap"
)
func Error(r *fasthttp.RequestCtx, msg string, code int) {
r.Error(msg+"\n", code)
}
func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
var (
msg string
statusCode int
logFields []zap.Field
)
st := new(sdkstatus.ObjectAccessDenied)
switch {
case errors.As(err, &st):
statusCode = fasthttp.StatusForbidden
reason := st.Reason()
msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
logFields = append(logFields, zap.String("error_detail", reason))
case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
statusCode = fasthttp.StatusNotFound
msg = "Not Found"
default:
statusCode = fasthttp.StatusBadRequest
msg = fmt.Sprintf("%s: %v", message, err)
}
return statusCode, msg, logFields
}
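The intended call pattern from a request handler, as a sketch (log and c are assumed from context):

	statusCode, msg, logFields := FormErrorResponse("could not receive object", err)
	log.Error(msg, logFields...)
	Error(c, msg, statusCode) // c is the *fasthttp.RequestCtx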

99
tokens/bearer-token.go Normal file

@ -0,0 +1,99 @@
package tokens
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"github.com/valyala/fasthttp"
)
type fromHandler = func(h *fasthttp.RequestHeader) []byte
type ctxKey string
const (
bearerTokenHdr = "Bearer"
bearerTokenKey ctxKey = "__context_bearer_token_key"
)
// BearerToken usage:
//
//	if ctx, err = tokens.StoreBearerTokenAppCtx(ctx, c); err != nil {
//		log.Error("could not fetch bearer token", zap.Error(err))
//		c.Error("could not fetch bearer token", fasthttp.StatusBadRequest)
//		return
//	}
// BearerTokenFromHeader extracts a bearer token from Authorization request header.
func BearerTokenFromHeader(h *fasthttp.RequestHeader) []byte {
auth := h.Peek(fasthttp.HeaderAuthorization)
if auth == nil || !bytes.HasPrefix(auth, []byte(bearerTokenHdr)) {
return nil
}
if auth = bytes.TrimPrefix(auth, []byte(bearerTokenHdr+" ")); len(auth) == 0 {
return nil
}
return auth
}
// BearerTokenFromCookie extracts a bearer token from cookies.
func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {
auth := h.Cookie(bearerTokenHdr)
if len(auth) == 0 {
return nil
}
return auth
}
// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
// it in the application context.
func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) {
tkn, err := fetchBearerToken(c)
if err != nil {
return nil, err
}
newCtx := context.WithValue(ctx, bearerTokenKey, tkn)
return newCtx, nil
}
// LoadBearerToken returns a bearer token stored in the context given (if it's
// present there).
func LoadBearerToken(ctx context.Context) (*bearer.Token, error) {
if tkn, ok := ctx.Value(bearerTokenKey).(*bearer.Token); ok && tkn != nil {
return tkn, nil
}
return nil, errors.New("found empty bearer token")
}
func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
// ignore empty value
if ctx == nil {
return nil, nil
}
var (
lastErr error
buf []byte
tkn = new(bearer.Token)
)
for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
if buf = parse(&ctx.Request.Header); buf == nil {
continue
} else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
continue
} else if err = tkn.Unmarshal(data); err != nil {
lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
continue
}
return tkn, nil
}
return nil, lastErr
}

307
tokens/bearer-token_test.go Normal file

@ -0,0 +1,307 @@
//go:build !integration
package tokens
import (
"context"
"encoding/base64"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"github.com/valyala/fasthttp"
)
func makeTestCookie(value []byte) *fasthttp.RequestHeader {
header := new(fasthttp.RequestHeader)
header.SetCookie(bearerTokenHdr, string(value))
return header
}
func makeTestHeader(value []byte) *fasthttp.RequestHeader {
header := new(fasthttp.RequestHeader)
if value != nil {
header.Set(fasthttp.HeaderAuthorization, string(value))
}
return header
}
func makeBearer(value string) string {
return bearerTokenHdr + " " + value
}
func TestBearerTokenFromCookie(t *testing.T) {
cases := []struct {
name string
actual []byte
expect []byte
}{
{
name: "empty",
},
{
name: "normal",
actual: []byte("TOKEN"),
expect: []byte("TOKEN"),
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.expect, BearerTokenFromCookie(makeTestCookie(tt.actual)))
})
}
}
func TestBearerTokenFromHeader(t *testing.T) {
validToken := "token"
tokenWithoutPrefix := "invalid-token"
cases := []struct {
name string
actual []byte
expect []byte
}{
{
name: "empty",
},
{
name: "token without the bearer prefix",
actual: []byte(tokenWithoutPrefix),
},
{
name: "token without payload",
actual: []byte(makeBearer("")),
},
{
name: "normal",
actual: []byte(makeBearer(validToken)),
expect: []byte(validToken),
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
require.Equal(t, tt.expect, BearerTokenFromHeader(makeTestHeader(tt.actual)))
})
}
}
func TestFetchBearerToken(t *testing.T) {
key, err := keys.NewPrivateKey()
require.NoError(t, err)
var uid user.ID
user.IDFromKey(&uid, key.PrivateKey.PublicKey)
tkn := new(bearer.Token)
tkn.ForUser(uid)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
cases := []struct {
name string
cookie string
header string
error string
nilCtx bool
expect *bearer.Token
}{
{
name: "empty",
},
{
name: "nil context",
nilCtx: true,
},
{
name: "bad base64 header",
header: "WRONG BASE64",
error: "can't base64-decode bearer token",
},
{
name: "bad base64 cookie",
cookie: "WRONG BASE64",
error: "can't base64-decode bearer token",
},
{
name: "header token unmarshal error",
header: "dGVzdAo=",
error: "can't unmarshal bearer token",
},
{
name: "cookie token unmarshal error",
cookie: "dGVzdAo=",
error: "can't unmarshal bearer token",
},
{
name: "bad header and cookie",
header: "WRONG BASE64",
cookie: "dGVzdAo=",
error: "can't unmarshal bearer token",
},
{
name: "bad header, but good cookie",
header: "dGVzdAo=",
cookie: t64,
expect: tkn,
},
{
name: "bad cookie, but good header",
header: t64,
cookie: "dGVzdAo=",
expect: tkn,
},
{
name: "ok for header",
header: t64,
expect: tkn,
},
{
name: "ok for cookie",
cookie: t64,
expect: tkn,
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
var ctx *fasthttp.RequestCtx
if !tt.nilCtx {
ctx = makeTestRequest(tt.cookie, tt.header)
}
actual, err := fetchBearerToken(ctx)
if tt.error == "" {
require.NoError(t, err)
require.Equal(t, tt.expect, actual)
return
}
require.Contains(t, err.Error(), tt.error)
})
}
}
func makeTestRequest(cookie, header string) *fasthttp.RequestCtx {
ctx := new(fasthttp.RequestCtx)
if cookie != "" {
ctx.Request.Header.SetCookie(bearerTokenHdr, cookie)
}
if header != "" {
ctx.Request.Header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+header)
}
return ctx
}
func TestCheckAndPropagateBearerToken(t *testing.T) {
key, err := keys.NewPrivateKey()
require.NoError(t, err)
var uid user.ID
user.IDFromKey(&uid, key.PrivateKey.PublicKey)
tkn := new(bearer.Token)
tkn.ForUser(uid)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
req := makeTestRequest(t64, "")
// Expect to see the token within the context.
appCtx, err := StoreBearerTokenAppCtx(context.Background(), req)
require.NoError(t, err)
// Expect to see the same token without errors.
actual, err := LoadBearerToken(appCtx)
require.NoError(t, err)
require.Equal(t, tkn, actual)
}
func TestLoadBearerToken(t *testing.T) {
ctx := context.Background()
token := new(bearer.Token)
cases := []struct {
name string
appCtx context.Context
error string
}{
{
name: "token is missing in the context",
appCtx: ctx,
error: "found empty bearer token",
},
{
name: "normal",
appCtx: context.WithValue(ctx, bearerTokenKey, token),
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
tkn, err := LoadBearerToken(tt.appCtx)
if tt.error == "" {
require.NoError(t, err)
require.Equal(t, token, tkn)
return
}
require.Contains(t, err.Error(), tt.error)
})
}
}
func TestStoreBearerTokenAppCtx(t *testing.T) {
key, err := keys.NewPrivateKey()
require.NoError(t, err)
var uid user.ID
user.IDFromKey(&uid, key.PrivateKey.PublicKey)
tkn := new(bearer.Token)
tkn.ForUser(uid)
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
require.NotEmpty(t, t64)
cases := []struct {
name string
req *fasthttp.RequestCtx
error string
}{
{
name: "invalid token",
req: makeTestRequest("dGVzdAo=", ""),
error: "can't unmarshal bearer token",
},
{
name: "normal",
req: makeTestRequest(t64, ""),
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
ctx, err := StoreBearerTokenAppCtx(context.Background(), tt.req)
if tt.error == "" {
require.NoError(t, err)
actualToken, ok := ctx.Value(bearerTokenKey).(*bearer.Token)
require.True(t, ok)
require.Equal(t, tkn, actualToken)
return
}
require.Contains(t, err.Error(), tt.error)
})
}
}
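Taken together, these tests imply the intended request flow: extract the bearer token from the Authorization header or cookie, stash it in the application context, and load it later when talking to FrostFS. A minimal sketch of such wiring follows; the middleware and its next handler are hypothetical, and only the functions exercised by the tests above are real:

// bearerMiddleware is a hypothetical wrapper showing how StoreBearerTokenAppCtx
// and LoadBearerToken are meant to be paired around a request handler.
func bearerMiddleware(appCtx context.Context, next func(context.Context, *fasthttp.RequestCtx)) fasthttp.RequestHandler {
	return func(req *fasthttp.RequestCtx) {
		ctx, err := StoreBearerTokenAppCtx(appCtx, req)
		if err != nil {
			req.Error("invalid bearer token", fasthttp.StatusBadRequest)
			return
		}
		next(ctx, req) // downstream code may call LoadBearerToken(ctx)
	}
}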

441
tree/tree.go Normal file
View file

@ -0,0 +1,441 @@
package tree
import (
"context"
"errors"
"fmt"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)
type (
Tree struct {
service ServiceClient
}
// ServiceClient is a client used to interact with the tree service.
// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
ServiceClient interface {
GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
}
treeNode struct {
ObjID oid.ID
Meta map[string]string
}
multiSystemNode struct {
// the first element is the latest
nodes []*treeNode
}
GetNodesParams struct {
CnrID cid.ID
BktInfo *data.BucketInfo
TreeID string
Path []string
Meta []string
LatestOnly bool
AllAttrs bool
}
)
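For reference, an in-memory ServiceClient is easy to sketch for tests. The stub below is an assumption for illustration, not part of the gateway: it serves canned responses keyed by the joined node path and honors the ErrNodeNotFound contract described on the interface.

// stubServiceClient is a hypothetical in-memory ServiceClient for tests.
type stubServiceClient struct {
	nodes map[string][]NodeResponse // keyed by strings.Join(path, separator)
}

func (s *stubServiceClient) GetNodes(_ context.Context, p *GetNodesParams) ([]NodeResponse, error) {
	res, ok := s.nodes[strings.Join(p.Path, separator)]
	if !ok {
		return nil, ErrNodeNotFound
	}
	return res, nil
}

func (s *stubServiceClient) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]NodeResponse, error) {
	return nil, ErrNodeNotFound
}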
var (
// ErrNodeNotFound is returned from ServiceClient in case of a not found error.
ErrNodeNotFound = layer.ErrNodeNotFound
// ErrNodeAccessDenied is returned from ServiceClient in case of an access denied error.
ErrNodeAccessDenied = layer.ErrNodeAccessDenied
)
const (
FileNameKey = "FileName"
settingsFileName = "bucket-settings"
oidKV = "OID"
uploadIDKV = "UploadId"
sizeKV = "Size"
// key of delete marker nodes.
isDeleteMarkerKV = "IsDeleteMarker"
// versionTree -- ID of a tree with object versions.
versionTree = "version"
systemTree = "system"
separator = "/"
)
// NewTree creates an instance of Tree backed by the provided ServiceClient.
func NewTree(service ServiceClient) *Tree {
return &Tree{service: service}
}
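// Meta is a single key/value attribute of a tree node.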
type Meta interface {
GetKey() string
GetValue() []byte
}
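// NodeResponse is the subset of the tree service node view used by this package.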
type NodeResponse interface {
GetMeta() []Meta
GetTimestamp() []uint64
GetNodeID() []uint64
GetParentID() []uint64
}
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
tNode := &treeNode{
Meta: make(map[string]string, len(nodeInfo.GetMeta())),
}
for _, kv := range nodeInfo.GetMeta() {
switch kv.GetKey() {
case oidKV:
if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
return nil, err
}
default:
tNode.Meta[kv.GetKey()] = string(kv.GetValue())
}
}
return tNode, nil
}
func (n *treeNode) Get(key string) (string, bool) {
value, ok := n.Meta[key]
return value, ok
}
func (n *treeNode) FileName() (string, bool) {
value, ok := n.Meta[FileNameKey]
return value, ok
}
func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
tNode, err := newTreeNode(node)
if err != nil {
return nil, fmt.Errorf("invalid tree node: %w", err)
}
return newNodeVersionFromTreeNode(tNode), nil
}
func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
size, _ := treeNode.Get(sizeKV)
version := &api.NodeVersion{
BaseNodeVersion: api.BaseNodeVersion{
OID: treeNode.ObjID,
},
DeleteMarker: isDeleteMarker,
IsPrefixNode: size == "",
}
return version
}
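// newMultiNode wraps raw node responses into a multiSystemNode, swapping the
// node with the maximum timestamp into index 0 so that Latest is O(1).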
func newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) {
var (
err error
index int
maxTimestamp uint64
)
if len(nodes) == 0 {
return nil, errors.New("multi node must have at least one node")
}
treeNodes := make([]*treeNode, len(nodes))
for i, node := range nodes {
if treeNodes[i], err = newTreeNode(node); err != nil {
return nil, fmt.Errorf("parse system node response: %w", err)
}
if timestamp := getMaxTimestamp(node); timestamp > maxTimestamp {
index = i
maxTimestamp = timestamp
}
}
treeNodes[0], treeNodes[index] = treeNodes[index], treeNodes[0]
return &multiSystemNode{
nodes: treeNodes,
}, nil
}
func (m *multiSystemNode) Latest() *treeNode {
return m.nodes[0]
}
func (m *multiSystemNode) Old() []*treeNode {
return m.nodes[1:]
}
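// GetLatestVersion returns the node version with the latest timestamp for the
// object from the version tree.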
func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
nodes, err := c.GetVersions(ctx, cnrID, objectName)
if err != nil {
return nil, err
}
latestNode, err := getLatestVersionNode(nodes)
if err != nil {
return nil, err
}
return newNodeVersion(latestNode)
}
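// GetVersions returns all stored version nodes of the object from the version tree.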
func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
path := pathFromName(objectName)
p := &GetNodesParams{
CnrID: *cnrID,
TreeID: versionTree,
Path: path,
Meta: meta,
LatestOnly: false,
AllAttrs: false,
}
return c.service.GetNodes(ctx, p)
}
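// CheckSettingsNodeExist returns ErrNodeNotFound if the bucket-settings node
// is absent from the system tree.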
func (c *Tree) CheckSettingsNodeExist(ctx context.Context, bktInfo *data.BucketInfo) error {
_, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
if err != nil {
return err
}
return nil
}
func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name string) (*multiSystemNode, error) {
p := &GetNodesParams{
CnrID: bktInfo.CID,
BktInfo: bktInfo,
TreeID: systemTree,
Path: []string{name},
LatestOnly: false,
AllAttrs: true,
}
nodes, err := c.service.GetNodes(ctx, p)
if err != nil {
return nil, err
}
nodes = filterMultipartNodes(nodes)
if len(nodes) == 0 {
return nil, ErrNodeNotFound
}
return newMultiNode(nodes)
}
func filterMultipartNodes(nodes []NodeResponse) []NodeResponse {
res := make([]NodeResponse, 0, len(nodes))
LOOP:
for _, node := range nodes {
for _, meta := range node.GetMeta() {
if meta.GetKey() == uploadIDKV {
continue LOOP
}
}
res = append(res, node)
}
return res
}
func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
var (
maxCreationTime uint64
targetIndexNode = -1
)
for i, node := range nodes {
if !checkExistOID(node.GetMeta()) {
continue
}
if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
targetIndexNode = i
maxCreationTime = currentCreationTime
}
}
if targetIndexNode == -1 {
return nil, layer.ErrNodeNotFound
}
return nodes[targetIndexNode], nil
}
func checkExistOID(meta []Meta) bool {
for _, kv := range meta {
if kv.GetKey() == "OID" {
return true
}
}
return false
}
// pathFromName splits name by '/'.
func pathFromName(objectName string) []string {
return strings.Split(objectName, separator)
}
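// GetSubTreeByPrefix lists version tree nodes whose file names match the given
// prefix and returns them together with the resolved directory part of the
// prefix. If latestOnly is true, only the newest leaf per file name is kept.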
func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
if err != nil {
return nil, "", err
}
subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
if err != nil {
if errors.Is(err, layer.ErrNodeNotFound) {
return nil, "", nil
}
return nil, "", err
}
nodesMap := make(map[string][]NodeResponse, len(subTree))
for _, node := range subTree {
if MultiID(rootID).Equal(node.GetNodeID()) {
continue
}
fileName := GetFilename(node)
if !strings.HasPrefix(fileName, tailPrefix) {
continue
}
nodes := nodesMap[fileName]
// If latestOnly is false, keep every node.
// Otherwise keep all intermediate nodes but only the latest leaf (object)
// node; the latest leaf seen so far is stored and replaced at nodes[0].
if len(nodes) == 0 {
nodes = []NodeResponse{node}
} else if !latestOnly || isIntermediate(node) {
nodes = append(nodes, node)
} else if isIntermediate(nodes[0]) {
nodes = append([]NodeResponse{node}, nodes...)
} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
nodes[0] = node
}
nodesMap[fileName] = nodes
}
result := make([]NodeResponse, 0, len(subTree))
for _, nodes := range nodesMap {
result = append(result, nodes...)
}
return result, strings.TrimSuffix(prefix, tailPrefix), nil
}
func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
rootID := []uint64{0}
path := strings.Split(prefix, separator)
tailPrefix := path[len(path)-1]
if len(path) > 1 {
var err error
rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
if err != nil {
return nil, "", err
}
}
return rootID, tailPrefix, nil
}
func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
p := &GetNodesParams{
CnrID: bktInfo.CID,
BktInfo: bktInfo,
TreeID: treeID,
Path: prefixPath,
LatestOnly: false,
AllAttrs: true,
}
nodes, err := c.service.GetNodes(ctx, p)
if err != nil {
return nil, err
}
var intermediateNodes []uint64
for _, node := range nodes {
if isIntermediate(node) {
intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
}
}
if len(intermediateNodes) == 0 {
return nil, layer.ErrNodeNotFound
}
return intermediateNodes, nil
}
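// GetFilename returns the FileName attribute of the node, or "" if it is unset.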
func GetFilename(node NodeResponse) string {
for _, kv := range node.GetMeta() {
if kv.GetKey() == FileNameKey {
return string(kv.GetValue())
}
}
return ""
}
func isIntermediate(node NodeResponse) bool {
if len(node.GetMeta()) != 1 {
return false
}
return node.GetMeta()[0].GetKey() == FileNameKey
}
func getMaxTimestamp(node NodeResponse) uint64 {
var maxTimestamp uint64
for _, timestamp := range node.GetTimestamp() {
if timestamp > maxTimestamp {
maxTimestamp = timestamp
}
}
return maxTimestamp
}
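// MultiID is the set of node IDs that make up one logical tree node.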
type MultiID []uint64
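// Equal reports whether every ID in id is also present in m. Note the check is
// one-directional: it returns true whenever id is a subset of m.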
func (m MultiID) Equal(id MultiID) bool {
seen := make(map[uint64]struct{}, len(m))
for i := range m {
seen[m[i]] = struct{}{}
}
for i := range id {
if _, ok := seen[id[i]]; !ok {
return false
}
}
return true
}

150
tree/tree_test.go Normal file
View file

@ -0,0 +1,150 @@
package tree
import (
"testing"
"github.com/stretchr/testify/require"
)
type nodeMeta struct {
key string
value []byte
}
func (m nodeMeta) GetKey() string {
return m.key
}
func (m nodeMeta) GetValue() []byte {
return m.value
}
type nodeResponse struct {
meta []nodeMeta
timestamp []uint64
}
func (n nodeResponse) GetTimestamp() []uint64 {
return n.timestamp
}
func (n nodeResponse) GetMeta() []Meta {
res := make([]Meta, len(n.meta))
for i, value := range n.meta {
res[i] = value
}
return res
}
func (n nodeResponse) GetNodeID() []uint64 {
return nil
}
func (n nodeResponse) GetParentID() []uint64 {
return nil
}
func TestGetLatestNode(t *testing.T) {
for _, tc := range []struct {
name string
nodes []NodeResponse
expectedOID string
error bool
}{
{
name: "empty",
nodes: []NodeResponse{},
error: true,
},
{
name: "one node of the object version",
nodes: []NodeResponse{
nodeResponse{
timestamp: []uint64{1},
meta: []nodeMeta{
{
key: oidKV,
value: []byte("oid1"),
},
},
},
},
exceptedOID: "oid1",
},
{
name: "one node of the object version and one node of the secondary object",
nodes: []NodeResponse{
nodeResponse{
timestamp: []uint64{3},
meta: []nodeMeta{},
},
nodeResponse{
timestamp: []uint64{1},
meta: []nodeMeta{
{
key: oidKV,
value: []byte("oid1"),
},
},
},
},
exceptedOID: "oid1",
},
{
name: "all nodes represent a secondary object",
nodes: []NodeResponse{
nodeResponse{
timestamp: []uint64{3},
meta: []nodeMeta{},
},
nodeResponse{
timestamp: []uint64{5},
meta: []nodeMeta{},
},
},
error: true,
},
{
name: "several nodes of different types and with different timestamp",
nodes: []NodeResponse{
nodeResponse{
timestamp: []uint64{1},
meta: []nodeMeta{
{
key: oidKV,
value: []byte("oid1"),
},
},
},
nodeResponse{
timestamp: []uint64{3},
meta: []nodeMeta{},
},
nodeResponse{
timestamp: []uint64{4},
meta: []nodeMeta{
{
key: oidKV,
value: []byte("oid2"),
},
},
},
nodeResponse{
timestamp: []uint64{6},
meta: []nodeMeta{},
},
},
exceptedOID: "oid2",
},
} {
t.Run(tc.name, func(t *testing.T) {
actualNode, err := getLatestVersionNode(tc.nodes)
if tc.error {
require.Error(t, err)
return
}
require.NoError(t, err)
require.Equal(t, tc.expectedOID, string(actualNode.GetMeta()[0].GetValue()))
})
}
}

258
utils/attributes.go Normal file
View file

@ -0,0 +1,258 @@
package utils
import (
"bytes"
"context"
"errors"
"fmt"
"math"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type EpochDurations struct {
CurrentEpoch uint64
MsPerBlock int64
BlockPerEpoch uint64
}
type EpochInfoFetcher interface {
GetEpochDurations(context.Context) (*EpochDurations, error)
}
const (
UserAttributeHeaderPrefix = "X-Attribute-"
)
const (
systemAttributePrefix = "__SYSTEM__"
// Deprecated: use systemAttributePrefix instead.
systemAttributePrefixNeoFS = "__NEOFS__"
)
type systemTransformer struct {
prefix string
backwardPrefix string
xAttrPrefixes [][]byte
}
var transformers = []systemTransformer{
{
prefix: systemAttributePrefix,
backwardPrefix: "System-",
xAttrPrefixes: [][]byte{[]byte("System-"), []byte("SYSTEM-"), []byte("system-")},
},
{
prefix: systemAttributePrefixNeoFS,
backwardPrefix: "Neofs-",
xAttrPrefixes: [][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")},
},
}
func (t systemTransformer) existsExpirationAttributes(headers map[string]string) bool {
_, ok0 := headers[t.expirationEpochAttr()]
_, ok1 := headers[t.expirationDurationAttr()]
_, ok2 := headers[t.expirationTimestampAttr()]
_, ok3 := headers[t.expirationRFC3339Attr()]
return ok0 || ok1 || ok2 || ok3
}
func (t systemTransformer) expirationEpochAttr() string {
return t.prefix + "EXPIRATION_EPOCH"
}
func (t systemTransformer) expirationDurationAttr() string {
return t.prefix + "EXPIRATION_DURATION"
}
func (t systemTransformer) expirationTimestampAttr() string {
return t.prefix + "EXPIRATION_TIMESTAMP"
}
func (t systemTransformer) expirationRFC3339Attr() string {
return t.prefix + "EXPIRATION_RFC3339"
}
func (t systemTransformer) systemTranslator(key, prefix []byte) []byte {
// replace the specified prefix with system prefix
key = bytes.Replace(key, prefix, []byte(t.prefix), 1)
// replace `-` with `_`
key = bytes.ReplaceAll(key, []byte("-"), []byte("_"))
// replace with uppercase
return bytes.ToUpper(key)
}
func (t systemTransformer) transformIfSystem(key []byte) ([]byte, bool) {
// checks that it's a system FrostFS header
for _, system := range t.xAttrPrefixes {
if bytes.HasPrefix(key, system) {
return t.systemTranslator(key, system), true
}
}
return key, false
}
// systemBackwardTranslator is used to convert headers looking like '__PREFIX__ATTR_NAME' to 'Prefix-Attr-Name'.
func (t systemTransformer) systemBackwardTranslator(key string) string {
// trim specified prefix '__PREFIX__'
key = strings.TrimPrefix(key, t.prefix)
var res strings.Builder
res.WriteString(t.backwardPrefix)
strs := strings.Split(key, "_")
for i, s := range strs {
s = title(strings.ToLower(s))
res.WriteString(s)
if i != len(strs)-1 {
res.WriteString("-")
}
}
return res.String()
}
func (t systemTransformer) backwardTransformIfSystem(key string) (string, bool) {
if strings.HasPrefix(key, t.prefix) {
return t.systemBackwardTranslator(key), true
}
return key, false
}
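// TransformIfSystem converts a well-known X-Attribute key such as
// "System-Expiration-Epoch" into its system attribute form
// "__SYSTEM__EXPIRATION_EPOCH"; unrecognized keys are returned unchanged.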
func TransformIfSystem(key []byte) []byte {
for _, transformer := range transformers {
key, transformed := transformer.transformIfSystem(key)
if transformed {
return key
}
}
return key
}
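// BackwardTransformIfSystem is the inverse of TransformIfSystem: it converts
// "__SYSTEM__EXPIRATION_EPOCH" back into "System-Expiration-Epoch"; keys
// without a system prefix are returned unchanged.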
func BackwardTransformIfSystem(key string) string {
for _, transformer := range transformers {
key, transformed := transformer.backwardTransformIfSystem(key)
if transformed {
return key
}
}
return key
}
func title(str string) string {
if str == "" {
return ""
}
r, size := utf8.DecodeRuneInString(str)
r0 := unicode.ToTitle(r)
return string(r0) + str[size:]
}
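// PrepareExpirationHeader normalizes the duration, timestamp, and RFC3339
// expiration attributes in headers into a single expiration epoch attribute.
// Using both the __SYSTEM__ and the deprecated __NEOFS__ format at once is an
// error.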
func PrepareExpirationHeader(ctx context.Context, epochFetcher EpochInfoFetcher, headers map[string]string, now time.Time) error {
formatsNum := 0
index := -1
for i, transformer := range transformers {
if transformer.existsExpirationAttributes(headers) {
formatsNum++
index = i
}
}
switch formatsNum {
case 0:
return nil
case 1:
epochDuration, err := epochFetcher.GetEpochDurations(ctx)
if err != nil {
return fmt.Errorf("couldn't get epoch durations from network info: %w", err)
}
return transformers[index].prepareExpirationHeader(headers, epochDuration, now)
default:
return errors.New("both deprecated and new system attributes formats are used, please use only one")
}
}
func (t systemTransformer) prepareExpirationHeader(headers map[string]string, epochDurations *EpochDurations, now time.Time) error {
expirationInEpoch := headers[t.expirationEpochAttr()]
if timeRFC3339, ok := headers[t.expirationRFC3339Attr()]; ok {
expTime, err := time.Parse(time.RFC3339, timeRFC3339)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, t.expirationRFC3339Attr())
}
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, t.expirationRFC3339Attr())
}
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, t.expirationRFC3339Attr())
}
if timestamp, ok := headers[t.expirationTimestampAttr()]; ok {
value, err := strconv.ParseInt(timestamp, 10, 64)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", timestamp, t.expirationTimestampAttr())
}
expTime := time.Unix(value, 0)
if expTime.Before(now) {
return fmt.Errorf("value %s of header %s must be in the future", timestamp, t.expirationTimestampAttr())
}
t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now))
delete(headers, t.expirationTimestampAttr())
}
if duration, ok := headers[t.expirationDurationAttr()]; ok {
expDuration, err := time.ParseDuration(duration)
if err != nil {
return fmt.Errorf("couldn't parse value %s of header %s", duration, t.expirationDurationAttr())
}
if expDuration <= 0 {
return fmt.Errorf("value %s of header %s must be positive", expDuration, t.expirationDurationAttr())
}
t.updateExpirationHeader(headers, epochDurations, expDuration)
delete(headers, t.expirationDurationAttr())
}
if expirationInEpoch != "" {
expEpoch, err := strconv.ParseUint(expirationInEpoch, 10, 64)
if err != nil {
return fmt.Errorf("parse expiration epoch '%s': %w", expirationInEpoch, err)
}
if expEpoch < epochDurations.CurrentEpoch {
return fmt.Errorf("expiration epoch '%d' must be greater than current epoch '%d'", expEpoch, epochDurations.CurrentEpoch)
}
headers[t.expirationEpochAttr()] = expirationInEpoch
}
return nil
}
func (t systemTransformer) updateExpirationHeader(headers map[string]string, durations *EpochDurations, expDuration time.Duration) {
epochDuration := uint64(durations.MsPerBlock) * durations.BlockPerEpoch
currentEpoch := durations.CurrentEpoch
numEpoch := uint64(expDuration.Milliseconds()) / epochDuration
if uint64(expDuration.Milliseconds())%epochDuration != 0 {
numEpoch++
}
expirationEpoch := uint64(math.MaxUint64)
if numEpoch < math.MaxUint64-currentEpoch {
expirationEpoch = currentEpoch + numEpoch
}
headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10)
}
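To make the epoch arithmetic above concrete, a quick worked example with the durations used in attributes_test.go: with MsPerBlock = 1000 and BlockPerEpoch = 101, one epoch lasts 101,000 ms. A 24h expiration duration is 86,400,000 ms, so numEpoch = ceil(86,400,000 / 101,000) = 856 epochs, and with CurrentEpoch = 10 the resulting expiration epoch attribute is 866.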

189
utils/attributes_test.go Normal file
View file

@ -0,0 +1,189 @@
//go:build !integration
package utils
import (
"math"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestPrepareExpirationHeader(t *testing.T) {
tomorrow := time.Now().Add(24 * time.Hour)
tomorrowUnix := tomorrow.Unix()
tomorrowUnixNano := tomorrow.UnixNano()
tomorrowUnixMilli := tomorrowUnixNano / 1e6
epoch := "100"
duration := "24h"
timestampSec := strconv.FormatInt(tomorrowUnix, 10)
timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10)
timestampNano := strconv.FormatInt(tomorrowUnixNano, 10)
defaultDurations := &EpochDurations{
CurrentEpoch: 10,
MsPerBlock: 1000,
BlockPerEpoch: 101,
}
msPerEpoch := defaultDurations.BlockPerEpoch * uint64(defaultDurations.MsPerBlock)
epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerEpoch
if uint64((24*time.Hour).Milliseconds())%msPerEpoch != 0 {
epochPerDay++
}
defaultExpEpoch := strconv.FormatUint(defaultDurations.CurrentEpoch+epochPerDay, 10)
for _, transformer := range transformers {
for _, tc := range []struct {
name string
headers map[string]string
durations *EpochDurations
err bool
expected map[string]string
}{
{
name: "valid epoch",
headers: map[string]string{transformer.expirationEpochAttr(): epoch},
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
durations: defaultDurations,
},
{
name: "valid epoch, valid duration",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationDurationAttr(): duration,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid rfc3339",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339),
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp sec",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampSec,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp milli",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampMilli,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid epoch, valid timestamp nano",
headers: map[string]string{
transformer.expirationEpochAttr(): epoch,
transformer.expirationTimestampAttr(): timestampNano,
},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): epoch},
},
{
name: "valid timestamp sec",
headers: map[string]string{transformer.expirationTimestampAttr(): timestampSec},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid duration",
headers: map[string]string{transformer.expirationDurationAttr(): duration},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid rfc3339",
headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)},
durations: defaultDurations,
expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch},
},
{
name: "valid max uint 64",
headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)},
durations: &EpochDurations{
CurrentEpoch: math.MaxUint64 - 1,
MsPerBlock: defaultDurations.MsPerBlock,
BlockPerEpoch: defaultDurations.BlockPerEpoch,
},
expected: map[string]string{transformer.expirationEpochAttr(): strconv.FormatUint(uint64(math.MaxUint64), 10)},
},
{
name: "invalid timestamp sec",
headers: map[string]string{transformer.expirationTimestampAttr(): "abc"},
err: true,
},
{
name: "invalid timestamp sec zero",
headers: map[string]string{transformer.expirationTimestampAttr(): "0"},
err: true,
},
{
name: "invalid duration",
headers: map[string]string{transformer.expirationDurationAttr(): "1d"},
err: true,
},
{
name: "invalid duration negative",
headers: map[string]string{transformer.expirationDurationAttr(): "-5h"},
err: true,
},
{
name: "invalid rfc3339",
headers: map[string]string{transformer.expirationRFC3339Attr(): "abc"},
err: true,
},
{
name: "invalid rfc3339 zero",
headers: map[string]string{transformer.expirationRFC3339Attr(): time.RFC3339},
err: true,
},
} {
t.Run(tc.name, func(t *testing.T) {
err := transformer.prepareExpirationHeader(tc.headers, tc.durations, time.Now())
if tc.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tc.expected, tc.headers)
}
})
}
}
}
func TestSystemBackwardTranslator(t *testing.T) {
input := []string{
"__SYSTEM__EXPIRATION_EPOCH",
"__SYSTEM__RANDOM_ATTR",
"__NEOFS__EXPIRATION_EPOCH",
"__NEOFS__RANDOM_ATTR",
}
expected := []string{
"System-Expiration-Epoch",
"System-Random-Attr",
"Neofs-Expiration-Epoch",
"Neofs-Random-Attr",
}
for i, str := range input {
res := BackwardTransformIfSystem(str)
require.Equal(t, expected[i], res)
}
}

Some files were not shown because too many files have changed in this diff.