Compare commits
No commits in common. "master" and "empty" have entirely different histories.
109 changed files with 2 additions and 16337 deletions
|
@ -1,23 +0,0 @@
|
||||||
FROM golang:1.22-alpine AS basebuilder
|
|
||||||
RUN apk add --update make bash ca-certificates
|
|
||||||
|
|
||||||
FROM basebuilder AS builder
|
|
||||||
ENV GOGC=off
|
|
||||||
ENV CGO_ENABLED=0
|
|
||||||
ARG BUILD=now
|
|
||||||
ARG VERSION=dev
|
|
||||||
ARG REPO=repository
|
|
||||||
WORKDIR /src
|
|
||||||
COPY . /src
|
|
||||||
|
|
||||||
RUN make
|
|
||||||
|
|
||||||
# Executable image
|
|
||||||
FROM scratch
|
|
||||||
|
|
||||||
WORKDIR /
|
|
||||||
|
|
||||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
|
||||||
COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw
|
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/frostfs-http-gw"]
|
|
|
@ -1,8 +0,0 @@
|
||||||
FROM alpine
|
|
||||||
RUN apk add --update --no-cache bash ca-certificates
|
|
||||||
|
|
||||||
WORKDIR /
|
|
||||||
|
|
||||||
COPY bin/frostfs-http-gw /bin/frostfs-http-gw
|
|
||||||
|
|
||||||
CMD ["frostfs-http-gw"]
|
|
|
@ -1,45 +0,0 @@
|
||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
title: ''
|
|
||||||
labels: community, triage, bug
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
<!--- Provide a general summary of the issue in the Title above -->
|
|
||||||
|
|
||||||
## Expected Behavior
|
|
||||||
<!--- If you're describing a bug, tell us what should happen -->
|
|
||||||
<!--- If you're suggesting a change/improvement, tell us how it should work -->
|
|
||||||
|
|
||||||
## Current Behavior
|
|
||||||
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
|
|
||||||
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
|
||||||
|
|
||||||
## Possible Solution
|
|
||||||
<!-- Not obligatory
|
|
||||||
If no reason/fix/additions for the bug can be suggested,
|
|
||||||
uncomment the following phrase:
|
|
||||||
|
|
||||||
<-- No fix can be suggested by a QA engineer. Further solutions shall be up to developers. -->
|
|
||||||
|
|
||||||
## Steps to Reproduce (for bugs)
|
|
||||||
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
|
||||||
<!--- reproduce this bug. -->
|
|
||||||
|
|
||||||
1.
|
|
||||||
|
|
||||||
## Context
|
|
||||||
<!--- How has this issue affected you? What are you trying to accomplish? -->
|
|
||||||
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
|
|
||||||
|
|
||||||
## Regression
|
|
||||||
<!-- Is this issue a regression? (Yes / No) -->
|
|
||||||
<!-- If Yes, optionally please include version or commit id or PR# that caused this regression, if you have these details. -->
|
|
||||||
|
|
||||||
## Your Environment
|
|
||||||
<!--- Include as many relevant details about the environment you experienced the bug in -->
|
|
||||||
* Version used:
|
|
||||||
* Server setup and configuration:
|
|
||||||
* Operating System and version (`uname -a`):
|
|
|
@ -1 +0,0 @@
|
||||||
blank_issues_enabled: false
|
|
|
@ -1,20 +0,0 @@
|
||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Suggest an idea for this project
|
|
||||||
title: ''
|
|
||||||
labels: community, triage
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Is your feature request related to a problem? Please describe.
|
|
||||||
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
|
|
||||||
|
|
||||||
## Describe the solution you'd like
|
|
||||||
<!--- A clear and concise description of what you want to happen. -->
|
|
||||||
|
|
||||||
## Describe alternatives you've considered
|
|
||||||
<!--- A clear and concise description of any alternative solutions or features you've considered. -->
|
|
||||||
|
|
||||||
## Additional context
|
|
||||||
<!--- Add any other context or screenshots about the feature request here. -->
|
|
|
@ -1,70 +0,0 @@
|
||||||
<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<!-- Generator: Adobe Illustrator 25.0.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
|
||||||
<svg version="1.1" id="Слой_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
|
||||||
viewBox="0 0 184.2 51.8" style="enable-background:new 0 0 184.2 51.8;" xml:space="preserve">
|
|
||||||
<style type="text/css">
|
|
||||||
.st0{display:none;}
|
|
||||||
.st1{display:inline;}
|
|
||||||
.st2{fill:#01E397;}
|
|
||||||
.st3{display:inline;fill:#010032;}
|
|
||||||
.st4{display:inline;fill:#00E599;}
|
|
||||||
.st5{display:inline;fill:#00AF92;}
|
|
||||||
.st6{fill:#00C3E5;}
|
|
||||||
</style>
|
|
||||||
<g id="Layer_2">
|
|
||||||
<g id="Layer_1-2" class="st0">
|
|
||||||
<g class="st1">
|
|
||||||
<path class="st2" d="M146.6,18.3v7.2h10.9V29h-10.9v10.7h-4V14.8h18v3.5H146.6z"/>
|
|
||||||
<path class="st2" d="M180,15.7c1.7,0.9,3,2.2,4,3.8l-3,2.7c-0.6-1.3-1.5-2.4-2.6-3.3c-1.3-0.7-2.8-1-4.3-1
|
|
||||||
c-1.4-0.1-2.8,0.3-4,1.1c-0.9,0.5-1.5,1.5-1.4,2.6c0,1,0.5,1.9,1.4,2.4c1.5,0.8,3.2,1.3,4.9,1.5c1.9,0.3,3.7,0.8,5.4,1.6
|
|
||||||
c1.2,0.5,2.2,1.3,2.9,2.3c0.6,1,1,2.2,0.9,3.4c0,1.4-0.5,2.7-1.3,3.8c-0.9,1.2-2.1,2.1-3.5,2.6c-1.7,0.6-3.4,0.9-5.2,0.8
|
|
||||||
c-5,0-8.6-1.6-10.7-5l2.9-2.8c0.7,1.4,1.8,2.5,3.1,3.3c1.5,0.7,3.1,1.1,4.7,1c1.5,0.1,2.9-0.2,4.2-0.9c0.9-0.5,1.5-1.5,1.5-2.6
|
|
||||||
c0-0.9-0.5-1.8-1.3-2.2c-1.5-0.7-3.1-1.2-4.8-1.5c-1.9-0.3-3.7-0.8-5.5-1.5c-1.2-0.5-2.2-1.4-3-2.4c-0.6-1-1-2.2-0.9-3.4
|
|
||||||
c0-1.4,0.4-2.7,1.2-3.8c0.8-1.2,2-2.2,3.3-2.8c1.6-0.7,3.4-1.1,5.2-1C176.1,14.3,178.2,14.8,180,15.7z"/>
|
|
||||||
</g>
|
|
||||||
<path class="st3" d="M73.3,16.3c1.9,1.9,2.9,4.5,2.7,7.1v15.9h-4V24.8c0-2.6-0.5-4.5-1.6-5.7c-1.2-1.2-2.8-1.8-4.5-1.7
|
|
||||||
c-1.3,0-2.5,0.3-3.7,0.8c-1.2,0.7-2.2,1.7-2.9,2.9c-0.8,1.5-1.1,3.2-1.1,4.9v13.3h-4V15.1l3.6,1.5v1.7c0.8-1.5,2.1-2.6,3.6-3.3
|
|
||||||
c1.5-0.8,3.2-1.2,4.9-1.1C68.9,13.8,71.3,14.7,73.3,16.3z"/>
|
|
||||||
<path class="st3" d="M104.4,28.3H85.6c0.1,2.2,1,4.3,2.5,5.9c1.5,1.4,3.5,2.2,5.6,2.1c1.6,0.1,3.2-0.2,4.6-0.9
|
|
||||||
c1.1-0.6,2-1.6,2.5-2.8l3.3,1.8c-0.9,1.7-2.3,3.1-4,4c-2,1-4.2,1.5-6.4,1.4c-3.7,0-6.7-1.1-8.8-3.4s-3.2-5.5-3.2-9.6s1-7.2,3-9.5
|
|
||||||
s5-3.4,8.7-3.4c2.1-0.1,4.2,0.5,6.1,1.5c1.6,1,3,2.5,3.8,4.2c0.9,1.8,1.3,3.9,1.3,5.9C104.6,26.4,104.6,27.4,104.4,28.3z
|
|
||||||
M88.1,19.3c-1.4,1.5-2.2,3.4-2.4,5.5h15.1c-0.2-2-1-3.9-2.3-5.5c-1.4-1.3-3.2-2-5.1-1.9C91.5,17.3,89.6,18,88.1,19.3z"/>
|
|
||||||
<path class="st3" d="M131,17.3c2.2,2.3,3.2,5.5,3.2,9.5s-1,7.3-3.2,9.6s-5.1,3.4-8.8,3.4s-6.7-1.1-8.9-3.4s-3.2-5.5-3.2-9.6
|
|
||||||
s1.1-7.2,3.2-9.5s5.1-3.4,8.9-3.4S128.9,15,131,17.3z M116.2,19.9c-1.5,2-2.2,4.4-2.1,6.9c-0.2,2.5,0.6,5,2.1,7
|
|
||||||
c1.5,1.7,3.7,2.7,6,2.6c2.3,0.1,4.4-0.9,5.9-2.6c1.5-2,2.3-4.5,2.1-7c0.1-2.5-0.6-4.9-2.1-6.9c-1.5-1.7-3.6-2.7-5.9-2.6
|
|
||||||
C119.9,17.2,117.7,18.2,116.2,19.9z"/>
|
|
||||||
<polygon class="st4" points="0,9.1 0,43.7 22.5,51.8 22.5,16.9 46.8,7.9 24.8,0 "/>
|
|
||||||
<polygon class="st5" points="24.3,17.9 24.3,36.8 46.8,44.9 46.8,9.6 "/>
|
|
||||||
</g>
|
|
||||||
<g>
|
|
||||||
<g>
|
|
||||||
<path class="st6" d="M41.6,17.5H28.2v6.9h10.4v3.3H28.2v10.2h-3.9V14.2h17.2V17.5z"/>
|
|
||||||
<path class="st6" d="M45.8,37.9v-18h3.3l0.4,3.2c0.5-1.2,1.2-2.1,2.1-2.7c0.9-0.6,2.1-0.9,3.5-0.9c0.4,0,0.7,0,1.1,0.1
|
|
||||||
c0.4,0.1,0.7,0.2,0.9,0.3l-0.5,3.4c-0.3-0.1-0.6-0.2-0.9-0.2C55.4,23,54.9,23,54.4,23c-0.7,0-1.5,0.2-2.2,0.6
|
|
||||||
c-0.7,0.4-1.3,1-1.8,1.8s-0.7,1.8-0.7,3v9.5H45.8z"/>
|
|
||||||
<path class="st6" d="M68.6,19.6c1.8,0,3.3,0.4,4.6,1.1c1.3,0.7,2.4,1.8,3.1,3.2s1.1,3.1,1.1,5c0,1.9-0.4,3.6-1.1,5
|
|
||||||
c-0.8,1.4-1.8,2.5-3.1,3.2c-1.3,0.7-2.9,1.1-4.6,1.1s-3.3-0.4-4.6-1.1c-1.3-0.7-2.4-1.8-3.2-3.2c-0.8-1.4-1.2-3.1-1.2-5
|
|
||||||
c0-1.9,0.4-3.6,1.2-5s1.8-2.5,3.2-3.2C65.3,19.9,66.8,19.6,68.6,19.6z M68.6,22.6c-1.1,0-2,0.2-2.8,0.7c-0.8,0.5-1.3,1.2-1.7,2.1
|
|
||||||
s-0.6,2.1-0.6,3.5c0,1.3,0.2,2.5,0.6,3.4s1,1.7,1.7,2.2s1.7,0.7,2.8,0.7c1.1,0,2-0.2,2.7-0.7c0.7-0.5,1.3-1.2,1.7-2.2
|
|
||||||
s0.6-2.1,0.6-3.4c0-1.4-0.2-2.5-0.6-3.5s-1-1.6-1.7-2.1C70.6,22.8,69.6,22.6,68.6,22.6z"/>
|
|
||||||
<path class="st6" d="M89.2,38.3c-1.8,0-3.4-0.3-4.9-1c-1.5-0.7-2.7-1.7-3.5-3l2.7-2.3c0.5,1,1.3,1.8,2.3,2.4
|
|
||||||
c1,0.6,2.2,0.9,3.6,0.9c1.1,0,2-0.2,2.6-0.6c0.6-0.4,1-0.9,1-1.6c0-0.5-0.2-0.9-0.5-1.2s-0.9-0.6-1.7-0.8l-3.8-0.8
|
|
||||||
c-1.9-0.4-3.3-1-4.1-1.9c-0.8-0.9-1.2-1.9-1.2-3.3c0-1,0.3-1.9,0.9-2.7c0.6-0.8,1.4-1.5,2.5-2s2.5-0.8,4-0.8c1.8,0,3.3,0.3,4.6,1
|
|
||||||
c1.3,0.6,2.2,1.5,2.9,2.7l-2.7,2.2c-0.5-1-1.1-1.7-2-2.1c-0.9-0.5-1.8-0.7-2.8-0.7c-0.8,0-1.4,0.1-2,0.3c-0.6,0.2-1,0.5-1.3,0.8
|
|
||||||
c-0.3,0.3-0.4,0.7-0.4,1.2c0,0.5,0.2,0.9,0.5,1.3s1,0.6,1.9,0.8l4.1,0.9c1.7,0.3,2.9,0.9,3.7,1.7c0.7,0.8,1.1,1.8,1.1,2.9
|
|
||||||
c0,1.2-0.3,2.2-0.9,3c-0.6,0.9-1.5,1.6-2.6,2C92.1,38.1,90.7,38.3,89.2,38.3z"/>
|
|
||||||
<path class="st6" d="M112.8,19.9v3H99.3v-3H112.8z M106.6,14.6v17.9c0,0.9,0.2,1.5,0.7,1.9c0.5,0.4,1.1,0.6,1.9,0.6
|
|
||||||
c0.6,0,1.2-0.1,1.7-0.3c0.5-0.2,0.9-0.5,1.3-0.8l0.9,2.8c-0.6,0.5-1.2,0.9-2,1.1c-0.8,0.3-1.7,0.4-2.7,0.4c-1,0-2-0.2-2.8-0.5
|
|
||||||
s-1.5-0.9-2-1.6c-0.5-0.8-0.7-1.7-0.8-3V15.7L106.6,14.6z"/>
|
|
||||||
<path d="M137.9,17.5h-13.3v6.9h10.4v3.3h-10.4v10.2h-3.9V14.2h17.2V17.5z"/>
|
|
||||||
<path d="M150.9,13.8c2.1,0,4,0.4,5.5,1.2c1.6,0.8,2.9,2,4,3.5l-2.6,2.5c-0.9-1.4-1.9-2.4-3.1-3c-1.1-0.6-2.5-0.9-4-0.9
|
|
||||||
c-1.2,0-2.1,0.2-2.8,0.5c-0.7,0.3-1.3,0.7-1.6,1.2c-0.3,0.5-0.5,1.1-0.5,1.7c0,0.7,0.3,1.4,0.8,1.9c0.5,0.6,1.5,1,2.9,1.3
|
|
||||||
l4.8,1.1c2.3,0.5,3.9,1.3,4.9,2.3c1,1,1.4,2.3,1.4,3.9c0,1.5-0.4,2.7-1.2,3.8c-0.8,1.1-1.9,1.9-3.3,2.5s-3.1,0.9-5,0.9
|
|
||||||
c-1.7,0-3.2-0.2-4.5-0.6c-1.3-0.4-2.5-1-3.5-1.8c-1-0.7-1.8-1.6-2.5-2.6l2.7-2.7c0.5,0.8,1.1,1.6,1.9,2.2
|
|
||||||
c0.8,0.7,1.7,1.2,2.7,1.5c1,0.4,2.2,0.5,3.4,0.5c1.1,0,2.1-0.1,2.9-0.4c0.8-0.3,1.4-0.7,1.8-1.2c0.4-0.5,0.6-1.1,0.6-1.9
|
|
||||||
c0-0.7-0.2-1.3-0.7-1.8c-0.5-0.5-1.3-0.9-2.6-1.2l-5.2-1.2c-1.4-0.3-2.6-0.8-3.6-1.3c-0.9-0.6-1.6-1.3-2.1-2.1s-0.7-1.8-0.7-2.8
|
|
||||||
c0-1.3,0.4-2.6,1.1-3.7c0.7-1.1,1.8-2,3.2-2.6C147.3,14.1,148.9,13.8,150.9,13.8z"/>
|
|
||||||
</g>
|
|
||||||
</g>
|
|
||||||
</g>
|
|
||||||
</svg>
|
|
Before Width: | Height: | Size: 5.5 KiB |
|
@ -1,27 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
builds:
|
|
||||||
name: Builds
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
go_versions: [ '1.22', '1.23' ]
|
|
||||||
fail-fast: false
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '${{ matrix.go_versions }}'
|
|
||||||
|
|
||||||
- name: Build binary
|
|
||||||
run: make
|
|
||||||
|
|
||||||
- name: Check dirty suffix
|
|
||||||
run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
|
|
|
@ -1,20 +0,0 @@
|
||||||
on: [pull_request]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dco:
|
|
||||||
name: DCO
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
|
|
||||||
- name: Run commit format checker
|
|
||||||
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
|
||||||
with:
|
|
||||||
from: 'origin/${{ github.event.pull_request.base.ref }}'
|
|
|
@ -1,27 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
workflow_dispatch:
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
image:
|
|
||||||
name: OCI image
|
|
||||||
runs-on: docker
|
|
||||||
container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
|
|
||||||
steps:
|
|
||||||
- name: Clone git repo
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Build OCI image
|
|
||||||
run: make image
|
|
||||||
|
|
||||||
- name: Push image to OCI registry
|
|
||||||
run: |
|
|
||||||
echo "$REGISTRY_PASSWORD" \
|
|
||||||
| docker login --username truecloudlab --password-stdin git.frostfs.info
|
|
||||||
make image-push
|
|
||||||
if: >-
|
|
||||||
startsWith(github.ref, 'refs/tags/v') &&
|
|
||||||
(github.event_name == 'workflow_dispatch' || github.event_name == 'push')
|
|
||||||
env:
|
|
||||||
REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}
|
|
|
@ -1,61 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
lint:
|
|
||||||
name: Lint
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
cache: true
|
|
||||||
|
|
||||||
- name: Install linters
|
|
||||||
run: make lint-install
|
|
||||||
|
|
||||||
- name: Run linters
|
|
||||||
run: make lint
|
|
||||||
|
|
||||||
tests:
|
|
||||||
name: Tests
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
go_versions: [ '1.22', '1.23' ]
|
|
||||||
fail-fast: false
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '${{ matrix.go_versions }}'
|
|
||||||
|
|
||||||
- name: Update Go modules
|
|
||||||
run: make dep
|
|
||||||
|
|
||||||
- name: Run tests
|
|
||||||
run: make test
|
|
||||||
|
|
||||||
integration:
|
|
||||||
name: Integration tests
|
|
||||||
runs-on: oci-runner
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.23'
|
|
||||||
|
|
||||||
- name: Run integration tests
|
|
||||||
run: |-
|
|
||||||
podman-service.sh
|
|
||||||
make integration-test
|
|
|
@ -1,25 +0,0 @@
|
||||||
on:
|
|
||||||
pull_request:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
vulncheck:
|
|
||||||
name: Vulncheck
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.22.12'
|
|
||||||
|
|
||||||
- name: Install govulncheck
|
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
|
||||||
|
|
||||||
- name: Run govulncheck
|
|
||||||
run: govulncheck ./...
|
|
21
.gitignore
vendored
21
.gitignore
vendored
|
@ -1,21 +0,0 @@
|
||||||
.idea
|
|
||||||
bin
|
|
||||||
temp
|
|
||||||
/plugins/
|
|
||||||
/vendor/
|
|
||||||
|
|
||||||
.test.env
|
|
||||||
*~
|
|
||||||
*.log
|
|
||||||
test.sh
|
|
||||||
testfile
|
|
||||||
.blast.yml
|
|
||||||
.frostfs-cli.yml
|
|
||||||
|
|
||||||
.cache
|
|
||||||
|
|
||||||
coverage.txt
|
|
||||||
coverage.html
|
|
||||||
|
|
||||||
# debhelpers
|
|
||||||
**/.debhelper
|
|
11
.gitlint
11
.gitlint
|
@ -1,11 +0,0 @@
|
||||||
[general]
|
|
||||||
fail-without-commits=True
|
|
||||||
regex-style-search=True
|
|
||||||
contrib=CC1
|
|
||||||
|
|
||||||
[title-match-regex]
|
|
||||||
regex=^\[\#[0-9Xx]+\]\s
|
|
||||||
|
|
||||||
[ignore-by-title]
|
|
||||||
regex=^Release(.*)
|
|
||||||
ignore=title-match-regex
|
|
|
@ -1,68 +0,0 @@
|
||||||
# This file contains all available configuration options
|
|
||||||
# with their default values.
|
|
||||||
|
|
||||||
# options for analysis running
|
|
||||||
run:
|
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
|
||||||
timeout: 15m
|
|
||||||
|
|
||||||
# include test files or not, default is true
|
|
||||||
tests: true
|
|
||||||
|
|
||||||
# output configuration options
|
|
||||||
output:
|
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
|
||||||
formats:
|
|
||||||
- format: tab
|
|
||||||
|
|
||||||
# all available settings of specific linters
|
|
||||||
linters-settings:
|
|
||||||
exhaustive:
|
|
||||||
# indicates that switch statements are to be considered exhaustive if a
|
|
||||||
# 'default' case is present, even if all enum members aren't listed in the
|
|
||||||
# switch
|
|
||||||
default-signifies-exhaustive: true
|
|
||||||
govet:
|
|
||||||
# report about shadowed variables
|
|
||||||
check-shadowing: false
|
|
||||||
custom:
|
|
||||||
truecloudlab-linters:
|
|
||||||
path: bin/external_linters.so
|
|
||||||
original-url: git.frostfs.info/TrueCloudLab/linters.git
|
|
||||||
settings:
|
|
||||||
noliteral:
|
|
||||||
enable: true
|
|
||||||
target-methods: ["Fatal"]
|
|
||||||
disable-packages: ["req", "r"]
|
|
||||||
constants-package: "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
|
|
||||||
linters:
|
|
||||||
enable:
|
|
||||||
# mandatory linters
|
|
||||||
- govet
|
|
||||||
- revive
|
|
||||||
|
|
||||||
# some default golangci-lint linters
|
|
||||||
- errcheck
|
|
||||||
- gosimple
|
|
||||||
- ineffassign
|
|
||||||
- staticcheck
|
|
||||||
- typecheck
|
|
||||||
- unused
|
|
||||||
|
|
||||||
# extra linters
|
|
||||||
- exhaustive
|
|
||||||
- godot
|
|
||||||
- gofmt
|
|
||||||
- whitespace
|
|
||||||
- goimports
|
|
||||||
- truecloudlab-linters
|
|
||||||
disable-all: true
|
|
||||||
fast: false
|
|
||||||
|
|
||||||
issues:
|
|
||||||
include:
|
|
||||||
- EXC0002 # should have a comment
|
|
||||||
- EXC0003 # test/Test ... consider calling this
|
|
||||||
- EXC0004 # govet
|
|
||||||
- EXC0005 # C-style breaks
|
|
|
@ -1,52 +0,0 @@
|
||||||
ci:
|
|
||||||
autofix_prs: false
|
|
||||||
|
|
||||||
repos:
|
|
||||||
- repo: https://github.com/jorisroovers/gitlint
|
|
||||||
rev: v0.19.1
|
|
||||||
hooks:
|
|
||||||
- id: gitlint
|
|
||||||
stages: [commit-msg]
|
|
||||||
- id: gitlint-ci
|
|
||||||
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v4.4.0
|
|
||||||
hooks:
|
|
||||||
- id: check-added-large-files
|
|
||||||
- id: check-case-conflict
|
|
||||||
- id: check-executables-have-shebangs
|
|
||||||
- id: check-shebang-scripts-are-executable
|
|
||||||
- id: check-merge-conflict
|
|
||||||
- id: check-json
|
|
||||||
- id: check-xml
|
|
||||||
- id: check-yaml
|
|
||||||
- id: trailing-whitespace
|
|
||||||
args: [--markdown-linebreak-ext=md]
|
|
||||||
- id: end-of-file-fixer
|
|
||||||
exclude: ".key$"
|
|
||||||
|
|
||||||
- repo: https://github.com/shellcheck-py/shellcheck-py
|
|
||||||
rev: v0.9.0.2
|
|
||||||
hooks:
|
|
||||||
- id: shellcheck
|
|
||||||
|
|
||||||
- repo: local
|
|
||||||
hooks:
|
|
||||||
- id: make-lint-install
|
|
||||||
name: install linters
|
|
||||||
entry: make lint-install
|
|
||||||
language: system
|
|
||||||
pass_filenames: false
|
|
||||||
|
|
||||||
- id: make-lint
|
|
||||||
name: run linters
|
|
||||||
entry: make lint
|
|
||||||
language: system
|
|
||||||
pass_filenames: false
|
|
||||||
|
|
||||||
- id: go-unit-tests
|
|
||||||
name: go unit tests
|
|
||||||
entry: make test
|
|
||||||
pass_filenames: false
|
|
||||||
types: [go]
|
|
||||||
language: system
|
|
209
CHANGELOG.md
209
CHANGELOG.md
|
@ -1,209 +0,0 @@
|
||||||
# Changelog
|
|
||||||
|
|
||||||
This document outlines major changes between releases.
|
|
||||||
|
|
||||||
## [Unreleased]
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add handling quota limit reached error (#187)
|
|
||||||
- Add slash clipping for FileName attribute (#174)
|
|
||||||
|
|
||||||
## [0.32.3] - 2025-02-05
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add slash clipping for FileName attribute (#174)
|
|
||||||
|
|
||||||
## [0.32.2] - 2025-02-03
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Possible memory leak in gRPC client (#202)
|
|
||||||
|
|
||||||
## [0.32.1] - 2025-01-27
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- SIGHUP panic (#198)
|
|
||||||
|
|
||||||
## [0.32.0] - Khumbu - 2024-12-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Getting S3 object with FrostFS Object ID-like key (#166)
|
|
||||||
- Ignore delete marked objects in versioned bucket in index page (#181)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Metric of dropped logs by log sampler (#150)
|
|
||||||
- Fallback FileName attribute search during FilePath attribute search (#174)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Updated tree service pool without api-go dependency (#178)
|
|
||||||
|
|
||||||
## [0.31.0] - Rongbuk - 2024-11-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Docker warnings during image build (#126)
|
|
||||||
- `trace_id` parameter in logs (#148)
|
|
||||||
- SIGHUP support for `tracing.enabled` config parameter (#157)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Vulnerability report document (#123)
|
|
||||||
- Root CA configuration for tracing (#139)
|
|
||||||
- Log sampling policy configuration (#147)
|
|
||||||
- Index page support for buckets and containers (#137, #151)
|
|
||||||
- CORS support (#158)
|
|
||||||
- Source IP binding configuration for FrostFS requests (#160)
|
|
||||||
- Tracing attributes (#164)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Updated Go version to 1.22 (#132)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Duplicated NNS Resolver code (#129)
|
|
||||||
|
|
||||||
## [0.30.3] - 2024-10-18
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Get response on S3 multipart object (#142)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Support percent-encoding for GET queries (#134)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Split `FrostFS` interface into separate read methods (#127)
|
|
||||||
|
|
||||||
## [0.30.2] - 2024-09-03
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Fuzzing tests (#135)
|
|
||||||
|
|
||||||
## [0.30.1] - 2024-08-20
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Error counting in pool component before connection switch (#131)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Log of endpoint address during tree pool errors (#131)
|
|
||||||
|
|
||||||
## [0.30.0] - Kangshung - 2024-07-22
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Handle query unescape and invalid bearer token errors (#107)
|
|
||||||
- Fix HTTP/2 requests (#110)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Add new `reconnect_interval` config param (#100)
|
|
||||||
- Erasure coding support in placement policy (#114)
|
|
||||||
- HTTP Header canonicalizer for well-known headers (#121)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Improve test coverage (#112, #117)
|
|
||||||
- Bumped vulnerable dependencies (#115)
|
|
||||||
- Replace extended ACL examples with policies in README (#118)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
|
|
||||||
## [0.29.0] - Zemu - 2024-05-27
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Fix possibility of panic during SIGHUP (#99)
|
|
||||||
- Handle query unescape and invalid bearer token errors (#108)
|
|
||||||
- Fix log-level change on SIGHUP (#105)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Support client side object cut (#70)
|
|
||||||
- Add `frostfs.client_cut` config param
|
|
||||||
- Add `frostfs.buffer_max_size_for_put` config param
|
|
||||||
- Add bucket/container caching
|
|
||||||
- Disable homomorphic hash for PUT if it's disabled in container itself
|
|
||||||
- Add new `logger.destination` config param with journald support (#89, #104)
|
|
||||||
- Add support namespaces (#91)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Replace atomics with mutex for reloadable params (#74)
|
|
||||||
|
|
||||||
## [0.28.1] - 2024-01-24
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Tree pool traversal limit (#92)
|
|
||||||
|
|
||||||
### Update from 0.28.0
|
|
||||||
See new `frostfs.tree_pool_max_attempts` config parameter.
|
|
||||||
|
|
||||||
## [0.28.0] - Academy of Sciences - 2023-12-07
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- `grpc` schemas in tree configuration (#62)
|
|
||||||
- `GetSubTree` failures (#67)
|
|
||||||
- Debian packaging (#69, #90)
|
|
||||||
- Get latest version of tree node (#85)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Support dump metrics descriptions (#29)
|
|
||||||
- Support impersonate bearer token (#40, #45)
|
|
||||||
- Tracing support (#20, #44, #60)
|
|
||||||
- Object name resolving with tree service (#30)
|
|
||||||
- Metrics for current endpoint status (#77)
|
|
||||||
- Soft memory limit with `runtime.soft_memory_limit` (#72)
|
|
||||||
- Add selection of the node of the latest version of the object (#85)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Update prometheus to v1.15.0 (#35)
|
|
||||||
- Update go version to 1.19 (#50)
|
|
||||||
- Finish rebranding (#2)
|
|
||||||
- Use gate key to form object owner (#66)
|
|
||||||
- Move log messages to constants (#36)
|
|
||||||
- Uploader and downloader refactor (#73)
|
|
||||||
|
|
||||||
### Removed
|
|
||||||
- Drop `tree.service` param (now endpoints from `peers` section are used) (#59)
|
|
||||||
|
|
||||||
## [0.27.0] - Karpinsky - 2023-07-12
|
|
||||||
|
|
||||||
This is a first FrostFS HTTP Gateway release named after
|
|
||||||
[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier).
|
|
||||||
|
|
||||||
### Fixed
|
|
||||||
- Require only one healthy storage server to start (#7)
|
|
||||||
- Enable gate metrics (#38)
|
|
||||||
- `Too many pings` error (#61)
|
|
||||||
|
|
||||||
### Added
|
|
||||||
- Multiple configs support (#12)
|
|
||||||
|
|
||||||
### Changed
|
|
||||||
- Repository rebranding (#1)
|
|
||||||
- Update neo-go to v0.101.0 (#8)
|
|
||||||
- Update viper to v1.15.0 (#8)
|
|
||||||
- Update go version to 1.18 (#9)
|
|
||||||
- Errors have become more detailed (#18)
|
|
||||||
- Update system attribute names (#22)
|
|
||||||
- Separate integration tests with build tags (#24)
|
|
||||||
- Changed values for `frostfs_http_gw_state_health` metric (#32)
|
|
||||||
|
|
||||||
### Updating from neofs-http-gw v0.26.0
|
|
||||||
|
|
||||||
To set system attributes use updated headers
|
|
||||||
(you can use old ones for now, but their support will be dropped in the future releases):
|
|
||||||
|
|
||||||
* `X-Attribute-Neofs-*` -> `X-Attribute-System-*`
|
|
||||||
* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*`
|
|
||||||
* `X-Attribute-neofs-*` -> `X-Attribute-system-*`
|
|
||||||
|
|
||||||
|
|
||||||
## Older versions
|
|
||||||
|
|
||||||
This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0.
|
|
||||||
To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.
|
|
||||||
|
|
||||||
[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0
|
|
||||||
[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0
|
|
||||||
[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
|
|
||||||
[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0
|
|
||||||
[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0
|
|
||||||
[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...v0.30.1
|
|
||||||
[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2
|
|
||||||
[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3
|
|
||||||
[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0
|
|
||||||
[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0
|
|
||||||
[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1
|
|
||||||
[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2
|
|
||||||
[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...v0.32.3
|
|
||||||
[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.3...master
|
|
|
@ -1,3 +0,0 @@
|
||||||
.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
|
|
||||||
.forgejo/.* @potyarkin
|
|
||||||
Makefile @potyarkin
|
|
156
CONTRIBUTING.md
156
CONTRIBUTING.md
|
@ -1,156 +0,0 @@
|
||||||
# Contribution guide
|
|
||||||
|
|
||||||
First, thank you for contributing! We love and encourage pull requests from
|
|
||||||
everyone. Please follow the guidelines:
|
|
||||||
|
|
||||||
- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and
|
|
||||||
[pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing
|
|
||||||
discussions.
|
|
||||||
|
|
||||||
- Open an issue first, to discuss a new feature or enhancement.
|
|
||||||
|
|
||||||
- Write tests and make sure the test suite passes locally and on CI.
|
|
||||||
|
|
||||||
- Open a pull request and reference the relevant issue(s).
|
|
||||||
|
|
||||||
- Make sure your commits are logically separated and have good comments
|
|
||||||
explaining the details of your change.
|
|
||||||
|
|
||||||
- After receiving a feedback, amend your commits or add new ones as
|
|
||||||
appropriate.
|
|
||||||
|
|
||||||
- **Have fun!**
|
|
||||||
|
|
||||||
## Development Workflow
|
|
||||||
|
|
||||||
Start by forking the `frostfs-http-gw` repository, make changes in a branch and then
|
|
||||||
send a pull request. We encourage pull requests to discuss code changes. Here
|
|
||||||
are the steps in details:
|
|
||||||
|
|
||||||
### Set up your git repository
|
|
||||||
Fork [FrostFS HTTP Gateway
|
|
||||||
upstream](https://git.frostfs.info/repo/fork/8) source repository
|
|
||||||
to your own personal repository. Copy the URL of your fork (you will need it for
|
|
||||||
the `git clone` command below).
|
|
||||||
|
|
||||||
```sh
|
|
||||||
$ git clone https://git.frostfs.info/<username>/frostfs-http-gw.git
|
|
||||||
```
|
|
||||||
|
|
||||||
### Set up git remote as ``upstream``
|
|
||||||
```sh
|
|
||||||
$ cd frostfs-http-gw
|
|
||||||
$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
|
|
||||||
$ git fetch upstream
|
|
||||||
$ git merge upstream/master
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create your feature branch
|
|
||||||
Before making code changes, make sure you create a separate branch for these
|
|
||||||
changes. Maybe you will find it convenient to name a branch in
|
|
||||||
`<type>/<Issue>-<changes_topic>` format.
|
|
||||||
|
|
||||||
```
|
|
||||||
$ git checkout -b feature/123-something_awesome
|
|
||||||
```
|
|
||||||
|
|
||||||
### Test your changes
|
|
||||||
After your code changes, make sure
|
|
||||||
|
|
||||||
- To add test cases for the new code.
|
|
||||||
- To run `make lint`
|
|
||||||
- To squash your commits into a single commit or a series of logically separated
|
|
||||||
commits run `git rebase -i`. It's okay to force update your pull request.
|
|
||||||
- To run `make test` and `make all` completes.
|
|
||||||
|
|
||||||
### Commit changes
|
|
||||||
After verification, commit your changes. This is a [great
|
|
||||||
post](https://chris.beams.io/posts/git-commit/) on how to write useful commit
|
|
||||||
messages. Try following this template:
|
|
||||||
|
|
||||||
```
|
|
||||||
[#Issue] <component> Summary
|
|
||||||
|
|
||||||
Description
|
|
||||||
|
|
||||||
<Macros>
|
|
||||||
|
|
||||||
<Sign-Off>
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
$ git commit -am '[#123] Add some feature'
|
|
||||||
```
|
|
||||||
|
|
||||||
### Push to the branch
|
|
||||||
Push your locally committed changes to the remote origin (your fork)
|
|
||||||
```
|
|
||||||
$ git push origin feature/123-something_awesome
|
|
||||||
```
|
|
||||||
|
|
||||||
### Create a Pull Request
|
|
||||||
Pull requests can be created via Forgejo. Refer to [this
|
|
||||||
document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for
|
|
||||||
detailed steps on how to create a pull request. After a Pull Request gets peer
|
|
||||||
reviewed and approved, it will be merged.
|
|
||||||
|
|
||||||
## DCO Sign off
|
|
||||||
|
|
||||||
All authors to the project retain copyright to their work. However, to ensure
|
|
||||||
that they are only submitting work that they have rights to, we require
|
|
||||||
everyone to acknowledge this by signing their work.
|
|
||||||
|
|
||||||
Any copyright notices in this repository should specify the authors as "the
|
|
||||||
contributors".
|
|
||||||
|
|
||||||
To sign your work, just add a line like this at the end of your commit message:
|
|
||||||
|
|
||||||
```
|
|
||||||
Signed-off-by: Samii Sakisaka <samii@frostfs.info>
|
|
||||||
```
|
|
||||||
|
|
||||||
This can be easily done with the `--signoff` option to `git commit`.
|
|
||||||
|
|
||||||
By doing this you state that you can certify the following (from [The Developer
|
|
||||||
Certificate of Origin](https://developercertificate.org/)):
|
|
||||||
|
|
||||||
```
|
|
||||||
Developer Certificate of Origin
|
|
||||||
Version 1.1
|
|
||||||
|
|
||||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
|
||||||
1 Letterman Drive
|
|
||||||
Suite D4700
|
|
||||||
San Francisco, CA, 94129
|
|
||||||
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies of this
|
|
||||||
license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
|
|
||||||
Developer's Certificate of Origin 1.1
|
|
||||||
|
|
||||||
By making a contribution to this project, I certify that:
|
|
||||||
|
|
||||||
(a) The contribution was created in whole or in part by me and I
|
|
||||||
have the right to submit it under the open source license
|
|
||||||
indicated in the file; or
|
|
||||||
|
|
||||||
(b) The contribution is based upon previous work that, to the best
|
|
||||||
of my knowledge, is covered under an appropriate open source
|
|
||||||
license and I have the right under that license to submit that
|
|
||||||
work with modifications, whether created in whole or in part
|
|
||||||
by me, under the same open source license (unless I am
|
|
||||||
permitted to submit under a different license), as indicated
|
|
||||||
in the file; or
|
|
||||||
|
|
||||||
(c) The contribution was provided directly to me by some other
|
|
||||||
person who certified (a), (b) or (c) and I have not modified
|
|
||||||
it.
|
|
||||||
|
|
||||||
(d) I understand and agree that this project and the contribution
|
|
||||||
are public and that a record of the contribution (including all
|
|
||||||
personal information I submit with it, including my sign-off) is
|
|
||||||
maintained indefinitely and may be redistributed consistent with
|
|
||||||
this project or the open source license(s) involved.
|
|
||||||
```
|
|
20
CREDITS.md
20
CREDITS.md
|
@ -1,20 +0,0 @@
|
||||||
# Credits
|
|
||||||
|
|
||||||
In alphabetical order:
|
|
||||||
|
|
||||||
- Alexey Vanin
|
|
||||||
- Angira Kekteeva
|
|
||||||
- Denis Kirillov
|
|
||||||
- Evgeniy Kulikov
|
|
||||||
- Pavel Korotkov
|
|
||||||
- Roman Khimov
|
|
||||||
|
|
||||||
# Contributors
|
|
||||||
|
|
||||||
In chronological order:
|
|
||||||
|
|
||||||
- Anatoly Bogatyrev
|
|
||||||
- Stanislav Bogatyrev
|
|
||||||
- Anastasia Prasolova
|
|
||||||
- Leonard Liubich
|
|
||||||
- Elizaveta Chichindaeva
|
|
674
LICENSE
674
LICENSE
|
@ -1,674 +0,0 @@
|
||||||
GNU GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
the GNU General Public License is intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users. We, the Free Software Foundation, use the
|
|
||||||
GNU General Public License for most of our software; it applies also to
|
|
||||||
any other work released this way by its authors. You can apply it to
|
|
||||||
your programs, too.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
To protect your rights, we need to prevent others from denying you
|
|
||||||
these rights or asking you to surrender the rights. Therefore, you have
|
|
||||||
certain responsibilities if you distribute copies of the software, or if
|
|
||||||
you modify it: responsibilities to respect the freedom of others.
|
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
|
||||||
gratis or for a fee, you must pass on to the recipients the same
|
|
||||||
freedoms that you received. You must make sure that they, too, receive
|
|
||||||
or can get the source code. And you must show them these terms so they
|
|
||||||
know their rights.
|
|
||||||
|
|
||||||
Developers that use the GNU GPL protect your rights with two steps:
|
|
||||||
(1) assert copyright on the software, and (2) offer you this License
|
|
||||||
giving you legal permission to copy, distribute and/or modify it.
|
|
||||||
|
|
||||||
For the developers' and authors' protection, the GPL clearly explains
|
|
||||||
that there is no warranty for this free software. For both users' and
|
|
||||||
authors' sake, the GPL requires that modified versions be marked as
|
|
||||||
changed, so that their problems will not be attributed erroneously to
|
|
||||||
authors of previous versions.
|
|
||||||
|
|
||||||
Some devices are designed to deny users access to install or run
|
|
||||||
modified versions of the software inside them, although the manufacturer
|
|
||||||
can do so. This is fundamentally incompatible with the aim of
|
|
||||||
protecting users' freedom to change the software. The systematic
|
|
||||||
pattern of such abuse occurs in the area of products for individuals to
|
|
||||||
use, which is precisely where it is most unacceptable. Therefore, we
|
|
||||||
have designed this version of the GPL to prohibit the practice for those
|
|
||||||
products. If such problems arise substantially in other domains, we
|
|
||||||
stand ready to extend this provision to those domains in future versions
|
|
||||||
of the GPL, as needed to protect the freedom of users.
|
|
||||||
|
|
||||||
Finally, every program is threatened constantly by software patents.
|
|
||||||
States should not allow patents to restrict development and use of
|
|
||||||
software on general-purpose computers, but in those that do, we wish to
|
|
||||||
avoid the special danger that patents applied to a free program could
|
|
||||||
make it effectively proprietary. To prevent this, the GPL assures that
|
|
||||||
patents cannot be used to render the program non-free.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
|
206
Makefile
206
Makefile
|
@ -1,206 +0,0 @@
|
||||||
#!/usr/bin/make -f
|
|
||||||
|
|
||||||
REPO ?= $(shell go list -m)
|
|
||||||
VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
|
|
||||||
GO_VERSION ?= 1.22
|
|
||||||
LINT_VERSION ?= 1.60.3
|
|
||||||
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
|
|
||||||
BUILD ?= $(shell date -u --iso=seconds)
|
|
||||||
|
|
||||||
HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
|
|
||||||
HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
|
|
||||||
|
|
||||||
METRICS_DUMP_OUT ?= ./metrics-dump.json
|
|
||||||
|
|
||||||
OUTPUT_LINT_DIR ?= $(shell pwd)/bin
|
|
||||||
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
|
|
||||||
TMP_DIR := .cache
|
|
||||||
|
|
||||||
# List of binaries to build. For now just one.
|
|
||||||
BINDIR = bin
|
|
||||||
CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
|
|
||||||
BINS = $(addprefix $(BINDIR)/, $(CMDS))
|
|
||||||
|
|
||||||
.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean
|
|
||||||
|
|
||||||
# .deb package versioning
|
|
||||||
OS_RELEASE = $(shell lsb_release -cs)
|
|
||||||
PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
|
|
||||||
sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \
|
|
||||||
sed "s/-/~/")-${OS_RELEASE}
|
|
||||||
.PHONY: debpackage debclean
|
|
||||||
|
|
||||||
FUZZ_NGFUZZ_DIR ?= ""
|
|
||||||
FUZZ_TIMEOUT ?= 30
|
|
||||||
FUZZ_FUNCTIONS ?= "all"
|
|
||||||
FUZZ_AUX ?= ""
|
|
||||||
|
|
||||||
# Make all binaries
|
|
||||||
all: $(BINS)
|
|
||||||
$(BINS): $(DIRS) dep
|
|
||||||
@echo "⇒ Build $@"
|
|
||||||
CGO_ENABLED=0 \
|
|
||||||
go build -v -trimpath \
|
|
||||||
-ldflags "-X main.Version=$(VERSION)" \
|
|
||||||
-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
|
|
||||||
|
|
||||||
$(DIRS):
|
|
||||||
@echo "⇒ Ensure dir: $@"
|
|
||||||
@mkdir -p $@
|
|
||||||
|
|
||||||
# Pull go dependencies
|
|
||||||
dep:
|
|
||||||
@printf "⇒ Download requirements: "
|
|
||||||
@CGO_ENABLED=0 \
|
|
||||||
go mod download && echo OK
|
|
||||||
@printf "⇒ Tidy requirements: "
|
|
||||||
@CGO_ENABLED=0 \
|
|
||||||
go mod tidy -v && echo OK
|
|
||||||
|
|
||||||
# Run `make %` in Golang container, for more information run `make help.docker/%`
|
|
||||||
docker/%:
|
|
||||||
$(if $(filter $*,all $(BINS)), \
|
|
||||||
@echo "=> Running 'make $*' in clean Docker environment" && \
|
|
||||||
docker run --rm -t \
|
|
||||||
-v `pwd`:/src \
|
|
||||||
-w /src \
|
|
||||||
-u `stat -c "%u:%g" .` \
|
|
||||||
--env HOME=/src \
|
|
||||||
golang:$(GO_VERSION) make $*,\
|
|
||||||
@echo "supported docker targets: all $(BINS) lint")
|
|
||||||
|
|
||||||
# Run tests
|
|
||||||
test:
|
|
||||||
@go test ./... -cover
|
|
||||||
|
|
||||||
# Run integration tests
|
|
||||||
.PHONY: integration-test
|
|
||||||
integration-test:
|
|
||||||
@go test ./... -cover --tags=integration
|
|
||||||
|
|
||||||
# Run tests with race detection and produce coverage output
|
|
||||||
cover:
|
|
||||||
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
|
|
||||||
@go tool cover -html=coverage.txt -o coverage.html
|
|
||||||
|
|
||||||
# Run fuzzing
|
|
||||||
CLANG := $(shell which clang-17 2>/dev/null)
|
|
||||||
.PHONY: check-clang all
|
|
||||||
check-clang:
|
|
||||||
ifeq ($(CLANG),)
|
|
||||||
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
|
|
||||||
@exit 1
|
|
||||||
endif
|
|
||||||
|
|
||||||
.PHONY: check-ngfuzz all
|
|
||||||
check-ngfuzz:
|
|
||||||
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
|
|
||||||
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
|
|
||||||
.PHONY: install-fuzzing-deps
|
|
||||||
install-fuzzing-deps: check-clang check-ngfuzz
|
|
||||||
|
|
||||||
.PHONY: fuzz
|
|
||||||
fuzz: install-fuzzing-deps
|
|
||||||
@START_PATH=$$(pwd); \
|
|
||||||
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
|
|
||||||
cd $(FUZZ_NGFUZZ_DIR) && \
|
|
||||||
./ngfuzz -clean && \
|
|
||||||
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
|
|
||||||
./ngfuzz -report
|
|
||||||
|
|
||||||
|
|
||||||
# Reformat code
|
|
||||||
fmt:
|
|
||||||
@echo "⇒ Processing gofmt check"
|
|
||||||
@gofmt -s -w ./
|
|
||||||
|
|
||||||
# Build clean Docker image
|
|
||||||
image:
|
|
||||||
@echo "⇒ Build FrostFS HTTP Gateway docker image "
|
|
||||||
@docker build \
|
|
||||||
--build-arg REPO=$(REPO) \
|
|
||||||
--build-arg VERSION=$(VERSION) \
|
|
||||||
--rm \
|
|
||||||
-f .docker/Dockerfile \
|
|
||||||
-t $(HUB_IMAGE):$(HUB_TAG) .
|
|
||||||
|
|
||||||
# Push Docker image to the hub
|
|
||||||
image-push:
|
|
||||||
@echo "⇒ Publish image"
|
|
||||||
@docker push $(HUB_IMAGE):$(HUB_TAG)
|
|
||||||
|
|
||||||
# Build dirty Docker image
|
|
||||||
dirty-image:
|
|
||||||
@echo "⇒ Build FrostFS HTTP Gateway dirty docker image "
|
|
||||||
@docker build \
|
|
||||||
--build-arg REPO=$(REPO) \
|
|
||||||
--build-arg VERSION=$(VERSION) \
|
|
||||||
--rm \
|
|
||||||
-f .docker/Dockerfile.dirty \
|
|
||||||
-t $(HUB_IMAGE)-dirty:$(HUB_TAG) .
|
|
||||||
|
|
||||||
# Install linters
|
|
||||||
lint-install:
|
|
||||||
@mkdir -p $(TMP_DIR)
|
|
||||||
@rm -rf $(TMP_DIR)/linters
|
|
||||||
@git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters
|
|
||||||
@@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR)
|
|
||||||
@rm -rf $(TMP_DIR)/linters
|
|
||||||
@rmdir $(TMP_DIR) 2>/dev/null || true
|
|
||||||
@CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION)
|
|
||||||
|
|
||||||
# Run linters
|
|
||||||
lint:
|
|
||||||
@if [ ! -d "$(LINT_DIR)" ]; then \
|
|
||||||
echo "Run make lint-install"; \
|
|
||||||
exit 1; \
|
|
||||||
fi
|
|
||||||
$(LINT_DIR)/golangci-lint --timeout=5m run
|
|
||||||
|
|
||||||
# Run linters in Docker
|
|
||||||
docker/lint:
|
|
||||||
docker run --rm -it \
|
|
||||||
-v `pwd`:/src \
|
|
||||||
-u `stat -c "%u:%g" .` \
|
|
||||||
--env HOME=/src \
|
|
||||||
golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint'
|
|
||||||
|
|
||||||
# Activate pre-commit hooks
|
|
||||||
pre-commit:
|
|
||||||
pre-commit install -t pre-commit -t commit-msg
|
|
||||||
|
|
||||||
# Deactivate pre-commit hooks
|
|
||||||
unpre-commit:
|
|
||||||
pre-commit uninstall -t pre-commit -t commit-msg
|
|
||||||
|
|
||||||
# Print version
|
|
||||||
version:
|
|
||||||
@echo $(VERSION)
|
|
||||||
|
|
||||||
# Clean up
|
|
||||||
clean:
|
|
||||||
rm -rf vendor
|
|
||||||
rm -rf $(BINDIR)
|
|
||||||
|
|
||||||
# Package for Debian
|
|
||||||
debpackage:
|
|
||||||
dch --package frostfs-http-gw \
|
|
||||||
--controlmaint \
|
|
||||||
--newversion $(PKG_VERSION) \
|
|
||||||
--distribution $(OS_RELEASE) \
|
|
||||||
"Please see CHANGELOG.md for code changes for $(VERSION)"
|
|
||||||
dpkg-buildpackage --no-sign -b
|
|
||||||
|
|
||||||
debclean:
|
|
||||||
dh clean
|
|
||||||
|
|
||||||
# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json')
|
|
||||||
.PHONY: dump-metrics
|
|
||||||
dump-metrics:
|
|
||||||
@go test ./metrics -run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT))
|
|
||||||
|
|
||||||
|
|
||||||
include help.mk
|
|
466
README.md
466
README.md
|
@ -1,465 +1,3 @@
|
||||||
<p align="center">
|
# WIP area: this repo is just a fork!
|
||||||
<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
|
|
||||||
</p>
|
|
||||||
<p align="center">
|
|
||||||
<a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
---
|
Useful things may be published only in [other branches](../../../branches)
|
||||||
[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw)
|
|
||||||
![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange)
|
|
||||||
![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg)
|
|
||||||
|
|
||||||
# FrostFS HTTP Gateway
|
|
||||||
|
|
||||||
FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
|
|
||||||
- you can download one file per request from the FrostFS Network
|
|
||||||
- you can upload one file per request into the FrostFS Network
|
|
||||||
|
|
||||||
See available routes in [specification](./docs/api.md).
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
```go install git.frostfs.info/TrueCloudLab/frostfs-http-gw```
|
|
||||||
|
|
||||||
Or you can call `make` to build it from the cloned repository (the binary will
|
|
||||||
end up in `bin/frostfs-http-gw`). To build frostfs-http-gw binary in clean docker
|
|
||||||
environment, call `make docker/bin/frostfs-http-gw`.
|
|
||||||
|
|
||||||
Other notable make targets:
|
|
||||||
|
|
||||||
```
|
|
||||||
dep Check and ensure dependencies
|
|
||||||
image Build clean docker image
|
|
||||||
dirty-image Build dirty docker image with host-built binaries
|
|
||||||
fmt Format the code
|
|
||||||
lint Run linters
|
|
||||||
version Show current version
|
|
||||||
```
|
|
||||||
|
|
||||||
Or you can also use a [Docker
|
|
||||||
image](https://git.frostfs.info/TrueCloudLab/-/packages/container/frostfs-http-gw) provided for the released
|
|
||||||
(and occasionally unreleased) versions of the gateway (`:latest` points to the
|
|
||||||
latest stable release).
|
|
||||||
|
|
||||||
## Execution
|
|
||||||
|
|
||||||
HTTP gateway itself is not a FrostFS node, so to access FrostFS it uses node's
|
|
||||||
gRPC interface and you need to provide some node that it will connect to. This
|
|
||||||
can be done either via `-p` parameter or via `HTTP_GW_PEERS_<N>_ADDRESS` and
|
|
||||||
`HTTP_GW_PEERS_<N>_WEIGHT` environment variables (the gate supports multiple
|
|
||||||
FrostFS nodes with weighted load balancing).
|
|
||||||
|
|
||||||
If you launch HTTP gateway in bundle with [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
|
|
||||||
you can get the IP address of the node in the output of `make hosts` command
|
|
||||||
(with s0*.frostfs.devenv name).
|
|
||||||
|
|
||||||
These two commands are functionally equivalent, they run the gate with one
|
|
||||||
backend node (and otherwise default settings):
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p 192.168.130.72:8080
|
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 frostfs-http-gw
|
|
||||||
```
|
|
||||||
It's also possible to specify uri scheme (grpc or grpcs) when using `-p`:
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p grpc://192.168.130.72:8080
|
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 frostfs-http-gw
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
In general, everything available as CLI parameter can also be specified via
|
|
||||||
environment variables (see [example](./config/config.env)), so they're not specifically mentioned in most cases
|
|
||||||
(see `--help` also). If you prefer a config file you can use it in yaml format.
|
|
||||||
|
|
||||||
### Nodes: weights and priorities
|
|
||||||
|
|
||||||
You can specify multiple `-p` options to add more FrostFS nodes, this will make
|
|
||||||
gateway spread requests equally among them (using weight 1 and priority 1 for every node):
|
|
||||||
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080
|
|
||||||
```
|
|
||||||
If you want some specific load distribution proportions, use weights and priorities:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \
|
|
||||||
HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \
|
|
||||||
HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \
|
|
||||||
frostfs-http-gw
|
|
||||||
```
|
|
||||||
This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use
|
|
||||||
192.168.130.72 for 90% of requests and 192.168.130.73 for remaining 10%.
|
|
||||||
|
|
||||||
### Keys
|
|
||||||
You can provide a wallet via `--wallet` or `-w` flag. You can also specify the account address using `--address`
|
|
||||||
(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet.
|
|
||||||
If no wallet provided, the gateway autogenerates a key pair it will use for FrostFS requests.
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p $FROSTFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS
|
|
||||||
```
|
|
||||||
Example:
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
|
|
||||||
```
|
|
||||||
|
|
||||||
### Binding and TLS
|
|
||||||
|
|
||||||
You can make the gateway listen on specific address using the `--listen_address` option.
|
|
||||||
|
|
||||||
It can also provide TLS interface for its users, just specify paths to the key and
|
|
||||||
certificate files via `--tls_key` and `--tls_certificate` parameters. Note
|
|
||||||
that using these options makes gateway TLS-only. If you need to serve both TLS
|
|
||||||
and plain text HTTP, you either have to run two gateway instances or use some
|
|
||||||
external redirecting solution.
|
|
||||||
|
|
||||||
Example to bind to `192.168.130.130:443` and serve TLS there:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \
|
|
||||||
--tls_key=key.pem --tls_certificate=cert.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
### HTTP parameters
|
|
||||||
|
|
||||||
You can tune HTTP read and write buffer sizes as well as timeouts with
|
|
||||||
`HTTP_GW_WEB_READ_BUFFER_SIZE`, `HTTP_GW_WEB_READ_TIMEOUT`,
|
|
||||||
`HTTP_GW_WEB_WRITE_BUFFER_SIZE` and `HTTP_GW_WEB_WRITE_TIMEOUT` environment
|
|
||||||
variables.
|
|
||||||
|
|
||||||
**Note:** to allow upload and download of big data streams, disable read
|
|
||||||
and write timeouts correspondingly. To do that, set `HTTP_GW_WEB_READ_TIMEOUT=0`
|
|
||||||
and `HTTP_GW_WEB_WRITE_TIMEOUT=0`. Otherwise, HTTP Gateway will terminate
|
|
||||||
request with data stream after timeout.
|
|
||||||
|
|
||||||
`HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable
|
|
||||||
request body streaming (effectively it'll make the gateway accept the file completely
|
|
||||||
first and only then try sending it to FrostFS).
|
|
||||||
|
|
||||||
`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls maximum request body size
|
|
||||||
limiting uploads to files slightly lower than this limit.
|
|
||||||
|
|
||||||
### FrostFS parameters
|
|
||||||
|
|
||||||
Gateway can automatically set timestamps for uploaded files based on local
|
|
||||||
time source, use `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment
|
|
||||||
variable to control this behavior.
|
|
||||||
|
|
||||||
### Monitoring and metrics
|
|
||||||
|
|
||||||
Pprof and Prometheus are integrated into the gateway. To enable them use `--pprof` and `--metrics` flags or
|
|
||||||
`HTTP_GW_PPROF`/`HTTP_GW_METRICS` environment variables.
|
|
||||||
|
|
||||||
### Timeouts
|
|
||||||
|
|
||||||
You can tune gRPC interface parameters with `--connect_timeout` (for
|
|
||||||
connection to a node) and `--request_timeout` (for request processing over
|
|
||||||
established connection) options.
|
|
||||||
|
|
||||||
gRPC-level checks allow the gateway to detect dead peers, but it declares them
|
|
||||||
unhealthy at pool level once per `--rebalance_timer` interval, so check for it
|
|
||||||
if needed.
|
|
||||||
|
|
||||||
All timing options accept values with suffixes, so "15s" is 15 seconds and
|
|
||||||
"2m" is 2 minutes.
|
|
||||||
|
|
||||||
### Zip streaming
|
|
||||||
The gateway supports downloading files by common prefix (like dir) in zip format. You can enable compression
|
|
||||||
using config or `HTTP_GW_ZIP_COMPRESSION=true` environment variable.
|
|
||||||
|
|
||||||
### Logging
|
|
||||||
You can specify logging level using variable:
|
|
||||||
```
|
|
||||||
HTTP_GW_LOGGER_LEVEL=debug
|
|
||||||
```
|
|
||||||
|
|
||||||
### Yaml file
|
|
||||||
Configuration file is optional and can be used instead of environment variables/other parameters.
|
|
||||||
It can be specified with `--config` parameter:
|
|
||||||
```
|
|
||||||
$ frostfs-http-gw --config your-config.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for example.
|
|
||||||
|
|
||||||
#### Multiple configs
|
|
||||||
|
|
||||||
You can use several config files when running application. It allows you to split configuration into parts.
|
|
||||||
For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](./config)).
|
|
||||||
You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag.
|
|
||||||
Also, you can combine these flags:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ frostfs-http-gw --config ./config/config.yaml --config /your/partial/config.yaml --config-dir ./config/dir
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** next file in `--config` flag overwrites values from the previous one.
|
|
||||||
Files from `--config-dir` directory overwrite values from `--config` files.
|
|
||||||
So the command above runs `frostfs-http-gw` to listen on `0.0.0.0:8080` address (value from `./config/config.yaml`),
|
|
||||||
applies parameters from `/your/partial/config.yaml`,
|
|
||||||
enables pprof (value from `./config/dir/pprof.yaml`) and prometheus (value from `./config/dir/prometheus.yaml`).
|
|
||||||
|
|
||||||
## HTTP API provided
|
|
||||||
|
|
||||||
This gateway intentionally provides limited feature set and doesn't try to
|
|
||||||
substitute (or completely wrap) regular gRPC FrostFS interface. You can download
|
|
||||||
and upload objects with it, but deleting, searching, managing ACLs, creating
|
|
||||||
containers and other activities are not supported and not planned to be
|
|
||||||
supported.
|
|
||||||
|
|
||||||
### Preparation
|
|
||||||
|
|
||||||
Before uploading or downloading a file make sure you have a prepared container.
|
|
||||||
You can create it with instructions below.
|
|
||||||
|
|
||||||
Also, in case of downloading, you need to have a file inside a container.
|
|
||||||
|
|
||||||
### NNS
|
|
||||||
|
|
||||||
In all download/upload routes you can use container name instead of its id (`$CID`).
|
|
||||||
Read more about it in [docs/nns.md](./docs/nns.md).
|
|
||||||
|
|
||||||
|
|
||||||
#### Create a container
|
|
||||||
|
|
||||||
You can create a container via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases):
|
|
||||||
```
|
|
||||||
$ frostfs-cli -r $FROSTFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL
|
|
||||||
```
|
|
||||||
where `$WALLET` is a path to user wallet,
|
|
||||||
`$ACL` -- hex encoded basic ACL value or keywords 'private', 'public-read', 'public-read-write' and
|
|
||||||
`$POLICY` -- QL-encoded or JSON-encoded placement policy or path to file with it
|
|
||||||
|
|
||||||
For example:
|
|
||||||
```
|
|
||||||
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await
|
|
||||||
```
|
|
||||||
|
|
||||||
If you have launched nodes via [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env),
|
|
||||||
you can get the key value from `wallets/wallet.json` or write the path to
|
|
||||||
the file `wallets/wallet.key`.
|
|
||||||
|
|
||||||
#### Prepare a file in a container
|
|
||||||
|
|
||||||
To create a file via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases), run a command below:
|
|
||||||
```
|
|
||||||
$ frostfs-cli -r $FROSTFS_NODE -k $KEY object put --file $FILENAME --cid $CID
|
|
||||||
```
|
|
||||||
where
|
|
||||||
`$KEY` -- the key, please read the information [above](#create-a-container),
|
|
||||||
`$CID` -- container ID.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
```
|
|
||||||
$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
### Downloading
|
|
||||||
|
|
||||||
#### Requests
|
|
||||||
|
|
||||||
The following requests support GET/HEAD methods.
|
|
||||||
|
|
||||||
##### By IDs
|
|
||||||
|
|
||||||
Basic downloading involves container ID and object ID and is done via GET
|
|
||||||
requests to `/get/$CID/$OID` path, where `$CID` is a container ID or its name if NNS is enabled,
|
|
||||||
`$OID` is an object's (i.e. your file's) ID.
|
|
||||||
|
|
||||||
For example:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY
|
|
||||||
```
|
|
||||||
|
|
||||||
or if container has a name:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ wget http://localhost:8082/get/container-name/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY
|
|
||||||
```
|
|
||||||
|
|
||||||
##### By attributes
|
|
||||||
There is also more complex interface provided for attribute-based downloads,
|
|
||||||
it's usually used to retrieve files by their names, but any other attribute
|
|
||||||
can be used as well. The generic syntax for it looks like this:
|
|
||||||
|
|
||||||
```/get_by_attribute/$CID/$ATTRIBUTE_NAME/$ATTRIBUTE_VALUE```
|
|
||||||
|
|
||||||
where
|
|
||||||
`$CID` is a container ID or its name if NNS is enabled,
|
|
||||||
`$ATTRIBUTE_NAME` is the name of the attribute we want to use,
|
|
||||||
`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have.
|
|
||||||
|
|
||||||
**NB!** The attribute key and value should be url encoded, i.e., if you want to download an object with the attribute value
|
|
||||||
`a cat`, the value in the request must be `a+cat`. In the same way with the attribute key. If you don't escape such values
|
|
||||||
everything can still work (for example you can use `d@ta` without encoding) but it's HIGHLY RECOMMENDED to encode all your attributes.
|
|
||||||
|
|
||||||
If multiple objects have specified attribute with specified value, then the
|
|
||||||
first one of them is returned (and you can't get others via this interface).
|
|
||||||
|
|
||||||
Example for file name attribute:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat.jpeg
|
|
||||||
```
|
|
||||||
Or when the filename includes special symbols:
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat+jpeg # means 'cat jpeg'
|
|
||||||
$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat%25jpeg # means 'cat%jpeg'
|
|
||||||
```
|
|
||||||
|
|
||||||
Some other user-defined attributes:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Ololo/100500
|
|
||||||
```
|
|
||||||
|
|
||||||
Or when the attribute includes special symbols:
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Olo%2Blo/100500 # means Olo+lo
|
|
||||||
```
|
|
||||||
|
|
||||||
An optional `download=true` argument for `Content-Disposition` management is
|
|
||||||
also supported (more on that below):
|
|
||||||
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY?download=true
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Zip
|
|
||||||
You can download some dir (files with the same prefix) in zip (it will be compressed if config contains appropriate param):
|
|
||||||
```
|
|
||||||
$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** the objects must have a valid `FilePath` attribute (it should not contain trailing `/`),
|
|
||||||
otherwise they will not be in the zip archive. You can upload file with this attribute using `curl`:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
#### Replies
|
|
||||||
|
|
||||||
You get object contents in the reply body (if GET method was used), but at the same time you also get a
|
|
||||||
set of reply headers generated using the following rules:
|
|
||||||
* `Content-Length` is set to the length of the object
|
|
||||||
* `Content-Type` is autodetected dynamically by gateway
|
|
||||||
* `Content-Disposition` is `inline` for regular requests and `attachment` for
|
|
||||||
requests with `download=true` argument, `filename` is also added if there
|
|
||||||
is `FileName` attribute set for this object
|
|
||||||
* `Last-Modified` header is set to `Timestamp` attribute value if it's
|
|
||||||
present for the object
|
|
||||||
* `x-container-id` contains container ID
|
|
||||||
* `x-object-id` contains object ID
|
|
||||||
* `x-owner-id` contains owner address
|
|
||||||
* all the other FrostFS attributes are converted to `X-Attribute-*` headers (but only
|
|
||||||
if they can be safely represented in HTTP header), for example `FileName`
|
|
||||||
attribute becomes `X-Attribute-FileName` header
|
|
||||||
|
|
||||||
##### Caching strategy
|
|
||||||
|
|
||||||
HTTP Gateway doesn't control caching (doesn't do anything with the `Cache-Control` header). Caching strategy strictly
|
|
||||||
depends on application use case. So it should be carefully done by proxy server.
|
|
||||||
|
|
||||||
### Uploading
|
|
||||||
|
|
||||||
You can POST files to `/upload/$CID` path where `$CID` is a container ID or its name if NNS is enabled. The
|
|
||||||
request must contain multipart form with mandatory `filename` parameter. Only
|
|
||||||
one part in multipart form will be processed, so to upload another file just
|
|
||||||
issue a new POST request.
|
|
||||||
|
|
||||||
Example request:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
|
|
||||||
```
|
|
||||||
|
|
||||||
Chunked encoding is supported by the server (but check for request read
|
|
||||||
timeouts if you're planning some streaming). You can try streaming support
|
|
||||||
with a large file piped through named FIFO pipe:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ mkfifo pipe
|
|
||||||
$ cat video.mp4 > pipe &
|
|
||||||
$ curl --no-buffer -F 'file=@pipe;filename=catvideo.mp4' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also add some attributes to your file using the following rules:
|
|
||||||
* all "X-Attribute-*" headers get converted to object attributes with
|
|
||||||
"X-Attribute-" prefix stripped, that is if you add "X-Attribute-Ololo:
|
|
||||||
100500" header to your request the resulting object will get "Ololo:
|
|
||||||
100500" attribute
|
|
||||||
* "X-Attribute-SYSTEM-*" headers are special
|
|
||||||
(`-SYSTEM-` part can also be `-system-` or `-System-` (and even legacy `-Neofs-` for some next releases)), they're used to set internal
|
|
||||||
FrostFS attributes starting with `__SYSTEM__` prefix, for these attributes all
|
|
||||||
dashes get converted to underscores and all letters are capitalized. For
|
|
||||||
example, you can use "X-Attribute-SYSTEM-Expiration-Epoch" header to set
|
|
||||||
`__SYSTEM__EXPIRATION_EPOCH` attribute
|
|
||||||
* `FileName` attribute is set from multipart's `filename` if not set
|
|
||||||
explicitly via `X-Attribute-FileName` header
|
|
||||||
* `Timestamp` attribute can be set using gateway local time if using
|
|
||||||
HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP option and if request doesn't
|
|
||||||
provide `X-Attribute-Timestamp` header of its own
|
|
||||||
|
|
||||||
---
|
|
||||||
**NOTE**
|
|
||||||
|
|
||||||
There are some reserved headers of type `X-Attribute-SYSTEM-*` (headers are arranged in descending order of priority):
|
|
||||||
1. `X-Attribute-System-Expiration-Epoch: 100`
|
|
||||||
2. `X-Attribute-System-Expiration-Duration: 24h30m`
|
|
||||||
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
|
|
||||||
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
|
|
||||||
|
|
||||||
which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
For successful uploads you get JSON data in reply body with a container and
|
|
||||||
object ID, like this:
|
|
||||||
```
|
|
||||||
{
|
|
||||||
"object_id": "9ANhbry2ryjJY1NZbcjryJMRXG5uGNKd73kD3V1sVFsX",
|
|
||||||
"container_id": "Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Authentication
|
|
||||||
|
|
||||||
Read more about request authentication in [docs/authentication.md](./docs/authentication.md)
|
|
||||||
|
|
||||||
### Metrics and Pprof
|
|
||||||
|
|
||||||
If enabled, Prometheus metrics are available at `localhost:8084` endpoint
|
|
||||||
and Pprof at `localhost:8083/debug/pprof` by default. Host and port can be configured.
|
|
||||||
See [configuration](./docs/gate-configuration.md).
|
|
||||||
|
|
||||||
## Credits
|
|
||||||
|
|
||||||
Please see [CREDITS](CREDITS.md) for details.
|
|
||||||
|
|
||||||
## Fuzzing
|
|
||||||
|
|
||||||
To run fuzzing tests use the following command:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ make fuzz
|
|
||||||
```
|
|
||||||
|
|
||||||
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
|
|
||||||
|
|
||||||
You can also use the following arguments:
|
|
||||||
|
|
||||||
```
|
|
||||||
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
|
|
||||||
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
|
|
||||||
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
|
|
||||||
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
|
|
||||||
```
|
|
||||||
|
|
||||||
## Credits
|
|
||||||
|
|
||||||
Please see [CREDITS](CREDITS.md) for details.
|
|
||||||
|
|
26
SECURITY.md
26
SECURITY.md
|
@ -1,26 +0,0 @@
|
||||||
# Security Policy
|
|
||||||
|
|
||||||
|
|
||||||
## How To Report a Vulnerability
|
|
||||||
|
|
||||||
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
|
|
||||||
|
|
||||||
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
|
|
||||||
|
|
||||||
Instead, you can report it using one of the following ways:
|
|
||||||
|
|
||||||
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
|
|
||||||
|
|
||||||
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
|
|
||||||
|
|
||||||
* The type of issue (e.g., buffer overflow, or cross-site scripting)
|
|
||||||
* Affected version(s)
|
|
||||||
* Impact of the issue, including how an attacker might exploit the issue
|
|
||||||
* Step-by-step instructions to reproduce the issue
|
|
||||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
|
||||||
* Full paths of source file(s) related to the manifestation of the issue
|
|
||||||
* Any special configuration required to reproduce the issue
|
|
||||||
* Any log files that are related to this issue (if possible)
|
|
||||||
* Proof-of-concept or exploit code (if possible)
|
|
||||||
|
|
||||||
This information will help us triage your report more quickly.
|
|
1
VERSION
1
VERSION
|
@ -1 +0,0 @@
|
||||||
v0.32.3
|
|
1137
cmd/http-gw/app.go
1137
cmd/http-gw/app.go
File diff suppressed because it is too large
Load diff
|
@ -1,590 +0,0 @@
|
||||||
//go:build integration
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
docker "github.com/docker/docker/api/types/container"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/wallet"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"github.com/testcontainers/testcontainers-go"
|
|
||||||
"github.com/testcontainers/testcontainers-go/wait"
|
|
||||||
)
|
|
||||||
|
|
||||||
// putResponse mirrors the JSON body the gateway returns from /upload:
// the container the object landed in and the ID it was assigned.
type putResponse struct {
	CID string `json:"container_id"`
	OID string `json:"object_id"`
}
|
|
||||||
|
|
||||||
const (
	// testContainerName is the NNS name given to the test container,
	// used to exercise name-based (non-CID) routes.
	testContainerName = "friendly"
	// testListenAddress is where the gateway under test listens.
	testListenAddress = "localhost:8082"
	// testHost is the base URL for all HTTP requests in these tests.
	testHost = "http://" + testListenAddress
)
|
|
||||||
|
|
||||||
// TestIntegration starts a frostfs-aio node in Docker for each supported
// version, runs the HTTP gateway against it, and exercises the whole API
// surface: uploads (plain, bearer-token, duplicate-key rejection),
// downloads (by ID, by attribute, zip) and namespace handling.
func TestIntegration(t *testing.T) {
	rootCtx := context.Background()
	aioImage := "git.frostfs.info/truecloudlab/frostfs-aio:"
	versions := []string{
		"1.2.7",
		"1.3.0",
		"1.5.0",
		"1.6.5",
	}
	// Fixed private key so the test wallet (and owner ID) is deterministic.
	key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
	require.NoError(t, err)

	file, err := os.CreateTemp("", "wallet")
	require.NoError(t, err)
	defer os.Remove(file.Name())
	makeTempWallet(t, key, file.Name())

	var ownerID user.ID
	user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)

	for _, version := range versions {
		ctx, cancel2 := context.WithCancel(rootCtx)

		aioContainer := createDockerContainer(ctx, t, aioImage+version)
		// aio 1.6+ requires the user to be registered explicitly.
		if strings.HasPrefix(version, "1.6") {
			registerUser(t, ctx, aioContainer, file.Name())
		}

		// See the logs from the command execution.
		server, cancel := runServer(file.Name())
		clientPool := getPool(ctx, t, key)
		CID, err := createContainer(ctx, t, clientPool, ownerID)
		require.NoError(t, err, version)

		jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)

		t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID) })
		t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) })
		t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) })
		t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) })
		t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) })
		t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
		t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID) })
		t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
		t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
		t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })

		// Teardown: stop the gateway, wait for it, then kill the aio node.
		cancel()
		server.Wait()
		err = aioContainer.Terminate(ctx)
		require.NoError(t, err)
		cancel2()
	}
}
|
|
||||||
|
|
||||||
func runServer(pathToWallet string) (App, context.CancelFunc) {
|
|
||||||
cancelCtx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
v := getDefaultConfig()
|
|
||||||
v.config().Set(cfgWalletPath, pathToWallet)
|
|
||||||
v.config().Set(cfgWalletPassphrase, "")
|
|
||||||
|
|
||||||
application := newApp(cancelCtx, v)
|
|
||||||
go application.Serve()
|
|
||||||
|
|
||||||
return application, cancel
|
|
||||||
}
|
|
||||||
|
|
||||||
func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID) {
|
|
||||||
url := testHost + "/upload/" + CID.String()
|
|
||||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
|
||||||
|
|
||||||
url = testHost + "/upload/" + testContainerName
|
|
||||||
makePutRequestAndCheck(ctx, t, p, CID, url)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putWithBearerTokenInHeader(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
|
|
||||||
url := testHost + "/upload/" + CID.String()
|
|
||||||
|
|
||||||
request, content, attributes := makePutRequest(t, url)
|
|
||||||
request.Header.Set("Authorization", "Bearer "+token)
|
|
||||||
resp, err := http.DefaultClient.Do(request)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putWithBearerTokenInCookie(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
|
|
||||||
url := testHost + "/upload/" + CID.String()
|
|
||||||
|
|
||||||
request, content, attributes := makePutRequest(t, url)
|
|
||||||
request.AddCookie(&http.Cookie{Name: "Bearer", Value: token})
|
|
||||||
resp, err := http.DefaultClient.Do(request)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
checkPutResponse(ctx, t, p, CID, resp, content, attributes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
|
|
||||||
request, content, attributes := makePutRequest(t, url)
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(request)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
checkPutResponse(ctx, t, p, cnrID, resp, content, attributes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string]string) {
|
|
||||||
content := "content of file"
|
|
||||||
keyAttr, valAttr := "User-Attribute", "user value"
|
|
||||||
attributes := map[string]string{
|
|
||||||
object.AttributeFileName: "newFile.txt",
|
|
||||||
keyAttr: valAttr,
|
|
||||||
}
|
|
||||||
|
|
||||||
var buff bytes.Buffer
|
|
||||||
w := multipart.NewWriter(&buff)
|
|
||||||
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = io.Copy(fw, bytes.NewBufferString(content))
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = w.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
request, err := http.NewRequest(http.MethodPost, url, &buff)
|
|
||||||
require.NoError(t, err)
|
|
||||||
request.Header.Set("Content-Type", w.FormDataContentType())
|
|
||||||
request.Header.Set("X-Attribute-"+keyAttr, valAttr)
|
|
||||||
|
|
||||||
return request, content, attributes
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkPutResponse(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, resp *http.Response, content string, attributes map[string]string) {
|
|
||||||
defer func() {
|
|
||||||
err := resp.Body.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
fmt.Println(string(body))
|
|
||||||
}
|
|
||||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
||||||
|
|
||||||
addr := &putResponse{}
|
|
||||||
err = json.Unmarshal(body, addr)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = cnrID.DecodeString(addr.CID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var id oid.ID
|
|
||||||
err = id.DecodeString(addr.OID)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var objectAddress oid.Address
|
|
||||||
objectAddress.SetContainer(cnrID)
|
|
||||||
objectAddress.SetObject(id)
|
|
||||||
|
|
||||||
payload := bytes.NewBuffer(nil)
|
|
||||||
|
|
||||||
var prm pool.PrmObjectGet
|
|
||||||
prm.SetAddress(objectAddress)
|
|
||||||
|
|
||||||
res, err := p.GetObject(ctx, prm)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, err = io.Copy(payload, res.Payload)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, content, payload.String())
|
|
||||||
|
|
||||||
for _, attribute := range res.Header.Attributes() {
|
|
||||||
require.Equal(t, attributes[attribute.Key()], attribute.Value())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
|
|
||||||
url := testHost + "/upload/" + CID.String()
|
|
||||||
|
|
||||||
attr := "X-Attribute-User-Attribute"
|
|
||||||
content := "content of file"
|
|
||||||
valOne, valTwo := "first_value", "second_value"
|
|
||||||
fileName := "newFile.txt"
|
|
||||||
|
|
||||||
var buff bytes.Buffer
|
|
||||||
w := multipart.NewWriter(&buff)
|
|
||||||
fw, err := w.CreateFormFile("file", fileName)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = io.Copy(fw, bytes.NewBufferString(content))
|
|
||||||
require.NoError(t, err)
|
|
||||||
err = w.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
request, err := http.NewRequest(http.MethodPost, url, &buff)
|
|
||||||
require.NoError(t, err)
|
|
||||||
request.Header.Set("Content-Type", w.FormDataContentType())
|
|
||||||
request.Header.Add(attr, valOne)
|
|
||||||
request.Header.Add(attr, valTwo)
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(request)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
err := resp.Body.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "key duplication error: "+attr+"\n", string(body))
|
|
||||||
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
|
|
||||||
content := "content of file"
|
|
||||||
attributes := map[string]string{
|
|
||||||
"some-attr": "some-get-value",
|
|
||||||
}
|
|
||||||
|
|
||||||
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
|
|
||||||
|
|
||||||
resp, err := http.Get(testHost + "/get/" + CID.String() + "/" + id.String())
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetResponse(t, resp, content, attributes)
|
|
||||||
|
|
||||||
resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String())
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetResponse(t, resp, content, attributes)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
|
|
||||||
defer func() {
|
|
||||||
err := resp.Body.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
data, err := io.ReadAll(resp.Body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, content, string(data))
|
|
||||||
|
|
||||||
for k, v := range attributes {
|
|
||||||
require.Equal(t, v, resp.Header.Get("X-Attribute-"+k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) {
|
|
||||||
defer func() {
|
|
||||||
err := resp.Body.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
data, err := io.ReadAll(resp.Body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, content, string(data))
|
|
||||||
|
|
||||||
for k, v := range attributes {
|
|
||||||
require.Equal(t, v, resp.Header.Get(k))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
|
|
||||||
keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
|
|
||||||
content := "content of file"
|
|
||||||
attributes := map[string]string{keyAttr: valAttr}
|
|
||||||
|
|
||||||
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
|
|
||||||
|
|
||||||
expectedAttr := map[string]string{
|
|
||||||
"X-Attribute-" + keyAttr: valAttr,
|
|
||||||
"x-object-id": id.String(),
|
|
||||||
"x-container-id": CID.String(),
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := http.Get(testHost + "/get_by_attribute/" + CID.String() + "/" + keyAttr + "/" + valAttr)
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
|
||||||
|
|
||||||
resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr)
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetByAttrResponse(t, resp, content, expectedAttr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
|
|
||||||
names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
|
|
||||||
contents := []string{"content of file1", "content of file2"}
|
|
||||||
attributes1 := map[string]string{object.AttributeFilePath: names[0]}
|
|
||||||
attributes2 := map[string]string{object.AttributeFilePath: names[1]}
|
|
||||||
|
|
||||||
putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1)
|
|
||||||
putObject(ctx, t, clientPool, ownerID, CID, contents[1], attributes2)
|
|
||||||
|
|
||||||
baseURL := testHost + "/zip/" + CID.String()
|
|
||||||
makeZipTest(t, baseURL, names, contents)
|
|
||||||
|
|
||||||
baseURL = testHost + "/zip/" + testContainerName
|
|
||||||
makeZipTest(t, baseURL, names, contents)
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeZipTest(t *testing.T, baseURL string, names, contents []string) {
|
|
||||||
url := baseURL + "/zipfolder"
|
|
||||||
makeZipRequest(t, url, names, contents)
|
|
||||||
|
|
||||||
// check nested folder
|
|
||||||
url = baseURL + "/zipfolder/dir"
|
|
||||||
makeZipRequest(t, url, names[:1], contents[:1])
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeZipRequest(t *testing.T, url string, names, contents []string) {
|
|
||||||
resp, err := http.Get(url)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer func() {
|
|
||||||
err := resp.Body.Close()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
data, err := io.ReadAll(resp.Body)
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkZip(t, data, int64(len(data)), names, contents)
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkZip parses data as a ZIP archive of the given length and asserts that
// it contains exactly the files listed in names with the matching contents.
// Entries are compared after sorting by archive path, so the caller must pass
// names/contents in lexicographic order of the paths.
func checkZip(t *testing.T, data []byte, length int64, names, contents []string) {
	readerAt := bytes.NewReader(data)

	zipReader, err := zip.NewReader(readerAt, length)
	require.NoError(t, err)

	require.Equal(t, len(names), len(zipReader.File))

	// The gateway does not guarantee entry order; normalize it before the
	// element-wise comparison below.
	sort.Slice(zipReader.File, func(i, j int) bool {
		return zipReader.File[i].FileHeader.Name < zipReader.File[j].FileHeader.Name
	})

	for i, f := range zipReader.File {
		require.Equal(t, names[i], f.FileHeader.Name)

		rc, err := f.Open()
		require.NoError(t, err)

		all, err := io.ReadAll(rc)
		require.NoError(t, err)
		require.Equal(t, contents[i], string(all))

		err = rc.Close()
		require.NoError(t, err)
	}
}
|
|
||||||
|
|
||||||
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
|
|
||||||
content := "content of file"
|
|
||||||
attributes := map[string]string{
|
|
||||||
"some-attr": "some-get-value",
|
|
||||||
}
|
|
||||||
|
|
||||||
id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set(defaultNamespaceHeader, "")
|
|
||||||
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetResponse(t, resp, content, attributes)
|
|
||||||
|
|
||||||
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set(defaultNamespaceHeader, "root")
|
|
||||||
|
|
||||||
resp, err = http.DefaultClient.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkGetResponse(t, resp, content, attributes)
|
|
||||||
|
|
||||||
req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header.Set(defaultNamespaceHeader, "root2")
|
|
||||||
|
|
||||||
resp, err = http.DefaultClient.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, http.StatusNotFound, resp.StatusCode)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// createDockerContainer starts the given frostfs-aio image on the host
// network (hostname "aio") and blocks until its "aio container started" log
// line appears, giving up after 2 minutes. Fails the test on error.
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
	req := testcontainers.ContainerRequest{
		Image:      image,
		WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(2 * time.Minute),
		Name:       "aio",
		Hostname:   "aio",
		// Host networking lets tests reach aio services on localhost ports.
		HostConfigModifier: func(hc *docker.HostConfig) {
			hc.NetworkMode = "host"
		},
	}
	aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	require.NoError(t, err)

	return aioC
}
|
|
||||||
|
|
||||||
// getDefaultConfig returns an application config pre-populated for the
// integration environment: one local storage peer, the local NeoGo RPC
// endpoint and the test listen address.
func getDefaultConfig() *appCfg {
	v := settings()
	v.config().SetDefault(cfgPeers+".0.address", "localhost:8080")
	v.config().SetDefault(cfgPeers+".0.weight", 1)
	v.config().SetDefault(cfgPeers+".0.priority", 1)

	v.config().SetDefault(cfgRPCEndpoint, "http://localhost:30333")
	v.config().SetDefault("server.0.address", testListenAddress)

	return v
}
|
|
||||||
|
|
||||||
// getPool builds and dials a FrostFS client pool backed by the single local
// storage node, signing requests with key. Fails the test on error.
func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool {
	var prm pool.InitParameters
	prm.SetKey(&key.PrivateKey)
	prm.SetNodeDialTimeout(5 * time.Second)
	prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1))

	clientPool, err := pool.NewPool(prm)
	require.NoError(t, err)

	err = clientPool.Dial(ctx)
	require.NoError(t, err)
	return clientPool
}
|
|
||||||
|
|
||||||
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
|
|
||||||
var policy netmap.PlacementPolicy
|
|
||||||
err := policy.DecodeString("REP 1")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var cnr container.Container
|
|
||||||
cnr.Init()
|
|
||||||
cnr.SetPlacementPolicy(policy)
|
|
||||||
cnr.SetBasicACL(acl.PublicRWExtended)
|
|
||||||
cnr.SetOwner(ownerID)
|
|
||||||
|
|
||||||
container.SetCreationTime(&cnr, time.Now())
|
|
||||||
|
|
||||||
var domain container.Domain
|
|
||||||
domain.SetName(testContainerName)
|
|
||||||
|
|
||||||
cnr.SetAttribute(containerv2.SysAttributeName, domain.Name())
|
|
||||||
cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone())
|
|
||||||
|
|
||||||
var waitPrm pool.WaitParams
|
|
||||||
waitPrm.SetTimeout(15 * time.Second)
|
|
||||||
waitPrm.SetPollInterval(3 * time.Second)
|
|
||||||
|
|
||||||
var prm pool.PrmContainerPut
|
|
||||||
prm.SetContainer(cnr)
|
|
||||||
prm.SetWaitParams(waitPrm)
|
|
||||||
|
|
||||||
CID, err := clientPool.PutContainer(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return cid.ID{}, err
|
|
||||||
}
|
|
||||||
fmt.Println(CID.String())
|
|
||||||
|
|
||||||
return CID, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// putObject stores content as a new object in container CID, owned by
// ownerID, with the given attributes, and returns the resulting object ID.
// Fails the test on error.
func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
	obj := object.New()
	obj.SetContainerID(CID)
	obj.SetOwnerID(ownerID)

	// Map iteration order is random; attribute order is not significant here.
	var attrs []object.Attribute
	for key, val := range attributes {
		attr := object.NewAttribute()
		attr.SetKey(key)
		attr.SetValue(val)
		attrs = append(attrs, *attr)
	}
	obj.SetAttributes(attrs...)

	var prm pool.PrmObjectPut
	prm.SetHeader(*obj)
	prm.SetPayload(bytes.NewBufferString(content))

	id, err := clientPool.PutObject(ctx, prm)
	require.NoError(t, err)

	return id.ObjectID
}
|
|
||||||
|
|
||||||
func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers.Container, pathToWallet string) {
|
|
||||||
err := aioContainer.CopyFileToContainer(ctx, pathToWallet, "/usr/wallet.json", 644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
_, _, err = aioContainer.Exec(ctx, []string{
|
|
||||||
"/usr/bin/frostfs-s3-authmate", "register-user",
|
|
||||||
"--wallet", "/usr/wallet.json",
|
|
||||||
"--rpc-endpoint", "http://localhost:30333",
|
|
||||||
"--contract-wallet", "/config/s3-gw-wallet.json"})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeBearerTokens issues a bearer token for ownerID, signs it with key and
// returns it serialized both as base64(JSON) and base64(binary protobuf).
// For aio version "1.2.7" an empty eACL table is attached instead of the
// impersonate flag — presumably because that version predates impersonation
// support; confirm against the aio changelog.
func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
	tkn := new(bearer.Token)
	tkn.ForUser(ownerID)
	tkn.SetExp(10000)

	if version == "1.2.7" {
		tkn.SetEACLTable(*eacl.NewTable())
	} else {
		tkn.SetImpersonate(true)
	}

	err := tkn.Sign(key.PrivateKey)
	require.NoError(t, err)

	jsonToken, err := tkn.MarshalJSON()
	require.NoError(t, err)

	jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken)
	binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal())

	require.NotEmpty(t, jsonTokenBase64)
	require.NotEmpty(t, binaryTokenBase64)

	return
}
|
|
||||||
|
|
||||||
// makeTempWallet creates a NEP-6 wallet file at path containing a single
// account derived from key, encrypted with an empty passphrase. Fails the
// test on error.
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
	w, err := wallet.NewWallet(path)
	require.NoError(t, err)

	acc := wallet.NewAccountFromPrivateKey(key)
	// Empty passphrase keeps the test flow non-interactive.
	err = acc.Encrypt("", w.Scrypt)
	require.NoError(t, err)

	w.AddAccount(acc)

	err = w.Save()
	require.NoError(t, err)
}
|
|
|
@ -1,174 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/zapjournald"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
"github.com/ssgreg/journald"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"go.uber.org/zap/zapcore"
|
|
||||||
)
|
|
||||||
|
|
||||||
// getLogLevel parses the configured logger.level string into a zapcore.Level.
// On an unrecognized value it returns an error listing all accepted levels.
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
	var lvl zapcore.Level
	lvlStr := v.GetString(cfgLoggerLevel)
	err := lvl.UnmarshalText([]byte(lvlStr))
	if err != nil {
		return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
			"value should be one of %v", lvlStr, err, [...]zapcore.Level{
			zapcore.DebugLevel,
			zapcore.InfoLevel,
			zapcore.WarnLevel,
			zapcore.ErrorLevel,
			zapcore.DPanicLevel,
			zapcore.PanicLevel,
			zapcore.FatalLevel,
		})
	}
	return lvl, nil
}
|
|
||||||
|
|
||||||
// Compile-time check that the wrapper satisfies zapcore.Core.
var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)

// zapCoreTagFilterWrapper is a zapcore.Core decorator that drops entries
// whose tag field is disabled for the entry's level by the configured
// TagFilterSettings.
type zapCoreTagFilterWrapper struct {
	core     zapcore.Core      // underlying core that actually writes entries
	settings TagFilterSettings // per-tag level thresholds
	extra    []zap.Field       // fields accumulated via With; also tag-filtered
}

// TagFilterSettings reports whether logging at lvl is enabled for a tag.
type TagFilterSettings interface {
	LevelEnabled(tag string, lvl zapcore.Level) bool
}
|
|
||||||
|
|
||||||
// Enabled implements zapcore.Core by delegating to the wrapped core.
func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
	return c.core.Enabled(level)
}
|
|
||||||
|
|
||||||
// With implements zapcore.Core. The added fields are also remembered in extra
// so Write can still apply tag filtering to fields attached via With.
func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
	return &zapCoreTagFilterWrapper{
		core:     c.core.With(fields),
		settings: c.settings,
		extra:    append(c.extra, fields...),
	}
}
|
|
||||||
|
|
||||||
// Check implements zapcore.Core. It registers this core for entries whose
// level is enabled; tag-based filtering is deferred to Write, where the
// entry's fields are available.
func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.core.Enabled(entry.Level) {
		return checked.AddCore(entry, c)
	}
	return checked
}
|
|
||||||
|
|
||||||
// Write implements zapcore.Core. Entries whose tag is disabled for this level
// are silently dropped, whether the tag arrived with the entry's own fields
// or with fields attached earlier via With (kept in extra).
func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
	if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
		return nil
	}

	return c.core.Write(entry, fields)
}
|
|
||||||
|
|
||||||
// shouldSkip reports whether the entry must be dropped: true when fields
// contains a string-typed tag field whose tag has entry.Level disabled in the
// settings. Only the first tag field found is consulted.
func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
	for _, field := range fields {
		if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
			if !c.settings.LevelEnabled(field.String, entry.Level) {
				return true
			}
			break
		}
	}

	return false
}
|
|
||||||
|
|
||||||
// Sync implements zapcore.Core by flushing the wrapped core.
func (c *zapCoreTagFilterWrapper) Sync() error {
	return c.core.Sync()
}
|
|
||||||
|
|
||||||
// applyZapCoreMiddlewares decorates core with tag-based filtering and, when
// logger.sampling.enabled is set, with a zap sampler whose dropped entries
// are counted via loggerSettings.DroppedLogsInc.
func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core {
	core = &zapCoreTagFilterWrapper{
		core:     core,
		settings: tagSetting,
	}

	if v.GetBool(cfgLoggerSamplingEnabled) {
		core = zapcore.NewSamplerWithOptions(core,
			v.GetDuration(cfgLoggerSamplingInterval),
			v.GetInt(cfgLoggerSamplingInitial),
			v.GetInt(cfgLoggerSamplingThereafter),
			// Track every entry the sampler discards for metrics.
			zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
				if dec&zapcore.LogDropped > 0 {
					loggerSettings.DroppedLogsInc()
				}
			}))
	}

	return core
}
|
|
||||||
|
|
||||||
// newLogEncoder returns a console encoder based on zap's production config
// with ISO8601 timestamps; shared by the stdout and journald loggers.
func newLogEncoder() zapcore.Encoder {
	c := zap.NewProductionEncoderConfig()
	c.EncodeTime = zapcore.ISO8601TimeEncoder

	return zapcore.NewConsoleEncoder(c)
}
|
|
||||||
|
|
||||||
// newStdoutLogger constructs a zap.Logger instance for current application.
|
|
||||||
// Panics on failure.
|
|
||||||
//
|
|
||||||
// Logger is built from zap's production logging configuration with:
|
|
||||||
// - parameterized level (debug by default)
|
|
||||||
// - console encoding
|
|
||||||
// - ISO8601 time encoding
|
|
||||||
//
|
|
||||||
// Logger records a stack trace for all messages at or above fatal level.
|
|
||||||
//
|
|
||||||
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
|
|
||||||
func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
|
|
||||||
stdout := zapcore.AddSync(os.Stderr)
|
|
||||||
|
|
||||||
consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
|
|
||||||
consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
|
|
||||||
|
|
||||||
return &Logger{
|
|
||||||
logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
|
|
||||||
lvl: lvl,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newJournaldLogger constructs a zap.Logger that writes to the systemd
// journal, annotating every record with syslog facility, identifier and PID,
// and applying the same tag-filter/sampling middlewares as the stdout logger.
// Records a stack trace for messages at or above fatal level.
func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
	encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)

	core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	coreWithContext := core.With([]zapcore.Field{
		zapjournald.SyslogFacility(zapjournald.LogDaemon),
		zapjournald.SyslogIdentifier(),
		zapjournald.SyslogPid(),
	})

	coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting)

	return &Logger{
		logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
		lvl:    lvl,
	}
}
|
|
||||||
|
|
||||||
// LoggerAppSettings is the application-side hook the logger uses to report
// entries discarded by sampling.
type LoggerAppSettings interface {
	DroppedLogsInc()
}
|
|
||||||
|
|
||||||
// pickLogger builds the logger selected by logger.destination ("stdout" or
// "journald"). Panics on any other value, since the gateway cannot run
// without a logger.
func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
	dest := v.GetString(cfgLoggerDestination)

	switch dest {
	case destinationStdout:
		return newStdoutLogger(v, lvl, loggerSettings, tagSettings)
	case destinationJournald:
		return newJournaldLogger(v, lvl, loggerSettings, tagSettings)
	default:
		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
	}
}
|
|
|
@ -1,16 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os/signal"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// main wires the gateway together: it installs SIGINT/SIGTERM handling via a
// cancellable context, loads configuration, starts serving in a background
// goroutine and blocks until the application finishes shutting down.
func main() {
	globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	cfg := settings()

	application := newApp(globalContext, cfg)
	go application.Serve()
	// Wait blocks until shutdown completes; Serve presumably exits when
	// globalContext is cancelled — confirm in the app implementation.
	application.Wait()
}
|
|
|
@ -1,10 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
// Prefix is a prefix used for environment variables containing gateway
// configuration.
const Prefix = "HTTP_GW"

var (
	// Version is the gateway version. The "dev" default is presumably
	// overridden at build time via -ldflags — confirm in the Makefile.
	Version = "dev"
)
|
|
|
@ -1,124 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// ServerInfo describes a single listen endpoint and its TLS settings.
	ServerInfo struct {
		Address string
		TLS     ServerTLSInfo
	}

	// ServerTLSInfo holds the TLS switch and certificate/key file locations
	// for one endpoint.
	ServerTLSInfo struct {
		Enabled  bool
		CertFile string
		KeyFile  string
	}

	// Server is a listening endpoint whose TLS certificate can be swapped
	// at runtime.
	Server interface {
		Address() string
		Listener() net.Listener
		UpdateCert(certFile, keyFile string) error
	}

	// server is the default Server implementation backed by a net.Listener
	// (optionally TLS-wrapped) and a reloadable certificate provider.
	server struct {
		address     string
		listener    net.Listener
		tlsProvider *certProvider
	}

	// certProvider stores the current TLS certificate and its source paths
	// behind an RWMutex so handshakes can read it while reloads replace it.
	certProvider struct {
		Enabled bool

		mu       sync.RWMutex
		certPath string
		keyPath  string
		cert     *tls.Certificate
	}
)
|
|
||||||
|
|
||||||
// Address returns the configured listen address of this server.
func (s *server) Address() string {
	return s.address
}
|
|
||||||
|
|
||||||
// Listener returns the underlying (possibly TLS-wrapped) listener.
func (s *server) Listener() net.Listener {
	return s.listener
}
|
|
||||||
|
|
||||||
// UpdateCert reloads the TLS certificate from the given files; new TLS
// handshakes pick it up via the certificate provider.
func (s *server) UpdateCert(certFile, keyFile string) error {
	return s.tlsProvider.UpdateCert(certFile, keyFile)
}
|
|
||||||
|
|
||||||
// newServer opens a TCP listener on serverInfo.Address and, when TLS is
// enabled, loads the initial certificate and wraps the listener in a TLS
// listener that serves the provider's current certificate. The raw listener
// is closed on certificate-load failure.
func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {
	var lic net.ListenConfig
	ln, err := lic.Listen(ctx, "tcp", serverInfo.Address)
	if err != nil {
		return nil, fmt.Errorf("could not prepare listener: %w", err)
	}

	tlsProvider := &certProvider{
		Enabled: serverInfo.TLS.Enabled,
	}

	if serverInfo.TLS.Enabled {
		if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
			// Don't leak the socket; report a close failure alongside the
			// original error.
			lnErr := ln.Close()
			return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
		}

		ln = tls.NewListener(ln, &tls.Config{
			GetCertificate: tlsProvider.GetCertificate,
			NextProtos:     []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
		})
	}

	return &server{
		address:     serverInfo.Address,
		listener:    ln,
		tlsProvider: tlsProvider,
	}, nil
}
|
|
||||||
|
|
||||||
// GetCertificate returns the currently loaded certificate; it is installed as
// tls.Config.GetCertificate so handshakes always see the newest reload.
// Errors when the provider is disabled.
func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	if !p.Enabled {
		return nil, errors.New("cert provider: disabled")
	}

	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.cert, nil
}
|
|
||||||
|
|
||||||
func (p *certProvider) UpdateCert(certPath, keyPath string) error {
|
|
||||||
if !p.Enabled {
|
|
||||||
return fmt.Errorf("tls disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
p.mu.Lock()
|
|
||||||
p.certPath = certPath
|
|
||||||
p.keyPath = keyPath
|
|
||||||
p.cert = &cert
|
|
||||||
p.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilePaths returns the certificate and key paths of the currently loaded
// pair, or empty strings when the provider is disabled.
func (p *certProvider) FilePaths() (string, string) {
	if !p.Enabled {
		return "", ""
	}

	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.certPath, p.keyPath
}
|
|
|
@ -1,119 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"crypto/x509/pkix"
|
|
||||||
"encoding/pem"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"golang.org/x/net/http2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Header name/value that testHandler requires on every request.
const (
	expHeaderKey   = "Foo"
	expHeaderValue = "Bar"
)
|
|
||||||
|
|
||||||
func TestHTTP2TLS(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
certPath, keyPath := prepareTestCerts(t)
|
|
||||||
|
|
||||||
srv := &http.Server{
|
|
||||||
Handler: http.HandlerFunc(testHandler),
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsListener, err := newServer(ctx, ServerInfo{
|
|
||||||
Address: ":0",
|
|
||||||
TLS: ServerTLSInfo{
|
|
||||||
Enabled: true,
|
|
||||||
CertFile: certPath,
|
|
||||||
KeyFile: keyPath,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
|
|
||||||
addr := fmt.Sprintf("https://localhost:%d", port)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
_ = srv.Serve(tlsListener.Listener())
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Server is running, now send HTTP/2 request
|
|
||||||
|
|
||||||
tlsClientConfig := &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
|
|
||||||
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", addr, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header[expHeaderKey] = []string{expHeaderValue}
|
|
||||||
|
|
||||||
resp, err := cliHTTP1.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
||||||
|
|
||||||
resp, err = cliHTTP2.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testHandler(resp http.ResponseWriter, req *http.Request) {
|
|
||||||
hdr, ok := req.Header[expHeaderKey]
|
|
||||||
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
|
|
||||||
resp.WriteHeader(http.StatusBadRequest)
|
|
||||||
} else {
|
|
||||||
resp.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
|
|
||||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: big.NewInt(1),
|
|
||||||
Subject: pkix.Name{CommonName: "localhost"},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
|
||||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
|
||||||
BasicConstraintsValid: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
certPath = path.Join(dir, "cert.pem")
|
|
||||||
keyPath = path.Join(dir, "key.pem")
|
|
||||||
|
|
||||||
certFile, err := os.Create(certPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer certFile.Close()
|
|
||||||
|
|
||||||
keyFile, err := os.Create(keyPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer keyFile.Close()
|
|
||||||
|
|
||||||
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return certPath, keyPath
|
|
||||||
}
|
|
|
@ -1,853 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
|
||||||
grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
|
||||||
"github.com/spf13/pflag"
|
|
||||||
"github.com/spf13/viper"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"go.uber.org/zap/zapcore"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Accepted values for the logger.destination setting (see pickLogger).
const (
	destinationStdout   = "stdout"
	destinationJournald = "journald"
)
|
|
||||||
|
|
||||||
// Default values and viper configuration keys for the gateway. Keys use the
// dotted form matching the YAML config layout; environment variables derive
// from them via the HTTP_GW prefix and "."->"_" replacement (see newViper).
const (
	defaultRebalanceTimer = 60 * time.Second
	defaultRequestTimeout = 15 * time.Second
	defaultConnectTimeout = 10 * time.Second
	defaultStreamTimeout  = 10 * time.Second

	defaultLoggerSamplerInterval = 1 * time.Second

	defaultShutdownTimeout = 15 * time.Second

	defaultPoolErrorThreshold uint32 = 100

	defaultSoftMemoryLimit = math.MaxInt64

	defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb

	defaultNamespaceHeader = "X-Frostfs-Namespace"

	defaultReconnectInterval = time.Minute

	defaultCORSMaxAge = 600 // seconds

	defaultMultinetFallbackDelay = 300 * time.Millisecond

	cfgServer      = "server"
	cfgTLSEnabled  = "tls.enabled"
	cfgTLSCertFile = "tls.cert_file"
	cfgTLSKeyFile  = "tls.key_file"

	cfgReconnectInterval = "reconnect_interval"

	cfgIndexPageEnabled      = "index_page.enabled"
	cfgIndexPageTemplatePath = "index_page.template_path"

	cfgWorkerPoolSize = "worker_pool_size"

	// Web.
	cfgWebReadBufferSize     = "web.read_buffer_size"
	cfgWebWriteBufferSize    = "web.write_buffer_size"
	cfgWebReadTimeout        = "web.read_timeout"
	cfgWebWriteTimeout       = "web.write_timeout"
	cfgWebStreamRequestBody  = "web.stream_request_body"
	cfgWebMaxRequestBodySize = "web.max_request_body_size"

	// Metrics / Profiler.
	cfgPrometheusEnabled = "prometheus.enabled"
	cfgPrometheusAddress = "prometheus.address"
	cfgPprofEnabled      = "pprof.enabled"
	cfgPprofAddress      = "pprof.address"

	// Tracing ...
	cfgTracingEnabled    = "tracing.enabled"
	cfgTracingExporter   = "tracing.exporter"
	cfgTracingEndpoint   = "tracing.endpoint"
	cfgTracingTrustedCa  = "tracing.trusted_ca"
	cfgTracingAttributes = "tracing.attributes"

	// Pool config.
	cfgConTimeout         = "connect_timeout"
	cfgStreamTimeout      = "stream_timeout"
	cfgReqTimeout         = "request_timeout"
	cfgRebalance          = "rebalance_timer"
	cfgPoolErrorThreshold = "pool_error_threshold"

	// Logger.
	cfgLoggerLevel       = "logger.level"
	cfgLoggerDestination = "logger.destination"

	cfgLoggerSamplingEnabled    = "logger.sampling.enabled"
	cfgLoggerSamplingInitial    = "logger.sampling.initial"
	cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
	cfgLoggerSamplingInterval   = "logger.sampling.interval"

	// Per-tag log level overrides; %d is the tag's index in the config list.
	cfgLoggerTags           = "logger.tags"
	cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
	cfgLoggerTagsNameTmpl   = cfgLoggerTagsPrefixTmpl + "name"
	cfgLoggerTagsLevelTmpl  = cfgLoggerTagsPrefixTmpl + "level"

	// Wallet.
	cfgWalletPassphrase = "wallet.passphrase"
	cfgWalletPath       = "wallet.path"
	cfgWalletAddress    = "wallet.address"

	// Uploader Header.
	cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp"

	// Peers.
	cfgPeers = "peers"

	// NeoGo.
	cfgRPCEndpoint = "rpc_endpoint"

	// Resolving.
	cfgResolveOrder = "resolve_order"

	// Zip compression.
	//
	// Deprecated: Use cfgArchiveCompression instead.
	cfgZipCompression = "zip.compression"

	// Archive compression.
	cfgArchiveCompression = "archive.compression"

	// Runtime.
	cfgSoftMemoryLimit = "runtime.soft_memory_limit"

	// Enabling client side object preparing for PUT operations.
	cfgClientCut = "frostfs.client_cut"
	// Sets max buffer size for read payload in put operations.
	cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
	// Configuration of parameters of requests to FrostFS.
	// Sets max attempt to make successful tree request.
	cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"

	// Caching.
	cfgBucketsCacheLifetime = "cache.buckets.lifetime"
	cfgBucketsCacheSize     = "cache.buckets.size"
	cfgNetmapCacheLifetime  = "cache.netmap.lifetime"

	// Bucket resolving options.
	cfgResolveNamespaceHeader   = "resolve_bucket.namespace_header"
	cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"

	// CORS.
	cfgCORSAllowOrigin      = "cors.allow_origin"
	cfgCORSAllowMethods     = "cors.allow_methods"
	cfgCORSAllowHeaders     = "cors.allow_headers"
	cfgCORSExposeHeaders    = "cors.expose_headers"
	cfgCORSAllowCredentials = "cors.allow_credentials"
	cfgCORSMaxAge           = "cors.max_age"

	// Multinet.
	cfgMultinetEnabled       = "multinet.enabled"
	cfgMultinetBalancer      = "multinet.balancer"
	cfgMultinetRestrict      = "multinet.restrict"
	cfgMultinetFallbackDelay = "multinet.fallback_delay"
	cfgMultinetSubnets       = "multinet.subnets"

	// Feature.
	cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
	cfgFeaturesTreePoolNetmapSupport  = "features.tree_pool_netmap_support"

	// Command line args.
	cmdHelp          = "help"
	cmdVersion       = "version"
	cmdPprof         = "pprof"
	cmdMetrics       = "metrics"
	cmdWallet        = "wallet"
	cmdAddress       = "address"
	cmdConfig        = "config"
	cmdConfigDir     = "config-dir"
	cmdListenAddress = "listen_address"
)
|
|
||||||
|
|
||||||
// ignore lists flag names that must not be bound into viper as settings
// (peers is configured structurally; help/version are one-shot commands).
var ignore = map[string]struct{}{
	cfgPeers:   {},
	cmdHelp:    {},
	cmdVersion: {},
}

// defaultTags are the log tags enabled when logger.tags is not configured.
var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree}

// Logger couples a zap logger with its atomic level so the level can be
// changed at runtime (e.g. on config reload).
type Logger struct {
	logger *zap.Logger
	lvl    zap.AtomicLevel
}
|
|
||||||
|
|
||||||
// appCfg is the application's live configuration: the parsed command-line
// flags plus the current viper settings, which can be swapped atomically on
// reload. Access settings only via config()/setConfig().
type appCfg struct {
	flags *pflag.FlagSet

	mu       sync.RWMutex // guards settings
	settings *viper.Viper
}
|
|
||||||
|
|
||||||
// reload rebuilds the viper configuration from flags and config files and
// atomically replaces the current settings. The config/config-dir values from
// the old configuration are carried over so a reload re-reads the same files.
// On any error the old configuration stays in effect.
func (a *appCfg) reload() error {
	old := a.config()

	v, err := newViper(a.flags)
	if err != nil {
		return err
	}

	if old.IsSet(cmdConfig) {
		v.Set(cmdConfig, old.Get(cmdConfig))
	}
	if old.IsSet(cmdConfigDir) {
		v.Set(cmdConfigDir, old.Get(cmdConfigDir))
	}

	if err = readInConfig(v); err != nil {
		return err
	}

	a.setConfig(v)
	return nil
}
|
|
||||||
|
|
||||||
func (a *appCfg) config() *viper.Viper {
|
|
||||||
a.mu.RLock()
|
|
||||||
defer a.mu.RUnlock()
|
|
||||||
|
|
||||||
return a.settings
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *appCfg) setConfig(v *viper.Viper) {
|
|
||||||
a.mu.Lock()
|
|
||||||
a.settings = v
|
|
||||||
a.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// newViper builds a viper instance wired to the given flag set: it enables
// HTTP_GW_-prefixed environment variables (dots in keys become
// underscores), binds the flags and applies the defaults. Supplying both
// TLS cert and key files via flags implicitly enables TLS on the first
// server entry. The call order matters: flags must be bound before
// defaults are derived from them.
func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
	v := viper.New()

	v.AutomaticEnv()
	v.SetEnvPrefix(Prefix)
	v.AllowEmptyEnv(true)
	v.SetConfigType("yaml")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

	if err := bindFlags(v, flags); err != nil {
		return nil, err
	}

	setDefaults(v, flags)

	// Both TLS files present implies the user wants TLS on the default
	// (index 0) server even without an explicit enabled flag.
	if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
		v.Set(cfgServer+".0."+cfgTLSEnabled, true)
	}

	return v, nil
}
|
|
||||||
|
|
||||||
// settings parses the command line, builds the initial viper configuration
// (flags + environment + defaults), handles the one-shot help/version
// commands (which print and call os.Exit), and reads the configuration
// files. It panics on any setup error since the gateway cannot start
// without valid configuration.
func settings() *appCfg {
	// flags setup:
	flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
	flags.SetOutput(os.Stdout)
	flags.SortFlags = false

	flags.Bool(cmdPprof, false, "enable pprof")
	flags.Bool(cmdMetrics, false, "enable prometheus")

	help := flags.BoolP(cmdHelp, "h", false, "show help")
	version := flags.BoolP(cmdVersion, "v", false, "show version")

	flags.StringP(cmdWallet, "w", "", `path to the wallet`)
	flags.String(cmdAddress, "", `address of wallet account`)
	flags.StringArray(cmdConfig, nil, "config paths")
	flags.String(cmdConfigDir, "", "config dir path")
	flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout")
	flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout")
	flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout")
	flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer")

	flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
	flags.String(cfgTLSCertFile, "", "TLS certificate path")
	flags.String(cfgTLSKeyFile, "", "TLS key path")
	flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")

	flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")

	if err := flags.Parse(os.Args); err != nil {
		panic(err)
	}

	v, err := newViper(flags)
	if err != nil {
		panic(fmt.Errorf("bind flags: %w", err))
	}

	switch {
	case help != nil && *help:
		// Print the flag defaults plus the equivalent HTTP_GW_* environment
		// variables for every key that has a non-empty default, then exit.
		fmt.Printf("FrostFS HTTP Gateway %s\n", Version)
		flags.PrintDefaults()

		fmt.Println()
		fmt.Println("Default environments:")
		fmt.Println()
		keys := v.AllKeys()
		sort.Strings(keys)

		for i := range keys {
			if _, ok := ignore[keys[i]]; ok {
				continue
			}

			defaultValue := v.GetString(keys[i])
			if len(defaultValue) == 0 {
				continue
			}

			k := strings.Replace(keys[i], ".", "_", -1)
			fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue)
		}

		fmt.Println()
		fmt.Println("Peers preset:")
		fmt.Println()

		fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers))
		fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers))

		os.Exit(0)
	case version != nil && *version:
		fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version())
		os.Exit(0)
	}

	if err := readInConfig(v); err != nil {
		panic(err)
	}

	return &appCfg{
		flags:    flags,
		settings: v,
	}
}
|
|
||||||
|
|
||||||
// setDefaults registers default values for every configurable setting and
// derives per-peer and resolve-order defaults from the corresponding flag
// values (so flag-provided peers appear as peers.N.* entries).
func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
	// set defaults:

	// logger:
	v.SetDefault(cfgLoggerLevel, "debug")
	v.SetDefault(cfgLoggerDestination, "stdout")
	v.SetDefault(cfgLoggerSamplingEnabled, false)
	v.SetDefault(cfgLoggerSamplingThereafter, 100)
	v.SetDefault(cfgLoggerSamplingInitial, 100)
	v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)

	// pool:
	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

	// frostfs:
	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)

	// web-server:
	v.SetDefault(cfgWebReadBufferSize, 4096)
	v.SetDefault(cfgWebWriteBufferSize, 4096)
	v.SetDefault(cfgWebReadTimeout, time.Minute*10)
	v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
	v.SetDefault(cfgWebStreamRequestBody, true)
	v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)

	v.SetDefault(cfgWorkerPoolSize, 1000)
	// upload header
	v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)

	// metrics
	v.SetDefault(cfgPprofAddress, "localhost:8083")
	v.SetDefault(cfgPrometheusAddress, "localhost:8084")

	// resolve bucket
	v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
	v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})

	// multinet
	v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)

	// Mirror the resolve-order flag value as the config default.
	if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
		v.SetDefault(cfgResolveOrder, resolveMethods)
	}

	// Expand flag-provided peers into peers.N.{address,weight,priority}.
	if peers, err := flags.GetStringArray(cfgPeers); err == nil {
		for i := range peers {
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
		}
	}
}
|
|
||||||
|
|
||||||
func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
|
|
||||||
// Binding flags
|
|
||||||
if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlags(flags); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readInConfig(v *viper.Viper) error {
|
|
||||||
if v.IsSet(cmdConfig) {
|
|
||||||
if err := readConfig(v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.IsSet(cmdConfigDir) {
|
|
||||||
if err := readConfigDir(v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readConfigDir(v *viper.Viper) error {
|
|
||||||
cfgSubConfigDir := v.GetString(cmdConfigDir)
|
|
||||||
entries, err := os.ReadDir(cfgSubConfigDir)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
|
||||||
if entry.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ext := path.Ext(entry.Name())
|
|
||||||
if ext != ".yaml" && ext != ".yml" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readConfig(v *viper.Viper) error {
|
|
||||||
for _, fileName := range v.GetStringSlice(cmdConfig) {
|
|
||||||
if err := mergeConfig(v, fileName); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func mergeConfig(v *viper.Viper, fileName string) error {
|
|
||||||
cfgFile, err := os.Open(fileName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if errClose := cfgFile.Close(); errClose != nil {
|
|
||||||
panic(errClose)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return v.MergeConfig(cfgFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fetchLogTagsConfig reads logger.tags.N.{name,level} entries, stopping at
// the first entry without a name. A tag without an explicit level inherits
// defaultLvl. When no tags were parsed and the tags key is not explicitly
// set, the compiled-in defaultTags are enabled at defaultLvl.
func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
	res := make(map[string]zapcore.Level)

	for i := 0; ; i++ {
		name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
		if name == "" {
			break
		}

		// lvl is a value copy of defaultLvl, so Set mutates only this
		// entry's level, never the caller's default.
		lvl := defaultLvl
		level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
		if level != "" {
			if err := lvl.Set(level); err != nil {
				return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
			}
		}

		res[name] = lvl
	}

	if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
		for _, tag := range defaultTags {
			res[tag] = defaultLvl
		}
	}

	return res, nil
}
|
|
||||||
|
|
||||||
func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
|
|
||||||
reconnect := cfg.GetDuration(cfgReconnectInterval)
|
|
||||||
if reconnect <= 0 {
|
|
||||||
reconnect = defaultReconnectInterval
|
|
||||||
}
|
|
||||||
|
|
||||||
return reconnect
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
|
|
||||||
if !v.GetBool(cfgIndexPageEnabled) {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
|
|
||||||
if err != nil {
|
|
||||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
|
||||||
return "", true
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpl, err := io.ReadAll(reader)
|
|
||||||
if err != nil {
|
|
||||||
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
|
|
||||||
return "", true
|
|
||||||
}
|
|
||||||
|
|
||||||
l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp))
|
|
||||||
return string(tmpl), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchDefaultNamespaces(v *viper.Viper) []string {
|
|
||||||
namespaces := v.GetStringSlice(cfgResolveDefaultNamespaces)
|
|
||||||
|
|
||||||
for i := range namespaces { // to be set namespaces in env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
|
|
||||||
namespaces[i] = strings.Trim(namespaces[i], "\"")
|
|
||||||
}
|
|
||||||
|
|
||||||
return namespaces
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchCORSMaxAge(v *viper.Viper) int {
|
|
||||||
maxAge := v.GetInt(cfgCORSMaxAge)
|
|
||||||
if maxAge <= 0 {
|
|
||||||
maxAge = defaultCORSMaxAge
|
|
||||||
}
|
|
||||||
|
|
||||||
return maxAge
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
|
|
||||||
var servers []ServerInfo
|
|
||||||
seen := make(map[string]struct{})
|
|
||||||
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
key := cfgServer + "." + strconv.Itoa(i) + "."
|
|
||||||
|
|
||||||
var serverInfo ServerInfo
|
|
||||||
serverInfo.Address = v.GetString(key + "address")
|
|
||||||
serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled)
|
|
||||||
serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile)
|
|
||||||
serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile)
|
|
||||||
|
|
||||||
if serverInfo.Address == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := seen[serverInfo.Address]; ok {
|
|
||||||
log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[serverInfo.Address] = struct{}{}
|
|
||||||
servers = append(servers, serverInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
return servers
|
|
||||||
}
|
|
||||||
|
|
||||||
// initPools creates and dials the FrostFS object pool and tree pool using
// the loaded wallet key and the peers/timeouts from the configuration.
// Every failure is fatal: the gateway cannot serve requests without pools.
func (a *app) initPools(ctx context.Context) {
	key, err := getFrostFSKey(a.config(), a.log)
	if err != nil {
		a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
	}

	var prm pool.InitParameters
	var prmTree treepool.InitParameters

	prm.SetKey(&key.PrivateKey)
	prmTree.SetKey(key)
	a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())),
		logs.TagField(logs.TagApp))

	// Both pools share the same node set.
	for _, peer := range fetchPeers(a.log, a.config()) {
		prm.AddNode(peer)
		prmTree.AddNode(peer)
	}

	// Non-positive timeouts/intervals fall back to compiled-in defaults.
	connTimeout := a.config().GetDuration(cfgConTimeout)
	if connTimeout <= 0 {
		connTimeout = defaultConnectTimeout
	}
	prm.SetNodeDialTimeout(connTimeout)
	prmTree.SetNodeDialTimeout(connTimeout)

	streamTimeout := a.config().GetDuration(cfgStreamTimeout)
	if streamTimeout <= 0 {
		streamTimeout = defaultStreamTimeout
	}
	prm.SetNodeStreamTimeout(streamTimeout)
	prmTree.SetNodeStreamTimeout(streamTimeout)

	healthCheckTimeout := a.config().GetDuration(cfgReqTimeout)
	if healthCheckTimeout <= 0 {
		healthCheckTimeout = defaultRequestTimeout
	}
	prm.SetHealthcheckTimeout(healthCheckTimeout)
	prmTree.SetHealthcheckTimeout(healthCheckTimeout)

	rebalanceInterval := a.config().GetDuration(cfgRebalance)
	if rebalanceInterval <= 0 {
		rebalanceInterval = defaultRebalanceTimer
	}
	prm.SetClientRebalanceInterval(rebalanceInterval)
	prmTree.SetClientRebalanceInterval(rebalanceInterval)

	errorThreshold := a.config().GetUint32(cfgPoolErrorThreshold)
	if errorThreshold <= 0 {
		errorThreshold = defaultPoolErrorThreshold
	}
	prm.SetErrorThreshold(errorThreshold)
	prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
	prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))

	prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))

	// Shared gRPC dial options: tracing interceptors plus the multinet
	// context dialer.
	interceptors := []grpc.DialOption{
		grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
		grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
		grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
	}
	prm.SetGRPCDialOptions(interceptors...)
	prmTree.SetGRPCDialOptions(interceptors...)

	p, err := pool.NewPool(prm)
	if err != nil {
		a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
	}

	if err = p.Dial(ctx); err != nil {
		a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
	}

	// The netmap source reuses the freshly dialed object pool, so it is
	// wired in after p.Dial and before the tree pool is constructed.
	if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
		prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p), cache.NewNetmapCache(getNetmapCacheOptions(a.config(), a.log)), a.bucketCache, a.log))
	}

	treePool, err := treepool.NewPool(prmTree)
	if err != nil {
		a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
	}
	if err = treePool.Dial(ctx); err != nil {
		a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
	}

	a.pool = p
	a.treePool = treePool
	a.key = key
}
|
|
||||||
|
|
||||||
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
|
|
||||||
var nodes []pool.NodeParam
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
key := cfgPeers + "." + strconv.Itoa(i) + "."
|
|
||||||
address := v.GetString(key + "address")
|
|
||||||
weight := v.GetFloat64(key + "weight")
|
|
||||||
priority := v.GetInt(key + "priority")
|
|
||||||
|
|
||||||
if address == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if weight <= 0 { // unspecified or wrong
|
|
||||||
weight = 1
|
|
||||||
}
|
|
||||||
if priority <= 0 { // unspecified or wrong
|
|
||||||
priority = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
nodes = append(nodes, pool.NewNodeParam(priority, address, weight))
|
|
||||||
|
|
||||||
l.Info(logs.AddedStoragePeer,
|
|
||||||
zap.Int("priority", priority),
|
|
||||||
zap.String("address", address),
|
|
||||||
zap.Float64("weight", weight),
|
|
||||||
logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
|
|
||||||
return nodes
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
|
|
||||||
softMemoryLimit := cfg.GetSizeInBytes(cfgSoftMemoryLimit)
|
|
||||||
if softMemoryLimit <= 0 {
|
|
||||||
softMemoryLimit = defaultSoftMemoryLimit
|
|
||||||
}
|
|
||||||
|
|
||||||
return int64(softMemoryLimit)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
|
|
||||||
cacheCfg := cache.DefaultBucketConfig(l)
|
|
||||||
|
|
||||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
|
|
||||||
cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size)
|
|
||||||
|
|
||||||
return cacheCfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConfig {
|
|
||||||
cacheCfg := cache.DefaultNetmapConfig(l)
|
|
||||||
|
|
||||||
cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgNetmapCacheLifetime, cacheCfg.Lifetime)
|
|
||||||
|
|
||||||
return cacheCfg
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
|
|
||||||
if v.IsSet(cfgEntry) {
|
|
||||||
lifetime := v.GetDuration(cfgEntry)
|
|
||||||
if lifetime <= 0 {
|
|
||||||
l.Error(logs.InvalidLifetimeUsingDefaultValue,
|
|
||||||
zap.String("parameter", cfgEntry),
|
|
||||||
zap.Duration("value in config", lifetime),
|
|
||||||
zap.Duration("default", defaultValue),
|
|
||||||
logs.TagField(logs.TagApp))
|
|
||||||
} else {
|
|
||||||
return lifetime
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int {
|
|
||||||
if v.IsSet(cfgEntry) {
|
|
||||||
size := v.GetInt(cfgEntry)
|
|
||||||
if size <= 0 {
|
|
||||||
l.Error(logs.InvalidCacheSizeUsingDefaultValue,
|
|
||||||
zap.String("parameter", cfgEntry),
|
|
||||||
zap.Int("value in config", size),
|
|
||||||
zap.Int("default", defaultValue),
|
|
||||||
logs.TagField(logs.TagApp))
|
|
||||||
} else {
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return defaultValue
|
|
||||||
}
|
|
||||||
|
|
||||||
func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
|
|
||||||
source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
|
|
||||||
if err != nil {
|
|
||||||
logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
return source
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchMultinetConfig(v *viper.Viper, l *zap.Logger) (cfg internalnet.Config) {
|
|
||||||
cfg.Enabled = v.GetBool(cfgMultinetEnabled)
|
|
||||||
cfg.Balancer = v.GetString(cfgMultinetBalancer)
|
|
||||||
cfg.Restrict = v.GetBool(cfgMultinetRestrict)
|
|
||||||
cfg.FallbackDelay = v.GetDuration(cfgMultinetFallbackDelay)
|
|
||||||
cfg.Subnets = make([]internalnet.Subnet, 0, 5)
|
|
||||||
cfg.EventHandler = internalnet.NewLogEventHandler(l)
|
|
||||||
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
key := cfgMultinetSubnets + "." + strconv.Itoa(i) + "."
|
|
||||||
subnet := internalnet.Subnet{}
|
|
||||||
|
|
||||||
subnet.Prefix = v.GetString(key + "mask")
|
|
||||||
if subnet.Prefix == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
subnet.SourceIPs = v.GetStringSlice(key + "source_ips")
|
|
||||||
cfg.Subnets = append(cfg.Subnets, subnet)
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
|
|
||||||
attributes := make(map[string]string)
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
key := cfgTracingAttributes + "." + strconv.Itoa(i) + "."
|
|
||||||
attrKey := v.GetString(key + "key")
|
|
||||||
attrValue := v.GetString(key + "value")
|
|
||||||
if attrKey == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := attributes[attrKey]; ok {
|
|
||||||
return nil, fmt.Errorf("tracing attribute key %s defined more than once", attrKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
if attrValue == "" {
|
|
||||||
return nil, fmt.Errorf("empty tracing attribute value for key %s", attrKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
attributes[attrKey] = attrValue
|
|
||||||
}
|
|
||||||
|
|
||||||
return attributes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchArchiveCompression(v *viper.Viper) bool {
|
|
||||||
if v.IsSet(cfgZipCompression) {
|
|
||||||
return v.GetBool(cfgZipCompression)
|
|
||||||
}
|
|
||||||
return v.GetBool(cfgArchiveCompression)
|
|
||||||
}
|
|
|
@ -1,60 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestConfigReload verifies that appCfg.reload picks up changes to the
// config file while preserving flag-provided values: after the file is
// truncated, file-driven settings revert to their defaults but the
// --connect_timeout flag value survives the reload.
func TestConfigReload(t *testing.T) {
	f, err := os.CreateTemp("", "conf")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.Remove(f.Name()))
	}()

	// NOTE(review): YAML nesting below reconstructed with 2-space indent;
	// the extraction stripped the original indentation — confirm.
	confData := `
pprof:
  enabled: true

resolve_bucket:
  default_namespaces: [""]

resolve_order:
  - nns
`

	_, err = f.WriteString(confData)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	cfg := settings()

	require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--connect_timeout", "15s"}))
	require.NoError(t, cfg.reload())

	require.True(t, cfg.config().GetBool(cfgPprofEnabled))
	require.Equal(t, []string{""}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
	require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
	require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))

	// Empty the file: reload must restore defaults for file-driven settings
	// while keeping the flag-provided connect timeout.
	require.NoError(t, os.Truncate(f.Name(), 0))
	require.NoError(t, cfg.reload())

	require.False(t, cfg.config().GetBool(cfgPprofEnabled))
	require.Equal(t, []string{"", "root"}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
	require.Equal(t, []string{resolver.NNSResolver, resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
	require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
}
|
|
||||||
|
|
||||||
// TestSetTLSEnabled checks that supplying both TLS cert and key file flags
// implicitly enables TLS for the first server entry (see newViper).
func TestSetTLSEnabled(t *testing.T) {
	cfg := settings()

	require.NoError(t, cfg.flags.Parse([]string{"--" + cfgTLSCertFile, "tls.crt", "--" + cfgTLSKeyFile, "tls.key"}))
	require.NoError(t, cfg.reload())

	require.True(t, cfg.config().GetBool(cfgServer+".0."+cfgTLSEnabled))
}
|
|
|
@ -1,174 +0,0 @@
|
||||||
# Wallet section.
|
|
||||||
|
|
||||||
# Path to wallet.
|
|
||||||
HTTP_GW_WALLET_PATH=/path/to/wallet.json
|
|
||||||
# Account address. If omitted default one will be used.
|
|
||||||
HTTP_GW_WALLET_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
|
|
||||||
# Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
|
|
||||||
HTTP_GW_WALLET_PASSPHRASE=pwd
|
|
||||||
|
|
||||||
# Enable metrics.
|
|
||||||
HTTP_GW_PPROF_ENABLED=true
|
|
||||||
HTTP_GW_PPROF_ADDRESS=localhost:8083
|
|
||||||
|
|
||||||
HTTP_GW_PROMETHEUS_ENABLED=true
|
|
||||||
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084
|
|
||||||
|
|
||||||
# Logger.
|
|
||||||
HTTP_GW_LOGGER_LEVEL=debug
|
|
||||||
HTTP_GW_LOGGER_SAMPLING_ENABLED=false
|
|
||||||
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
|
|
||||||
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
|
|
||||||
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
|
|
||||||
HTTP_GW_LOGGER_TAGS_0_NAME=app
|
|
||||||
HTTP_GW_LOGGER_TAGS_1_NAME=datapath
|
|
||||||
|
|
||||||
HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
|
|
||||||
HTTP_GW_SERVER_0_TLS_ENABLED=false
|
|
||||||
HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert
|
|
||||||
HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key
|
|
||||||
HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444
|
|
||||||
HTTP_GW_SERVER_1_TLS_ENABLED=true
|
|
||||||
HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
|
|
||||||
HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
|
|
||||||
|
|
||||||
# How often to reconnect to the servers
|
|
||||||
HTTP_GW_RECONNECT_INTERVAL=1m
|
|
||||||
|
|
||||||
# Nodes configuration.
# This configuration makes the gateway use the first node (grpc://s01.frostfs.devenv:8080)
# while it's healthy. Otherwise, the gateway uses the second node (grpc://s02.frostfs.devenv:8080)
# for 10% of requests and the third node for 90% of requests.
|
|
||||||
|
|
||||||
# Peer 1.
|
|
||||||
# Endpoint.
|
|
||||||
HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080
|
|
||||||
# Until nodes with the same priority level are healthy
|
|
||||||
# nodes with other priority are not used.
|
|
||||||
# The lower the value, the higher the priority.
|
|
||||||
HTTP_GW_PEERS_0_PRIORITY=1
|
|
||||||
# Load distribution proportion for nodes with the same priority.
|
|
||||||
HTTP_GW_PEERS_0_WEIGHT=1
|
|
||||||
# Peer 2.
|
|
||||||
HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080
|
|
||||||
HTTP_GW_PEERS_1_PRIORITY=2
|
|
||||||
HTTP_GW_PEERS_1_WEIGHT=1
|
|
||||||
# Peer 3.
|
|
||||||
HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080
|
|
||||||
HTTP_GW_PEERS_2_PRIORITY=2
|
|
||||||
HTTP_GW_PEERS_2_WEIGHT=9
|
|
||||||
|
|
||||||
# Per-connection buffer size for requests' reading.
|
|
||||||
# This also limits the maximum header size.
|
|
||||||
HTTP_GW_WEB_READ_BUFFER_SIZE=4096
|
|
||||||
# Per-connection buffer size for responses' writing.
|
|
||||||
HTTP_GW_WEB_WRITE_BUFFER_SIZE=4096
|
|
||||||
# ReadTimeout is the amount of time allowed to read
|
|
||||||
# the full request including body. The connection's read
|
|
||||||
# deadline is reset when the connection opens, or for
|
|
||||||
# keep-alive connections after the first byte has been read.
|
|
||||||
HTTP_GW_WEB_READ_TIMEOUT=10m
|
|
||||||
# WriteTimeout is the maximum duration before timing out
|
|
||||||
# writes of the response. It is reset after the request handler
|
|
||||||
# has returned.
|
|
||||||
HTTP_GW_WEB_WRITE_TIMEOUT=5m
|
|
||||||
# StreamRequestBody enables request body streaming,
|
|
||||||
# and calls the handler sooner when given body is
|
|
||||||
# larger than the current limit.
|
|
||||||
HTTP_GW_WEB_STREAM_REQUEST_BODY=true
|
|
||||||
# Maximum request body size.
|
|
||||||
# The server rejects requests with bodies exceeding this limit.
|
|
||||||
HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE=4194304
|
|
||||||
|
|
||||||
# RPC endpoint to be able to use nns container resolving.
|
|
||||||
HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333
|
|
||||||
# The order in which resolvers are used to find a container id by name.
|
|
||||||
HTTP_GW_RESOLVE_ORDER="nns dns"
|
|
||||||
|
|
||||||
# Create timestamp for object if it isn't provided by header.
|
|
||||||
HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false
|
|
||||||
|
|
||||||
# Timeout to dial node.
|
|
||||||
HTTP_GW_CONNECT_TIMEOUT=5s
|
|
||||||
# Timeout for individual operations in streaming RPC.
|
|
||||||
HTTP_GW_STREAM_TIMEOUT=10s
|
|
||||||
# Timeout to check node health during rebalance.
|
|
||||||
HTTP_GW_REQUEST_TIMEOUT=5s
|
|
||||||
# Interval to check nodes health.
|
|
||||||
HTTP_GW_REBALANCE_TIMER=30s
|
|
||||||
# The number of errors on connection after which node is considered as unhealthy
|
|
||||||
HTTP_GW_POOL_ERROR_THRESHOLD=100
|
|
||||||
|
|
||||||
# Enable archive compression to download files by common prefix.
|
|
||||||
# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead.
|
|
||||||
HTTP_GW_ZIP_COMPRESSION=false
|
|
||||||
|
|
||||||
# Enable archive compression to download files by common prefix.
|
|
||||||
HTTP_GW_ARCHIVE_COMPRESSION=false
|
|
||||||
|
|
||||||
HTTP_GW_TRACING_ENABLED=true
|
|
||||||
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
|
|
||||||
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
|
|
||||||
HTTP_GW_TRACING_TRUSTED_CA=""
|
|
||||||
HTTP_GW_TRACING_ATTRIBUTES_0_KEY=key0
|
|
||||||
HTTP_GW_TRACING_ATTRIBUTES_0_VALUE=value
|
|
||||||
HTTP_GW_TRACING_ATTRIBUTES_1_KEY=key1
|
|
||||||
HTTP_GW_TRACING_ATTRIBUTES_1_VALUE=value
|
|
||||||
|
|
||||||
HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824
|
|
||||||
|
|
||||||
# Parameters of requests to FrostFS
|
|
||||||
# This flag enables client side object preparing.
|
|
||||||
HTTP_GW_FROSTFS_CLIENT_CUT=false
|
|
||||||
# Sets max buffer size for read payload in put operations.
|
|
||||||
HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
|
|
||||||
|
|
||||||
# Caching
|
|
||||||
# Cache which contains mapping of bucket name to bucket info
|
|
||||||
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
|
|
||||||
HTTP_GW_CACHE_BUCKETS_SIZE=1000
|
|
||||||
# Cache which stores netmap
|
|
||||||
HTTP_GW_CACHE_NETMAP_LIFETIME=1m
|
|
||||||
|
|
||||||
# Header to determine zone to resolve bucket name
|
|
||||||
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
|
|
||||||
# Namespaces that should be handled as default
|
|
||||||
HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
|
|
||||||
|
|
||||||
# Max attempt to make successful tree request.
|
|
||||||
# default value is 0 that means the number of attempts equals to number of nodes in pool.
|
|
||||||
HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
|
|
||||||
|
|
||||||
HTTP_GW_CORS_ALLOW_ORIGIN="*"
|
|
||||||
HTTP_GW_CORS_ALLOW_METHODS="GET" "POST"
|
|
||||||
HTTP_GW_CORS_ALLOW_HEADERS="*"
|
|
||||||
HTTP_GW_CORS_EXPOSE_HEADERS="*"
|
|
||||||
HTTP_GW_CORS_ALLOW_CREDENTIALS=false
|
|
||||||
HTTP_GW_CORS_MAX_AGE=600
|
|
||||||
|
|
||||||
# Multinet properties
|
|
||||||
# Enable multinet support
|
|
||||||
HTTP_GW_MULTINET_ENABLED=false
|
|
||||||
# Strategy to pick source IP address
|
|
||||||
HTTP_GW_MULTINET_BALANCER=roundrobin
|
|
||||||
# Restrict requests with unknown destination subnet
|
|
||||||
HTTP_GW_MULTINET_RESTRICT=false
|
|
||||||
# Delay between ipv6 to ipv4 fallback switch
|
|
||||||
HTTP_GW_MULTINET_FALLBACK_DELAY=300ms
|
|
||||||
# List of subnets and IP addresses to use as source for those subnets
|
|
||||||
HTTP_GW_MULTINET_SUBNETS_0_MASK=1.2.3.4/24
HTTP_GW_MULTINET_SUBNETS_0_SOURCE_IPS=1.2.3.4 1.2.3.5
|
|
||||||
|
|
||||||
# Number of workers in handler's worker pool
|
|
||||||
HTTP_GW_WORKER_POOL_SIZE=1000
|
|
||||||
|
|
||||||
# Index page
|
|
||||||
# Enable index page support
|
|
||||||
HTTP_GW_INDEX_PAGE_ENABLED=false
|
|
||||||
# Index page template path
|
|
||||||
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
|
|
||||||
|
|
||||||
# Enable using fallback path to search for an object by attribute
|
|
||||||
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
|
|
||||||
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
|
|
||||||
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
|
|
|
@ -1,193 +0,0 @@
|
||||||
wallet:
|
|
||||||
path: /path/to/wallet.json # Path to wallet.
|
|
||||||
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP # Account address. If omitted default one will be used.
|
|
||||||
passphrase: pwd # Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here.
|
|
||||||
|
|
||||||
pprof:
|
|
||||||
enabled: false # Enable pprof.
|
|
||||||
address: localhost:8083
|
|
||||||
prometheus:
|
|
||||||
enabled: false # Enable metrics.
|
|
||||||
address: localhost:8084
|
|
||||||
|
|
||||||
tracing:
|
|
||||||
enabled: true
|
|
||||||
exporter: "otlp_grpc"
|
|
||||||
endpoint: "localhost:4317"
|
|
||||||
trusted_ca: ""
|
|
||||||
attributes:
|
|
||||||
- key: key0
|
|
||||||
value: value
|
|
||||||
- key: key1
|
|
||||||
value: value
|
|
||||||
|
|
||||||
logger:
|
|
||||||
level: debug # Log level.
|
|
||||||
destination: stdout
|
|
||||||
sampling:
|
|
||||||
enabled: false
|
|
||||||
initial: 100
|
|
||||||
thereafter: 100
|
|
||||||
interval: 1s
|
|
||||||
tags:
|
|
||||||
- name: app
|
|
||||||
- name: datapath
|
|
||||||
level: debug
|
|
||||||
|
|
||||||
server:
|
|
||||||
- address: 0.0.0.0:8080
|
|
||||||
tls:
|
|
||||||
enabled: false
|
|
||||||
cert_file: /path/to/cert
|
|
||||||
key_file: /path/to/key
|
|
||||||
- address: 0.0.0.0:8081
|
|
||||||
tls:
|
|
||||||
enabled: false
|
|
||||||
cert_file: /path/to/cert
|
|
||||||
key_file: /path/to/key
|
|
||||||
|
|
||||||
# Nodes configuration.
|
|
||||||
# This configuration make the gateway use the first node (grpc://s01.frostfs.devenv:8080)
|
|
||||||
# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.frostfs.devenv:8080)
|
|
||||||
# for 10% of requests and the third node for 90% of requests.
|
|
||||||
peers:
|
|
||||||
0:
|
|
||||||
# Endpoint.
|
|
||||||
address: grpc://s01.frostfs.devenv:8080
|
|
||||||
|
|
||||||
# Until nodes with the same priority level are healthy
|
|
||||||
# nodes with other priority are not used.
|
|
||||||
# The lower the value, the higher the priority.
|
|
||||||
priority: 1
|
|
||||||
|
|
||||||
# Load distribution proportion for nodes with the same priority.
|
|
||||||
weight: 1
|
|
||||||
1:
|
|
||||||
address: grpc://s02.frostfs.devenv:8080
|
|
||||||
priority: 2
|
|
||||||
weight: 1
|
|
||||||
2:
|
|
||||||
address: grpc://s03.frostfs.devenv:8080
|
|
||||||
priority: 2
|
|
||||||
weight: 9
|
|
||||||
|
|
||||||
reconnect_interval: 1m
|
|
||||||
|
|
||||||
web:
|
|
||||||
# Per-connection buffer size for requests' reading.
|
|
||||||
# This also limits the maximum header size.
|
|
||||||
read_buffer_size: 4096
|
|
||||||
|
|
||||||
# Per-connection buffer size for responses' writing.
|
|
||||||
write_buffer_size: 4096
|
|
||||||
|
|
||||||
# ReadTimeout is the amount of time allowed to read
|
|
||||||
# the full request including body. The connection's read
|
|
||||||
# deadline is reset when the connection opens, or for
|
|
||||||
# keep-alive connections after the first byte has been read.
|
|
||||||
read_timeout: 10m
|
|
||||||
|
|
||||||
# WriteTimeout is the maximum duration before timing out
|
|
||||||
# writes of the response. It is reset after the request handler
|
|
||||||
# has returned.
|
|
||||||
write_timeout: 5m
|
|
||||||
|
|
||||||
# StreamRequestBody enables request body streaming,
|
|
||||||
# and calls the handler sooner when given body is
|
|
||||||
# larger then the current limit.
|
|
||||||
stream_request_body: true
|
|
||||||
|
|
||||||
# Maximum request body size.
|
|
||||||
# The server rejects requests with bodies exceeding this limit.
|
|
||||||
max_request_body_size: 4194304
|
|
||||||
|
|
||||||
# RPC endpoint to be able to use nns container resolving.
|
|
||||||
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
|
||||||
# The order in which resolvers are used to find an container id by name.
|
|
||||||
resolve_order:
|
|
||||||
- nns
|
|
||||||
- dns
|
|
||||||
|
|
||||||
upload_header:
|
|
||||||
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
|
|
||||||
|
|
||||||
connect_timeout: 5s # Timeout to dial node.
|
|
||||||
stream_timeout: 10s # Timeout for individual operations in streaming RPC.
|
|
||||||
request_timeout: 5s # Timeout to check node health during rebalance.
|
|
||||||
rebalance_timer: 30s # Interval to check nodes health.
|
|
||||||
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
|
|
||||||
|
|
||||||
# Number of workers in handler's worker pool
|
|
||||||
worker_pool_size: 1000
|
|
||||||
|
|
||||||
# Enables index page to see objects list for specified container and prefix
|
|
||||||
index_page:
|
|
||||||
enabled: false
|
|
||||||
template_path: internal/handler/templates/index.gotmpl
|
|
||||||
|
|
||||||
# Deprecated: Use archive.compression instead
|
|
||||||
zip:
|
|
||||||
# Enables zip compression to download files by common prefix.
|
|
||||||
compression: false
|
|
||||||
|
|
||||||
archive:
|
|
||||||
# Enables archive compression to download files by common prefix.
|
|
||||||
compression: false
|
|
||||||
|
|
||||||
runtime:
|
|
||||||
soft_memory_limit: 1gb
|
|
||||||
|
|
||||||
# Parameters of requests to FrostFS
|
|
||||||
frostfs:
|
|
||||||
# This flag enables client side object preparing.
|
|
||||||
client_cut: false
|
|
||||||
# Sets max buffer size for read payload in put operations.
|
|
||||||
buffer_max_size_for_put: 1048576
|
|
||||||
# Max attempt to make successful tree request.
|
|
||||||
# default value is 0 that means the number of attempts equals to number of nodes in pool.
|
|
||||||
tree_pool_max_attempts: 0
|
|
||||||
|
|
||||||
# Caching
|
|
||||||
cache:
|
|
||||||
# Cache which contains mapping of bucket name to bucket info
|
|
||||||
buckets:
|
|
||||||
lifetime: 1m
|
|
||||||
size: 1000
|
|
||||||
# Cache which stores netmap
|
|
||||||
netmap:
|
|
||||||
lifetime: 1m
|
|
||||||
|
|
||||||
resolve_bucket:
|
|
||||||
namespace_header: X-Frostfs-Namespace
|
|
||||||
default_namespaces: [ "", "root" ]
|
|
||||||
|
|
||||||
cors:
|
|
||||||
allow_origin: ""
|
|
||||||
allow_methods: []
|
|
||||||
allow_headers: []
|
|
||||||
expose_headers: []
|
|
||||||
allow_credentials: false
|
|
||||||
max_age: 600
|
|
||||||
|
|
||||||
# Multinet properties
|
|
||||||
multinet:
|
|
||||||
# Enable multinet support
|
|
||||||
enabled: false
|
|
||||||
# Strategy to pick source IP address
|
|
||||||
balancer: roundrobin
|
|
||||||
# Restrict requests with unknown destination subnet
|
|
||||||
restrict: false
|
|
||||||
# Delay between ipv6 to ipv4 fallback switch
|
|
||||||
fallback_delay: 300ms
|
|
||||||
# List of subnets and IP addresses to use as source for those subnets
|
|
||||||
subnets:
|
|
||||||
- mask: 1.2.3.4/24
|
|
||||||
source_ips:
|
|
||||||
- 1.2.3.4
|
|
||||||
- 1.2.3.5
|
|
||||||
|
|
||||||
features:
|
|
||||||
# Enable using fallback path to search for a object by attribute
|
|
||||||
enable_filepath_fallback: false
|
|
||||||
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
|
|
||||||
tree_pool_netmap_support: true
|
|
|
@ -1,3 +0,0 @@
|
||||||
pprof:
|
|
||||||
enabled: true
|
|
||||||
address: localhost:8083
|
|
|
@ -1,3 +0,0 @@
|
||||||
prometheus:
|
|
||||||
enabled: true
|
|
||||||
address: localhost:8084
|
|
5
debian/changelog
vendored
5
debian/changelog
vendored
|
@ -1,5 +0,0 @@
|
||||||
frostfs-http-gw (0.0.0) stable; urgency=medium
|
|
||||||
|
|
||||||
* Please see CHANGELOG.md
|
|
||||||
|
|
||||||
-- TrueCloudLab <tech@frostfs.info> Wed, 24 Aug 2022 18:29:49 +0300
|
|
14
debian/control
vendored
14
debian/control
vendored
|
@ -1,14 +0,0 @@
|
||||||
Source: frostfs-http-gw
|
|
||||||
Section: frostfs
|
|
||||||
Priority: optional
|
|
||||||
Maintainer: TrueCloudLab <tech@frostfs.info>
|
|
||||||
Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts
|
|
||||||
Standards-Version: 4.5.1
|
|
||||||
Homepage: https://frostfs.info/
|
|
||||||
Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git
|
|
||||||
Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
|
||||||
|
|
||||||
Package: frostfs-http-gw
|
|
||||||
Architecture: any
|
|
||||||
Depends: ${misc:Depends}
|
|
||||||
Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard.
|
|
25
debian/copyright
vendored
25
debian/copyright
vendored
|
@ -1,25 +0,0 @@
|
||||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
|
||||||
Upstream-Name: frostfs-http-gw
|
|
||||||
Upstream-Contact: tech@frostfs.info
|
|
||||||
Source: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
|
||||||
|
|
||||||
Files: *
|
|
||||||
Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project
|
|
||||||
(https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md)
|
|
||||||
2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project
|
|
||||||
(https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/src/branch/master/CREDITS.md)
|
|
||||||
|
|
||||||
|
|
||||||
License: GPL-3
|
|
||||||
This program is free software: you can redistribute it and/or modify it
|
|
||||||
under the terms of the GNU General Public License as published
|
|
||||||
by the Free Software Foundation; version 3.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
||||||
General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program or at /usr/share/common-licenses/GPL-3.
|
|
||||||
If not, see <http://www.gnu.org/licenses/>.
|
|
2
debian/frostfs-http-gw.dirs
vendored
2
debian/frostfs-http-gw.dirs
vendored
|
@ -1,2 +0,0 @@
|
||||||
etc/frostfs
|
|
||||||
srv/frostfs_cache
|
|
4
debian/frostfs-http-gw.docs
vendored
4
debian/frostfs-http-gw.docs
vendored
|
@ -1,4 +0,0 @@
|
||||||
docs/gate-configuration.md
|
|
||||||
README.md
|
|
||||||
CREDITS.md
|
|
||||||
CONTRIBUTING.md
|
|
1
debian/frostfs-http-gw.examples
vendored
1
debian/frostfs-http-gw.examples
vendored
|
@ -1 +0,0 @@
|
||||||
config/*
|
|
2
debian/frostfs-http-gw.install
vendored
2
debian/frostfs-http-gw.install
vendored
|
@ -1,2 +0,0 @@
|
||||||
bin/frostfs-http-gw usr/bin
|
|
||||||
config/config.yaml etc/frostfs/http
|
|
51
debian/frostfs-http-gw.postinst
vendored
51
debian/frostfs-http-gw.postinst
vendored
|
@ -1,51 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# postinst script for frostfs-http-gw
|
|
||||||
#
|
|
||||||
# see: dh_installdeb(1)
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# summary of how this script can be called:
|
|
||||||
# * <postinst> `configure' <most-recently-configured-version>
|
|
||||||
# * <old-postinst> `abort-upgrade' <new version>
|
|
||||||
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
|
|
||||||
# <new-version>
|
|
||||||
# * <postinst> `abort-remove'
|
|
||||||
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
|
|
||||||
# <failed-install-package> <version> `removing'
|
|
||||||
# <conflicting-package> <version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
configure)
|
|
||||||
USERNAME=http
|
|
||||||
id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -m -U -c "FrostFS HTTP gateway" frostfs-$USERNAME
|
|
||||||
if ! dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then
|
|
||||||
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME
|
|
||||||
chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true
|
|
||||||
chmod -f 0750 /etc/frostfs/$USERNAME
|
|
||||||
chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true
|
|
||||||
fi
|
|
||||||
USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6)
|
|
||||||
if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then
|
|
||||||
chown -f frostfs-$USERNAME: "$USERDIR"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
|
|
||||||
abort-upgrade|abort-remove|abort-deconfigure)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "postinst called with unknown argument \`$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# dh_installdeb will replace this with shell code automatically
|
|
||||||
# generated by other debhelper scripts.
|
|
||||||
|
|
||||||
#DEBHELPER#
|
|
||||||
|
|
||||||
exit 0
|
|
41
debian/frostfs-http-gw.postrm
vendored
41
debian/frostfs-http-gw.postrm
vendored
|
@ -1,41 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# postrm script for frostfs-http-gw
|
|
||||||
#
|
|
||||||
# see: dh_installdeb(1)
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# summary of how this script can be called:
|
|
||||||
# * <postrm> `remove'
|
|
||||||
# * <postrm> `purge'
|
|
||||||
# * <old-postrm> `upgrade' <new-version>
|
|
||||||
# * <new-postrm> `failed-upgrade' <old-version>
|
|
||||||
# * <new-postrm> `abort-install'
|
|
||||||
# * <new-postrm> `abort-install' <old-version>
|
|
||||||
# * <new-postrm> `abort-upgrade' <old-version>
|
|
||||||
# * <disappearer's-postrm> `disappear' <overwriter>
|
|
||||||
# <overwriter-version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
purge)
|
|
||||||
rm -rf /srv/frostfs_cache
|
|
||||||
;;
|
|
||||||
|
|
||||||
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "postrm called with unknown argument \`$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# dh_installdeb will replace this with shell code automatically
|
|
||||||
# generated by other debhelper scripts.
|
|
||||||
|
|
||||||
#DEBHELPER#
|
|
||||||
|
|
||||||
exit 0
|
|
35
debian/frostfs-http-gw.preinst
vendored
35
debian/frostfs-http-gw.preinst
vendored
|
@ -1,35 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# preinst script for frostfs-http-gw
|
|
||||||
#
|
|
||||||
# see: dh_installdeb(1)
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# summary of how this script can be called:
|
|
||||||
# * <new-preinst> `install'
|
|
||||||
# * <new-preinst> `install' <old-version>
|
|
||||||
# * <new-preinst> `upgrade' <old-version>
|
|
||||||
# * <old-preinst> `abort-upgrade' <new-version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
install|upgrade)
|
|
||||||
;;
|
|
||||||
|
|
||||||
abort-upgrade)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "preinst called with unknown argument \`$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# dh_installdeb will replace this with shell code automatically
|
|
||||||
# generated by other debhelper scripts.
|
|
||||||
|
|
||||||
#DEBHELPER#
|
|
||||||
|
|
||||||
exit 0
|
|
38
debian/frostfs-http-gw.prerm
vendored
38
debian/frostfs-http-gw.prerm
vendored
|
@ -1,38 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
# prerm script for frostfs-http-gw
|
|
||||||
#
|
|
||||||
# see: dh_installdeb(1)
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
# summary of how this script can be called:
|
|
||||||
# * <prerm> `remove'
|
|
||||||
# * <old-prerm> `upgrade' <new-version>
|
|
||||||
# * <new-prerm> `failed-upgrade' <old-version>
|
|
||||||
# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
|
|
||||||
# * <deconfigured's-prerm> `deconfigure' `in-favour'
|
|
||||||
# <package-being-installed> <version> `removing'
|
|
||||||
# <conflicting-package> <version>
|
|
||||||
# for details, see https://www.debian.org/doc/debian-policy/ or
|
|
||||||
# the debian-policy package
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
remove|upgrade|deconfigure)
|
|
||||||
;;
|
|
||||||
|
|
||||||
failed-upgrade)
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "prerm called with unknown argument \`$1'" >&2
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# dh_installdeb will replace this with shell code automatically
|
|
||||||
# generated by other debhelper scripts.
|
|
||||||
|
|
||||||
#DEBHELPER#
|
|
||||||
|
|
||||||
exit 0
|
|
16
debian/frostfs-http-gw.service
vendored
16
debian/frostfs-http-gw.service
vendored
|
@ -1,16 +0,0 @@
|
||||||
[Unit]
|
|
||||||
Description=FrostFS HTTP Gateway
|
|
||||||
Requires=network.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml
|
|
||||||
User=frostfs-http
|
|
||||||
Group=frostfs-http
|
|
||||||
WorkingDirectory=/srv/frostfs_cache
|
|
||||||
Restart=always
|
|
||||||
RestartSec=5
|
|
||||||
PrivateTmp=true
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
14
debian/rules
vendored
14
debian/rules
vendored
|
@ -1,14 +0,0 @@
|
||||||
#!/usr/bin/make -f
|
|
||||||
|
|
||||||
# Do not try to strip Go binaries and do not run test
|
|
||||||
export DEB_BUILD_OPTIONS := nostrip nocheck
|
|
||||||
SERVICE = frostfs-http-gw
|
|
||||||
|
|
||||||
%:
|
|
||||||
dh $@
|
|
||||||
|
|
||||||
override_dh_installsystemd:
|
|
||||||
dh_installsystemd --no-enable --no-start $(SERVICE).service
|
|
||||||
|
|
||||||
override_dh_installchangelogs:
|
|
||||||
dh_installchangelogs -k CHANGELOG.md
|
|
1
debian/source/format
vendored
1
debian/source/format
vendored
|
@ -1 +0,0 @@
|
||||||
3.0 (quilt)
|
|
319
docs/api.md
319
docs/api.md
|
@ -1,319 +0,0 @@
|
||||||
# HTTP Gateway Specification
|
|
||||||
|
|
||||||
| Route | Description |
|
|
||||||
|-------------------------------------------------|--------------------------------------------------|
|
|
||||||
| `/upload/{cid}` | [Put object](#put-object) |
|
|
||||||
| `/get/{cid}/{oid}` | [Get object](#get-object) |
|
|
||||||
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
|
|
||||||
| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` | [Download objects in archive](#download-archive) |
|
|
||||||
|
|
||||||
**Note:** `cid` parameter can be base58 encoded container ID or container name
|
|
||||||
(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)).
|
|
||||||
|
|
||||||
Route parameters can be:
|
|
||||||
|
|
||||||
* `Single` - match a single path segment (cannot contain `/` and be empty)
|
|
||||||
* `Catch-All` - match everything (such parameter usually the last one in routes)
|
|
||||||
* `Query` - regular query parameter
|
|
||||||
|
|
||||||
### Bearer token
|
|
||||||
|
|
||||||
All routes can accept [bearer token](./authentication.md) from:
|
|
||||||
|
|
||||||
* `Authorization` header with `Bearer` type and base64-encoded token in
|
|
||||||
credentials field
|
|
||||||
* `Bearer` cookie with base64-encoded token contents
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
Header:
|
|
||||||
|
|
||||||
```
|
|
||||||
Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
|
|
||||||
```
|
|
||||||
|
|
||||||
Cookie:
|
|
||||||
|
|
||||||
```
|
|
||||||
cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB
|
|
||||||
```
|
|
||||||
|
|
||||||
## Put object
|
|
||||||
|
|
||||||
Route: `/upload/{cid}`
|
|
||||||
|
|
||||||
| Route parameter | Type | Description |
|
|
||||||
|-----------------|--------|---------------------------------------------------------|
|
|
||||||
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
|
||||||
|
|
||||||
### Methods
|
|
||||||
|
|
||||||
#### POST
|
|
||||||
|
|
||||||
Upload file as object with attributes to FrostFS.
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
|
|
||||||
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
|
|
||||||
| `X-Explode-Archive` | If set, gate tries to read files from uploading `tar` archive and creates an object for each file in it. Uploading `tar` could be compressed via Gzip by setting a `Content-Encoding` header. Sets a `FilePath` attribute as a relative path from archive root and a `FileName` as the last path element of the `FilePath`. |
|
|
||||||
| `Content-Encoding` | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. |
|
|
||||||
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
|
|
||||||
|
|
||||||
There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):
|
|
||||||
|
|
||||||
1. `X-Attribute-System-Expiration-Epoch: 100`
|
|
||||||
2. `X-Attribute-System-Expiration-Duration: 24h30m`
|
|
||||||
3. `X-Attribute-System-Expiration-Timestamp: 1637574797`
|
|
||||||
4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z`
|
|
||||||
|
|
||||||
which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way.
|
|
||||||
|
|
||||||
If you don't specify the `X-Attribute-Timestamp` header the `Timestamp` attribute can be set anyway
|
|
||||||
(see http-gw [configuration](gate-configuration.md#upload-header-section)).
|
|
||||||
|
|
||||||
The `X-Attribute-*` headers must be unique. If you provide several the same headers only one will be used.
|
|
||||||
Attribute key and value must be valid utf8 string. All attributes in sum must not be greater than 3mb.
|
|
||||||
|
|
||||||
###### Body
|
|
||||||
|
|
||||||
Body must contain multipart form with file.
|
|
||||||
The `filename` field from the multipart form will be set as `FileName` attribute of object
|
|
||||||
(can be overriden by `X-Attribute-FileName` header).
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|----------------------------------------------|
|
|
||||||
| 200 | Object created successfully. |
|
|
||||||
| 400 | Some error occurred during object uploading. |
|
|
||||||
|
|
||||||
## Get object
|
|
||||||
|
|
||||||
Route: `/get/{cid}/{oid}?[download=false]`
|
|
||||||
|
|
||||||
| Route parameter | Type | Description |
|
|
||||||
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
|
|
||||||
| `oid` | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. |
|
|
||||||
| `download` | Query | Set the `Content-Disposition` header as `attachment` in response.<br/> This make the browser to download object as file instead of showing it on the page. |
|
|
||||||
|
|
||||||
### Methods
|
|
||||||
|
|
||||||
#### GET
|
|
||||||
|
|
||||||
Get an object (payload and attributes) by an address.
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|----------------|------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
|
||||||
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
|
||||||
| `Content-Disposition` | Indicate how to browsers should treat file. <br/> Set `filename` as base part of `FileName` object attribute (if it's set, empty otherwise). |
|
|
||||||
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
|
||||||
| `Content-Length` | Size of object payload. |
|
|
||||||
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
|
||||||
| `X-Owner-Id` | Base58 encoded owner ID. |
|
|
||||||
| `X-Container-Id` | Base58 encoded container ID. |
|
|
||||||
| `X-Object-Id` | Base58 encoded object ID. |
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|------------------------------------------------|
|
|
||||||
| 200 | Object got successfully. |
|
|
||||||
| 400 | Some error occurred during object downloading. |
|
|
||||||
| 404 | Container or object not found. |
|
|
||||||
|
|
||||||
###### Body
|
|
||||||
|
|
||||||
Returns object data. If request performed from browser, either displays raw data or downloads it as
|
|
||||||
attachment if `download` query parameter is set to `true`.
|
|
||||||
If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified
|
|
||||||
S3-name was found.
|
|
||||||
|
|
||||||
#### HEAD
|
|
||||||
|
|
||||||
Get an object attributes by an address.
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|----------------|------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
|
||||||
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
|
||||||
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
|
||||||
| `Content-Length` | Size of object payload. |
|
|
||||||
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
|
||||||
| `X-Owner-Id` | Base58 encoded owner ID. |
|
|
||||||
| `X-Container-Id` | Base58 encoded container ID. |
|
|
||||||
| `X-Object-Id` | Base58 encoded object ID. |
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|---------------------------------------------------|
|
|
||||||
| 200 | Object head successfully. |
|
|
||||||
| 400 | Some error occurred during object HEAD operation. |
|
|
||||||
| 404 | Container or object not found. |
|
|
||||||
|
|
||||||
## Search object
|
|
||||||
|
|
||||||
Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]`
|
|
||||||
|
|
||||||
| Route parameter | Type | Description |
|
|
||||||
|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
|
||||||
| `attr_key` | Single | Object attribute key to search. |
|
|
||||||
| `attr_val` | Catch-All | Object attribute value to match. |
|
|
||||||
| `download` | Query | Set the `Content-Disposition` header as `attachment` in response. This make the browser to download object as file instead of showing it on the page. |
|
|
||||||
|
|
||||||
### Methods
|
|
||||||
|
|
||||||
#### GET
|
|
||||||
|
|
||||||
Find and get an object (payload and attributes) by a specific attribute.
|
|
||||||
If more than one object is found, an arbitrary one will be returned.
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|----------------|------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
|
||||||
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
|
||||||
| `Content-Disposition` | Indicate how to browsers should treat file. <br/> Set `filename` as base part of `FileName` object attribute (if it's set, empty otherwise). |
|
|
||||||
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
|
||||||
| `Content-Length` | Size of object payload. |
|
|
||||||
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
|
||||||
| `X-Owner-Id` | Base58 encoded owner ID. |
|
|
||||||
| `X-Container-Id` | Base58 encoded container ID. |
|
|
||||||
| `X-Object-Id` | Base58 encoded object ID. |
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|------------------------------------------------|
|
|
||||||
| 200 | Object got successfully. |
|
|
||||||
| 400 | Some error occurred during object downloading. |
|
|
||||||
| 404 | Container or object not found. |
|
|
||||||
|
|
||||||
#### HEAD
|
|
||||||
|
|
||||||
Get object attributes by a specific attribute.
|
|
||||||
If more than one object is found, an arbitrary one will be used to get attributes.
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|----------------|------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|------------------------|------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `X-Attribute-System-*` | System FrostFS object attributes <br/> (e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). |
|
|
||||||
| `X-Attribute-*` | Regular object attributes <br/> (e.g. `My-Tag` set "X-Attribute-My-Tag" header). |
|
|
||||||
| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. |
|
|
||||||
| `Content-Length` | Size of object payload. |
|
|
||||||
| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). |
|
|
||||||
| `X-Owner-Id` | Base58 encoded owner ID. |
|
|
||||||
| `X-Container-Id` | Base58 encoded container ID. |
|
|
||||||
| `X-Object-Id` | Base58 encoded object ID. |
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|---------------------------------------|
|
|
||||||
| 200 | Object head successfully. |
|
|
||||||
| 400 | Some error occurred during operation. |
|
|
||||||
| 404 | Container or object not found. |
|
|
||||||
|
|
||||||
## Download archive
|
|
||||||
|
|
||||||
Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`
|
|
||||||
|
|
||||||
| Route parameter | Type | Description |
|
|
||||||
|-----------------|-----------|---------------------------------------------------------|
|
|
||||||
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
|
|
||||||
| `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. |
|
|
||||||
|
|
||||||
### Methods
|
|
||||||
|
|
||||||
#### GET
|
|
||||||
|
|
||||||
Find objects by prefix for `FilePath` attributes. Return found objects in zip or tar archive.
|
|
||||||
Name of files in archive sets to `FilePath` attribute of objects.
|
|
||||||
Time of files sets to time when object has started downloading.
|
|
||||||
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` or
|
|
||||||
`/tar/{cid}/` route.
|
|
||||||
|
|
||||||
Archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)).
|
|
||||||
|
|
||||||
##### Request
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|----------------|------------------------------------|
|
|
||||||
| Common headers | See [bearer token](#bearer-token). |
|
|
||||||
|
|
||||||
##### Response
|
|
||||||
|
|
||||||
###### Headers
|
|
||||||
|
|
||||||
| Header | Description |
|
|
||||||
|-----------------------|-------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. |
|
|
||||||
| `Content-Type` | Indicate content type of object. Set to `application/zip` |
|
|
||||||
|
|
||||||
###### Status codes
|
|
||||||
|
|
||||||
| Status | Description |
|
|
||||||
|--------|-----------------------------------------------------|
|
|
||||||
| 200 | Object got successfully. |
|
|
||||||
| 400 | Some error occurred during object downloading. |
|
|
||||||
| 404 | Container or objects not found. |
|
|
||||||
| 500 | Some inner error (e.g. error on streaming objects). |
|
|
|
@ -1,108 +0,0 @@
|
||||||
# Request authentication
|
|
||||||
|
|
||||||
HTTP Gateway does not authorize requests. Gateway converts HTTP request to a
|
|
||||||
FrostFS request and signs it with its own private key.
|
|
||||||
|
|
||||||
You can always upload files to public containers (open for anyone to put
|
|
||||||
objects into), but for restricted containers you need to explicitly allow PUT
|
|
||||||
operations for a request signed with your HTTP Gateway keys.
|
|
||||||
|
|
||||||
If you don't want to manage gateway's secret keys and adjust policies when
|
|
||||||
gateway configuration changes (new gate, key rotation, etc) or you plan to use
|
|
||||||
public services, there is an option to let your application backend (or you) to
|
|
||||||
issue Bearer Tokens and pass them from the client via gate down to FrostFS level
|
|
||||||
to grant access.
|
|
||||||
|
|
||||||
FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
|
|
||||||
documentation for more details). There are two options to pass them to gateway:
|
|
||||||
* "Authorization" header with "Bearer" type and base64-encoded token in
|
|
||||||
credentials field
|
|
||||||
* "Bearer" cookie with base64-encoded token contents
|
|
||||||
|
|
||||||
For example, you have a mobile application frontend with a backend part storing
|
|
||||||
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
|
|
||||||
Bearer token and provides it to the frontend. Then, the mobile app may generate
|
|
||||||
some data and upload it via any available FrostFS HTTP Gateway by adding
|
|
||||||
the corresponding header to the upload request. Accessing policy protected data
|
|
||||||
works the same way.
|
|
||||||
|
|
||||||
##### Example
|
|
||||||
In order to generate a bearer token, you need to have wallet (which will be used to sign the token)
|
|
||||||
|
|
||||||
1. Suppose you have a container with private policy for wallet key
|
|
||||||
|
|
||||||
```
|
|
||||||
$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
|
|
||||||
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
|
|
||||||
|
|
||||||
$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
|
|
||||||
--target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
|
|
||||||
--rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
|
|
||||||
--chain-id <chainID>
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
|
|
||||||
HTTP Gateway request as wallet signed request and save it to **bearer.json**:
|
|
||||||
```
|
|
||||||
{
|
|
||||||
"body": {
|
|
||||||
"allowImpersonate": true,
|
|
||||||
"lifetime": {
|
|
||||||
"exp": "10000",
|
|
||||||
"nbf": "0",
|
|
||||||
"iat": "0"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"signature": null
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Sign it with the wallet:
|
|
||||||
```
|
|
||||||
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
|
|
||||||
```
|
|
||||||
|
|
||||||
4. Encode to base64 to use in header:
|
|
||||||
```
|
|
||||||
$ base64 -w 0 signed.json
|
|
||||||
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
|
|
||||||
```
|
|
||||||
|
|
||||||
After that, the Bearer token can be used:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
|
|
||||||
http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
|
|
||||||
# output:
|
|
||||||
# {
|
|
||||||
# "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
|
|
||||||
# "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
|
|
||||||
# }
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Note: Bearer Token owner
|
|
||||||
|
|
||||||
You can specify exact key who can use Bearer Token (gateway wallet address).
|
|
||||||
To do this, encode wallet address in base64 format
|
|
||||||
|
|
||||||
```
|
|
||||||
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
|
|
||||||
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
|
|
||||||
```
|
|
||||||
|
|
||||||
Then specify this value in Bearer Token Json
|
|
||||||
```
|
|
||||||
{
|
|
||||||
"body": {
|
|
||||||
"ownerID": {
|
|
||||||
"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
|
|
||||||
},
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Note: Policy override
|
|
||||||
|
|
||||||
Instead of impersonation, you can define the set of policies that will be applied
|
|
||||||
to the request sender. This allows to restrict access to specific operation and
|
|
||||||
specific objects without giving full impersonation control to the token user.
|
|
|
@ -1,46 +0,0 @@
|
||||||
# Building Debian package on host
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
For now, we're assuming building for Debian 11 (stable) x86_64.
|
|
||||||
|
|
||||||
Go version 1.18.4 or later should already be installed, i.e. this runs
|
|
||||||
successfully:
|
|
||||||
|
|
||||||
* `make all`
|
|
||||||
|
|
||||||
## Installing packaging dependencies
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts
|
|
||||||
```
|
|
||||||
|
|
||||||
Warning: the number of packages installed is pretty large considering dependencies.
|
|
||||||
|
|
||||||
## Package building
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ make debpackage
|
|
||||||
```
|
|
||||||
|
|
||||||
## Leftovers cleaning
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ make debclean
|
|
||||||
```
|
|
||||||
or
|
|
||||||
```shell
|
|
||||||
$ dh clean
|
|
||||||
```
|
|
||||||
|
|
||||||
# Package versioning
|
|
||||||
|
|
||||||
By default, package version is based on product version and may also contain git
|
|
||||||
tags and hashes.
|
|
||||||
|
|
||||||
Package version could be overwritten by setting `PKG_VERSION` variable before
|
|
||||||
build, Debian package versioning rules should be respected.
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ PKG_VERSION=0.32.0 make debpackage
|
|
||||||
```
|
|
|
@ -1,517 +0,0 @@
|
||||||
# FrostFS HTTP Gateway configuration file
|
|
||||||
|
|
||||||
This section contains detailed FrostFS HTTP Gateway configuration file description
|
|
||||||
including default config values and some tips to set up configurable values.
|
|
||||||
|
|
||||||
There are some custom types used for brevity:
|
|
||||||
|
|
||||||
* `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` (
|
|
||||||
milliseconds).
|
|
||||||
|
|
||||||
|
|
||||||
# Reload on SIGHUP
|
|
||||||
|
|
||||||
Some config values can be reloaded on SIGHUP signal.
|
|
||||||
Such parameters have special mark in tables below.
|
|
||||||
|
|
||||||
You can send SIGHUP signal to app using the following command:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ kill -s SIGHUP <app_pid>
|
|
||||||
```
|
|
||||||
|
|
||||||
Example:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ ./bin/frostfs-http-gw --config config.yaml &> http.log &
|
|
||||||
[1] 998346
|
|
||||||
|
|
||||||
$ cat http.log
|
|
||||||
# ...
|
|
||||||
2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"}
|
|
||||||
# ...
|
|
||||||
|
|
||||||
$ kill -s SIGHUP 998346
|
|
||||||
|
|
||||||
$ cat http.log
|
|
||||||
# ...
|
|
||||||
2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed
|
|
||||||
```
|
|
||||||
|
|
||||||
# Structure
|
|
||||||
|
|
||||||
| Section | Description |
|
|
||||||
|------------------|----------------------------------------------------------------|
|
|
||||||
| no section | [General parameters](#general-section) |
|
|
||||||
| `wallet` | [Wallet configuration](#wallet-section) |
|
|
||||||
| `peers` | [Nodes configuration](#peers-section) |
|
|
||||||
| `logger` | [Logger configuration](#logger-section) |
|
|
||||||
| `web` | [Web configuration](#web-section) |
|
|
||||||
| `server` | [Server configuration](#server-section) |
|
|
||||||
| `upload-header` | [Upload header configuration](#upload-header-section) |
|
|
||||||
| `zip` | [ZIP configuration](#zip-section) |
|
|
||||||
| `pprof` | [Pprof configuration](#pprof-section) |
|
|
||||||
| `prometheus` | [Prometheus configuration](#prometheus-section) |
|
|
||||||
| `tracing` | [Tracing configuration](#tracing-section) |
|
|
||||||
| `runtime` | [Runtime configuration](#runtime-section) |
|
|
||||||
| `frostfs` | [Frostfs configuration](#frostfs-section) |
|
|
||||||
| `cache` | [Cache configuration](#cache-section) |
|
|
||||||
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
|
|
||||||
| `index_page` | [Index page configuration](#index_page-section) |
|
|
||||||
| `multinet` | [Multinet configuration](#multinet-section) |
|
|
||||||
| `features` | [Features configuration](#features-section) |
|
|
||||||
|
|
||||||
# General section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
|
||||||
resolve_order:
|
|
||||||
- nns
|
|
||||||
- dns
|
|
||||||
|
|
||||||
connect_timeout: 5s
|
|
||||||
stream_timeout: 10s
|
|
||||||
request_timeout: 5s
|
|
||||||
rebalance_timer: 30s
|
|
||||||
pool_error_threshold: 100
|
|
||||||
reconnect_interval: 1m
|
|
||||||
worker_pool_size: 1000
|
|
||||||
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|------------------------|------------|---------------|---------------|------------------------------------------------------------------------------------|
|
|
||||||
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
|
|
||||||
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
|
|
||||||
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |
|
|
||||||
| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. |
|
|
||||||
| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. |
|
|
||||||
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
|
|
||||||
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
|
|
||||||
| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
|
|
||||||
| `worker_pool_size` | `int` | no | `1000` | Maximum worker count in handler's worker pool. |
|
|
||||||
|
|
||||||
# `wallet` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
wallet:
|
|
||||||
path: /path/to/wallet.json
|
|
||||||
address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP
|
|
||||||
passphrase: pwd
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | Default value | Description |
|
|
||||||
|--------------|----------|---------------|--------------------------------------------------------------------------|
|
|
||||||
| `path` | `string` | | Path to the wallet. |
|
|
||||||
| `address` | `string` | | Account address to get from wallet. If omitted default one will be used. |
|
|
||||||
| `passphrase` | `string` | | Passphrase to decrypt wallet. |
|
|
||||||
|
|
||||||
# `peers` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# Nodes configuration
|
|
||||||
# This configuration makes the gateway use the first node (node1.frostfs:8080)
|
|
||||||
# while it's healthy. Otherwise, gateway uses the second node (node2.frostfs:8080)
|
|
||||||
# for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests.
|
|
||||||
# Until nodes with the same priority level are healthy
|
|
||||||
# nodes with other priority are not used.
|
|
||||||
# The lower the value, the higher the priority.
|
|
||||||
peers:
|
|
||||||
0:
|
|
||||||
address: node1.frostfs:8080
|
|
||||||
priority: 1
|
|
||||||
weight: 1
|
|
||||||
1:
|
|
||||||
address: node2.frostfs:8080
|
|
||||||
priority: 2
|
|
||||||
weight: 0.1
|
|
||||||
2:
|
|
||||||
address: node3.frostfs:8080
|
|
||||||
priority: 2
|
|
||||||
weight: 0.9
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | Default value | Description |
|
|
||||||
|------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `address` | `string` | | Address of storage node. |
|
|
||||||
| `priority` | `int` | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. |
|
|
||||||
| `weight` | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values. |
|
|
||||||
|
|
||||||
# `server` section
|
|
||||||
|
|
||||||
You can specify several listeners for server. For example, for `http` and `https`.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
server:
|
|
||||||
- address: 0.0.0.0:8080
|
|
||||||
tls:
|
|
||||||
enabled: false
|
|
||||||
cert_file: /path/to/cert
|
|
||||||
key_file: /path/to/key
|
|
||||||
- address: 0.0.0.0:8081
|
|
||||||
tls:
|
|
||||||
enabled: true
|
|
||||||
cert_file: /path/to/another/cert
|
|
||||||
key_file: /path/to/another/key
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------------|----------|---------------|----------------|-----------------------------------------------|
|
|
||||||
| `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. |
|
|
||||||
| `tls.enabled` | `bool` | | false | Enable TLS or not. |
|
|
||||||
| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. |
|
|
||||||
| `tls.key_file` | `string` | yes | | Path to the key. |
|
|
||||||
|
|
||||||
|
|
||||||
# `logger` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
logger:
|
|
||||||
level: debug
|
|
||||||
destination: stdout
|
|
||||||
sampling:
|
|
||||||
enabled: false
|
|
||||||
initial: 100
|
|
||||||
thereafter: 100
|
|
||||||
interval: 1s
|
|
||||||
tags:
|
|
||||||
- name: "app"
|
|
||||||
level: info
|
|
||||||
- name: "datapath"
|
|
||||||
- name: "external_storage_tree"
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------|
|
|
||||||
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
|
||||||
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
|
|
||||||
| `sampling.enabled` | `bool` | no | false | Sampling enabling flag. |
|
|
||||||
| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. |
|
|
||||||
| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. |
|
|
||||||
| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. |
|
|
||||||
| `sampling.tags` | `[]Tag` | yes | | Tagged log entries that should be additionally logged (available tags see in the next section). |
|
|
||||||
|
|
||||||
## Tags
|
|
||||||
|
|
||||||
There are additional log entries that can hurt performance and can be additionally logged by using `logger.tags`
|
|
||||||
parameter. Available tags:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
tags:
|
|
||||||
- name: "app"
|
|
||||||
level: info
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------------------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------|
|
|
||||||
| `name` | `string` | yes | | Tag name. Possible values see below in `Tag values` section. |
|
|
||||||
| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
|
|
||||||
|
|
||||||
### Tag values
|
|
||||||
|
|
||||||
* `app` - common application logs (enabled by default).
|
|
||||||
* `datapath` - main logic of application (enabled by default).
|
|
||||||
* `external_storage` - external interaction with storage node (enabled by default).
|
|
||||||
* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
|
|
||||||
|
|
||||||
# `web` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
web:
|
|
||||||
read_buffer_size: 4096
|
|
||||||
write_buffer_size: 4096
|
|
||||||
read_timeout: 10m
|
|
||||||
write_timeout: 5m
|
|
||||||
stream_request_body: true
|
|
||||||
max_request_body_size: 4194304
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | Default value | Description |
|
|
||||||
|-------------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `read_buffer_size` | `int` | `4096` | Per-connection buffer size for requests' reading. This also limits the maximum header size. |
|
|
||||||
| `write_buffer_size` | `int` | `4096` | Per-connection buffer size for responses' writing. |
|
|
||||||
| `read_timeout` | `duration` | `10m` | The amount of time allowed to read the full request including body. The connection's read deadline is reset when the connection opens, or for keep-alive connections after the first byte has been read. |
|
|
||||||
| `write_timeout` | `duration` | `5m` | The maximum duration before timing out writes of the response. It is reset after the request handler has returned. |
|
|
||||||
| `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. |
|
|
||||||
| `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. |
|
|
||||||
|
|
||||||
|
|
||||||
# `upload-header` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
upload_header:
|
|
||||||
use_default_timestamp: false
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
|
|
||||||
| `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. |
|
|
||||||
|
|
||||||
# `zip` section
|
|
||||||
|
|
||||||
> **_DEPRECATED:_** Use archive section instead
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
zip:
|
|
||||||
compression: false
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|---------------|--------|---------------|---------------|--------------------------------------------------------------|
|
|
||||||
| `compression` | `bool` | yes | `false` | Enable zip compression when download files by common prefix. |
|
|
||||||
|
|
||||||
# `archive` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
archive:
|
|
||||||
compression: false
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
|
|
||||||
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |
|
|
||||||
|
|
||||||
|
|
||||||
# `pprof` section
|
|
||||||
|
|
||||||
Contains configuration for the `pprof` profiler.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
pprof:
|
|
||||||
enabled: true
|
|
||||||
address: localhost:8083
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------|----------|---------------|------------------|-----------------------------------------|
|
|
||||||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
|
||||||
| `address` | `string` | yes | `localhost:8083` | Address that service listener binds to. |
|
|
||||||
|
|
||||||
# `prometheus` section
|
|
||||||
|
|
||||||
Contains configuration for the `prometheus` metrics service.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
prometheus:
|
|
||||||
enabled: true
|
|
||||||
address: localhost:8084
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------|----------|---------------|------------------|-----------------------------------------|
|
|
||||||
| `enabled` | `bool` | yes | `false` | Flag to enable the service. |
|
|
||||||
| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. |
|
|
||||||
|
|
||||||
# `tracing` section
|
|
||||||
|
|
||||||
Contains configuration for the `tracing` service.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
tracing:
|
|
||||||
enabled: true
|
|
||||||
exporter: "otlp_grpc"
|
|
||||||
endpoint: "localhost:4317"
|
|
||||||
trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem"
|
|
||||||
attributes:
|
|
||||||
- key: key0
|
|
||||||
value: value
|
|
||||||
- key: key1
|
|
||||||
value: value
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
| ------------ | -------------------------------------- | ------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
|
|
||||||
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
|
|
||||||
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
|
|
||||||
| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
|
|
||||||
| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |
|
|
||||||
|
|
||||||
|
|
||||||
#### `attributes` subsection
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
attributes:
|
|
||||||
- key: key0
|
|
||||||
value: value
|
|
||||||
- key: key1
|
|
||||||
value: value
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------------------|----------|---------------|---------------|----------------------------------------------------------|
|
|
||||||
| `key` | `string` | yes | | Attribute key. |
|
|
||||||
| `value` | `string` | yes | | Attribute value. |
|
|
||||||
|
|
||||||
# `runtime` section
|
|
||||||
Contains runtime parameters.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
runtime:
|
|
||||||
soft_memory_limit: 1gb
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |
|
|
||||||
|
|
||||||
# `frostfs` section
|
|
||||||
|
|
||||||
Contains parameters of requests to FrostFS.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
frostfs:
|
|
||||||
client_cut: false
|
|
||||||
buffer_max_size_for_put: 1048576 # 1mb
|
|
||||||
tree_pool_max_attempts: 0
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|---------------------------|----------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
|
|
||||||
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
|
|
||||||
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
|
|
||||||
|
|
||||||
|
|
||||||
### `cache` section
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
cache:
|
|
||||||
buckets:
|
|
||||||
lifetime: 1m
|
|
||||||
size: 1000
|
|
||||||
netmap:
|
|
||||||
lifetime: 1m
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | Default value | Description |
|
|
||||||
|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------|
|
|
||||||
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
|
|
||||||
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |
|
|
||||||
|
|
||||||
|
|
||||||
#### `cache` subsection
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
lifetime: 1m
|
|
||||||
size: 1000
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | Default value | Description |
|
|
||||||
|------------|------------|------------------|-------------------------------|
|
|
||||||
| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
|
|
||||||
| `size` | `int` | depends on cache | LRU cache size. |
|
|
||||||
|
|
||||||
|
|
||||||
# `resolve_bucket` section
|
|
||||||
|
|
||||||
Bucket name resolving parameters from and to container ID.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
resolve_bucket:
|
|
||||||
namespace_header: X-Frostfs-Namespace
|
|
||||||
default_namespaces: [ "", "root" ]
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|----------------------|------------|---------------|-----------------------|--------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
|
|
||||||
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |
|
|
||||||
|
|
||||||
# `index_page` section
|
|
||||||
|
|
||||||
Parameters for index HTML-page output. Activates if `GetObject` request returns `not found`. Two
|
|
||||||
index page modes available:
|
|
||||||
|
|
||||||
* `s3` mode uses tree service for listing objects,
|
|
||||||
* `native` sends requests to nodes via native protocol.
|
|
||||||
If the request passes an S3-bucket name instead of a CID, `s3` mode will be used, otherwise `native`.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
index_page:
|
|
||||||
enabled: false
|
|
||||||
template_path: ""
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-----------------|----------|---------------|---------------|---------------------------------------------------------------------------------|
|
|
||||||
| `enabled` | `bool` | yes | `false` | Flag to enable index_page return if no object with specified S3-name was found. |
|
|
||||||
| `template_path` | `string` | yes | `""` | Path to .gotmpl file with html template for index_page. |
|
|
||||||
|
|
||||||
# `cors` section
|
|
||||||
|
|
||||||
Parameters for CORS (used in OPTIONS requests and responses in all handlers).
|
|
||||||
If values are not set, the headers will not be included in the response.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
cors:
|
|
||||||
allow_origin: "*"
|
|
||||||
allow_methods: ["GET", "HEAD"]
|
|
||||||
allow_headers: ["Authorization"]
|
|
||||||
expose_headers: ["*"]
|
|
||||||
allow_credentials: false
|
|
||||||
max_age: 600
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|---------------------|------------|---------------|---------------|--------------------------------------------------------|
|
|
||||||
| `allow_origin` | `string` | yes | | Values for `Access-Control-Allow-Origin` headers. |
|
|
||||||
| `allow_methods` | `[]string` | yes | | Values for `Access-Control-Allow-Methods` headers. |
|
|
||||||
| `allow_headers` | `[]string` | yes | | Values for `Access-Control-Allow-Headers` headers. |
|
|
||||||
| `expose_headers` | `[]string` | yes | | Values for `Access-Control-Expose-Headers` headers. |
|
|
||||||
| `allow_credentials` | `bool` | yes | `false` | Values for `Access-Control-Allow-Credentials` headers. |
|
|
||||||
| `max_age` | `int` | yes | `600` | Values for `Access-Control-Max-Age ` headers. |
|
|
||||||
|
|
||||||
# `multinet` section
|
|
||||||
|
|
||||||
Configuration of multinet support.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
multinet:
|
|
||||||
enabled: false
|
|
||||||
balancer: roundrobin
|
|
||||||
restrict: false
|
|
||||||
fallback_delay: 300ms
|
|
||||||
subnets:
|
|
||||||
- mask: 1.2.3.4/24
|
|
||||||
source_ips:
|
|
||||||
- 1.2.3.4
|
|
||||||
- 1.2.3.5
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|------------------|--------------------------------|---------------|---------------|--------------------------------------------------------------------------------------------|
|
|
||||||
| `enabled` | `bool` | yes | `false` | Enables multinet setting to manage source ip of outcoming requests. |
|
|
||||||
| `balancer` | `string` | yes | `""` | Strategy to pick source IP. By default picks first address. Supports `roundrobin` setting. |
|
|
||||||
| `restrict` | `bool` | yes | `false` | Restricts requests to undefined subnets. |
|
|
||||||
| `fallback_delay` | `duration` | yes | `300ms` | Delay between IPv6 and IPv4 fallback stack switch. |
|
|
||||||
| `subnets` | [[]Subnet](#subnet-subsection) | yes | | Set of subnets to apply multinet dial settings. |
|
|
||||||
|
|
||||||
#### `subnet` subsection
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- mask: 1.2.3.4/24
|
|
||||||
source_ips:
|
|
||||||
- 1.2.3.4
|
|
||||||
- 1.2.3.5
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|--------------|------------|---------------|---------------|----------------------------------------------------------------------|
|
|
||||||
| `mask` | `string` | yes | | Destination subnet. |
|
|
||||||
| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. |
|
|
||||||
|
|
||||||
# `features` section
|
|
||||||
|
|
||||||
Contains parameters for enabling features.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
features:
|
|
||||||
enable_filepath_fallback: true
|
|
||||||
tree_pool_netmap_support: true
|
|
||||||
```
|
|
||||||
|
|
||||||
| Parameter | Type | SIGHUP reload | Default value | Description |
|
|
||||||
|-------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using a fallback path to search for an object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or a single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
|
|
||||||
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |
|
|
36
docs/nns.md
36
docs/nns.md
|
@ -1,36 +0,0 @@
|
||||||
# Nicename Resolving with NNS
|
|
||||||
|
|
||||||
Steps to start using name resolving:
|
|
||||||
|
|
||||||
1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
|
|
||||||
resolve_order:
|
|
||||||
- nns
|
|
||||||
```
|
|
||||||
|
|
||||||
2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
|
|
||||||
you can check if your container (e.g. with `container-name` name) is registered in NNS:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
|
|
||||||
http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
|
|
||||||
|
|
||||||
0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
|
|
||||||
|
|
||||||
$ docker exec -it morph_chain neo-go \
|
|
||||||
contract testinvokefunction \
|
|
||||||
-r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
|
|
||||||
resolve string:container-name.container int:16 \
|
|
||||||
| jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
|
|
||||||
| base64 -d && echo
|
|
||||||
|
|
||||||
7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
|
|
||||||
```
|
|
||||||
|
|
||||||
3. Use container name instead of its `$CID`. For example:
|
|
||||||
|
|
||||||
```shell
|
|
||||||
$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
|
|
||||||
```
|
|
138
go.mod
138
go.mod
|
@ -1,138 +0,0 @@
|
||||||
module git.frostfs.info/TrueCloudLab/frostfs-http-gw
|
|
||||||
|
|
||||||
go 1.22
|
|
||||||
|
|
||||||
require (
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
|
|
||||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
|
|
||||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
|
|
||||||
github.com/bluele/gcache v0.0.2
|
|
||||||
github.com/docker/docker v27.1.1+incompatible
|
|
||||||
github.com/docker/go-units v0.5.0
|
|
||||||
github.com/fasthttp/router v1.4.1
|
|
||||||
github.com/nspcc-dev/neo-go v0.106.2
|
|
||||||
github.com/panjf2000/ants/v2 v2.5.0
|
|
||||||
github.com/prometheus/client_golang v1.19.0
|
|
||||||
github.com/prometheus/client_model v0.5.0
|
|
||||||
github.com/spf13/pflag v1.0.5
|
|
||||||
github.com/spf13/viper v1.15.0
|
|
||||||
github.com/ssgreg/journald v1.0.0
|
|
||||||
github.com/stretchr/testify v1.9.0
|
|
||||||
github.com/testcontainers/testcontainers-go v0.35.0
|
|
||||||
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
|
|
||||||
github.com/valyala/fasthttp v1.34.0
|
|
||||||
go.opentelemetry.io/otel v1.31.0
|
|
||||||
go.opentelemetry.io/otel/trace v1.31.0
|
|
||||||
go.uber.org/zap v1.27.0
|
|
||||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
|
||||||
golang.org/x/net v0.30.0
|
|
||||||
golang.org/x/sys v0.28.0
|
|
||||||
google.golang.org/grpc v1.69.2
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
dario.cat/mergo v1.0.0 // indirect
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
|
|
||||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
|
|
||||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
|
|
||||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
|
||||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
|
||||||
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
|
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/containerd/containerd v1.7.18 // indirect
|
|
||||||
github.com/containerd/log v0.1.0 // indirect
|
|
||||||
github.com/containerd/platforms v0.2.1 // indirect
|
|
||||||
github.com/cpuguy83/dockercfg v0.3.2 // indirect
|
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
|
||||||
github.com/distribution/reference v0.6.0 // indirect
|
|
||||||
github.com/docker/go-connections v0.5.0 // indirect
|
|
||||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
|
||||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
|
||||||
github.com/go-logr/logr v1.4.2 // indirect
|
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
|
||||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
|
||||||
github.com/gorilla/websocket v1.5.1 // indirect
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
|
||||||
github.com/ipfs/go-cid v0.0.7 // indirect
|
|
||||||
github.com/klauspost/compress v1.17.4 // indirect
|
|
||||||
github.com/klauspost/cpuid/v2 v2.2.6 // indirect
|
|
||||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
|
||||||
github.com/magiconair/properties v1.8.7 // indirect
|
|
||||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
|
||||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
|
||||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
|
||||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
|
||||||
github.com/moby/sys/user v0.1.0 // indirect
|
|
||||||
github.com/moby/term v0.5.0 // indirect
|
|
||||||
github.com/morikuni/aec v1.0.0 // indirect
|
|
||||||
github.com/mr-tron/base58 v1.2.0 // indirect
|
|
||||||
github.com/multiformats/go-base32 v0.1.0 // indirect
|
|
||||||
github.com/multiformats/go-base36 v0.2.0 // indirect
|
|
||||||
github.com/multiformats/go-multiaddr v0.14.0 // indirect
|
|
||||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
|
||||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
|
||||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
|
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
|
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
|
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
|
||||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
|
||||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
|
||||||
github.com/prometheus/common v0.48.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.12.0 // indirect
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
|
||||||
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
|
|
||||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
|
||||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
|
||||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
|
||||||
github.com/spf13/afero v1.9.3 // indirect
|
|
||||||
github.com/spf13/cast v1.5.0 // indirect
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
|
||||||
github.com/subosito/gotenv v1.4.2 // indirect
|
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
|
||||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
|
||||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
|
||||||
github.com/twmb/murmur3 v1.1.8 // indirect
|
|
||||||
github.com/urfave/cli v1.22.12 // indirect
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
|
||||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
|
||||||
go.etcd.io/bbolt v1.3.9 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/metric v1.31.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/sdk v1.31.0 // indirect
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
|
||||||
go.uber.org/multierr v1.11.0 // indirect
|
|
||||||
golang.org/x/crypto v0.31.0 // indirect
|
|
||||||
golang.org/x/sync v0.10.0 // indirect
|
|
||||||
golang.org/x/term v0.27.0 // indirect
|
|
||||||
golang.org/x/text v0.21.0 // indirect
|
|
||||||
golang.org/x/time v0.3.0 // indirect
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
|
|
||||||
google.golang.org/protobuf v1.36.1 // indirect
|
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
lukechampine.com/blake3 v1.2.1 // indirect
|
|
||||||
)
|
|
820
go.sum
820
go.sum
|
@ -1,820 +0,0 @@
|
||||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
|
||||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
|
||||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
|
||||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
|
||||||
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
|
||||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
|
||||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
|
||||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
|
||||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
|
||||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
|
||||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
|
||||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
|
||||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
|
||||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
|
||||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
|
||||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
|
||||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
|
||||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
|
||||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
|
||||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
|
||||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
|
||||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
|
||||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
|
||||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
|
||||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
|
||||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
|
||||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
|
||||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
|
||||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
|
||||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
|
||||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
|
||||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
|
||||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
|
||||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
|
||||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
|
||||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a h1:Ud+3zz4WP9HPxEQxDPJZPpiPdm30nDNSKucsWP9L54M=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I=
|
|
||||||
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g=
|
|
||||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
|
|
||||||
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
|
|
||||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8=
|
|
||||||
git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI=
|
|
||||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
|
|
||||||
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
|
|
||||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
|
|
||||||
git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8=
|
|
||||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4=
|
|
||||||
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw=
|
|
||||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
|
|
||||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
|
||||||
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
|
||||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
|
||||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
|
||||||
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
|
|
||||||
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
|
|
||||||
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
|
||||||
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
|
|
||||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
|
|
||||||
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
|
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
|
||||||
github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c=
|
|
||||||
github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
|
||||||
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
|
|
||||||
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
|
||||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
|
||||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
|
||||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
|
||||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
|
||||||
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb h1:f0BMgIjhZy4lSRHCXFbQst85f5agZAjtDMixQqBWNpc=
|
|
||||||
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
|
||||||
github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
|
|
||||||
github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
|
|
||||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
|
||||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
|
||||||
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
|
|
||||||
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
|
|
||||||
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
|
|
||||||
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
|
||||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
|
||||||
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
|
|
||||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
|
||||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
|
||||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
|
||||||
github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
|
|
||||||
github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
|
||||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
|
||||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
|
||||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
|
||||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
|
||||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
|
||||||
github.com/fasthttp/router v1.4.1 h1:3xPUO+hy/HAkgGDSd5sX5w18cyGDIFbC7vip8KwPDk8=
|
|
||||||
github.com/fasthttp/router v1.4.1/go.mod h1:4P0Kq4C882tA2evBKDW7De7hGfWmvV8FN+zqt8Lu49Q=
|
|
||||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
|
||||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
|
||||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
|
||||||
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
|
||||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
|
||||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
|
||||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
|
||||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
|
||||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
|
||||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
|
||||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
|
||||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
|
||||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
|
||||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
|
||||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
|
||||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
|
||||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
|
||||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
|
||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
|
||||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
|
||||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
|
||||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
|
||||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
|
||||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
|
||||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
|
||||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
|
||||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
|
||||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
|
||||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
|
||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
|
||||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
|
||||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
|
||||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
|
||||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
|
||||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
|
||||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
|
||||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
|
||||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
|
||||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
|
||||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
|
||||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
|
||||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
|
||||||
github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY=
|
|
||||||
github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I=
|
|
||||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
|
||||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
|
||||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
|
||||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
|
||||||
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
|
||||||
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
|
||||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
|
||||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
|
||||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
|
||||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
|
||||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
|
||||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
|
||||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
|
||||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
|
|
||||||
github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
|
||||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
|
||||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
|
||||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
|
||||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
|
||||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
|
||||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
|
||||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
|
||||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
|
||||||
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
|
|
||||||
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
|
|
||||||
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
|
|
||||||
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
|
|
||||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
|
||||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
|
||||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
|
||||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
|
||||||
github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8=
|
|
||||||
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
|
||||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
|
||||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
|
||||||
github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA=
|
|
||||||
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
|
|
||||||
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
|
|
||||||
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
|
|
||||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
|
||||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
|
||||||
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
|
|
||||||
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
|
|
||||||
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
|
|
||||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
|
||||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
|
||||||
github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
|
||||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
|
||||||
github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
|
|
||||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
|
||||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
|
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
|
|
||||||
github.com/nspcc-dev/neo-go v0.106.2 h1:KXSJ2J5Oacc7LrX3r4jvnC8ihKqHs5NB21q4f2S3r9o=
|
|
||||||
github.com/nspcc-dev/neo-go v0.106.2/go.mod h1:Ojwfx3/lv0VTeEHMpQ17g0wTnXcCSoFQVq5GEeCZmGo=
|
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
|
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
|
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
|
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
|
|
||||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
|
||||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
|
||||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
|
||||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
|
||||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
|
||||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
|
||||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
|
||||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
|
||||||
github.com/panjf2000/ants/v2 v2.5.0 h1:1rWGWSnxCsQBga+nQbA4/iY6VMeNoOIAM0ZWh9u3q2Q=
|
|
||||||
github.com/panjf2000/ants/v2 v2.5.0/go.mod h1:cU93usDlihJZ5CfRGNDYsiBYvoilLvBF5Qp/BT2GNRE=
|
|
||||||
github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
|
|
||||||
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
|
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
|
||||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
|
||||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
|
||||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
|
||||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
|
||||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
|
||||||
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
|
||||||
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
|
||||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
|
||||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
|
||||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
|
||||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
|
||||||
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 h1:N3Af8f13ooDKcIhsmFT7Z05CStZWu4C7Md0uDEy4q6o=
|
|
||||||
github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873/go.mod h1:dmPawKuiAeG/aFYVs2i+Dyosoo7FNcm+Pi8iK6ZUrX8=
|
|
||||||
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
|
|
||||||
github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
|
|
||||||
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
|
|
||||||
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
|
|
||||||
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
|
|
||||||
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
|
|
||||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
|
||||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
|
||||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
|
||||||
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
|
|
||||||
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
|
||||||
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
|
|
||||||
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
|
||||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
|
||||||
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
|
|
||||||
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
|
|
||||||
github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU=
|
|
||||||
github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
|
||||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
|
||||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
|
||||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
|
||||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
|
||||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
|
||||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
|
||||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
|
||||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
|
||||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
|
||||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
|
||||||
github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo=
|
|
||||||
github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4=
|
|
||||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
|
||||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
|
||||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
|
||||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
|
||||||
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4 h1:GpfJ7OdNjS7BFTVwNCUI9L4aCJOFRbr5fdHqjdhoYE8=
|
|
||||||
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4/go.mod h1:f3jBhpWvuZmue0HZK52GzRHJOYHYSILs/c8+K2S/J+o=
|
|
||||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
|
||||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
|
||||||
github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8=
|
|
||||||
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
|
||||||
github.com/valyala/fasthttp v1.28.0/go.mod h1:cmWIqlu99AO/RKcp1HWaViTqc57FswJOfYYdPJBl8BA=
|
|
||||||
github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4=
|
|
||||||
github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0=
|
|
||||||
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
|
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
|
||||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
|
||||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
|
||||||
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
|
||||||
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|
||||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
|
||||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
|
||||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
|
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ=
|
|
||||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y=
|
|
||||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
|
||||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
|
||||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
|
||||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
|
||||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
|
||||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
|
||||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
|
||||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
|
||||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
|
||||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
|
||||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
|
||||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
|
||||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
|
||||||
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
|
||||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
|
||||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
|
||||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
|
||||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
|
||||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
|
||||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
|
|
||||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
|
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
|
||||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
|
||||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
|
||||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
|
||||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
|
||||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|
||||||
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
|
||||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
|
||||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
|
||||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
|
||||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
|
||||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
|
||||||
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
|
||||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
|
||||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
|
||||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
|
||||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
|
||||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
|
||||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
|
||||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|
||||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
|
||||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
|
||||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
|
||||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
|
||||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
|
||||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
|
||||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
|
||||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
|
||||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
|
||||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
|
||||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
|
||||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
|
||||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
|
||||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
|
||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
|
||||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
|
||||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
|
||||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
|
||||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
|
||||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
|
||||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
|
||||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
|
||||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
|
||||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
|
||||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
|
||||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
|
||||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
|
||||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
|
||||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
|
||||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
|
||||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
|
||||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U=
|
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE=
|
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
|
||||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
|
||||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
|
||||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
|
||||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
|
||||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
|
||||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
|
||||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
|
||||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
|
||||||
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
|
|
||||||
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
|
||||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
|
||||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
|
||||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
|
||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
|
||||||
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
|
|
||||||
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
|
||||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
|
||||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
|
|
||||||
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
|
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
|
||||||
lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI=
|
|
||||||
lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
|
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
|
||||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
|
||||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
|
22
help.mk
22
help.mk
|
@ -1,22 +0,0 @@
|
||||||
.PHONY: help

# Show this help prompt
help:
	@echo ' Usage:'
	@echo ''
	@echo ' make <target>'
	@echo ''
	@echo ' Targets:'
	@echo ''
	@awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq

# Show help for docker/% IGNORE
help.docker/%:
	$(eval TARGETS:=$(notdir all lint) ${BINS})
	@echo ' Usage:'
	@echo ''
	@echo ' make docker/% -- Run `make %` in Golang container'
	@echo ''
	@echo ' Supported docker targets:'
	@echo ''
	@$(foreach bin, $(TARGETS), echo ' ' $(bin);)
|
|
111
internal/cache/buckets.go
vendored
111
internal/cache/buckets.go
vendored
|
@ -1,111 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BucketCache contains cache with objects and the lifetime of cache entries.
|
|
||||||
type BucketCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
cidCache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config stores expiration params for cache.
|
|
||||||
type Config struct {
|
|
||||||
Size int
|
|
||||||
Lifetime time.Duration
|
|
||||||
Logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultBucketCacheSize is a default maximum number of entries in cache.
|
|
||||||
DefaultBucketCacheSize = 1e3
|
|
||||||
// DefaultBucketCacheLifetime is a default lifetime of entries in cache.
|
|
||||||
DefaultBucketCacheLifetime = time.Minute
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultBucketConfig returns new default cache expiration values.
|
|
||||||
func DefaultBucketConfig(logger *zap.Logger) *Config {
|
|
||||||
return &Config{
|
|
||||||
Size: DefaultBucketCacheSize,
|
|
||||||
Lifetime: DefaultBucketCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBucketCache creates an object of BucketCache.
|
|
||||||
func NewBucketCache(config *Config, cidCache bool) *BucketCache {
|
|
||||||
cache := &BucketCache{
|
|
||||||
cache: gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
|
|
||||||
logger: config.Logger,
|
|
||||||
}
|
|
||||||
|
|
||||||
if cidCache {
|
|
||||||
cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
}
|
|
||||||
return cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a cached object.
|
|
||||||
func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
|
|
||||||
return o.get(formKey(ns, bktName))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
|
|
||||||
if o.cidCache == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
entry, err := o.cidCache.Get(cnrID)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
key, ok := entry.(string)
|
|
||||||
if !ok {
|
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.get(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *BucketCache) get(key string) *data.BucketInfo {
|
|
||||||
entry, err := o.cache.Get(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(*data.BucketInfo)
|
|
||||||
if !ok {
|
|
||||||
o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put puts an object to cache.
|
|
||||||
func (o *BucketCache) Put(bkt *data.BucketInfo) error {
|
|
||||||
if o.cidCache != nil {
|
|
||||||
if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
|
|
||||||
}
|
|
||||||
|
|
||||||
// formKey builds the "<name>.<namespace>" key used by the primary cache.
func formKey(ns, name string) string {
	return fmt.Sprintf("%s.%s", name, ns)
}
|
|
65
internal/cache/netmap.go
vendored
65
internal/cache/netmap.go
vendored
|
@ -1,65 +0,0 @@
|
||||||
package cache
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"github.com/bluele/gcache"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// NetmapCache provides cache for netmap.
|
|
||||||
NetmapCache struct {
|
|
||||||
cache gcache.Cache
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetmapCacheConfig stores expiration params for cache.
|
|
||||||
NetmapCacheConfig struct {
|
|
||||||
Lifetime time.Duration
|
|
||||||
Logger *zap.Logger
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
DefaultNetmapCacheLifetime = time.Minute
|
|
||||||
netmapCacheSize = 1
|
|
||||||
netmapKey = "netmap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DefaultNetmapConfig returns new default cache expiration values.
|
|
||||||
func DefaultNetmapConfig(logger *zap.Logger) *NetmapCacheConfig {
|
|
||||||
return &NetmapCacheConfig{
|
|
||||||
Lifetime: DefaultNetmapCacheLifetime,
|
|
||||||
Logger: logger,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewNetmapCache creates an object of NetmapCache.
|
|
||||||
func NewNetmapCache(config *NetmapCacheConfig) *NetmapCache {
|
|
||||||
gc := gcache.New(netmapCacheSize).LRU().Expiration(config.Lifetime).Build()
|
|
||||||
return &NetmapCache{cache: gc, logger: config.Logger}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetmapCache) Get() *netmap.NetMap {
|
|
||||||
entry, err := c.cache.Get(netmapKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
result, ok := entry.(netmap.NetMap)
|
|
||||||
if !ok {
|
|
||||||
c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
|
|
||||||
zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &result
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *NetmapCache) Put(nm netmap.NetMap) error {
|
|
||||||
return c.cache.Set(netmapKey, nm)
|
|
||||||
}
|
|
|
@ -1,14 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import (
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BucketInfo struct {
|
|
||||||
Name string // container name from system attribute
|
|
||||||
Zone string // container zone from system attribute
|
|
||||||
CID cid.ID
|
|
||||||
HomomorphicHashDisabled bool
|
|
||||||
PlacementPolicy netmap.PlacementPolicy
|
|
||||||
}
|
|
|
@ -1,27 +0,0 @@
|
||||||
package data
|
|
||||||
|
|
||||||
import (
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NodeVersion represent node from tree service.
|
|
||||||
type NodeVersion struct {
|
|
||||||
BaseNodeVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
// BaseNodeVersion is minimal node info from tree service.
|
|
||||||
// Basically used for "system" object.
|
|
||||||
type BaseNodeVersion struct {
|
|
||||||
ID uint64
|
|
||||||
OID oid.ID
|
|
||||||
IsDeleteMarker bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type NodeInfo struct {
|
|
||||||
Meta []NodeMeta
|
|
||||||
}
|
|
||||||
|
|
||||||
type NodeMeta interface {
|
|
||||||
GetKey() string
|
|
||||||
GetValue() []byte
|
|
||||||
}
|
|
|
@ -1,382 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"html/template"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
dateFormat = "02-01-2006 15:04"
|
|
||||||
attrOID = "OID"
|
|
||||||
attrCreated = "Created"
|
|
||||||
attrFileName = "FileName"
|
|
||||||
attrFilePath = "FilePath"
|
|
||||||
attrSize = "Size"
|
|
||||||
attrDeleteMarker = "IsDeleteMarker"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
BrowsePageData struct {
|
|
||||||
HasErrors bool
|
|
||||||
Container string
|
|
||||||
Prefix string
|
|
||||||
Protocol string
|
|
||||||
Objects []ResponseObject
|
|
||||||
}
|
|
||||||
ResponseObject struct {
|
|
||||||
OID string
|
|
||||||
Created string
|
|
||||||
FileName string
|
|
||||||
FilePath string
|
|
||||||
Size string
|
|
||||||
IsDir bool
|
|
||||||
GetURL string
|
|
||||||
IsDeleteMarker bool
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
|
|
||||||
return ResponseObject{
|
|
||||||
Created: formatTimestamp(attrs[attrCreated]),
|
|
||||||
OID: attrs[attrOID],
|
|
||||||
FileName: attrs[attrFileName],
|
|
||||||
Size: attrs[attrSize],
|
|
||||||
IsDir: attrs[attrOID] == "",
|
|
||||||
IsDeleteMarker: attrs[attrDeleteMarker] == "true",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newListObjectsResponseNative(attrs map[string]string) ResponseObject {
|
|
||||||
filename := lastPathElement(attrs[object.AttributeFilePath])
|
|
||||||
if filename == "" {
|
|
||||||
filename = attrs[attrFileName]
|
|
||||||
}
|
|
||||||
return ResponseObject{
|
|
||||||
OID: attrs[attrOID],
|
|
||||||
Created: formatTimestamp(attrs[object.AttributeTimestamp] + "000"),
|
|
||||||
FileName: filename,
|
|
||||||
FilePath: attrs[object.AttributeFilePath],
|
|
||||||
Size: attrs[attrSize],
|
|
||||||
IsDir: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNextDir returns the first directory component of filepath below
// prefix, or "" when filepath has no further '/'-separated components
// past the prefix.
//
// Fix: the previous implementation used strings.Replace(filepath, prefix,
// "", 1), which removes the first occurrence of prefix ANYWHERE in the
// path, not only when anchored at the start. strings.TrimPrefix strips it
// only when filepath actually begins with prefix.
func getNextDir(filepath, prefix string) string {
	rest := strings.TrimPrefix(filepath, prefix)
	dir, _, found := strings.Cut(rest, "/")
	if !found {
		return ""
	}
	return dir
}
|
|
||||||
|
|
||||||
// lastPathElement returns the final component of a '/'-separated path.
// A single trailing slash stays attached to the component before it,
// e.g. "a/b/" yields "b/".
func lastPathElement(path string) string {
	if path == "" {
		return path
	}

	cut := strings.LastIndex(path, "/")
	if cut == len(path)-1 {
		// Trailing slash: use the separator before it instead.
		cut = strings.LastIndex(path[:cut], "/")
	}
	return path[cut+1:]
}
|
|
||||||
|
|
||||||
// parseTimestamp interprets tstamp as milliseconds since the Unix epoch.
func parseTimestamp(tstamp string) (time.Time, error) {
	ms, err := strconv.ParseInt(tstamp, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.UnixMilli(ms), nil
}
|
|
||||||
|
|
||||||
func formatTimestamp(strdate string) string {
|
|
||||||
date, err := parseTimestamp(strdate)
|
|
||||||
if err != nil || date.IsZero() {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return date.Format(dateFormat)
|
|
||||||
}
|
|
||||||
|
|
||||||
func formatSize(strsize string) string {
|
|
||||||
size, err := strconv.ParseFloat(strsize, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "0B"
|
|
||||||
}
|
|
||||||
return units.HumanSize(size)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parentDir returns the last '/'-prefixed component of prefix (e.g.
// "a/b/c" -> "/c"); a prefix without a slash is returned unchanged.
func parentDir(prefix string) string {
	cut := strings.LastIndex(prefix, "/")
	if cut < 0 {
		return prefix
	}
	return prefix[cut:]
}
|
|
||||||
|
|
||||||
// trimPrefix URL-decodes encPrefix and strips its last path component,
// returning "" when decoding fails or the value contains no '/'.
func trimPrefix(encPrefix string) string {
	prefix, err := url.PathUnescape(encPrefix)
	if err != nil {
		return ""
	}
	if cut := strings.LastIndex(prefix, "/"); cut != -1 {
		return prefix[:cut]
	}
	return ""
}
|
|
||||||
|
|
||||||
// urlencode escapes every component of a '/'-separated path and rejoins
// them with leading slashes. "." and ".." components are escaped together
// with their slash so the resulting link cannot traverse upward.
func urlencode(path string) string {
	var b strings.Builder
	for _, part := range strings.Split(path, "/") {
		seg := "/" + url.PathEscape(part)
		switch seg {
		case "/.", "/..":
			seg = url.PathEscape(seg)
		}
		b.WriteString(seg)
	}
	return b.String()
}
|
|
||||||
|
|
||||||
type GetObjectsResponse struct {
|
|
||||||
objects []ResponseObject
|
|
||||||
hasErrors bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
|
|
||||||
nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result := &GetObjectsResponse{
|
|
||||||
objects: make([]ResponseObject, 0, len(nodes)),
|
|
||||||
}
|
|
||||||
for _, node := range nodes {
|
|
||||||
meta := node.Meta
|
|
||||||
if meta == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var attrs = make(map[string]string, len(meta))
|
|
||||||
for _, m := range meta {
|
|
||||||
attrs[m.GetKey()] = string(m.GetValue())
|
|
||||||
}
|
|
||||||
obj := newListObjectsResponseS3(attrs)
|
|
||||||
if obj.IsDeleteMarker {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
obj.FilePath = prefix + obj.FileName
|
|
||||||
obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
|
|
||||||
result.objects = append(result.objects, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
|
|
||||||
var basePath string
|
|
||||||
if ind := strings.LastIndex(prefix, "/"); ind != -1 {
|
|
||||||
basePath = prefix[:ind+1]
|
|
||||||
}
|
|
||||||
|
|
||||||
filters := object.NewSearchFilters()
|
|
||||||
filters.AddRootFilter()
|
|
||||||
if prefix != "" {
|
|
||||||
filters.AddFilter(object.AttributeFilePath, prefix, object.MatchCommonPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
prm := PrmObjectSearch{
|
|
||||||
PrmAuth: PrmAuth{
|
|
||||||
BearerToken: bearerToken(ctx),
|
|
||||||
},
|
|
||||||
Container: bucketInfo.CID,
|
|
||||||
Filters: filters,
|
|
||||||
}
|
|
||||||
objectIDs, err := h.frostfs.SearchObjects(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer objectIDs.Close()
|
|
||||||
|
|
||||||
resp, err := h.headDirObjects(ctx, bucketInfo.CID, objectIDs, basePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
log := utils.GetReqLogOrDefault(ctx, h.log)
|
|
||||||
dirs := make(map[string]struct{})
|
|
||||||
result := &GetObjectsResponse{
|
|
||||||
objects: make([]ResponseObject, 0, 100),
|
|
||||||
}
|
|
||||||
for objExt := range resp {
|
|
||||||
if objExt.Error != nil {
|
|
||||||
log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage))
|
|
||||||
result.hasErrors = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if objExt.Object.IsDir {
|
|
||||||
if _, ok := dirs[objExt.Object.FileName]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + urlencode(objExt.Object.FilePath)
|
|
||||||
dirs[objExt.Object.FileName] = struct{}{}
|
|
||||||
} else {
|
|
||||||
objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + "/" + objExt.Object.OID
|
|
||||||
}
|
|
||||||
result.objects = append(result.objects, objExt.Object)
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResponseObjectExtended struct {
|
|
||||||
Object ResponseObject
|
|
||||||
Error error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs ResObjectSearch, basePath string) (<-chan ResponseObjectExtended, error) {
|
|
||||||
res := make(chan ResponseObjectExtended)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
defer close(res)
|
|
||||||
log := utils.GetReqLogOrDefault(ctx, h.log).With(
|
|
||||||
zap.String("cid", cnrID.EncodeToString()),
|
|
||||||
zap.String("path", basePath),
|
|
||||||
)
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
err := objectIDs.Iterate(func(id oid.ID) bool {
|
|
||||||
wg.Add(1)
|
|
||||||
err := h.workerPool.Submit(func() {
|
|
||||||
defer wg.Done()
|
|
||||||
var obj ResponseObjectExtended
|
|
||||||
obj.Object, obj.Error = h.headDirObject(ctx, cnrID, id, basePath)
|
|
||||||
res <- obj
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
wg.Done()
|
|
||||||
log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID, basePath string) (ResponseObject, error) {
|
|
||||||
addr := newAddress(cnrID, objID)
|
|
||||||
obj, err := h.frostfs.HeadObject(ctx, PrmObjectHead{
|
|
||||||
PrmAuth: PrmAuth{BearerToken: bearerToken(ctx)},
|
|
||||||
Address: addr,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return ResponseObject{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs := loadAttributes(obj.Attributes())
|
|
||||||
attrs[attrOID] = objID.EncodeToString()
|
|
||||||
if multipartSize, ok := attrs[attributeMultipartObjectSize]; ok {
|
|
||||||
attrs[attrSize] = multipartSize
|
|
||||||
} else {
|
|
||||||
attrs[attrSize] = strconv.FormatUint(obj.PayloadSize(), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
dirname := getNextDir(attrs[object.AttributeFilePath], basePath)
|
|
||||||
if dirname == "" {
|
|
||||||
return newListObjectsResponseNative(attrs), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return ResponseObject{
|
|
||||||
FileName: dirname,
|
|
||||||
FilePath: basePath + dirname,
|
|
||||||
IsDir: true,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type browseParams struct {
|
|
||||||
bucketInfo *data.BucketInfo
|
|
||||||
prefix string
|
|
||||||
isNative bool
|
|
||||||
listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
|
|
||||||
const S3Protocol = "s3"
|
|
||||||
const FrostfsProtocol = "frostfs"
|
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
|
||||||
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
|
|
||||||
log := reqLog.With(
|
|
||||||
zap.String("bucket", p.bucketInfo.Name),
|
|
||||||
zap.String("container", p.bucketInfo.CID.EncodeToString()),
|
|
||||||
zap.String("prefix", p.prefix),
|
|
||||||
)
|
|
||||||
resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
objects := resp.objects
|
|
||||||
sort.Slice(objects, func(i, j int) bool {
|
|
||||||
if objects[i].IsDir == objects[j].IsDir {
|
|
||||||
return objects[i].FileName < objects[j].FileName
|
|
||||||
}
|
|
||||||
return objects[i].IsDir
|
|
||||||
})
|
|
||||||
|
|
||||||
tmpl, err := template.New("index").Funcs(template.FuncMap{
|
|
||||||
"formatSize": formatSize,
|
|
||||||
"trimPrefix": trimPrefix,
|
|
||||||
"urlencode": urlencode,
|
|
||||||
"parentDir": parentDir,
|
|
||||||
}).Parse(h.config.IndexPageTemplate())
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bucketName := p.bucketInfo.Name
|
|
||||||
protocol := S3Protocol
|
|
||||||
if p.isNative {
|
|
||||||
bucketName = p.bucketInfo.CID.EncodeToString()
|
|
||||||
protocol = FrostfsProtocol
|
|
||||||
}
|
|
||||||
if err = tmpl.Execute(c, &BrowsePageData{
|
|
||||||
Container: bucketName,
|
|
||||||
Prefix: p.prefix,
|
|
||||||
Objects: objects,
|
|
||||||
Protocol: protocol,
|
|
||||||
HasErrors: resp.hasErrors,
|
|
||||||
}); err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,321 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
|
||||||
"bufio"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
|
|
||||||
func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName")
|
|
||||||
defer span.End()
|
|
||||||
utils.SetContextToRequest(ctx, c)
|
|
||||||
|
|
||||||
cidParam := c.UserValue("cid").(string)
|
|
||||||
oidParam := c.UserValue("oid").(string)
|
|
||||||
downloadParam := c.QueryArgs().GetBool("download")
|
|
||||||
|
|
||||||
log := utils.GetReqLogOrDefault(ctx, h.log).With(
|
|
||||||
zap.String("cid", cidParam),
|
|
||||||
zap.String("oid", oidParam),
|
|
||||||
)
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
|
|
||||||
if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
|
|
||||||
log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
|
|
||||||
zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
|
|
||||||
logAndSendBucketError(c, log, checkS3Err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
req := newRequest(c, log)
|
|
||||||
|
|
||||||
var objID oid.ID
|
|
||||||
if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
|
|
||||||
h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
|
|
||||||
} else if err = objID.DecodeString(oidParam); err == nil {
|
|
||||||
h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
|
|
||||||
} else {
|
|
||||||
h.browseIndex(c, checkS3Err != nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldDownload(oidParam string, downloadParam bool) bool {
|
|
||||||
return !isDir(oidParam) || downloadParam
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadByAttribute handles attribute-based download requests.
|
|
||||||
func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute")
|
|
||||||
defer span.End()
|
|
||||||
utils.SetContextToRequest(ctx, c)
|
|
||||||
|
|
||||||
h.byAttribute(c, h.receiveFile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
|
|
||||||
filters := object.NewSearchFilters()
|
|
||||||
filters.AddRootFilter()
|
|
||||||
filters.AddFilter(key, val, op)
|
|
||||||
|
|
||||||
prm := PrmObjectSearch{
|
|
||||||
PrmAuth: PrmAuth{
|
|
||||||
BearerToken: bearerToken(ctx),
|
|
||||||
},
|
|
||||||
Container: cnrID,
|
|
||||||
Filters: filters,
|
|
||||||
}
|
|
||||||
|
|
||||||
return h.frostfs.SearchObjects(ctx, prm)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadZip handles zip by prefix requests.
|
|
||||||
func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip")
|
|
||||||
defer span.End()
|
|
||||||
utils.SetContextToRequest(ctx, c)
|
|
||||||
|
|
||||||
scid, _ := c.UserValue("cid").(string)
|
|
||||||
|
|
||||||
log := utils.GetReqLogOrDefault(ctx, h.log)
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
|
|
||||||
|
|
||||||
c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
|
|
||||||
return func(w *bufio.Writer) {
|
|
||||||
defer resSearch.Close()
|
|
||||||
|
|
||||||
buf := make([]byte, 3<<20)
|
|
||||||
zipWriter := zip.NewWriter(w)
|
|
||||||
var objectsWritten int
|
|
||||||
|
|
||||||
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
|
|
||||||
func(obj *object.Object) (io.Writer, error) {
|
|
||||||
objectsWritten++
|
|
||||||
return h.createZipFile(zipWriter, obj)
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
if errIter != nil {
|
|
||||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
|
|
||||||
return
|
|
||||||
} else if objectsWritten == 0 {
|
|
||||||
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
if err := zipWriter.Close(); err != nil {
|
|
||||||
log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
|
|
||||||
method := zip.Store
|
|
||||||
if h.config.ArchiveCompression() {
|
|
||||||
method = zip.Deflate
|
|
||||||
}
|
|
||||||
|
|
||||||
filePath := getFilePath(obj)
|
|
||||||
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
|
|
||||||
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
return zw.CreateHeader(&zip.FileHeader{
|
|
||||||
Name: filePath,
|
|
||||||
Method: method,
|
|
||||||
Modified: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadTar forms tar.gz from objects by prefix.
|
|
||||||
func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar")
|
|
||||||
defer span.End()
|
|
||||||
utils.SetContextToRequest(ctx, c)
|
|
||||||
|
|
||||||
scid, _ := c.UserValue("cid").(string)
|
|
||||||
|
|
||||||
log := utils.GetReqLogOrDefault(ctx, h.log)
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
|
|
||||||
c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
|
|
||||||
|
|
||||||
c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
|
|
||||||
return func(w *bufio.Writer) {
|
|
||||||
defer resSearch.Close()
|
|
||||||
|
|
||||||
compressionLevel := gzip.NoCompression
|
|
||||||
if h.config.ArchiveCompression() {
|
|
||||||
compressionLevel = gzip.DefaultCompression
|
|
||||||
}
|
|
||||||
|
|
||||||
// ignore error because it's not nil only if compressionLevel argument is invalid
|
|
||||||
gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel)
|
|
||||||
tarWriter := tar.NewWriter(gzipWriter)
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if err := tarWriter.Close(); err != nil {
|
|
||||||
log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
if err := gzipWriter.Close(); err != nil {
|
|
||||||
log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var objectsWritten int
|
|
||||||
buf := make([]byte, 3<<20) // the same as for upload
|
|
||||||
|
|
||||||
errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
|
|
||||||
func(obj *object.Object) (io.Writer, error) {
|
|
||||||
objectsWritten++
|
|
||||||
return h.createTarFile(tarWriter, obj)
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
if errIter != nil {
|
|
||||||
log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
|
|
||||||
} else if objectsWritten == 0 {
|
|
||||||
log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) {
|
|
||||||
filePath := getFilePath(obj)
|
|
||||||
if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
|
|
||||||
return nil, fmt.Errorf("invalid filepath '%s'", filePath)
|
|
||||||
}
|
|
||||||
|
|
||||||
return tw, tw.WriteHeader(&tar.Header{
|
|
||||||
Name: filePath,
|
|
||||||
Mode: 0655,
|
|
||||||
Size: int64(obj.PayloadSize()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
|
|
||||||
return func(id oid.ID) bool {
|
|
||||||
log = log.With(zap.String("oid", id.EncodeToString()))
|
|
||||||
|
|
||||||
prm := PrmObjectGet{
|
|
||||||
PrmAuth: PrmAuth{
|
|
||||||
BearerToken: bearerToken(ctx),
|
|
||||||
},
|
|
||||||
Address: newAddress(cnrID, id),
|
|
||||||
}
|
|
||||||
|
|
||||||
resGet, err := h.frostfs.GetObject(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
fileWriter, err := createArchiveHeader(&resGet.Header)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = writeToArchive(resGet, fileWriter, buf); err != nil {
|
|
||||||
log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// searchObjectsByPrefix reads the "cid" and "prefix" request parameters and
// runs a FilePath common-prefix search in container cnrID. On any failure it
// writes an HTTP 400 response to c and returns the error. On success the
// caller owns the returned ResObjectSearch and must Close it.
func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
	scid, _ := c.UserValue("cid").(string)
	prefix, _ := c.UserValue("prefix").(string)

	ctx := utils.GetContextFromRequest(c)

	// The prefix arrives URL-encoded in the route parameter.
	prefix, err := url.QueryUnescape(prefix)
	if err != nil {
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
			zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
		return nil, err
	}

	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))

	resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
	if err != nil {
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
		ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
		return nil, err
	}
	return resSearch, nil
}
|
|
||||||
|
|
||||||
func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
|
|
||||||
var err error
|
|
||||||
if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil {
|
|
||||||
return fmt.Errorf("copy object payload to zip file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = resGet.Payload.Close(); err != nil {
|
|
||||||
return fmt.Errorf("object body close error: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFilePath(obj *object.Object) string {
|
|
||||||
for _, attr := range obj.Attributes() {
|
|
||||||
if attr.Key() == object.AttributeFilePath {
|
|
||||||
return attr.Value()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
|
@ -1,58 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) {
|
|
||||||
var err error
|
|
||||||
result := make(map[string]string)
|
|
||||||
prefix := []byte(utils.UserAttributeHeaderPrefix)
|
|
||||||
|
|
||||||
header.VisitAll(func(key, val []byte) {
|
|
||||||
// checks that the key and the val not empty
|
|
||||||
if len(key) == 0 || len(val) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// checks that the key has attribute prefix
|
|
||||||
if !bytes.HasPrefix(key, prefix) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// removing attribute prefix
|
|
||||||
clearKey := bytes.TrimPrefix(key, prefix)
|
|
||||||
|
|
||||||
clearKey = utils.TransformIfSystem(clearKey)
|
|
||||||
|
|
||||||
// checks that the attribute key is not empty
|
|
||||||
if len(clearKey) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if key gets duplicated
|
|
||||||
// return error containing full key name (with prefix)
|
|
||||||
if _, ok := result[string(clearKey)]; ok {
|
|
||||||
err = fmt.Errorf("key duplication error: %s", string(key))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// make string representation of key / val
|
|
||||||
k, v := string(clearKey), string(val)
|
|
||||||
|
|
||||||
result[k] = v
|
|
||||||
|
|
||||||
l.Debug(logs.AddAttributeToResultObject,
|
|
||||||
zap.String("key", k),
|
|
||||||
zap.String("val", v),
|
|
||||||
logs.TagField(logs.TagDatapath))
|
|
||||||
})
|
|
||||||
|
|
||||||
return result, err
|
|
||||||
}
|
|
|
@ -1,53 +0,0 @@
|
||||||
//go:build !integration
|
|
||||||
|
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestFilter checks filterHeaders: duplicate attribute names (plain and
// system-mapped) must error, and valid headers must be stripped of the
// X-Attribute prefix with system names normalized case-insensitively.
func TestFilter(t *testing.T) {
	log := zap.NewNop()

	t.Run("duplicate keys error", func(t *testing.T) {
		req := &fasthttp.RequestHeader{}
		req.DisableNormalizing()
		req.Add("X-Attribute-DupKey", "first-value")
		req.Add("X-Attribute-DupKey", "second-value")
		_, err := filterHeaders(log, req)
		require.Error(t, err)
	})

	t.Run("duplicate system keys error", func(t *testing.T) {
		req := &fasthttp.RequestHeader{}
		req.DisableNormalizing()
		req.Add("X-Attribute-System-DupKey", "first-value")
		req.Add("X-Attribute-System-DupKey", "second-value")
		_, err := filterHeaders(log, req)
		require.Error(t, err)
	})

	req := &fasthttp.RequestHeader{}
	req.DisableNormalizing()

	// "System" prefix matching is case-insensitive; each variant maps to the
	// same __SYSTEM__ namespace.
	req.Set("X-Attribute-System-Expiration-Epoch1", "101")
	req.Set("X-Attribute-SYSTEM-Expiration-Epoch2", "102")
	req.Set("X-Attribute-system-Expiration-Epoch3", "103")
	req.Set("X-Attribute-MyAttribute", "value")

	expected := map[string]string{
		"__SYSTEM__EXPIRATION_EPOCH1": "101",
		"MyAttribute":                 "value",
		"__SYSTEM__EXPIRATION_EPOCH3": "103",
		"__SYSTEM__EXPIRATION_EPOCH2": "102",
	}

	result, err := filterHeaders(log, req)
	require.NoError(t, err)

	require.Equal(t, expected, result)
}
|
|
|
@ -1,275 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestFrostFS is an in-memory FrostFS mock used by unit tests.
type TestFrostFS struct {
	objects    map[string]*object.Object       // keyed by object address string
	containers map[string]*container.Container // keyed by container ID string
	accessList map[string]bool                 // "<cnr>/<user>/<op>/<obj>" -> allowed
	key        *keys.PrivateKey                // identity used when no bearer token is given
}
|
|
||||||
|
|
||||||
// NewTestFrostFS returns an empty mock whose request owner defaults to the
// user derived from key.
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
	return &TestFrostFS{
		objects:    make(map[string]*object.Object),
		containers: make(map[string]*container.Container),
		accessList: make(map[string]bool),
		key:        key,
	}
}
|
|
||||||
|
|
||||||
// ContainerID resolves a container name to its ID by scanning the stored
// containers. Returns an error when no container carries that name.
func (t *TestFrostFS) ContainerID(name string) (*cid.ID, error) {
	for id, cnr := range t.containers {
		if container.Name(*cnr) == name {
			var cnrID cid.ID
			return &cnrID, cnrID.DecodeString(id)
		}
	}
	return nil, fmt.Errorf("not found")
}
|
|
||||||
|
|
||||||
// SetContainer stores cnr in the mock under cnrID.
func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
	t.containers[cnrID.EncodeToString()] = cnr
}
|
|
||||||
|
|
||||||
// AllowUserOperation grants access to object operations.
// Empty (zero-value) userID and objID act as wildcards matching any user and
// any object respectively; see isAllowed for the lookup order.
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
	t.accessList[fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID)] = true
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
|
|
||||||
for k, v := range t.containers {
|
|
||||||
if k == prm.ContainerID.EncodeToString() {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("container not found %s", prm.ContainerID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestOwner returns the issuer of btoken when a token is present,
// otherwise the mock's own user ID derived from its private key.
func (t *TestFrostFS) requestOwner(btoken *bearer.Token) user.ID {
	if btoken != nil {
		return bearer.ResolveIssuer(*btoken)
	}

	var owner user.ID
	user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
	return owner
}
|
|
||||||
|
|
||||||
// retrieveObject returns the stored object at addr after an ACL check for the
// request owner. It returns ErrAccessDenied on access violation and an
// ObjectNotFound status error (wrapped) when the address is unknown.
func (t *TestFrostFS) retrieveObject(addr oid.Address, btoken *bearer.Token) (*object.Object, error) {
	sAddr := addr.EncodeToString()

	if obj, ok := t.objects[sAddr]; ok {
		owner := t.requestOwner(btoken)

		if !t.isAllowed(addr.Container(), owner, acl.OpObjectGet, addr.Object()) {
			return nil, ErrAccessDenied
		}

		return obj, nil
	}

	return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
}
|
|
||||||
|
|
||||||
// HeadObject returns the stored object (the mock keeps header and payload
// together) for prm.Address, subject to the same ACL check as Get.
func (t *TestFrostFS) HeadObject(_ context.Context, prm PrmObjectHead) (*object.Object, error) {
	return t.retrieveObject(prm.Address, prm.BearerToken)
}
|
|
||||||
|
|
||||||
// GetObject returns the stored object wrapped as an Object, with the payload
// exposed as a fresh in-memory reader.
func (t *TestFrostFS) GetObject(_ context.Context, prm PrmObjectGet) (*Object, error) {
	obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
	if err != nil {
		return nil, err
	}

	return &Object{
		Header:  *obj,
		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
	}, nil
}
|
|
||||||
|
|
||||||
// RangeObject returns a reader over the payload slice
// [offset, offset+length) of the stored object.
// NOTE(review): no bounds checking — a range beyond the payload size panics
// on the slice expression; acceptable for a test mock.
func (t *TestFrostFS) RangeObject(_ context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
	obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
	if err != nil {
		return nil, err
	}

	off := prm.PayloadRange[0]
	payload := obj.Payload()[off : off+prm.PayloadRange[1]]
	return io.NopCloser(bytes.NewReader(payload)), nil
}
|
|
||||||
|
|
||||||
// CreateObject assigns a random ID to prm.Object, materializes its payload
// (reading prm.Payload fully and computing the SHA256 checksum), performs an
// ACL check for Put, and stores the object by its address.
func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
	// The object ID is the SHA256 of 32 random bytes, not of the payload.
	b := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, b); err != nil {
		return oid.ID{}, err
	}
	var id oid.ID
	id.SetSHA256(sha256.Sum256(b))
	prm.Object.SetID(id)

	attrs := prm.Object.Attributes()
	if prm.ClientCut {
		// Mark client-cut uploads so tests can assert the flag propagated.
		a := object.NewAttribute()
		a.SetKey("s3-client-cut")
		a.SetValue("true")
		attrs = append(attrs, *a)
	}

	prm.Object.SetAttributes(attrs...)

	if prm.Payload != nil {
		all, err := io.ReadAll(prm.Payload)
		if err != nil {
			return oid.ID{}, err
		}
		prm.Object.SetPayload(all)
		prm.Object.SetPayloadSize(uint64(len(all)))
		var hash checksum.Checksum
		checksum.Calculate(&hash, checksum.SHA256, all)
		prm.Object.SetPayloadChecksum(hash)
	}

	cnrID, _ := prm.Object.ContainerID()
	objID, _ := prm.Object.ID()

	owner := t.requestOwner(prm.BearerToken)

	if !t.isAllowed(cnrID, owner, acl.OpObjectPut, objID) {
		return oid.ID{}, ErrAccessDenied
	}

	addr := newAddress(cnrID, objID)
	t.objects[addr.EncodeToString()] = prm.Object
	return objID, nil
}
|
|
||||||
|
|
||||||
// resObjectSearchMock is an in-memory ResObjectSearch over a fixed ID list.
type resObjectSearchMock struct {
	res []oid.ID // remaining, not-yet-consumed result IDs
}
|
|
||||||
|
|
||||||
// Read copies up to len(buf) IDs into buf. When fewer than len(buf) IDs
// remain it returns their count with io.EOF.
// NOTE(review): the EOF path does not consume r.res, so a subsequent Read
// repeats the same IDs — fine for the tests' single-read usage.
func (r *resObjectSearchMock) Read(buf []oid.ID) (int, error) {
	for i := range buf {
		if i > len(r.res)-1 {
			return len(r.res), io.EOF
		}
		buf[i] = r.res[i]
	}

	// Full buffer served: drop the consumed prefix.
	r.res = r.res[len(buf):]

	return len(buf), nil
}
|
|
||||||
|
|
||||||
// Iterate calls f for every remaining ID, stopping early when f returns true.
func (r *resObjectSearchMock) Iterate(f func(oid.ID) bool) error {
	for _, id := range r.res {
		if f(id) {
			return nil
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// Close is a no-op for the in-memory search result.
func (r *resObjectSearchMock) Close() {}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (ResObjectSearch, error) {
|
|
||||||
if !t.isAllowed(prm.Container, t.requestOwner(prm.BearerToken), acl.OpObjectSearch, oid.ID{}) {
|
|
||||||
return nil, ErrAccessDenied
|
|
||||||
}
|
|
||||||
|
|
||||||
cidStr := prm.Container.EncodeToString()
|
|
||||||
var res []oid.ID
|
|
||||||
|
|
||||||
if len(prm.Filters) == 1 { // match root filter
|
|
||||||
for k, v := range t.objects {
|
|
||||||
if strings.Contains(k, cidStr) {
|
|
||||||
id, _ := v.ID()
|
|
||||||
res = append(res, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &resObjectSearchMock{res: res}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
filter := prm.Filters[1]
|
|
||||||
if len(prm.Filters) != 2 ||
|
|
||||||
filter.Operation() != object.MatchCommonPrefix && filter.Operation() != object.MatchStringEqual {
|
|
||||||
return nil, fmt.Errorf("usupported filters")
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range t.objects {
|
|
||||||
if strings.Contains(k, cidStr) && isMatched(v.Attributes(), filter) {
|
|
||||||
id, _ := v.ID()
|
|
||||||
res = append(res, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resObjectSearchMock{res: res}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitMultiObjectReader is a stub: the mock does not support multipart
// objects and returns a nil reader with no error.
func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
	return nil, nil
}
|
|
||||||
|
|
||||||
func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
|
|
||||||
for _, attr := range attributes {
|
|
||||||
if attr.Key() == filter.Header() {
|
|
||||||
switch filter.Operation() {
|
|
||||||
case object.MatchStringEqual:
|
|
||||||
return attr.Value() == filter.Value()
|
|
||||||
case object.MatchCommonPrefix:
|
|
||||||
return strings.HasPrefix(attr.Value(), filter.Value())
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetEpochDurations returns fixed epoch timing values for tests.
func (t *TestFrostFS) GetEpochDurations(context.Context) (*utils.EpochDurations, error) {
	return &utils.EpochDurations{
		CurrentEpoch:  10,
		MsPerBlock:    1000,
		BlockPerEpoch: 100,
	}, nil
}
|
|
||||||
|
|
||||||
// isAllowed reports whether op is permitted for userID on objID in cnrID.
// It checks the exact entry plus the wildcard combinations registered via
// AllowUserOperation (zero user ID and/or zero object ID).
func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) bool {
	keysToCheck := []string{
		fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID),
		fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, oid.ID{}),
		fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, objID),
		fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, oid.ID{}),
	}

	for _, key := range keysToCheck {
		if t.accessList[key] {
			return true
		}
	}
	return false
}
|
|
|
@ -1,443 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/panjf2000/ants/v2"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config supplies the handler's runtime settings.
// NOTE(review): method semantics inferred from names; confirm against the
// implementing configuration type.
type Config interface {
	DefaultTimestamp() bool
	ArchiveCompression() bool
	ClientCut() bool
	IndexPageEnabled() bool
	IndexPageTemplate() string
	BufferMaxSizeForPut() uint64
	NamespaceHeader() string
	EnableFilepathFallback() bool
}
|
|
||||||
|
|
||||||
// PrmContainer groups parameters of the FrostFS.Container operation.
type PrmContainer struct {
	// Container identifier.
	ContainerID cid.ID
}
|
|
||||||
|
|
||||||
// PrmAuth groups authentication parameters for a FrostFS operation.
type PrmAuth struct {
	// Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
	BearerToken *bearer.Token
}
|
|
||||||
|
|
||||||
// PrmObjectHead groups parameters of the FrostFS.HeadObject operation.
type PrmObjectHead struct {
	// Authentication parameters.
	PrmAuth

	// Address to read the object header from.
	Address oid.Address
}
|
|
||||||
|
|
||||||
// PrmObjectGet groups parameters of the FrostFS.GetObject operation.
type PrmObjectGet struct {
	// Authentication parameters.
	PrmAuth

	// Address of the object to read.
	Address oid.Address
}
|
|
||||||
|
|
||||||
// PrmObjectRange groups parameters of the FrostFS.RangeObject operation.
type PrmObjectRange struct {
	// Authentication parameters.
	PrmAuth

	// Address of the object to read the range from.
	Address oid.Address

	// Offset-length range of the object payload to be read.
	PayloadRange [2]uint64
}
|
|
||||||
|
|
||||||
// Object represents a FrostFS object.
type Object struct {
	// Object header (doesn't contain payload).
	Header object.Object

	// Object payload part encapsulated in an io.Reader primitive.
	// Returns ErrAccessDenied on read access violation.
	Payload io.ReadCloser
}
|
|
||||||
|
|
||||||
// PrmObjectCreate groups parameters of the FrostFS.CreateObject operation.
type PrmObjectCreate struct {
	// Authentication parameters.
	PrmAuth

	// Object metadata (header) for the object being created.
	Object *object.Object

	// Object payload encapsulated in an io.Reader primitive.
	Payload io.Reader

	// Enables client-side object preparing.
	ClientCut bool

	// Disables using the Tillich-Zémor hash for the payload.
	WithoutHomomorphicHash bool

	// Sets the max buffer size to read the payload.
	BufferMaxSize uint64
}
|
|
||||||
|
|
||||||
// PrmObjectSearch groups parameters of the FrostFS.SearchObjects operation.
type PrmObjectSearch struct {
	// Authentication parameters.
	PrmAuth

	// Container to select the objects from.
	Container cid.ID

	// Filters the selection is restricted by.
	Filters object.SearchFilters
}
|
|
||||||
|
|
||||||
// PrmInitMultiObjectReader groups parameters of FrostFS.InitMultiObjectReader.
type PrmInitMultiObjectReader struct {
	// Payload range: offset and length.
	Off, Ln uint64

	// Address of the (large) object to read.
	Addr   oid.Address
	Bearer *bearer.Token
}
|
|
||||||
|
|
||||||
// ResObjectSearch is an iterator over object-search results.
// Callers must Close it to release the underlying stream.
type ResObjectSearch interface {
	Read(buf []oid.ID) (int, error)
	Iterate(f func(oid.ID) bool) error
	Close()
}
|
|
||||||
|
|
||||||
var (
	// ErrAccessDenied is returned from FrostFS in case of access violation.
	ErrAccessDenied = errors.New("access denied")
	// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
	ErrGatewayTimeout = errors.New("gateway timeout")
	// ErrQuotaLimitReached is returned from FrostFS in case of exceeded quota.
	ErrQuotaLimitReached = errors.New("quota limit reached")
)
|
|
||||||
|
|
||||||
// FrostFS represents a virtual connection to the FrostFS network.
type FrostFS interface {
	Container(context.Context, PrmContainer) (*container.Container, error)
	HeadObject(context.Context, PrmObjectHead) (*object.Object, error)
	GetObject(context.Context, PrmObjectGet) (*Object, error)
	RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
	SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
	InitMultiObjectReader(ctx context.Context, p PrmInitMultiObjectReader) (io.Reader, error)

	utils.EpochInfoFetcher
}
|
|
||||||
|
|
||||||
// ContainerResolver maps a human-readable container name to its ID.
type ContainerResolver interface {
	Resolve(ctx context.Context, name string) (*cid.ID, error)
}
|
|
||||||
|
|
||||||
// Handler serves the HTTP gateway requests backed by FrostFS.
type Handler struct {
	log               *zap.Logger
	frostfs           FrostFS
	ownerID           *user.ID
	config            Config
	containerResolver ContainerResolver
	tree              layer.TreeService
	cache             *cache.BucketCache // per-namespace bucket info cache
	workerPool        *ants.Pool
}
|
|
||||||
|
|
||||||
// AppParams groups application-level dependencies passed to New.
type AppParams struct {
	Logger   *zap.Logger
	FrostFS  FrostFS
	Owner    *user.ID
	Resolver ContainerResolver
	Cache    *cache.BucketCache
}
|
|
||||||
|
|
||||||
// New creates a Handler from the shared application params, handler config,
// tree service and worker pool.
func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
	return &Handler{
		log:               params.Logger,
		frostfs:           params.FrostFS,
		ownerID:           params.Owner,
		config:            config,
		containerResolver: params.Resolver,
		tree:              tree,
		cache:             params.Cache,
		workerPool:        workerPool,
	}
}
|
|
||||||
|
|
||||||
// byNativeAddress is a wrapper for a function (e.g. request.headObject,
// request.receiveFile) that builds the object address from the native
// container and object IDs and invokes the handler with it.
func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
	defer span.End()

	addr := newAddress(cnrID, objID)
	handler(ctx, req, addr)
}
|
|
||||||
|
|
||||||
// byS3Path is a wrapper for a function (e.g. request.headObject,
// request.receiveFile) that resolves the object address from an S3-like path
// <bucket name>/<object key> via the tree service's latest version, rejecting
// delete-marked objects with 404.
func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
	defer span.End()

	c, log := req.RequestCtx, req.log

	foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
	if err != nil {
		log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
			zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
		logAndSendBucketError(c, log, err)
		return
	}
	// The latest version may be a delete marker: the object is logically gone.
	if foundOID.IsDeleteMarker {
		log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
		ResponseError(c, "object deleted", fasthttp.StatusNotFound)
		return
	}

	addr := newAddress(cnrID, foundOID.OID)
	handler(ctx, newRequest(c, log), addr)
}
|
|
||||||
|
|
||||||
// byAttribute is a wrapper similar to byNativeAddress: it reads the "cid",
// "attr_key" and "attr_val" route parameters, finds the single object whose
// attribute matches, and invokes the handler with the resolved address.
// Unescape failures and search errors map to 400; "no match" maps to 404.
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
	cidParam, _ := c.UserValue("cid").(string)
	key, _ := c.UserValue("attr_key").(string)
	val, _ := c.UserValue("attr_val").(string)

	ctx := utils.GetContextFromRequest(c)
	log := utils.GetReqLogOrDefault(ctx, h.log)

	// Both the attribute key and value arrive URL-encoded.
	key, err := url.QueryUnescape(key)
	if err != nil {
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
			zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	val, err = url.QueryUnescape(val)
	if err != nil {
		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
			zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	// FileName values are stored without a leading slash.
	if key == attrFileName {
		val = prepareFileName(val)
	}

	log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))

	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
	if err != nil {
		// io.EOF from the search means "no object matched".
		if errors.Is(err, io.EOF) {
			ResponseError(c, err.Error(), fasthttp.StatusNotFound)
			return
		}

		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
		return
	}

	var addr oid.Address
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(objID)

	handler(ctx, newRequest(c, log), addr)
}
|
|
||||||
|
|
||||||
// findObjectByAttribute searches cnrID for the first object whose attrKey
// equals attrVal. When a FilePath search finds nothing and the fallback is
// enabled, it retries once by FileName (see needSearchByFileName). io.EOF is
// wrapped into the returned error when nothing matches at all.
func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
	res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
	if err != nil {
		log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
		return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
	}
	defer res.Close()

	// Only the first match is needed.
	buf := make([]oid.ID, 1)

	n, err := res.Read(buf)
	if n == 0 {
		switch {
		case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
			log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
			return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal))
		case errors.Is(err, io.EOF):
			log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
			return oid.ID{}, fmt.Errorf("object not found: %w", err)
		default:
			log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
			return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
		}
	}

	return buf[0], nil
}
|
|
||||||
|
|
||||||
func (h *Handler) needSearchByFileName(key, val string) bool {
|
|
||||||
if key != attrFilePath || !h.config.EnableFilepathFallback() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
|
|
||||||
}
|
|
||||||
|
|
||||||
// prepareFileName strips a single leading slash from fileName, if present.
func prepareFileName(fileName string) string {
	return strings.TrimPrefix(fileName, "/")
}
|
|
||||||
|
|
||||||
// resolveContainer decodes the container id; if it is not a valid container
// id it then tries to resolve the string as a name using the provided
// resolver. A "not found" resolver error is wrapped into a
// ContainerNotFound API status so callers can map it to 404.
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
	cnrID := new(cid.ID)
	err := cnrID.DecodeString(containerID)
	if err != nil {
		cnrID, err = h.containerResolver.Resolve(ctx, containerID)
		if err != nil && strings.Contains(err.Error(), "not found") {
			err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
		}
	}
	return cnrID, err
}
|
|
||||||
|
|
||||||
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
|
|
||||||
ns, err := middleware.GetNamespace(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil {
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrID, err := h.resolveContainer(ctx, containerName)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
|
|
||||||
logs.TagField(logs.TagDatapath))
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo, err := h.readContainer(ctx, *cnrID)
|
|
||||||
if err != nil {
|
|
||||||
log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
|
|
||||||
zap.String("cnrName", cnrID.String()),
|
|
||||||
logs.TagField(logs.TagExternalStorage))
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.cache.Put(bktInfo); err != nil {
|
|
||||||
log.Warn(logs.CouldntPutBucketIntoCache,
|
|
||||||
zap.String("bucket name", bktInfo.Name),
|
|
||||||
zap.Stringer("bucket cid", bktInfo.CID),
|
|
||||||
zap.Error(err),
|
|
||||||
logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
|
|
||||||
prm := PrmContainer{ContainerID: cnrID}
|
|
||||||
res, err := h.frostfs.Container(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo := &data.BucketInfo{
|
|
||||||
CID: cnrID,
|
|
||||||
Name: cnrID.EncodeToString(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if domain := container.ReadDomain(*res); domain.Name() != "" {
|
|
||||||
bktInfo.Name = domain.Name()
|
|
||||||
bktInfo.Zone = domain.Zone()
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
|
|
||||||
bktInfo.PlacementPolicy = res.PlacementPolicy()
|
|
||||||
|
|
||||||
return bktInfo, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// browseIndex renders the index (directory listing) page for a container
// prefix. isNativeList selects the native listing fallback when the
// tree-service probe failed; otherwise the S3-style tree listing is used.
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex")
	defer span.End()
	utils.SetContextToRequest(ctx, c)

	// Index pages are an opt-in feature.
	if !h.config.IndexPageEnabled() {
		c.SetStatusCode(fasthttp.StatusNotFound)
		return
	}

	cidURLParam := c.UserValue("cid").(string)
	oidURLParam := c.UserValue("oid").(string)

	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))

	// The oid path segment acts as the listing prefix and may be URL-encoded.
	unescapedKey, err := url.QueryUnescape(oidURLParam)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	listFunc := h.getDirObjectsS3
	if isNativeList {
		// tree probe failed, trying to use native
		listFunc = h.getDirObjectsNative
	}

	h.browseObjects(c, browseParams{
		bucketInfo:  bktInfo,
		prefix:      unescapedKey,
		listObjects: listFunc,
		isNative:    isNativeList,
	})
}
|
|
|
@ -1,580 +0,0 @@
|
||||||
//go:build gofuzz
|
|
||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Exit codes returned by the DoFuzz* drivers to the external fuzzing harness.
const (
	fuzzSuccessExitCode = 0
	fuzzFailExitCode    = -1
)
|
|
||||||
|
|
||||||
func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) {
|
|
||||||
array := make([]string, count)
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
err = tp.Reset()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
array[i], err = tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return array, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) {
|
|
||||||
array := make([]bool, count)
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
err = tp.Reset()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
array[i], err = tp.GetBool()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return array, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) {
|
|
||||||
count, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
count = count % max
|
|
||||||
if count < 0 {
|
|
||||||
count += max
|
|
||||||
}
|
|
||||||
return count, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error {
|
|
||||||
count, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
count = count % len(params)
|
|
||||||
if count < 0 {
|
|
||||||
count += len(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
position, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
position = position % len(params)
|
|
||||||
if position < 0 {
|
|
||||||
position += len(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Header.Set(params[position], v)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) {
|
|
||||||
rnd, err := tp.GetBool()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if rnd == true {
|
|
||||||
initValue, err = tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return initValue, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// upload prepares a fresh handler context and performs one multipart Upload
// with fuzzer-derived bucket name, file name, attribute pair, body content,
// and extra X-Attribute-* headers. On success it returns the context, the
// handler context, the container ID, the upload response ctx, the file name,
// and the attribute key/value, for use by follow-up download/head scenarios.
//
// NOTE(review): draws from tp are order-dependent; reordering any call below
// changes how the fuzzer input is consumed.
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
	hc, err := prepareHandlerContext()
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	// All basic ACL variants; one is picked deterministically from the input.
	aclList := []acl.Basic{
		acl.Private,
		acl.PrivateExtended,
		acl.PublicRO,
		acl.PublicROExtended,
		acl.PublicRW,
		acl.PublicRWExtended,
		acl.PublicAppend,
		acl.PublicAppendExtended,
	}

	pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList))
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}
	// NOTE(review): this local shadows the imported acl package for the rest
	// of the function.
	acl := aclList[pos]

	strings, err := prepareStrings(tp, 6)
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}
	bktName := strings[0]
	objFileName := strings[1]
	valAttr := strings[2]
	keyAttr := strings[3]

	if len(bktName) == 0 {
		return nil, nil, cid.ID{}, nil, "", "", "", errors.New("not enought buckets")
	}

	cnrID, cnr, err := hc.prepareContainer(bktName, acl)
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cnrID.EncodeToString())

	attributes := map[string]string{
		object.AttributeFileName: objFileName,
		keyAttr:                  valAttr,
	}

	// Build a multipart body with a single "file" part holding random bytes.
	var buff bytes.Buffer
	w := multipart.NewWriter(&buff)
	fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	content, err := tp.GetBytes()
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	if err = w.Close(); err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	r.Request.SetBodyStream(&buff, buff.Len())
	r.Request.Header.Set("Content-Type", w.FormDataContentType())
	r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)

	// Sprinkle extra attribute headers (known-tricky names plus two random
	// ones) to exercise the handler's header filtering.
	err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]})
	if err != nil {
		return nil, nil, cid.ID{}, nil, "", "", "", err
	}

	hc.Handler().Upload(r)

	if r.Response.StatusCode() != http.StatusOK {
		return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload")
	}

	return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil
}
|
|
||||||
|
|
||||||
// InitFuzzUpload is an init hook for the external fuzzing harness; no setup
// is needed for the upload target.
func InitFuzzUpload() {

}
|
|
||||||
|
|
||||||
// DoFuzzUpload drives one Upload scenario from raw fuzzer input. It returns
// fuzzSuccessExitCode on success and fuzzFailExitCode when the input is too
// short or cannot drive the scenario.
func DoFuzzUpload(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, _, _, _, _, _, _, err = upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
// FuzzUpload is the go-native fuzz entry point for the upload path.
func FuzzUpload(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzUpload(data)
	})
}
|
|
||||||
|
|
||||||
// downloadOrHead builds a request ctx for a GET/HEAD scenario against the
// object just uploaded: it parses the upload response, attaches a (possibly
// randomized) FilePath attribute to the stored object, and fills the cid/oid
// user values — each possibly replaced by random data to probe error paths.
func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) {

	var putRes putResponse

	// NOTE(review): re-panicking with the response replaces the original
	// panic value — presumably to surface the response in crash reports;
	// confirm this loss of the original panic is intentional.
	defer func() {
		if r := recover(); r != nil {
			panic(resp)
		}
	}()

	data := resp.Response.Body()
	err := json.Unmarshal(data, &putRes)

	if err != nil {
		return nil, err
	}

	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
	attr := object.NewAttribute()
	attr.SetKey(object.AttributeFilePath)

	filename, err = maybeFillRandom(tp, filename)
	if err != nil {
		return nil, err
	}

	attr.SetValue(filename)
	obj.SetAttributes(append(obj.Attributes(), *attr)...)

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)

	// NOTE: these locals shadow the cid and oid SDK packages in this scope.
	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return nil, err
	}
	oid := putRes.ObjectID
	oid, err = maybeFillRandom(tp, oid)
	if err != nil {
		return nil, err
	}
	r.SetUserValue("cid", cid)
	r.SetUserValue("oid", oid)

	rnd, err := tp.GetBool()
	if err != nil {
		return nil, err
	}
	if rnd == true {
		r.SetUserValue("download", "true")
	}

	return r, nil
}
|
|
||||||
|
|
||||||
// InitFuzzGet is an init hook for the external fuzzing harness; no setup is
// needed for the download target.
func InitFuzzGet() {

}
|
|
||||||
|
|
||||||
// DoFuzzGet uploads an object and then fuzzes the download path against it.
// Returns fuzzSuccessExitCode on success and fuzzFailExitCode when the input
// cannot drive the scenario.
func DoFuzzGet(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
	if err != nil {
		return fuzzFailExitCode
	}

	hc.Handler().DownloadByAddressOrBucketName(r)

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
func FuzzGet(f *testing.F) {
|
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
|
||||||
DoFuzzUpload(data)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitFuzzHead is an init hook for the external fuzzing harness; no setup is
// needed for the head target.
func InitFuzzHead() {

}
|
|
||||||
|
|
||||||
// DoFuzzHead uploads an object and then fuzzes the HEAD path against it.
// Returns fuzzSuccessExitCode on success and fuzzFailExitCode when the input
// cannot drive the scenario.
func DoFuzzHead(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
	if err != nil {
		return fuzzFailExitCode
	}

	hc.Handler().HeadByAddressOrBucketName(r)

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
// FuzzHead is the go-native fuzz entry point for the HEAD path.
func FuzzHead(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzHead(data)
	})
}
|
|
||||||
|
|
||||||
// InitFuzzDownloadByAttribute is an init hook for the external fuzzing
// harness; no setup is needed for the download-by-attribute target.
func InitFuzzDownloadByAttribute() {

}
|
|
||||||
|
|
||||||
// DoFuzzDownloadByAttribute uploads an object and then fuzzes the
// download-by-attribute path, randomizing the container id and the
// attribute key/value to probe both hit and error paths.
func DoFuzzDownloadByAttribute(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	// NOTE: this local shadows the cid SDK package in this scope.
	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	attrKey, err = maybeFillRandom(tp, attrKey)
	if err != nil {
		return fuzzFailExitCode
	}

	attrVal, err = maybeFillRandom(tp, attrVal)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("attr_key", attrKey)
	r.SetUserValue("attr_val", attrVal)

	hc.Handler().DownloadByAttribute(r)

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
// FuzzDownloadByAttribute is the go-native fuzz entry point for the
// download-by-attribute path.
func FuzzDownloadByAttribute(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDownloadByAttribute(data)
	})
}
|
|
||||||
|
|
||||||
// InitFuzzHeadByAttribute is an init hook for the external fuzzing harness;
// no setup is needed for the head-by-attribute target.
func InitFuzzHeadByAttribute() {

}
|
|
||||||
|
|
||||||
// DoFuzzHeadByAttribute uploads an object and then fuzzes the
// head-by-attribute path, randomizing the container id and the attribute
// key/value to probe both hit and error paths.
func DoFuzzHeadByAttribute(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	// NOTE: this local shadows the cid SDK package in this scope.
	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	attrKey, err = maybeFillRandom(tp, attrKey)
	if err != nil {
		return fuzzFailExitCode
	}

	attrVal, err = maybeFillRandom(tp, attrVal)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("attr_key", attrKey)
	r.SetUserValue("attr_val", attrVal)

	hc.Handler().HeadByAttribute(r)

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
// FuzzHeadByAttribute is the go-native fuzz entry point for the
// head-by-attribute path.
func FuzzHeadByAttribute(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzHeadByAttribute(data)
	})
}
|
|
||||||
|
|
||||||
// InitFuzzDownloadZipped is an init hook for the external fuzzing harness;
// no setup is needed for the zip-download target.
func InitFuzzDownloadZipped() {

}
|
|
||||||
|
|
||||||
// DoFuzzDownloadZipped uploads an object and then fuzzes the zip-archive
// download path with a randomized container id and prefix.
func DoFuzzDownloadZipped(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, _, _, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	// NOTE: this local shadows the cid SDK package in this scope.
	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	prefix := ""
	prefix, err = maybeFillRandom(tp, prefix)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("prefix", prefix)

	hc.Handler().DownloadZip(r)

	return fuzzSuccessExitCode
}
|
|
||||||
|
|
||||||
// FuzzDownloadZipped is the go-native fuzz entry point for the zip-archive
// download path.
func FuzzDownloadZipped(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDownloadZipped(data)
	})
}
|
|
||||||
|
|
||||||
// InitFuzzStoreBearerTokenAppCtx is an init hook for the external fuzzing
// harness; no setup is needed for the bearer-token target.
func InitFuzzStoreBearerTokenAppCtx() {

}
|
|
||||||
|
|
||||||
func DoFuzzStoreBearerTokenAppCtx(input []byte) int {
|
|
||||||
// FUZZER INIT
|
|
||||||
if len(input) < 100 {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix := ""
|
|
||||||
prefix, err = maybeFillRandom(tp, prefix)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
ctx = middleware.SetNamespace(ctx, "")
|
|
||||||
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
|
|
||||||
strings, err := prepareStrings(tp, 3)
|
|
||||||
|
|
||||||
rand, err := prepareBools(tp, 2)
|
|
||||||
|
|
||||||
if rand[0] == true {
|
|
||||||
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
|
|
||||||
} else if rand[1] == true {
|
|
||||||
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
|
|
||||||
} else {
|
|
||||||
r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
|
|
||||||
r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
|
|
||||||
}
|
|
||||||
|
|
||||||
tokens.StoreBearerTokenAppCtx(ctx, r)
|
|
||||||
|
|
||||||
return fuzzSuccessExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
// FuzzStoreBearerTokenAppCtx is the go-native fuzz entry point for the
// bearer-token extraction path.
func FuzzStoreBearerTokenAppCtx(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzStoreBearerTokenAppCtx(data)
	})
}
|
|
|
@ -1,524 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/zip"
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/panjf2000/ants/v2"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// treeServiceMock is a minimal in-memory stub of the tree service used in
// handler tests.
type treeServiceMock struct {
	// system maps a system node name (e.g. "bucket-settings") to its versions.
	system map[string]map[string]*data.BaseNodeVersion
}

// newTreeService returns an empty tree service mock.
func newTreeService() *treeServiceMock {
	return &treeServiceMock{
		system: make(map[string]map[string]*data.BaseNodeVersion),
	}
}

// CheckSettingsNodeExists returns layer.ErrNodeNotFound until a
// "bucket-settings" entry is present; the bucket argument is ignored.
func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
	_, ok := t.system["bucket-settings"]
	if !ok {
		return layer.ErrNodeNotFound
	}
	return nil
}

// GetSubTreeByPrefix is a no-op stub; it always reports an empty listing.
func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
	return nil, "", nil
}

// GetLatestVersion is a no-op stub; it always returns (nil, nil).
func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
	return nil, nil
}
|
|
||||||
|
|
||||||
// configMock is a test stub for the handler configuration. All options are
// fixed to their zero-ish defaults except EnableFilepathFallback, which is
// toggled via the additionalSearch field.
type configMock struct {
	additionalSearch bool
}

// DefaultTimestamp reports whether a Timestamp attribute is added by default.
func (c *configMock) DefaultTimestamp() bool {
	return false
}

// ArchiveCompression reports whether archive downloads are compressed.
func (c *configMock) ArchiveCompression() bool {
	return false
}

// IndexPageEnabled reports whether index (listing) pages are served.
func (c *configMock) IndexPageEnabled() bool {
	return false
}

// IndexPageTemplate returns the custom index page template (none here).
func (c *configMock) IndexPageTemplate() string {
	return ""
}

// IndexPageNativeTemplate returns the native listing template (none here).
func (c *configMock) IndexPageNativeTemplate() string {
	return ""
}

// ClientCut reports whether client-side object cutting is enabled.
func (c *configMock) ClientCut() bool {
	return false
}

// BufferMaxSizeForPut returns the put buffer size limit (unlimited/zero here).
func (c *configMock) BufferMaxSizeForPut() uint64 {
	return 0
}

// NamespaceHeader returns the header used to carry the namespace (none here).
func (c *configMock) NamespaceHeader() string {
	return ""
}

// EnableFilepathFallback reports whether the additional FileName/FilePath
// fallback search is enabled; controlled per-test via additionalSearch.
func (c *configMock) EnableFilepathFallback() bool {
	return c.additionalSearch
}
|
|
||||||
|
|
||||||
// handlerContext bundles a Handler under test together with its key, owner,
// and the mock dependencies it was built from, so tests can both drive the
// handler and inspect/seed its backing state.
type handlerContext struct {
	key   *keys.PrivateKey // test signing key
	owner user.ID          // owner derived from key

	h       *Handler
	frostfs *TestFrostFS     // in-memory FrostFS mock
	tree    *treeServiceMock // in-memory tree service mock
	cfg     *configMock      // mutable handler configuration
}

// Handler returns the handler under test.
func (hc *handlerContext) Handler() *Handler {
	return hc.h
}
|
|
||||||
|
|
||||||
// prepareHandlerContext wires up a Handler with an in-memory FrostFS mock, a
// name resolver backed by that mock, a tiny single-entry bucket cache, and a
// single-worker pool — everything needed to run handler tests in isolation.
func prepareHandlerContext() (*handlerContext, error) {
	logger, err := zap.NewDevelopment()
	if err != nil {
		return nil, err
	}

	key, err := keys.NewPrivateKey()
	if err != nil {
		return nil, err
	}

	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	testFrostFS := NewTestFrostFS(key)

	// Resolve container names through the mock's own registry.
	testResolver := &resolver.Resolver{Name: "test_resolver"}
	testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
		return testFrostFS.ContainerID(name)
	})

	params := &AppParams{
		Logger:   logger,
		FrostFS:  testFrostFS,
		Owner:    &owner,
		Resolver: testResolver,
		Cache: cache.NewBucketCache(&cache.Config{
			Size:     1,
			Lifetime: 1,
			Logger:   logger,
		}, false),
	}

	treeMock := newTreeService()
	cfgMock := &configMock{}

	workerPool, err := ants.NewPool(1)
	if err != nil {
		return nil, err
	}
	handler := New(params, cfgMock, treeMock, workerPool)

	return &handlerContext{
		key:     key,
		owner:   owner,
		h:       handler,
		frostfs: testFrostFS,
		tree:    treeMock,
		cfg:     cfgMock,
	}, nil
}
|
|
||||||
|
|
||||||
// prepareContainer builds a REP-1 container owned by the test key with the
// given name and basic ACL, and registers the matching per-operation
// permissions in the FrostFS mock (owner always; others only when the basic
// ACL allows the operation).
func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
	var pp netmap.PlacementPolicy
	err := pp.DecodeString("REP 1")
	if err != nil {
		return cid.ID{}, nil, err
	}

	var cnr container.Container
	cnr.Init()
	cnr.SetOwner(hc.owner)
	cnr.SetPlacementPolicy(pp)
	cnr.SetBasicACL(basicACL)

	var domain container.Domain
	domain.SetName(name)
	container.WriteDomain(&cnr, domain)
	container.SetName(&cnr, name)
	container.SetCreationTime(&cnr, time.Now())

	cnrID := cidtest.ID()

	// NOTE(review): the loop is half-open, so acl.OpObjectHash itself is
	// never granted — confirm this exclusion is intentional.
	for op := acl.OpObjectGet; op < acl.OpObjectHash; op++ {
		hc.frostfs.AllowUserOperation(cnrID, hc.owner, op, oid.ID{})
		if basicACL.IsOpAllowed(op, acl.RoleOthers) {
			hc.frostfs.AllowUserOperation(cnrID, user.ID{}, op, oid.ID{})
		}
	}

	return cnrID, &cnr, nil
}
|
|
||||||
|
|
||||||
// TestBasic uploads one object into a public container and exercises the
// whole read surface against it: GET/HEAD by address, GET/HEAD by attribute,
// and the zip-archive download.
func TestBasic(t *testing.T) {
	hc, err := prepareHandlerContext()
	require.NoError(t, err)

	bktName := "bucket"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	content := "hello"
	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
	require.NoError(t, err)

	hc.Handler().Upload(r)
	require.Equal(t, r.Response.StatusCode(), http.StatusOK)

	var putRes putResponse
	err = json.Unmarshal(r.Response.Body(), &putRes)
	require.NoError(t, err)

	// Attach a FilePath attribute directly on the stored object so the
	// attribute- and zip-based lookups below can find it.
	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
	attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
	obj.SetAttributes(append(obj.Attributes(), attr)...)

	t.Run("get", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().DownloadByAddressOrBucketName(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().HeadByAddressOrBucketName(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("get by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().DownloadByAttribute(r)
		require.Equal(t, content, string(r.Response.Body()))

		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
		hc.Handler().DownloadByAttribute(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().HeadByAttribute(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))

		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
		hc.Handler().HeadByAttribute(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("zip", func(t *testing.T) {
		r = prepareGetZipped(ctx, bktName, "")
		hc.Handler().DownloadZip(r)

		// The archive must contain exactly the one uploaded file, with
		// intact content.
		readerAt := bytes.NewReader(r.Response.Body())
		zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
		require.NoError(t, err)
		require.Len(t, zipReader.File, 1)
		require.Equal(t, objFileName, zipReader.File[0].Name)
		f, err := zipReader.File[0].Open()
		require.NoError(t, err)
		defer func() {
			inErr := f.Close()
			require.NoError(t, inErr)
		}()
		data, err := io.ReadAll(f)
		require.NoError(t, err)
		require.Equal(t, content, string(data))
	})
}
|
|
||||||
|
|
||||||
// TestFindObjectByAttribute verifies findObjectByAttribute for both direct
// attribute hits and the optional fallback search (FilePath -> FileName),
// controlled by the additionalSearch config flag.
func TestFindObjectByAttribute(t *testing.T) {
	hc, err := prepareHandlerContext()
	require.NoError(t, err)
	hc.cfg.additionalSearch = true

	bktName := "bucket"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	// Upload one object; each sub-test rewrites its attributes in place.
	content := "hello"
	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
	require.NoError(t, err)

	hc.Handler().Upload(r)
	require.Equal(t, r.Response.StatusCode(), http.StatusOK)

	var putRes putResponse
	err = json.Unmarshal(r.Response.Body(), &putRes)
	require.NoError(t, err)

	testAttrVal1 := "/folder/cat.jpg"
	testAttrVal2 := "cat.jpg"
	testAttrVal3 := "test-attr-val3"

	for _, tc := range []struct {
		name             string
		firstAttr        object.Attribute
		secondAttr       object.Attribute
		reqAttrKey       string
		reqAttrValue     string
		err              string // substring expected in the returned error; empty means success
		additionalSearch bool
	}{
		{
			name:             "success search by FileName",
			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
			reqAttrKey:       attrFileName,
			reqAttrValue:     testAttrVal2,
			additionalSearch: false,
		},
		{
			name:             "failed search by FileName",
			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
			reqAttrKey:       attrFileName,
			reqAttrValue:     testAttrVal3,
			err:              "not found",
			additionalSearch: false,
		},
		{
			name:             "success search by FilePath (with additional search)",
			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
			reqAttrKey:       attrFilePath,
			reqAttrValue:     testAttrVal2,
			additionalSearch: true,
		},
		{
			name:             "failed by FilePath (with additional search)",
			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
			reqAttrKey:       attrFilePath,
			reqAttrValue:     testAttrVal3,
			err:              "not found",
			additionalSearch: true,
		},
		{
			name:             "success search by FilePath with leading slash (with additional search)",
			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
			reqAttrKey:       attrFilePath,
			reqAttrValue:     "/cat.jpg",
			additionalSearch: true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			// Mutate the already-stored object's attributes for this case.
			obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
			obj.SetAttributes(tc.firstAttr, tc.secondAttr)
			hc.cfg.additionalSearch = tc.additionalSearch

			objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
			if tc.err != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.err)
				return
			}

			require.NoError(t, err)
			require.Equal(t, putRes.ObjectID, objID.EncodeToString())
		})
	}
}
|
|
||||||
|
|
||||||
// TestNeedSearchByFileName checks the predicate deciding whether a failed
// FilePath lookup should be retried as a FileName lookup: only when the
// additional-search flag is on, the key is FilePath, and the value contains
// at most a single, leading slash.
func TestNeedSearchByFileName(t *testing.T) {
	hc, err := prepareHandlerContext()
	require.NoError(t, err)

	for _, tc := range []struct {
		name             string
		attrKey          string
		attrVal          string
		additionalSearch bool
		expected         bool
	}{
		{
			name:             "need search - not contains slash",
			attrKey:          attrFilePath,
			attrVal:          "cat.png",
			additionalSearch: true,
			expected:         true,
		},
		{
			name:             "need search - single lead slash",
			attrKey:          attrFilePath,
			attrVal:          "/cat.png",
			additionalSearch: true,
			expected:         true,
		},
		{
			name:             "don't need search - single slash but not lead",
			attrKey:          attrFilePath,
			attrVal:          "cats/cat.png",
			additionalSearch: true,
			expected:         false,
		},
		{
			name:             "don't need search - more one slash",
			attrKey:          attrFilePath,
			attrVal:          "/cats/cat.png",
			additionalSearch: true,
			expected:         false,
		},
		{
			name:             "don't need search - incorrect attribute key",
			attrKey:          attrFileName,
			attrVal:          "cat.png",
			additionalSearch: true,
			expected:         false,
		},
		{
			name:             "don't need search - additional search disabled",
			attrKey:          attrFilePath,
			attrVal:          "cat.png",
			additionalSearch: false,
			expected:         false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			hc.cfg.additionalSearch = tc.additionalSearch

			res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
			require.Equal(t, tc.expected, res)
		})
	}
}
|
|
||||||
|
|
||||||
func TestPrepareFileName(t *testing.T) {
|
|
||||||
fileName := "/cat.jpg"
|
|
||||||
expected := "cat.jpg"
|
|
||||||
actual := prepareFileName(fileName)
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
|
|
||||||
fileName = "cat.jpg"
|
|
||||||
actual = prepareFileName(fileName)
|
|
||||||
require.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
r.SetUserValue("cid", bucket)
|
|
||||||
return r, fillMultipartBody(r, content)
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.RequestCtx {
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
r.SetUserValue("cid", bucket)
|
|
||||||
r.SetUserValue("oid", objID)
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
r.SetUserValue("cid", bucket)
|
|
||||||
r.SetUserValue("attr_key", attrKey)
|
|
||||||
r.SetUserValue("attr_val", attrVal)
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.RequestCtx {
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
r.SetUserValue("cid", bucket)
|
|
||||||
r.SetUserValue("prefix", prefix)
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareObjectAttributes(attrKey, attrValue string) object.Attribute {
|
|
||||||
attr := object.NewAttribute()
|
|
||||||
attr.SetKey(attrKey)
|
|
||||||
attr.SetValue(attrValue)
|
|
||||||
return *attr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fixture values shared by the upload/download tests.
const (
	keyAttr     = "User-Attribute" // custom attribute key set via X-Attribute-* header
	valAttr     = "user value"     // value stored under keyAttr
	objFileName = "newFile.txt"    // FileName attribute of the uploaded object
)
|
|
||||||
|
|
||||||
func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {
|
|
||||||
attributes := map[string]string{
|
|
||||||
object.AttributeFileName: objFileName,
|
|
||||||
keyAttr: valAttr,
|
|
||||||
}
|
|
||||||
|
|
||||||
var buff bytes.Buffer
|
|
||||||
w := multipart.NewWriter(&buff)
|
|
||||||
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(fw, bytes.NewBufferString(content)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = w.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Request.SetBodyStream(&buff, buff.Len())
|
|
||||||
r.Request.Header.Set("Content-Type", w.FormDataContentType())
|
|
||||||
r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,163 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// max bytes needed to detect content type according to http.DetectContentType docs.
const sizeToDetectType = 512

// Response headers carrying the identifiers of the object being served.
const (
	hdrObjectID    = "X-Object-Id"
	hdrOwnerID     = "X-Owner-Id"
	hdrContainerID = "X-Container-Id"
)
|
|
||||||
|
|
||||||
// headObject performs a HEAD of the object at objectAddress and writes its
// metadata to the response: Content-Length, all valid user attributes as
// X-Attribute-* headers, Last-Modified (from the Timestamp attribute), the
// object/owner/container id headers and Content-Type. When the object has no
// Content-Type attribute, the type is sniffed from the first bytes of the
// payload via a ranged read.
func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid.Address) {
	var start = time.Now()

	btoken := bearerToken(ctx)

	prm := PrmObjectHead{
		PrmAuth: PrmAuth{
			BearerToken: btoken,
		},
		Address: objectAddress,
	}

	obj, err := h.frostfs.HeadObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
	var (
		contentType string
		filename    string
		filepath    string
	)
	for _, attr := range obj.Attributes() {
		key := attr.Key()
		val := attr.Value()
		// Skip attributes that would form invalid HTTP header tokens/values.
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeTimestamp:
			value, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				// Unparsable timestamp: log and keep going, the attribute
				// header itself has already been written above.
				req.log.Info(logs.CouldntParseCreationDate,
					zap.String("key", key),
					zap.String("val", val),
					zap.Error(err),
					logs.TagField(logs.TagDatapath))
				continue
			}
			req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
		case object.AttributeContentType:
			contentType = val
		case object.AttributeFilePath:
			filepath = val
		case object.AttributeFileName:
			filename = val
		}
	}
	// FileName takes priority; fall back to FilePath for type sniffing below.
	if filename == "" {
		filename = filepath
	}

	idsToResponse(&req.Response, obj)

	if len(contentType) == 0 {
		// No explicit Content-Type attribute: detect it from a payload
		// prefix fetched on demand (sz bytes starting at offset 0).
		contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
			prmRange := PrmObjectRange{
				PrmAuth: PrmAuth{
					BearerToken: btoken,
				},
				Address:      objectAddress,
				PayloadRange: [2]uint64{0, sz},
			}

			return h.frostfs.RangeObject(ctx, prmRange)
		}, filename)
		// io.EOF simply means the payload was shorter than the probe window.
		if err != nil && err != io.EOF {
			req.handleFrostFSErr(err, start)
			return
		}
	}
	req.SetContentType(contentType)
}
|
|
||||||
|
|
||||||
func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
|
|
||||||
objID, _ := obj.ID()
|
|
||||||
cnrID, _ := obj.ContainerID()
|
|
||||||
resp.Header.Set(hdrObjectID, objID.String())
|
|
||||||
resp.Header.Set(hdrOwnerID, obj.OwnerID().String())
|
|
||||||
resp.Header.Set(hdrContainerID, cnrID.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName")
	defer span.End()

	cidParam, _ := c.UserValue("cid").(string)
	oidParam, _ := c.UserValue("oid").(string)

	log := utils.GetReqLogOrDefault(ctx, h.log).With(
		zap.String("cid", cidParam),
		zap.String("oid", oidParam),
	)

	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}
	// The presence of an S3 settings node decides how oidParam is
	// interpreted: as an S3 key (tree lookup) or as a native object id.
	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
		log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
			zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
		logAndSendBucketError(c, log, checkS3Err)
		return
	}

	req := newRequest(c, log)

	var objID oid.ID
	if checkS3Err == nil {
		// S3 bucket: treat oidParam as an object key in the tree service.
		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
	} else if err = objID.DecodeString(oidParam); err == nil {
		// Native addressing: oidParam parsed as an object id.
		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
	} else {
		// NOTE(review): this branch reports checkS3Err (ErrNodeNotFound from
		// the settings lookup) rather than the oid decode error `err` —
		// looks intentional to present a "bucket-style" error, but confirm.
		logAndSendBucketError(c, log, checkS3Err)
	}
}
|
|
||||||
|
|
||||||
// HeadByAttribute handles attribute-based head requests.
// It opens a tracing span, stores the span context back into the request and
// delegates object resolution to byAttribute with headObject as the action.
func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute")
	defer span.End()
	utils.SetContextToRequest(ctx, c)

	h.byAttribute(c, h.headObject)
}
|
|
|
@ -1,26 +0,0 @@
|
||||||
package middleware
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// keyWrapper is wrapper for context keys.
|
|
||||||
type keyWrapper string
|
|
||||||
|
|
||||||
const nsKey = keyWrapper("namespace")
|
|
||||||
|
|
||||||
// GetNamespace extract namespace from context.
|
|
||||||
func GetNamespace(ctx context.Context) (string, error) {
|
|
||||||
ns, ok := ctx.Value(nsKey).(string)
|
|
||||||
if !ok {
|
|
||||||
return "", fmt.Errorf("couldn't get namespace from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
return ns, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetNamespace sets namespace in the context.
|
|
||||||
func SetNamespace(ctx context.Context, ns string) context.Context {
|
|
||||||
return context.WithValue(ctx, nsKey, ns)
|
|
||||||
}
|
|
|
@ -1,80 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// attributeMultipartObjectSize is the object attribute holding the combined
// size of an S3 multipart object.
const attributeMultipartObjectSize = "S3-Multipart-Object-Size"

// MultipartFile provides standard ReadCloser interface and also allows one to
// get file name, it's used for multipart uploads.
type MultipartFile interface {
	io.ReadCloser
	FileName() string
}
|
|
||||||
|
|
||||||
// fetchMultipartFile scans the multipart stream r and returns the first part
// that is an actual file upload (has both a form name and a filename).
// Non-file parts are logged and skipped. When no file part exists, the error
// from NextPart (io.EOF at end of stream) is returned.
func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
	// To have a custom buffer (3mb) the custom multipart reader is used.
	// Default reader uses 4KiB chunks, which slow down upload speed up to 400%
	// https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32
	reader := multipart.NewReader(r, boundary)

	for {
		part, err := reader.NextPart()
		if err != nil {
			return nil, err
		}

		name := part.FormName()
		if name == "" {
			l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
			continue
		}

		filename := part.FileName()

		// ignore multipart/form-data values
		if filename == "" {
			l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
			// Drain and close the skipped part so the reader can advance.
			if err = part.Close(); err != nil {
				l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
			}
			continue
		}

		return part, nil
	}
}
|
|
||||||
|
|
||||||
// getPayload returns initial payload if object is not multipart else composes new reader with parts data.
// The total size is parsed from p.strSize (the multipart-object-size attribute value).
func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
	cid, ok := p.obj.Header.ContainerID()
	if !ok {
		return nil, 0, errors.New("no container id set")
	}
	// NOTE(review): this local `oid` shadows the `oid` package alias imported
	// by this file; harmless here but worth renaming.
	oid, ok := p.obj.Header.ID()
	if !ok {
		return nil, 0, errors.New("no object id set")
	}
	size, err := strconv.ParseUint(p.strSize, 10, 64)
	if err != nil {
		return nil, 0, err
	}
	ctx := p.req.RequestCtx
	params := PrmInitMultiObjectReader{
		Addr:   newAddress(cid, oid),
		Bearer: bearerToken(ctx),
	}
	payload, err := h.frostfs.InitMultiObjectReader(ctx, params)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(payload), size, nil
}
|
|
|
@ -1,423 +0,0 @@
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
//
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package multipart implements MIME multipart parsing, as defined in RFC
|
|
||||||
2046.
|
|
||||||
|
|
||||||
The implementation is sufficient for HTTP (RFC 2388) and the multipart
|
|
||||||
bodies generated by popular browsers.
|
|
||||||
*/
|
|
||||||
package multipart
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime"
|
|
||||||
"mime/quotedprintable"
|
|
||||||
"net/textproto"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// emptyParams is the shared fallback for unparsable Content-Disposition headers.
var emptyParams = make(map[string]string)

// This constant needs to be at least 76 for this package to work correctly.
// This is because \r\n--separator_of_len_70- would fill the buffer and it
// wouldn't be safe to consume a single byte from it.
// This constant is different from the constant in stdlib. The standard value is 4096.
const peekBufferSize = 3 << 20
|
|
||||||
|
|
||||||
// A Part represents a single part in a multipart body.
type Part struct {
	// The headers of the body, if any, with the keys canonicalized
	// in the same fashion that the Go http.Request headers are.
	// For example, "foo-bar" changes case to "Foo-Bar"
	Header textproto.MIMEHeader

	// mr is the parent Reader this part was produced by.
	mr *Reader

	// Lazily-parsed Content-Disposition value and its parameters
	// (see parseContentDisposition).
	disposition       string
	dispositionParams map[string]string

	// r is either a reader directly reading from mr, or it's a
	// wrapper around such a reader, decoding the
	// Content-Transfer-Encoding
	r io.Reader

	n       int   // known data bytes waiting in mr.bufReader
	total   int64 // total data bytes read already
	err     error // error to return when n == 0
	readErr error // read error observed from mr.bufReader
}
|
|
||||||
|
|
||||||
// FormName returns the name parameter if p has a Content-Disposition
|
|
||||||
// of type "form-data". Otherwise it returns the empty string.
|
|
||||||
func (p *Part) FormName() string {
|
|
||||||
// See https://tools.ietf.org/html/rfc2183 section 2 for EBNF
|
|
||||||
// of Content-Disposition value format.
|
|
||||||
if p.dispositionParams == nil {
|
|
||||||
p.parseContentDisposition()
|
|
||||||
}
|
|
||||||
if p.disposition != "form-data" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return p.dispositionParams["name"]
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileName returns the filename parameter of the Part's
|
|
||||||
// Content-Disposition header.
|
|
||||||
func (p *Part) FileName() string {
|
|
||||||
if p.dispositionParams == nil {
|
|
||||||
p.parseContentDisposition()
|
|
||||||
}
|
|
||||||
return p.dispositionParams["filename"]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Part) parseContentDisposition() {
|
|
||||||
v := p.Header.Get("Content-Disposition")
|
|
||||||
var err error
|
|
||||||
p.disposition, p.dispositionParams, err = mime.ParseMediaType(v)
|
|
||||||
if err != nil {
|
|
||||||
p.dispositionParams = emptyParams
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates a new multipart Reader reading from r using the
// given MIME boundary.
//
// The boundary is usually obtained from the "boundary" parameter of
// the message's "Content-Type" header. Use mime.ParseMediaType to
// parse such headers.
func NewReader(r io.Reader, boundary string) *Reader {
	// All four boundary markers are slices of the single backing string
	// "\r\n--boundary--": nl="\r\n", nlDashBoundary="\r\n--boundary",
	// dashBoundaryDash="--boundary--", dashBoundary="--boundary".
	b := []byte("\r\n--" + boundary + "--")
	return &Reader{
		bufReader:        bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
		nl:               b[:2],
		nlDashBoundary:   b[:len(b)-2],
		dashBoundaryDash: b[2:],
		dashBoundary:     b[2 : len(b)-2],
	}
}
|
|
||||||
|
|
||||||
// stickyErrorReader is an io.Reader which never calls Read on its
|
|
||||||
// underlying Reader once an error has been seen. (the io.Reader
|
|
||||||
// interface's contract promises nothing about the return values of
|
|
||||||
// Read calls after an error, yet this package does do multiple Reads
|
|
||||||
// after error).
|
|
||||||
type stickyErrorReader struct {
|
|
||||||
r io.Reader
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
|
|
||||||
if r.err != nil {
|
|
||||||
return 0, r.err
|
|
||||||
}
|
|
||||||
n, r.err = r.r.Read(p)
|
|
||||||
return n, r.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// newPart constructs the next Part for mr: it reads the part's MIME headers,
// wires the body reader, and (unless rawPart is set) transparently decodes a
// quoted-printable Content-Transfer-Encoding, hiding that header.
func newPart(mr *Reader, rawPart bool) (*Part, error) {
	bp := &Part{
		Header: make(map[string][]string),
		mr:     mr,
	}
	if err := bp.populateHeaders(); err != nil {
		return nil, err
	}
	bp.r = partReader{bp}

	// rawPart is used to switch between Part.NextPart and Part.NextRawPart.
	if !rawPart {
		const cte = "Content-Transfer-Encoding"
		if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") {
			// Hide the header and decode on the fly during Read.
			bp.Header.Del(cte)
			bp.r = quotedprintable.NewReader(bp.r)
		}
	}
	return bp, nil
}
|
|
||||||
|
|
||||||
func (p *Part) populateHeaders() error {
|
|
||||||
r := textproto.NewReader(p.mr.bufReader)
|
|
||||||
header, err := r.ReadMIMEHeader()
|
|
||||||
if err == nil {
|
|
||||||
p.Header = header
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads the body of a part, after its headers and before the
|
|
||||||
// next part (if any) begins.
|
|
||||||
func (p *Part) Read(d []byte) (n int, err error) {
|
|
||||||
return p.r.Read(d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// partReader implements io.Reader by reading raw bytes directly from the
// wrapped *Part, without doing any Transfer-Encoding decoding.
type partReader struct {
	p *Part
}

// Read returns body bytes of the wrapped part up to (but excluding) the next
// boundary. It peeks at the parent's buffered data, lets scanUntilBoundary
// decide how many bytes are safe to hand out, and forces more input into the
// buffer when the decision is still ambiguous.
func (pr partReader) Read(d []byte) (int, error) {
	p := pr.p
	br := p.mr.bufReader

	// Read into buffer until we identify some data to return,
	// or we find a reason to stop (boundary or read error).
	for p.n == 0 && p.err == nil {
		peek, _ := br.Peek(br.Buffered())
		p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr)
		if p.n == 0 && p.err == nil {
			// Force buffered I/O to read more into buffer.
			_, p.readErr = br.Peek(len(peek) + 1)
			if p.readErr == io.EOF {
				// EOF before a closing boundary is a truncated body.
				p.readErr = io.ErrUnexpectedEOF
			}
		}
	}

	// Read out from "data to return" part of buffer.
	if p.n == 0 {
		return 0, p.err
	}
	n := len(d)
	if n > p.n {
		n = p.n
	}
	n, _ = br.Read(d[:n])
	p.total += int64(n)
	p.n -= n
	if p.n == 0 {
		return n, p.err
	}
	return n, nil
}
|
|
||||||
|
|
||||||
// scanUntilBoundary scans buf to identify how much of it can be safely
// returned as part of the Part body.
// dashBoundary is "--boundary".
// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in.
// The comments below (and the name) assume "\n--boundary", but either is accepted.
// total is the number of bytes read out so far. If total == 0, then a leading "--boundary" is recognized.
// readErr is the read error, if any, that followed reading the bytes in buf.
// scanUntilBoundary returns the number of data bytes from buf that can be
// returned as part of the Part body and also the error to return (if any)
// once those data bytes are done.
func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) {
	if total == 0 {
		// At beginning of body, allow dashBoundary.
		if bytes.HasPrefix(buf, dashBoundary) {
			switch matchAfterPrefix(buf, dashBoundary, readErr) {
			case -1:
				// Not actually a boundary ("--boundaryX..."): it is body data.
				return len(dashBoundary), nil
			case 0:
				// Undecided: need more input.
				return 0, nil
			case +1:
				// Boundary confirmed: part body is empty.
				return 0, io.EOF
			}
		}
		if bytes.HasPrefix(dashBoundary, buf) {
			// buf is a proper prefix of the boundary: need more input.
			return 0, readErr
		}
	}

	// Search for "\n--boundary".
	if i := bytes.Index(buf, nlDashBoundary); i >= 0 {
		switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) {
		case -1:
			return i + len(nlDashBoundary), nil
		case 0:
			return i, nil
		case +1:
			return i, io.EOF
		}
	}
	if bytes.HasPrefix(nlDashBoundary, buf) {
		return 0, readErr
	}

	// Otherwise, anything up to the final \n is not part of the boundary
	// and so must be part of the body.
	// Also if the section from the final \n onward is not a prefix of the boundary,
	// it too must be part of the body.
	i := bytes.LastIndexByte(buf, nlDashBoundary[0])
	if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) {
		return i, nil
	}
	return len(buf), readErr
}
|
|
||||||
|
|
||||||
// matchAfterPrefix checks whether buf should be considered to match the boundary.
|
|
||||||
// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary",
|
|
||||||
// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true.
|
|
||||||
//
|
|
||||||
// matchAfterPrefix returns +1 if the buffer does match the boundary,
|
|
||||||
// meaning the prefix is followed by a dash, space, tab, cr, nl, or end of input.
|
|
||||||
// It returns -1 if the buffer definitely does NOT match the boundary,
|
|
||||||
// meaning the prefix is followed by some other character.
|
|
||||||
// For example, "--foobar" does not match "--foo".
|
|
||||||
// It returns 0 more input needs to be read to make the decision,
|
|
||||||
// meaning that len(buf) == len(prefix) and readErr == nil.
|
|
||||||
func matchAfterPrefix(buf, prefix []byte, readErr error) int {
|
|
||||||
if len(buf) == len(prefix) {
|
|
||||||
if readErr != nil {
|
|
||||||
return +1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
c := buf[len(prefix)]
|
|
||||||
if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' {
|
|
||||||
return +1
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close drains the remainder of the part's body so the parent Reader is
// positioned at the next boundary; it never fails.
func (p *Part) Close() error {
	_, _ = io.Copy(io.Discard, p)
	return nil
}
|
|
||||||
|
|
||||||
// Reader is an iterator over parts in a MIME multipart body.
// Reader's underlying parser consumes its input as needed. Seeking
// isn't supported.
type Reader struct {
	bufReader *bufio.Reader

	currentPart *Part // part returned by the last NextPart call, drained before the next one
	partsRead   int   // number of boundary-delimited parts seen so far

	nl               []byte // "\r\n" or "\n" (set after seeing first boundary line)
	nlDashBoundary   []byte // nl + "--boundary"
	dashBoundaryDash []byte // "--boundary--"
	dashBoundary     []byte // "--boundary"
}
|
|
||||||
|
|
||||||
// NextPart returns the next part in the multipart or an error.
// When there are no more parts, the error io.EOF is returned.
//
// As a special case, if the "Content-Transfer-Encoding" header
// has a value of "quoted-printable", that header is instead
// hidden and the body is transparently decoded during Read calls.
func (r *Reader) NextPart() (*Part, error) {
	return r.nextPart(false)
}

// NextRawPart returns the next part in the multipart or an error.
// When there are no more parts, the error io.EOF is returned.
//
// Unlike NextPart, it does not have special handling for
// "Content-Transfer-Encoding: quoted-printable".
func (r *Reader) NextRawPart() (*Part, error) {
	return r.nextPart(true)
}
|
|
||||||
|
|
||||||
// nextPart advances the reader past the current part (if any) and scans
// line-by-line for the next boundary delimiter, the final boundary, or a
// malformed stream. rawPart controls quoted-printable handling in newPart.
func (r *Reader) nextPart(rawPart bool) (*Part, error) {
	// Drain the previous part so bufReader is positioned at a boundary line.
	if r.currentPart != nil {
		r.currentPart.Close()
	}
	if string(r.dashBoundary) == "--" {
		return nil, fmt.Errorf("multipart: boundary is empty")
	}
	expectNewPart := false
	for {
		line, err := r.bufReader.ReadSlice('\n')

		if err == io.EOF && r.isFinalBoundary(line) {
			// If the buffer ends in "--boundary--" without the
			// trailing "\r\n", ReadSlice will return an error
			// (since it's missing the '\n'), but this is a valid
			// multipart EOF so we need to return io.EOF instead of
			// a fmt-wrapped one.
			return nil, io.EOF
		}
		if err != nil {
			return nil, fmt.Errorf("multipart: NextPart: %v", err)
		}

		if r.isBoundaryDelimiterLine(line) {
			r.partsRead++
			bp, err := newPart(r, rawPart)
			if err != nil {
				return nil, err
			}
			r.currentPart = bp
			return bp, nil
		}

		if r.isFinalBoundary(line) {
			// Expected EOF
			return nil, io.EOF
		}

		if expectNewPart {
			return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line))
		}

		if r.partsRead == 0 {
			// skip line: preamble before the first boundary is ignored.
			continue
		}

		// Consume the "\n" or "\r\n" separator between the
		// body of the previous part and the boundary line we
		// now expect will follow. (either a new part or the
		// end boundary)
		if bytes.Equal(line, r.nl) {
			expectNewPart = true
			continue
		}

		return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line)
	}
}
|
|
||||||
|
|
||||||
// isFinalBoundary reports whether line is the final boundary line
|
|
||||||
// indicating that all parts are over.
|
|
||||||
// It matches `^--boundary--[ \t]*(\r\n)?$`.
|
|
||||||
func (r *Reader) isFinalBoundary(line []byte) bool {
|
|
||||||
if !bytes.HasPrefix(line, r.dashBoundaryDash) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
rest := line[len(r.dashBoundaryDash):]
|
|
||||||
rest = skipLWSPChar(rest)
|
|
||||||
return len(rest) == 0 || bytes.Equal(rest, r.nl)
|
|
||||||
}
|
|
||||||
|
|
||||||
// isBoundaryDelimiterLine reports whether line opens a new part.
// As a side effect, on the very first part it may switch the reader
// into bare-"\n" line-ending mode (trimming r.nl / r.nlDashBoundary).
func (r *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) {
	// https://tools.ietf.org/html/rfc2046#section-5.1
	// The boundary delimiter line is then defined as a line
	// consisting entirely of two hyphen characters ("-",
	// decimal value 45) followed by the boundary parameter
	// value from the Content-Type header field, optional linear
	// whitespace, and a terminating CRLF.
	if !bytes.HasPrefix(line, r.dashBoundary) {
		return false
	}
	rest := line[len(r.dashBoundary):]
	rest = skipLWSPChar(rest)

	// On the first part, see our lines are ending in \n instead of \r\n
	// and switch into that mode if so. This is a violation of the spec,
	// but occurs in practice.
	if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' {
		r.nl = r.nl[1:]
		r.nlDashBoundary = r.nlDashBoundary[1:]
	}
	return bytes.Equal(rest, r.nl)
}
|
|
||||||
|
|
||||||
// skipLWSPChar returns b with leading spaces and tabs removed.
// RFC 822 defines:
//
//	LWSP-char = SPACE / HTAB
func skipLWSPChar(b []byte) []byte {
	i := 0
	for i < len(b) && (b[i] == ' ' || b[i] == '\t') {
		i++
	}
	return b[i:]
}
|
|
|
@ -1,163 +0,0 @@
|
||||||
//go:build !integration
|
|
||||||
|
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// generateRandomFile creates a temp file filled with size bytes of
// cryptographically random data and returns its path.
func generateRandomFile(size int64) (string, error) {
	tmp, err := os.CreateTemp("", "data")
	if err != nil {
		return "", err
	}
	if _, err = io.CopyN(tmp, rand.Reader, size); err != nil {
		return "", err
	}
	return tmp.Name(), tmp.Close()
}
|
|
||||||
|
|
||||||
// BenchmarkAll compares three ways of consuming a 256 MiB multipart
// upload: reading the raw stream, Go's default multipart reader, and
// the gateway's custom reader (fetchMultipartFile).
func BenchmarkAll(b *testing.B) {
	fileName, err := generateRandomFile(1024 * 1024 * 256)
	require.NoError(b, err)
	fmt.Println(fileName)
	defer os.Remove(fileName)

	b.Run("bare", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			err := bareRead(fileName)
			require.NoError(b, err)
		}
	})

	b.Run("default", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			err := defaultMultipart(fileName)
			require.NoError(b, err)
		}
	})

	b.Run("custom", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			err := customMultipart(fileName)
			require.NoError(b, err)
		}
	})
}
|
|
||||||
|
|
||||||
func defaultMultipart(filename string) error {
|
|
||||||
r, bound := multipartFile(filename)
|
|
||||||
|
|
||||||
logger, err := zap.NewProduction()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
file, err := fetchMultipartFileDefault(logger, r, bound)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(io.Discard, file)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestName runs the default multipart path over a random 256 MiB file.
// NOTE(review): the test asserts nothing beyond error-freedom and
// prints the temp file name — looks like a leftover debug harness;
// consider renaming or removing.
func TestName(t *testing.T) {
	fileName, err := generateRandomFile(1024 * 1024 * 256)
	require.NoError(t, err)
	fmt.Println(fileName)
	defer os.Remove(fileName)

	err = defaultMultipart(fileName)
	require.NoError(t, err)
}
|
|
||||||
|
|
||||||
func customMultipart(filename string) error {
|
|
||||||
r, bound := multipartFile(filename)
|
|
||||||
|
|
||||||
logger, err := zap.NewProduction()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
file, err := fetchMultipartFile(logger, r, bound)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.Copy(io.Discard, file)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) {
|
|
||||||
reader := multipart.NewReader(r, boundary)
|
|
||||||
|
|
||||||
for {
|
|
||||||
part, err := reader.NextPart()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
name := part.FormName()
|
|
||||||
if name == "" {
|
|
||||||
l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
filename := part.FileName()
|
|
||||||
|
|
||||||
// ignore multipart/form-data values
|
|
||||||
if filename == "" {
|
|
||||||
l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return part, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func bareRead(filename string) error {
|
|
||||||
r, _ := multipartFile(filename)
|
|
||||||
|
|
||||||
_, err := io.Copy(io.Discard, r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func multipartFile(filename string) (*io.PipeReader, string) {
|
|
||||||
r, w := io.Pipe()
|
|
||||||
m := multipart.NewWriter(w)
|
|
||||||
go func() {
|
|
||||||
defer w.Close()
|
|
||||||
defer m.Close()
|
|
||||||
part, err := m.CreateFormFile("myFile", "foo.txt")
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
file, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
if _, err = io.Copy(part, file); err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return r, m.Boundary()
|
|
||||||
}
|
|
|
@ -1,199 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"mime"
|
|
||||||
"net/http"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// readCloser pairs an arbitrary Reader with an independent Closer so a
// partially-consumed payload can be re-wrapped (the sniffed head plus
// the remaining stream) while still closing the original stream.
type readCloser struct {
	io.Reader
	io.Closer
}
|
|
||||||
|
|
||||||
// initializes io.Reader with the limited size and detects Content-Type from it.
|
|
||||||
// Returns r's error directly. Also returns the processed data.
|
|
||||||
func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), filename string) (string, []byte, error) {
|
|
||||||
if maxSize > sizeToDetectType {
|
|
||||||
maxSize = sizeToDetectType
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, maxSize) // maybe sync-pool the slice?
|
|
||||||
|
|
||||||
r, err := rInit(maxSize)
|
|
||||||
if err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := r.Read(buf)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
buf = buf[:n]
|
|
||||||
|
|
||||||
contentType := http.DetectContentType(buf)
|
|
||||||
|
|
||||||
// Since the detector detects the "text/plain" content type for various types of text files,
|
|
||||||
// including CSS, JavaScript, and CSV files,
|
|
||||||
// we'll determine the final content type based on the file's extension.
|
|
||||||
if strings.HasPrefix(contentType, "text/plain") {
|
|
||||||
ext := path.Ext(filename)
|
|
||||||
// If the file doesn't have a file extension, we'll keep the content type as is.
|
|
||||||
if len(ext) > 0 {
|
|
||||||
contentType = mime.TypeByExtension(ext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return contentType, buf, err // to not lose io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
// getMultiobjectBodyParams bundles the inputs needed to build a
// combined payload reader for a multipart (large) object.
type getMultiobjectBodyParams struct {
	obj     *Object // fetched object (header + payload reader)
	req     request
	strSize string // combined size from the multipart-size attribute, decimal string
}
|
|
||||||
|
|
||||||
// receiveFile fetches the object at objAddress from FrostFS and streams
// it into the HTTP response: object attributes become response headers,
// the file name and Content-Disposition are resolved, and Content-Type
// is sniffed from the payload head when the attribute is absent.
func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
	var (
		shouldDownload = req.QueryArgs().GetBool("download")
		start          = time.Now()
		filename       string
		filepath       string
		contentType    string
	)

	prm := PrmObjectGet{
		PrmAuth: PrmAuth{
			BearerToken: bearerToken(ctx),
		},
		Address: objAddress,
	}

	rObj, err := h.frostfs.GetObject(ctx, prm)
	if err != nil {
		req.handleFrostFSErr(err, start)
		return
	}

	// we can't close reader in this function, so how to do it?
	req.setIDs(rObj.Header)
	payload := rObj.Payload
	payloadSize := rObj.Header.PayloadSize()
	for _, attr := range rObj.Header.Attributes() {
		key := attr.Key()
		val := attr.Value()
		// Skip attributes that cannot be represented as HTTP header
		// token/value pairs.
		if !isValidToken(key) || !isValidValue(val) {
			continue
		}

		key = utils.BackwardTransformIfSystem(key)

		req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val)
		switch key {
		case object.AttributeFileName:
			filename = val
		case object.AttributeTimestamp:
			if err = req.setTimestamp(val); err != nil {
				req.log.Error(logs.CouldntParseCreationDate,
					zap.String("val", val),
					zap.Error(err),
					logs.TagField(logs.TagDatapath))
			}
		case object.AttributeContentType:
			contentType = val
		case object.AttributeFilePath:
			filepath = val
		case attributeMultipartObjectSize:
			// Large (multipart) objects carry their real size in an
			// attribute; swap in a combined reader over all parts.
			payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
				obj:     rObj,
				req:     req,
				strSize: val,
			})
			if err != nil {
				req.handleFrostFSErr(err, start)
				return
			}
		}
	}
	if filename == "" {
		// Fall back to FilePath when the FileName attribute is absent.
		filename = filepath
	}

	req.setDisposition(shouldDownload, filename)

	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))

	if len(contentType) == 0 {
		// determine the Content-Type from the payload head
		var payloadHead []byte

		contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
			return payload, nil
		}, filename)
		if err != nil && err != io.EOF {
			req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
			ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		// reset payload reader since a part of the data has been read
		var headReader io.Reader = bytes.NewReader(payloadHead)

		if err != io.EOF { // otherwise, we've already read full payload
			headReader = io.MultiReader(headReader, payload)
		}

		// note: we could do with io.Reader, but SetBodyStream below closes body stream
		// if it implements io.Closer and that's useful for us.
		payload = readCloser{headReader, payload}
	}
	req.SetContentType(contentType)
	req.Response.SetBodyStream(payload, int(payloadSize))
}
|
|
||||||
|
|
||||||
func (r *request) setIDs(obj object.Object) {
|
|
||||||
objID, _ := obj.ID()
|
|
||||||
cnrID, _ := obj.ContainerID()
|
|
||||||
r.Response.Header.Set(hdrObjectID, objID.String())
|
|
||||||
r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
|
|
||||||
r.Response.Header.Set(hdrContainerID, cnrID.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *request) setDisposition(shouldDownload bool, filename string) {
|
|
||||||
const (
|
|
||||||
inlineDisposition = "inline"
|
|
||||||
attachmentDisposition = "attachment"
|
|
||||||
)
|
|
||||||
|
|
||||||
dis := inlineDisposition
|
|
||||||
if shouldDownload {
|
|
||||||
dis = attachmentDisposition
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *request) setTimestamp(timestamp string) error {
|
|
||||||
value, err := strconv.ParseInt(timestamp, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r.Response.Header.Set(fasthttp.HeaderLastModified,
|
|
||||||
time.Unix(value, 0).UTC().Format(http.TimeFormat))
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,89 +0,0 @@
|
||||||
//go:build !integration
|
|
||||||
|
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	txtContentType        = "text/plain; charset=utf-8"
	cssContentType        = "text/css; charset=utf-8"
	htmlContentType       = "text/html; charset=utf-8"
	javascriptContentType = "text/javascript; charset=utf-8"

	// htmlBody is a snippet long enough for content sniffing to
	// recognize HTML without relying on the file extension.
	htmlBody = "<!DOCTYPE html><html ><head><meta charset=\"utf-8\"><title>Test Html</title>"
)
|
|
||||||
|
|
||||||
// TestDetector checks readContentType across payload sizes (below and
// above the 512-byte sniff window), extension-driven overrides of
// "text/plain" (css/js/html) and content-driven detection when the
// extension is unknown or absent.
func TestDetector(t *testing.T) {
	sb := strings.Builder{}
	for i := 0; i < 10; i++ {
		sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")
	}

	for _, tc := range []struct {
		Name                string
		ExpectedContentType string
		Content             string
		FileName            string
	}{
		{
			Name:                "less than 512b",
			ExpectedContentType: txtContentType,
			Content:             sb.String()[:256],
			FileName:            "test.txt",
		},
		{
			Name:                "more than 512b",
			ExpectedContentType: txtContentType,
			Content:             sb.String(),
			FileName:            "test.txt",
		},
		{
			Name:                "css content type",
			ExpectedContentType: cssContentType,
			Content:             sb.String(),
			FileName:            "test.css",
		},
		{
			Name:                "javascript content type",
			ExpectedContentType: javascriptContentType,
			Content:             sb.String(),
			FileName:            "test.js",
		},
		{
			Name:                "html content type by file content",
			ExpectedContentType: htmlContentType,
			Content:             htmlBody,
			FileName:            "test.detect-by-content",
		},
		{
			Name:                "html content type by file extension",
			ExpectedContentType: htmlContentType,
			Content:             sb.String(),
			FileName:            "test.html",
		},
		{
			Name:                "empty file extension",
			ExpectedContentType: txtContentType,
			Content:             sb.String(),
			FileName:            "test",
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			contentType, data, err := readContentType(uint64(len(tc.Content)),
				func(uint64) (io.Reader, error) {
					return strings.NewReader(tc.Content), nil
				}, tc.FileName,
			)

			require.NoError(t, err)
			require.Equal(t, tc.ExpectedContentType, contentType)
			// The returned head must be a prefix of the original content.
			require.True(t, strings.HasPrefix(tc.Content, string(data)))
		})
	}
}
|
|
|
@ -1,295 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	jsonHeader = "application/json; charset=UTF-8"
	// drainBufSize is the scratch-buffer size used by Upload to drain
	// the rest of the request body after the first part is consumed.
	drainBufSize = 4096
	// explodeArchiveHeader, when present on the request, makes Upload
	// unpack the body as a (possibly gzipped) tar archive.
	explodeArchiveHeader = "X-Explode-Archive"
)

// putResponse is the JSON body returned for a successful upload.
type putResponse struct {
	ObjectID    string `json:"object_id"`
	ContainerID string `json:"container_id"`
}
|
|
||||||
|
|
||||||
func newPutResponse(addr oid.Address) *putResponse {
|
|
||||||
return &putResponse{
|
|
||||||
ObjectID: addr.Object().EncodeToString(),
|
|
||||||
ContainerID: addr.Container().EncodeToString(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pr *putResponse) encode(w io.Writer) error {
|
|
||||||
enc := json.NewEncoder(w)
|
|
||||||
enc.SetIndent("", "\t")
|
|
||||||
return enc.Encode(pr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Upload handles multipart upload request.
// It resolves the bucket from the "cid" path value, extracts the first
// file part from the multipart body, and stores it either as a single
// object or — when the X-Explode-Archive header is set — as one object
// per file inside the uploaded archive.
func (h *Handler) Upload(c *fasthttp.RequestCtx) {
	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload")
	defer span.End()
	utils.SetContextToRequest(ctx, c)

	var file MultipartFile

	scid, _ := c.UserValue("cid").(string)
	bodyStream := c.RequestBodyStream()
	drainBuf := make([]byte, drainBufSize)

	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
	log := reqLog.With(zap.String("cid", scid))

	bktInfo, err := h.getBucketInfo(ctx, scid, log)
	if err != nil {
		logAndSendBucketError(c, log, err)
		return
	}

	boundary := string(c.Request.Header.MultipartFormBoundary())
	if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	filtered, err := filterHeaders(log, &c.Request.Header)
	if err != nil {
		log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
		return
	}

	if c.Request.Header.Peek(explodeArchiveHeader) != nil {
		h.explodeArchive(request{c, log}, bktInfo, file, filtered)
	} else {
		h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
	}

	// Multipart is multipart and thus can contain more than one part which
	// we ignore at the moment. Also, when dealing with chunked encoding
	// the last zero-length chunk might be left unread (because multipart
	// reader only cares about its boundary and doesn't look further) and
	// it will be (erroneously) interpreted as the start of the next
	// pipelined header. Thus, we need to drain the body buffer.
	for {
		_, err = bodyStream.Read(drainBuf)
		if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
			break
		}
	}
}
|
|
||||||
|
|
||||||
// uploadSingleObject stores the multipart file as one FrostFS object
// and writes a JSON {object_id, container_id} response on success.
func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
	c, log := req.RequestCtx, req.log

	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
	defer span.End()
	utils.SetContextToRequest(ctx, c)

	// Use the part's file name as FileName unless the client set it via header.
	setIfNotExist(filtered, object.AttributeFileName, file.FileName())

	attributes, err := h.extractAttributes(c, log, filtered)
	if err != nil {
		log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}

	idObj, err := h.uploadObject(c, bkt, attributes, file)
	if err != nil {
		h.handlePutFrostFSErr(c, err, log)
		return
	}
	log.Debug(logs.ObjectUploaded,
		zap.String("oid", idObj.EncodeToString()),
		zap.String("FileName", file.FileName()),
		logs.TagField(logs.TagExternalStorage),
	)

	addr := newAddress(bkt.CID, idObj)
	c.Response.Header.SetContentType(jsonHeader)
	// Try to return the response, otherwise, if something went wrong, throw an error.
	if err = newPutResponse(addr).encode(c); err != nil {
		log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
		return
	}
}
|
|
||||||
|
|
||||||
func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
|
||||||
|
|
||||||
obj := object.New()
|
|
||||||
obj.SetContainerID(bkt.CID)
|
|
||||||
obj.SetOwnerID(*h.ownerID)
|
|
||||||
obj.SetAttributes(attrs...)
|
|
||||||
|
|
||||||
prm := PrmObjectCreate{
|
|
||||||
PrmAuth: PrmAuth{
|
|
||||||
BearerToken: h.fetchBearerToken(ctx),
|
|
||||||
},
|
|
||||||
Object: obj,
|
|
||||||
Payload: file,
|
|
||||||
ClientCut: h.config.ClientCut(),
|
|
||||||
WithoutHomomorphicHash: bkt.HomomorphicHashDisabled,
|
|
||||||
BufferMaxSize: h.config.BufferMaxSizeForPut(),
|
|
||||||
}
|
|
||||||
|
|
||||||
idObj, err := h.frostfs.CreateObject(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return oid.ID{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return idObj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extractAttributes converts filtered header values into object
// attributes. Expiration headers are resolved relative to the client's
// Date header (falling back to server time), and a Timestamp attribute
// is added when enabled in config and not supplied by the client.
func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
	ctx := utils.GetContextFromRequest(c)
	now := time.Now()
	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
				logs.TagField(logs.TagDatapath))
		} else {
			now = parsed
		}
	}
	if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
		return nil, err
	}
	attributes := make([]object.Attribute, 0, len(filtered))
	// prepares attributes from filtered headers
	for key, val := range filtered {
		attribute := newAttribute(key, val)
		attributes = append(attributes, attribute)
	}
	// sets Timestamp attribute if it wasn't set from header and enabled by settings
	// NOTE(review): this uses time.Now() rather than the client-adjusted
	// `now` computed above — confirm whether the asymmetry is intentional.
	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
		timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10))
		attributes = append(attributes, timestamp)
	}

	return attributes, nil
}
|
|
||||||
|
|
||||||
func newAttribute(key string, val string) object.Attribute {
|
|
||||||
attr := object.NewAttribute()
|
|
||||||
attr.SetKey(key)
|
|
||||||
attr.SetValue(val)
|
|
||||||
return *attr
|
|
||||||
}
|
|
||||||
|
|
||||||
// explodeArchive read files from archive and creates objects for each of them.
// Sets FilePath attribute with name from tar.Header.
func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
	c, log := req.RequestCtx, req.log

	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
	defer span.End()
	utils.SetContextToRequest(ctx, c)

	// remove user attributes which vary for each file in archive
	// to guarantee that they won't appear twice
	delete(filtered, object.AttributeFileName)
	delete(filtered, object.AttributeFilePath)

	commonAttributes, err := h.extractAttributes(c, log, filtered)
	if err != nil {
		log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
		return
	}
	attributes := commonAttributes

	reader := file
	// Transparently un-gzip when the client declares a gzipped body.
	if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
		log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
		gzipReader, err := gzip.NewReader(file)
		if err != nil {
			log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
			ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}
		defer func() {
			if err := gzipReader.Close(); err != nil {
				log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
			}
		}()
		reader = gzipReader
	}

	tarReader := tar.NewReader(reader)
	for {
		obj, err := tarReader.Next()
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
			ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
			return
		}

		if isDir(obj.Name) {
			continue
		}

		// set varying attributes: truncate back to the shared prefix,
		// then append this entry's FilePath/FileName.
		attributes = attributes[:len(commonAttributes)]
		fileName := filepath.Base(obj.Name)
		attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
		attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))

		idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
		if err != nil {
			h.handlePutFrostFSErr(c, err, log)
			return
		}

		log.Debug(logs.ObjectUploaded,
			zap.String("oid", idObj.EncodeToString()),
			zap.String("FileName", fileName),
			logs.TagField(logs.TagExternalStorage),
		)
	}
}
|
|
||||||
|
|
||||||
func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
|
|
||||||
statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
|
|
||||||
logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
|
|
||||||
|
|
||||||
log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
|
|
||||||
ResponseError(r, msg, statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
|
|
||||||
if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil {
|
|
||||||
return tkn
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,142 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
|
||||||
sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// request couples a fasthttp request context with a request-scoped logger.
type request struct {
	*fasthttp.RequestCtx
	log *zap.Logger
}
|
|
||||||
|
|
||||||
func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
|
|
||||||
return request{
|
|
||||||
RequestCtx: ctx,
|
|
||||||
log: log,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *request) handleFrostFSErr(err error, start time.Time) {
|
|
||||||
logFields := []zap.Field{
|
|
||||||
zap.Stringer("elapsed", time.Since(start)),
|
|
||||||
zap.Error(err),
|
|
||||||
}
|
|
||||||
statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
|
|
||||||
logFields = append(logFields, additionalFields...)
|
|
||||||
|
|
||||||
r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
|
|
||||||
ResponseError(r.RequestCtx, msg, statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func bearerToken(ctx context.Context) *bearer.Token {
|
|
||||||
if tkn, err := tokens.LoadBearerToken(ctx); err == nil {
|
|
||||||
return tkn
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isDir reports whether a tar entry name denotes a directory
// (empty name or a trailing slash).
func isDir(name string) bool {
	if name == "" {
		return true
	}
	return strings.HasSuffix(name, "/")
}
|
|
||||||
|
|
||||||
func loadAttributes(attrs []object.Attribute) map[string]string {
|
|
||||||
result := make(map[string]string)
|
|
||||||
for _, attr := range attrs {
|
|
||||||
result[attr.Key()] = attr.Value()
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// isValidToken reports whether s is usable as an HTTP token: printable
// ASCII above space, excluding the separator characters.
func isValidToken(s string) bool {
	const separators = "()<>@,;:\\\"/[]?={}"
	for _, r := range s {
		switch {
		case r <= ' ' || r > 127:
			return false
		case strings.ContainsRune(separators, r):
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
// isValidValue reports whether s may appear as an HTTP header value:
// printable ASCII (space allowed) without double quotes.
func isValidValue(s string) bool {
	for _, r := range s {
		switch {
		case r < ' ', r > 127, r == '"':
			// HTTP technically allows more, but we don't want to escape things.
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
// logAndSendBucketError logs a bucket-resolution failure and writes an HTTP
// error to the client: 404 when the container does not exist, 400 with the
// error text for anything else.
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
	log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))

	if client.IsErrContainerNotFound(err) {
		ResponseError(c, "Not Found", fasthttp.StatusNotFound)
		return
	}
	ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
}
|
|
||||||
|
|
||||||
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
|
|
||||||
var addr oid.Address
|
|
||||||
addr.SetContainer(cnr)
|
|
||||||
addr.SetObject(obj)
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// setIfNotExist stores value under key unless the key is already present.
func setIfNotExist(m map[string]string, key, value string) {
	_, present := m[key]
	if present {
		return
	}
	m[key] = value
}
|
|
||||||
|
|
||||||
func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
|
|
||||||
r.Error(msg+"\n", code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// formErrorResponse maps a storage error onto an HTTP status code, a
// client-facing message, and extra log fields:
//   - object access denied      -> 403, denial reason appended and logged
//   - quota limit reached       -> 409
//   - object/container missing  -> 404 with a bare "Not Found" body
//   - anything else             -> 400 with the error text
func formErrorResponse(message string, err error) (int, string, []zap.Field) {
	var (
		msg        string
		statusCode int
		logFields  []zap.Field
	)

	st := new(sdkstatus.ObjectAccessDenied)

	switch {
	case errors.As(err, &st):
		statusCode = fasthttp.StatusForbidden
		reason := st.Reason()
		msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
		logFields = append(logFields, zap.String("error_detail", reason))
	case errors.Is(err, ErrQuotaLimitReached):
		statusCode = fasthttp.StatusConflict
		msg = fmt.Sprintf("%s: %v", message, err)
	case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
		statusCode = fasthttp.StatusNotFound
		msg = "Not Found"
	default:
		statusCode = fasthttp.StatusBadRequest
		msg = fmt.Sprintf("%s: %v", message, err)
	}

	return statusCode, msg, logFields
}
|
|
|
@ -1,24 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TreeService provide interface to interact with tree service using s3 data models.
type TreeService interface {
	// GetLatestVersion returns the latest version node of the named object
	// in the given container.
	GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
	// GetSubTreeByPrefix lists nodes under the given prefix in the bucket's
	// tree; when latestOnly is set, presumably only the latest version of
	// each node is returned — confirm against the implementation.
	GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
	// CheckSettingsNodeExists returns an error when the bucket's settings
	// node cannot be found (see ErrNodeNotFound).
	CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNodeNotFound is returned from Tree service in case of not found error.
|
|
||||||
ErrNodeNotFound = errors.New("not found")
|
|
||||||
|
|
||||||
// ErrNodeAccessDenied is returned from Tree service in case of access denied error.
|
|
||||||
ErrNodeAccessDenied = errors.New("access denied")
|
|
||||||
)
|
|
|
@ -1,131 +0,0 @@
|
||||||
package logs
|
|
||||||
|
|
||||||
import "go.uber.org/zap"
|
|
||||||
|
|
||||||
const (
|
|
||||||
TagFieldName = "tag"
|
|
||||||
|
|
||||||
TagApp = "app"
|
|
||||||
TagDatapath = "datapath"
|
|
||||||
TagExternalStorage = "external_storage"
|
|
||||||
TagExternalStorageTree = "external_storage_tree"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TagField(tag string) zap.Field {
|
|
||||||
return zap.String(TagFieldName, tag)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log messages with the "app" tag.
|
|
||||||
const (
|
|
||||||
ServiceIsRunning = "service is running"
|
|
||||||
ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
|
|
||||||
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
|
|
||||||
ShuttingDownService = "shutting down service"
|
|
||||||
CantShutDownService = "can't shut down service"
|
|
||||||
CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
|
|
||||||
FailedToCreateResolver = "failed to create resolver"
|
|
||||||
FailedToCreateWorkerPool = "failed to create worker pool"
|
|
||||||
StartingApplication = "starting application"
|
|
||||||
StartingServer = "starting server"
|
|
||||||
ListenAndServe = "listen and serve"
|
|
||||||
ShuttingDownWebServer = "shutting down web server"
|
|
||||||
FailedToShutdownTracing = "failed to shutdown tracing"
|
|
||||||
AddedPathUploadCid = "added path /upload/{cid}"
|
|
||||||
AddedPathGetCidOid = "added path /get/{cid}/{oid}"
|
|
||||||
AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
|
|
||||||
AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
|
|
||||||
FailedToAddServer = "failed to add server"
|
|
||||||
AddServer = "add server"
|
|
||||||
NoHealthyServers = "no healthy servers"
|
|
||||||
FailedToInitializeTracing = "failed to initialize tracing"
|
|
||||||
RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
|
|
||||||
RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
|
|
||||||
CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
|
|
||||||
UsingCredentials = "using credentials"
|
|
||||||
FailedToCreateConnectionPool = "failed to create connection pool"
|
|
||||||
FailedToDialConnectionPool = "failed to dial connection pool"
|
|
||||||
FailedToCreateTreePool = "failed to create tree pool"
|
|
||||||
FailedToDialTreePool = "failed to dial tree pool"
|
|
||||||
ServerReconnecting = "reconnecting server..."
|
|
||||||
ServerReconnectedSuccessfully = "server reconnected successfully"
|
|
||||||
ServerReconnectFailed = "failed to reconnect server"
|
|
||||||
MultinetDialSuccess = "multinet dial successful"
|
|
||||||
MultinetDialFail = "multinet dial failed"
|
|
||||||
ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
|
|
||||||
MetricsAreDisabled = "metrics are disabled"
|
|
||||||
NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
|
|
||||||
SIGHUPConfigReloadStarted = "SIGHUP config reload started"
|
|
||||||
FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
|
|
||||||
FailedToReloadConfig = "failed to reload config"
|
|
||||||
FailedToUpdateResolvers = "failed to update resolvers"
|
|
||||||
FailedToReloadServerParameters = "failed to reload server parameters"
|
|
||||||
SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
|
|
||||||
TracingConfigUpdated = "tracing config updated"
|
|
||||||
ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
|
|
||||||
AddedStoragePeer = "added storage peer"
|
|
||||||
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
|
|
||||||
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
|
|
||||||
WarnDuplicateAddress = "duplicate address"
|
|
||||||
FailedToLoadMultinetConfig = "failed to load multinet config"
|
|
||||||
MultinetConfigWontBeUpdated = "multinet config won't be updated"
|
|
||||||
LogLevelWontBeUpdated = "log level won't be updated"
|
|
||||||
TagsLogConfigWontBeUpdated = "tags log config won't be updated"
|
|
||||||
FailedToReadIndexPageTemplate = "failed to read index page template"
|
|
||||||
SetCustomIndexPageTemplate = "set custom index page template"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Log messages with the "datapath" tag.
|
|
||||||
const (
|
|
||||||
CouldntParseCreationDate = "couldn't parse creation date"
|
|
||||||
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
|
|
||||||
FailedToAddObjectToArchive = "failed to add object to archive"
|
|
||||||
CloseZipWriter = "close zip writer"
|
|
||||||
IgnorePartEmptyFormName = "ignore part, empty form name"
|
|
||||||
IgnorePartEmptyFilename = "ignore part, empty filename"
|
|
||||||
CouldNotParseClientTime = "could not parse client time"
|
|
||||||
CouldNotPrepareExpirationHeader = "could not prepare expiration header"
|
|
||||||
CouldNotEncodeResponse = "could not encode response"
|
|
||||||
AddAttributeToResultObject = "add attribute to result object"
|
|
||||||
Request = "request"
|
|
||||||
CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
|
|
||||||
CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
|
|
||||||
FailedToIterateOverResponse = "failed to iterate over search response"
|
|
||||||
InvalidCacheEntryType = "invalid cache entry type"
|
|
||||||
FailedToUnescapeQuery = "failed to unescape query"
|
|
||||||
CouldntCacheNetmap = "couldn't cache netmap"
|
|
||||||
FailedToCloseReader = "failed to close reader"
|
|
||||||
FailedToFilterHeaders = "failed to filter headers"
|
|
||||||
FailedToReadFileFromTar = "failed to read file from tar"
|
|
||||||
FailedToGetAttributes = "failed to get attributes"
|
|
||||||
CloseGzipWriter = "close gzip writer"
|
|
||||||
CloseTarWriter = "close tar writer"
|
|
||||||
FailedToCreateGzipReader = "failed to create gzip reader"
|
|
||||||
GzipReaderSelected = "gzip reader selected"
|
|
||||||
CouldNotReceiveMultipartForm = "could not receive multipart/form"
|
|
||||||
ObjectsNotFound = "objects not found"
|
|
||||||
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
|
|
||||||
CouldNotGetBucket = "could not get bucket"
|
|
||||||
CouldNotResolveContainerID = "could not resolve container id"
|
|
||||||
FailedToSumbitTaskToPool = "failed to submit task to pool"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Log messages with the "external_storage" tag.
|
|
||||||
const (
|
|
||||||
CouldNotReceiveObject = "could not receive object"
|
|
||||||
CouldNotSearchForObjects = "could not search for objects"
|
|
||||||
ObjectNotFound = "object not found"
|
|
||||||
ReadObjectListFailed = "read object list failed"
|
|
||||||
CouldNotStoreFileInFrostfs = "could not store file in frostfs"
|
|
||||||
FailedToHeadObject = "failed to head object"
|
|
||||||
ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
|
|
||||||
FailedToGetObject = "failed to get object"
|
|
||||||
ObjectUploaded = "object uploaded"
|
|
||||||
CouldNotGetContainerInfo = "could not get container info"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Log messages with the "external_storage_tree" tag.
|
|
||||||
const (
|
|
||||||
ObjectWasDeleted = "object was deleted"
|
|
||||||
FailedToGetLatestVersionOfObject = "failed to get latest version of object"
|
|
||||||
FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
|
|
||||||
)
|
|
|
@ -1,68 +0,0 @@
|
||||||
package net
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/netip"
|
|
||||||
"slices"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/multinet"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errEmptySourceIPList = errors.New("empty source IP list")
|
|
||||||
|
|
||||||
type Subnet struct {
|
|
||||||
Prefix string
|
|
||||||
SourceIPs []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
Enabled bool
|
|
||||||
Subnets []Subnet
|
|
||||||
Balancer string
|
|
||||||
Restrict bool
|
|
||||||
FallbackDelay time.Duration
|
|
||||||
EventHandler multinet.EventHandler
|
|
||||||
}
|
|
||||||
|
|
||||||
// toMultinetConfig converts the gateway-level Config into a multinet.Config.
// Every subnet prefix and source IP must parse as a CIDR/address; a subnet
// configured without source IPs is rejected with errEmptySourceIPList.
// The resulting config always uses the package's default keepalive dialer.
func (c Config) toMultinetConfig() (multinet.Config, error) {
	var subnets []multinet.Subnet
	for _, s := range c.Subnets {
		var ms multinet.Subnet
		p, err := netip.ParsePrefix(s.Prefix)
		if err != nil {
			return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
		}
		ms.Prefix = p
		for _, ip := range s.SourceIPs {
			addr, err := netip.ParseAddr(ip)
			if err != nil {
				return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
			}
			ms.SourceIPs = append(ms.SourceIPs, addr)
		}
		if len(ms.SourceIPs) == 0 {
			return multinet.Config{}, errEmptySourceIPList
		}
		subnets = append(subnets, ms)
	}
	return multinet.Config{
		Subnets:       subnets,
		Balancer:      multinet.BalancerType(c.Balancer),
		Restrict:      c.Restrict,
		FallbackDelay: c.FallbackDelay,
		Dialer:        newDefaultDialer(),
		EventHandler:  c.EventHandler,
	}, nil
}
|
|
||||||
|
|
||||||
func (c Config) equals(other Config) bool {
|
|
||||||
return c.Enabled == other.Enabled &&
|
|
||||||
slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
|
|
||||||
return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
|
|
||||||
}) &&
|
|
||||||
c.Balancer == other.Balancer &&
|
|
||||||
c.Restrict == other.Restrict &&
|
|
||||||
c.FallbackDelay == other.FallbackDelay
|
|
||||||
}
|
|
|
@ -1,54 +0,0 @@
|
||||||
// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
|
|
||||||
|
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2014 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package net
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// parseDialTarget returns the network and address to pass to dialer.
// "unix:addr" and "unix://..." targets resolve to the unix network;
// everything else (including unparsable URLs) falls back to tcp with the
// target passed through unchanged.
func parseDialTarget(target string) (string, string) {
	const defaultNet = "tcp"
	firstColon := strings.Index(target, ":")
	schemeSep := strings.Index(target, ":/")

	// "unix:addr" has no slashes after the scheme and would not survive
	// url.Parse, so handle it up front.
	if firstColon >= 0 && schemeSep < 0 && target[:firstColon] == "unix" {
		return "unix", target[firstColon+1:]
	}

	if schemeSep >= 0 {
		u, err := url.Parse(target)
		if err != nil {
			return defaultNet, target
		}
		if u.Scheme == "unix" {
			addr := u.Path
			if addr == "" {
				addr = u.Host
			}
			return u.Scheme, addr
		}
	}
	return defaultNet, target
}
|
|
|
@ -1,36 +0,0 @@
|
||||||
package net
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// newDefaultDialer returns a net.Dialer with the SO_KEEPALIVE socket option
// enabled while keepalive time/interval stay at OS defaults: a negative
// KeepAlive disables Go's own 15s override, and the Control hook sets the
// option directly on the raw socket.
func newDefaultDialer() net.Dialer {
	// From `grpc.WithContextDialer` comment:
	//
	// Note: All supported releases of Go (as of December 2023) override the OS
	// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
	// with OS defaults for keepalive time and interval, use a net.Dialer that sets
	// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
	// option to true from the Control field. For a concrete example of how to do
	// this, see internal.NetDialerWithTCPKeepalive().
	//
	// https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
	//
	// From `internal.NetDialerWithTCPKeepalive` comment:
	//
	// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
	// appropriate Go version becomes less than our least supported Go version, we
	// should look into using the new API to make things more straightforward.
	return net.Dialer{
		KeepAlive: time.Duration(-1),
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				// Error deliberately ignored: keepalive is best-effort here.
				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
		},
	}
}
|
|
|
@ -1,69 +0,0 @@
|
||||||
package net
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/multinet"
|
|
||||||
)
|
|
||||||
|
|
||||||
type DialerSource struct {
|
|
||||||
guard sync.RWMutex
|
|
||||||
|
|
||||||
c Config
|
|
||||||
|
|
||||||
md multinet.Dialer
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDialerSource(c Config) (*DialerSource, error) {
|
|
||||||
result := &DialerSource{}
|
|
||||||
if err := result.build(c); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// build recreates the internal multinet dialer from c and stores the config.
// When multinet is disabled the dialer is dropped and only the config is
// kept. NOTE(review): callers other than NewDialerSource appear to hold
// s.guard for writing — confirm before adding new call sites.
func (s *DialerSource) build(c Config) error {
	if c.Enabled {
		mc, err := c.toMultinetConfig()
		if err != nil {
			return err
		}
		md, err := multinet.NewDialer(mc)
		if err != nil {
			return err
		}
		s.md = md
		s.c = c
		return nil
	}
	s.md = nil
	s.c = c
	return nil
}
|
|
||||||
|
|
||||||
// GrpcContextDialer returns grpc.WithContextDialer func.
// Returns nil if multinet disabled.
//
// NOTE(review): the returned closure reads s.md without re-acquiring guard,
// so a concurrent Update may swap the dialer under it — confirm this is
// intended.
func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
	s.guard.RLock()
	defer s.guard.RUnlock()

	if s.c.Enabled {
		return func(ctx context.Context, address string) (net.Conn, error) {
			// Split the gRPC dial target into network and address
			// (handles "unix:addr" and URL-style targets).
			network, address := parseDialTarget(address)
			return s.md.DialContext(ctx, network, address)
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
func (s *DialerSource) Update(c Config) error {
|
|
||||||
s.guard.Lock()
|
|
||||||
defer s.guard.Unlock()
|
|
||||||
|
|
||||||
if s.c.equals(c) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return s.build(c)
|
|
||||||
}
|
|
|
@ -1,30 +0,0 @@
|
||||||
package net
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type LogEventHandler struct {
|
|
||||||
logger *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err error) {
|
|
||||||
sourceIPString := "undefined"
|
|
||||||
if sourceIP != nil {
|
|
||||||
sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString),
|
|
||||||
zap.String("destination", address), logs.TagField(logs.TagApp))
|
|
||||||
} else {
|
|
||||||
l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString),
|
|
||||||
zap.String("destination", address), logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLogEventHandler(logger *zap.Logger) LogEventHandler {
|
|
||||||
return LogEventHandler{logger: logger}
|
|
||||||
}
|
|
|
@ -1,287 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FrostFS represents virtual connection to the FrostFS network.
|
|
||||||
// It is used to provide an interface to dependent packages
|
|
||||||
// which work with FrostFS.
|
|
||||||
type FrostFS struct {
|
|
||||||
pool *pool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFrostFS creates new FrostFS using provided pool.Pool.
|
|
||||||
func NewFrostFS(p *pool.Pool) *FrostFS {
|
|
||||||
return &FrostFS{
|
|
||||||
pool: p,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container implements frostfs.FrostFS interface method.
// It fetches the container description from storage via the connection
// pool, translating pool errors with handleObjectError.
func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.Container")
	defer span.End()

	prm := pool.PrmContainerGet{
		ContainerID: containerPrm.ContainerID,
	}

	res, err := x.pool.GetContainer(ctx, prm)
	if err != nil {
		return nil, handleObjectError("read container via connection pool", err)
	}

	return &res, nil
}
|
|
||||||
|
|
||||||
// CreateObject implements frostfs.FrostFS interface method.
// It stores the object described by prm via the connection pool and returns
// the ID assigned to it. The bearer token, when present, is attached to the
// put request.
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.CreateObject")
	defer span.End()

	var prmPut pool.PrmObjectPut
	prmPut.SetHeader(*prm.Object)
	prmPut.SetPayload(prm.Payload)
	prmPut.SetClientCut(prm.ClientCut)
	prmPut.WithoutHomomorphicHash(prm.WithoutHomomorphicHash)
	prmPut.SetBufferMaxSize(prm.BufferMaxSize)

	if prm.BearerToken != nil {
		prmPut.UseBearer(*prm.BearerToken)
	}

	idObj, err := x.pool.PutObject(ctx, prmPut)
	if err != nil {
		return oid.ID{}, handleObjectError("save object via connection pool", err)
	}
	return idObj.ObjectID, nil
}
|
|
||||||
|
|
||||||
// wraps io.ReadCloser and transforms Read errors related to access violation
|
|
||||||
// to frostfs.ErrAccessDenied.
|
|
||||||
type payloadReader struct {
|
|
||||||
io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x payloadReader) Read(p []byte) (int, error) {
|
|
||||||
n, err := x.ReadCloser.Read(p)
|
|
||||||
if err != nil && errors.Is(err, io.EOF) {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
return n, handleObjectError("read payload", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadObject implements frostfs.FrostFS interface method.
// It reads only the object header for the given address via the connection
// pool; the bearer token, when present, is attached to the request.
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.HeadObject")
	defer span.End()

	var prmHead pool.PrmObjectHead
	prmHead.SetAddress(prm.Address)

	if prm.BearerToken != nil {
		prmHead.UseBearer(*prm.BearerToken)
	}

	res, err := x.pool.HeadObject(ctx, prmHead)
	if err != nil {
		return nil, handleObjectError("read object header via connection pool", err)
	}

	return &res, nil
}
|
|
||||||
|
|
||||||
// GetObject implements frostfs.FrostFS interface method.
// It starts a full object read via the connection pool and returns the
// header together with the payload stream; the caller is responsible for
// consuming/closing the payload.
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetObject")
	defer span.End()

	var prmGet pool.PrmObjectGet
	prmGet.SetAddress(prm.Address)

	if prm.BearerToken != nil {
		prmGet.UseBearer(*prm.BearerToken)
	}

	res, err := x.pool.GetObject(ctx, prmGet)
	if err != nil {
		return nil, handleObjectError("init full object reading via connection pool", err)
	}

	return &handler.Object{
		Header:  res.Header,
		Payload: res.Payload,
	}, nil
}
|
|
||||||
|
|
||||||
// RangeObject implements frostfs.FrostFS interface method.
// It starts a payload range read (PayloadRange[0] = offset,
// PayloadRange[1] = length) and wraps the stream in payloadReader so read
// errors are translated consistently.
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.RangeObject")
	defer span.End()

	var prmRange pool.PrmObjectRange
	prmRange.SetAddress(prm.Address)
	prmRange.SetOffset(prm.PayloadRange[0])
	prmRange.SetLength(prm.PayloadRange[1])

	if prm.BearerToken != nil {
		prmRange.UseBearer(*prm.BearerToken)
	}

	res, err := x.pool.ObjectRange(ctx, prmRange)
	if err != nil {
		return nil, handleObjectError("init payload range reading via connection pool", err)
	}

	return payloadReader{&res}, nil
}
|
|
||||||
|
|
||||||
// SearchObjects implements frostfs.FrostFS interface method.
// It starts an object search over the given container with the supplied
// filters and returns the result iterator.
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SearchObjects")
	defer span.End()

	var prmSearch pool.PrmObjectSearch
	prmSearch.SetContainerID(prm.Container)
	prmSearch.SetFilters(prm.Filters)

	if prm.BearerToken != nil {
		prmSearch.UseBearer(*prm.BearerToken)
	}

	res, err := x.pool.SearchObjects(ctx, prmSearch)
	if err != nil {
		return nil, handleObjectError("init object search via connection pool", err)
	}

	return &res, nil
}
|
|
||||||
|
|
||||||
// GetEpochDurations implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetEpochDurations")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
networkInfo, err := x.pool.NetworkInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
res := &utils.EpochDurations{
|
|
||||||
CurrentEpoch: networkInfo.CurrentEpoch(),
|
|
||||||
MsPerBlock: networkInfo.MsPerBlock(),
|
|
||||||
BlockPerEpoch: networkInfo.EpochDuration(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if res.BlockPerEpoch == 0 {
|
|
||||||
return nil, fmt.Errorf("EpochDuration is empty")
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetmapSnapshot returns the current network map snapshot from storage,
// translating pool errors with handleObjectError.
func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.NetmapSnapshot")
	defer span.End()

	netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
	if err != nil {
		return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
	}

	return netmapSnapshot, nil
}
|
|
||||||
|
|
||||||
// ResolverFrostFS represents virtual connection to the FrostFS network.
|
|
||||||
// It implements resolver.FrostFS.
|
|
||||||
type ResolverFrostFS struct {
|
|
||||||
pool *pool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewResolverFrostFS creates new ResolverFrostFS using provided pool.Pool.
|
|
||||||
func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
|
|
||||||
return &ResolverFrostFS{pool: p}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SystemDNS implements resolver.FrostFS interface method.
// It reads the raw "SystemDNS" network parameter from the current network
// info and returns it as a string; an absent parameter is an error.
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SystemDNS")
	defer span.End()

	networkInfo, err := x.pool.NetworkInfo(ctx)
	if err != nil {
		return "", handleObjectError("read network info via client", err)
	}

	domain := networkInfo.RawNetworkParameter("SystemDNS")
	if domain == nil {
		return "", errors.New("system DNS parameter not found or empty")
	}

	return string(domain), nil
}
|
|
||||||
|
|
||||||
// handleObjectError translates storage-layer errors into gateway errors:
//   - access denied with a "limit reached" reason -> handler.ErrQuotaLimitReached
//   - any other access denied                     -> handler.ErrAccessDenied
//   - timeouts (text, context, or gRPC code)      -> handler.ErrGatewayTimeout
//   - anything else is wrapped with msg as context
//
// A nil err yields nil.
func handleObjectError(msg string, err error) error {
	if err == nil {
		return nil
	}

	if reason, ok := IsErrObjectAccessDenied(err); ok {
		if strings.Contains(reason, "limit reached") {
			return fmt.Errorf("%s: %w: %s", msg, handler.ErrQuotaLimitReached, reason)
		}

		return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
	}

	if IsTimeoutError(err) {
		return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
	}

	return fmt.Errorf("%s: %w", msg, err)
}
|
|
||||||
|
|
||||||
func UnwrapErr(err error) error {
|
|
||||||
unwrappedErr := errors.Unwrap(err)
|
|
||||||
for unwrappedErr != nil {
|
|
||||||
err = unwrappedErr
|
|
||||||
unwrappedErr = errors.Unwrap(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsErrObjectAccessDenied(err error) (string, bool) {
|
|
||||||
err = UnwrapErr(err)
|
|
||||||
switch err := err.(type) {
|
|
||||||
default:
|
|
||||||
return "", false
|
|
||||||
case *apistatus.ObjectAccessDenied:
|
|
||||||
return err.Reason(), true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsTimeoutError(err error) bool {
|
|
||||||
if strings.Contains(err.Error(), "timeout") ||
|
|
||||||
errors.Is(err, context.DeadlineExceeded) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded
|
|
||||||
}
|
|
|
@ -1,83 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestHandleObjectError(t *testing.T) {
|
|
||||||
msg := "some msg"
|
|
||||||
|
|
||||||
t.Run("nil error", func(t *testing.T) {
|
|
||||||
err := handleObjectError(msg, nil)
|
|
||||||
require.Nil(t, err)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("simple access denied", func(t *testing.T) {
|
|
||||||
reason := "some reason"
|
|
||||||
inputErr := new(apistatus.ObjectAccessDenied)
|
|
||||||
inputErr.WriteReason(reason)
|
|
||||||
|
|
||||||
err := handleObjectError(msg, inputErr)
|
|
||||||
require.ErrorIs(t, err, handler.ErrAccessDenied)
|
|
||||||
require.Contains(t, err.Error(), reason)
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("access denied - quota reached", func(t *testing.T) {
|
|
||||||
reason := "Quota limit reached"
|
|
||||||
inputErr := new(apistatus.ObjectAccessDenied)
|
|
||||||
inputErr.WriteReason(reason)
|
|
||||||
|
|
||||||
err := handleObjectError(msg, inputErr)
|
|
||||||
require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
|
|
||||||
require.Contains(t, err.Error(), reason)
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("simple timeout", func(t *testing.T) {
|
|
||||||
inputErr := errors.New("timeout")
|
|
||||||
|
|
||||||
err := handleObjectError(msg, inputErr)
|
|
||||||
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
|
|
||||||
require.Contains(t, err.Error(), inputErr.Error())
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("deadline exceeded", func(t *testing.T) {
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
|
|
||||||
defer cancel()
|
|
||||||
<-ctx.Done()
|
|
||||||
|
|
||||||
err := handleObjectError(msg, ctx.Err())
|
|
||||||
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
|
|
||||||
require.Contains(t, err.Error(), ctx.Err().Error())
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("grpc deadline exceeded", func(t *testing.T) {
|
|
||||||
inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
|
|
||||||
|
|
||||||
err := handleObjectError(msg, inputErr)
|
|
||||||
require.ErrorIs(t, err, handler.ErrGatewayTimeout)
|
|
||||||
require.Contains(t, err.Error(), inputErr.Error())
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("unknown error", func(t *testing.T) {
|
|
||||||
inputErr := errors.New("unknown error")
|
|
||||||
|
|
||||||
err := handleObjectError(msg, inputErr)
|
|
||||||
require.ErrorIs(t, err, inputErr)
|
|
||||||
require.Contains(t, err.Error(), msg)
|
|
||||||
})
|
|
||||||
}
|
|
|
@ -1,248 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PartInfo is upload information about part.
|
|
||||||
type PartInfo struct {
|
|
||||||
Key string `json:"key"`
|
|
||||||
UploadID string `json:"uploadId"`
|
|
||||||
Number int `json:"number"`
|
|
||||||
OID oid.ID `json:"oid"`
|
|
||||||
Size uint64 `json:"size"`
|
|
||||||
ETag string `json:"etag"`
|
|
||||||
MD5 string `json:"md5"`
|
|
||||||
Created time.Time `json:"created"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type GetFrostFSParams struct {
|
|
||||||
// payload range
|
|
||||||
Off, Ln uint64
|
|
||||||
Addr oid.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
type PartObj struct {
|
|
||||||
OID oid.ID
|
|
||||||
Size uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type readerInitiator interface {
|
|
||||||
InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiObjectReader implements io.Reader of payloads of the object list stored in the FrostFS network.
|
|
||||||
type MultiObjectReader struct {
|
|
||||||
ctx context.Context
|
|
||||||
|
|
||||||
layer readerInitiator
|
|
||||||
|
|
||||||
startPartOffset uint64
|
|
||||||
endPartLength uint64
|
|
||||||
|
|
||||||
prm GetFrostFSParams
|
|
||||||
|
|
||||||
curIndex int
|
|
||||||
curReader io.ReadCloser
|
|
||||||
|
|
||||||
parts []PartObj
|
|
||||||
}
|
|
||||||
|
|
||||||
type MultiObjectReaderConfig struct {
|
|
||||||
Initiator readerInitiator
|
|
||||||
|
|
||||||
// the offset of complete object and total size to read
|
|
||||||
Off, Ln uint64
|
|
||||||
|
|
||||||
Addr oid.Address
|
|
||||||
Parts []PartObj
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
errOffsetIsOutOfRange = errors.New("offset is out of payload range")
|
|
||||||
errLengthIsOutOfRange = errors.New("length is out of payload range")
|
|
||||||
errEmptyPartsList = errors.New("empty parts list")
|
|
||||||
errorZeroRangeLength = errors.New("zero range length")
|
|
||||||
)
|
|
||||||
|
|
||||||
func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitMultiObjectReader")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
|
|
||||||
PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
|
|
||||||
Address: p.Addr,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("get combined object '%s': %w", p.Addr.Object().EncodeToString(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var parts []*PartInfo
|
|
||||||
if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
|
|
||||||
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
objParts := make([]PartObj, len(parts))
|
|
||||||
for i, part := range parts {
|
|
||||||
objParts[i] = PartObj{
|
|
||||||
OID: part.OID,
|
|
||||||
Size: part.Size,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return NewMultiObjectReader(ctx, MultiObjectReaderConfig{
|
|
||||||
Initiator: x,
|
|
||||||
Off: p.Off,
|
|
||||||
Ln: p.Ln,
|
|
||||||
Parts: objParts,
|
|
||||||
Addr: p.Addr,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewMultiObjectReader(ctx context.Context, cfg MultiObjectReaderConfig) (*MultiObjectReader, error) {
|
|
||||||
if len(cfg.Parts) == 0 {
|
|
||||||
return nil, errEmptyPartsList
|
|
||||||
}
|
|
||||||
|
|
||||||
r := &MultiObjectReader{
|
|
||||||
ctx: ctx,
|
|
||||||
layer: cfg.Initiator,
|
|
||||||
prm: GetFrostFSParams{
|
|
||||||
Addr: cfg.Addr,
|
|
||||||
},
|
|
||||||
parts: cfg.Parts,
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Off+cfg.Ln == 0 {
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if cfg.Off > 0 && cfg.Ln == 0 {
|
|
||||||
return nil, errorZeroRangeLength
|
|
||||||
}
|
|
||||||
|
|
||||||
startPartIndex, startPartOffset := findStartPart(cfg)
|
|
||||||
if startPartIndex == -1 {
|
|
||||||
return nil, errOffsetIsOutOfRange
|
|
||||||
}
|
|
||||||
r.startPartOffset = startPartOffset
|
|
||||||
|
|
||||||
endPartIndex, endPartLength := findEndPart(cfg)
|
|
||||||
if endPartIndex == -1 {
|
|
||||||
return nil, errLengthIsOutOfRange
|
|
||||||
}
|
|
||||||
r.endPartLength = endPartLength
|
|
||||||
|
|
||||||
r.parts = cfg.Parts[startPartIndex : endPartIndex+1]
|
|
||||||
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func findStartPart(cfg MultiObjectReaderConfig) (index int, offset uint64) {
|
|
||||||
position := cfg.Off
|
|
||||||
for i, part := range cfg.Parts {
|
|
||||||
// Strict inequality when searching for start position to avoid reading zero length part.
|
|
||||||
if position < part.Size {
|
|
||||||
return i, position
|
|
||||||
}
|
|
||||||
position -= part.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func findEndPart(cfg MultiObjectReaderConfig) (index int, length uint64) {
|
|
||||||
position := cfg.Off + cfg.Ln
|
|
||||||
for i, part := range cfg.Parts {
|
|
||||||
// Non-strict inequality when searching for end position to avoid out of payload range error.
|
|
||||||
if position <= part.Size {
|
|
||||||
return i, position
|
|
||||||
}
|
|
||||||
position -= part.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
|
|
||||||
if x.curReader != nil {
|
|
||||||
n, err = x.curReader.Read(p)
|
|
||||||
if err != nil {
|
|
||||||
if closeErr := x.curReader.Close(); closeErr != nil {
|
|
||||||
return n, fmt.Errorf("%w (close err: %v)", err, closeErr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !errors.Is(err, io.EOF) {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
x.curIndex++
|
|
||||||
}
|
|
||||||
|
|
||||||
if x.curIndex == len(x.parts) {
|
|
||||||
return n, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
x.prm.Addr.SetObject(x.parts[x.curIndex].OID)
|
|
||||||
|
|
||||||
if x.curIndex == 0 {
|
|
||||||
x.prm.Off = x.startPartOffset
|
|
||||||
x.prm.Ln = x.parts[x.curIndex].Size - x.startPartOffset
|
|
||||||
}
|
|
||||||
|
|
||||||
if x.curIndex == len(x.parts)-1 {
|
|
||||||
x.prm.Ln = x.endPartLength - x.prm.Off
|
|
||||||
}
|
|
||||||
|
|
||||||
x.curReader, err = x.layer.InitFrostFSObjectPayloadReader(x.ctx, x.prm)
|
|
||||||
if err != nil {
|
|
||||||
return n, fmt.Errorf("init payload reader for the next part: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
x.prm.Off = 0
|
|
||||||
x.prm.Ln = 0
|
|
||||||
|
|
||||||
next, err := x.Read(p[n:])
|
|
||||||
|
|
||||||
return n + next, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
|
|
||||||
// Zero range corresponds to full payload (panics if only offset is set).
|
|
||||||
func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitFrostFSObjectPayloadReader")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
var prmAuth handler.PrmAuth
|
|
||||||
|
|
||||||
if p.Off+p.Ln != 0 {
|
|
||||||
prm := handler.PrmObjectRange{
|
|
||||||
PrmAuth: prmAuth,
|
|
||||||
PayloadRange: [2]uint64{p.Off, p.Ln},
|
|
||||||
Address: p.Addr,
|
|
||||||
}
|
|
||||||
|
|
||||||
return x.RangeObject(ctx, prm)
|
|
||||||
}
|
|
||||||
|
|
||||||
prm := handler.PrmObjectGet{
|
|
||||||
PrmAuth: prmAuth,
|
|
||||||
Address: p.Addr,
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := x.GetObject(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return res.Payload, nil
|
|
||||||
}
|
|
|
@ -1,137 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
type readerInitiatorMock struct {
|
|
||||||
parts map[oid.ID][]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readerInitiatorMock) InitFrostFSObjectPayloadReader(_ context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
|
|
||||||
partPayload, ok := r.parts[p.Addr.Object()]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("part not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Off+p.Ln == 0 {
|
|
||||||
return io.NopCloser(bytes.NewReader(partPayload)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Off > uint64(len(partPayload)-1) {
|
|
||||||
return nil, fmt.Errorf("invalid offset: %d/%d", p.Off, len(partPayload))
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Off+p.Ln > uint64(len(partPayload)) {
|
|
||||||
return nil, fmt.Errorf("invalid range: %d-%d/%d", p.Off, p.Off+p.Ln, len(partPayload))
|
|
||||||
}
|
|
||||||
|
|
||||||
return io.NopCloser(bytes.NewReader(partPayload[p.Off : p.Off+p.Ln])), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareDataReader() ([]byte, []PartObj, *readerInitiatorMock) {
|
|
||||||
mockInitReader := &readerInitiatorMock{
|
|
||||||
parts: map[oid.ID][]byte{
|
|
||||||
oidtest.ID(): []byte("first part 1"),
|
|
||||||
oidtest.ID(): []byte("second part 2"),
|
|
||||||
oidtest.ID(): []byte("third part 3"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
var fullPayload []byte
|
|
||||||
parts := make([]PartObj, 0, len(mockInitReader.parts))
|
|
||||||
for id, payload := range mockInitReader.parts {
|
|
||||||
parts = append(parts, PartObj{OID: id, Size: uint64(len(payload))})
|
|
||||||
fullPayload = append(fullPayload, payload...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fullPayload, parts, mockInitReader
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMultiReader(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
fullPayload, parts, mockInitReader := prepareDataReader()
|
|
||||||
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
off uint64
|
|
||||||
ln uint64
|
|
||||||
err error
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "simple read all",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "simple read with length",
|
|
||||||
ln: uint64(len(fullPayload)),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "middle of parts",
|
|
||||||
off: parts[0].Size + 2,
|
|
||||||
ln: 4,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "first and second",
|
|
||||||
off: parts[0].Size - 4,
|
|
||||||
ln: 8,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "first and third",
|
|
||||||
off: parts[0].Size - 4,
|
|
||||||
ln: parts[1].Size + 8,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "second part",
|
|
||||||
off: parts[0].Size,
|
|
||||||
ln: parts[1].Size,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "second and third",
|
|
||||||
off: parts[0].Size,
|
|
||||||
ln: parts[1].Size + parts[2].Size,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "offset out of range",
|
|
||||||
off: uint64(len(fullPayload) + 1),
|
|
||||||
ln: 1,
|
|
||||||
err: errOffsetIsOutOfRange,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "zero length",
|
|
||||||
off: parts[1].Size + 1,
|
|
||||||
ln: 0,
|
|
||||||
err: errorZeroRangeLength,
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
multiReader, err := NewMultiObjectReader(ctx, MultiObjectReaderConfig{
|
|
||||||
Initiator: mockInitReader,
|
|
||||||
Parts: parts,
|
|
||||||
Off: tc.off,
|
|
||||||
Ln: tc.ln,
|
|
||||||
})
|
|
||||||
require.ErrorIs(t, err, tc.err)
|
|
||||||
|
|
||||||
if tc.err == nil {
|
|
||||||
off := tc.off
|
|
||||||
ln := tc.ln
|
|
||||||
if off+ln == 0 {
|
|
||||||
ln = uint64(len(fullPayload))
|
|
||||||
}
|
|
||||||
data, err := io.ReadAll(multiReader)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, fullPayload[off:off+ln], data)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,69 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Source struct {
|
|
||||||
frostFS *FrostFS
|
|
||||||
netmapCache *cache.NetmapCache
|
|
||||||
bucketCache *cache.BucketCache
|
|
||||||
log *zap.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewSource(frostFS *FrostFS, netmapCache *cache.NetmapCache, bucketCache *cache.BucketCache, log *zap.Logger) *Source {
|
|
||||||
return &Source{
|
|
||||||
frostFS: frostFS,
|
|
||||||
netmapCache: netmapCache,
|
|
||||||
bucketCache: bucketCache,
|
|
||||||
log: log,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
|
|
||||||
cachedNetmap := s.netmapCache.Get()
|
|
||||||
if cachedNetmap != nil {
|
|
||||||
return *cachedNetmap, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
netmapSnapshot, err := s.frostFS.NetmapSnapshot(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return netmap.NetMap{}, fmt.Errorf("get netmap: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = s.netmapCache.Put(netmapSnapshot); err != nil {
|
|
||||||
s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
|
||||||
|
|
||||||
return netmapSnapshot, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Source) PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error) {
|
|
||||||
info := s.bucketCache.GetByCID(cnrID)
|
|
||||||
if info != nil {
|
|
||||||
return info.PlacementPolicy, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
prm := handler.PrmContainer{
|
|
||||||
ContainerID: cnrID,
|
|
||||||
}
|
|
||||||
res, err := s.frostFS.Container(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return netmap.PlacementPolicy{}, fmt.Errorf("get container: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We don't put container back to the cache to keep cache
|
|
||||||
// coherent to the requests made by users. FrostFS Source
|
|
||||||
// is being used by SDK Tree Pool and it should not fill cache
|
|
||||||
// with possibly irrelevant container values.
|
|
||||||
|
|
||||||
return res.PlacementPolicy(), nil
|
|
||||||
}
|
|
|
@ -1,170 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
|
||||||
apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
|
|
||||||
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
|
|
||||||
)
|
|
||||||
|
|
||||||
type GetNodeByPathResponseInfoWrapper struct {
|
|
||||||
response *apitree.GetNodeByPathResponseInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
|
|
||||||
return []uint64{n.response.GetNodeID()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
|
|
||||||
return []uint64{n.response.GetParentID()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
|
|
||||||
return []uint64{n.response.GetTimestamp()}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
|
|
||||||
res := make([]tree.Meta, len(n.response.GetMeta()))
|
|
||||||
for i, value := range n.response.GetMeta() {
|
|
||||||
res[i] = value
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
type PoolWrapper struct {
|
|
||||||
p *treepool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
|
|
||||||
return &PoolWrapper{p: p}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetNodes")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
poolPrm := treepool.GetNodesParams{
|
|
||||||
CID: prm.CnrID,
|
|
||||||
TreeID: prm.TreeID,
|
|
||||||
Path: prm.Path,
|
|
||||||
Meta: prm.Meta,
|
|
||||||
PathAttribute: tree.FileNameKey,
|
|
||||||
LatestOnly: prm.LatestOnly,
|
|
||||||
AllAttrs: prm.AllAttrs,
|
|
||||||
BearerToken: getBearer(ctx),
|
|
||||||
}
|
|
||||||
|
|
||||||
nodes, err := w.p.GetNodes(ctx, poolPrm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
res := make([]tree.NodeResponse, len(nodes))
|
|
||||||
for i, info := range nodes {
|
|
||||||
res[i] = GetNodeByPathResponseInfoWrapper{info}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getBearer(ctx context.Context) []byte {
|
|
||||||
token, err := tokens.LoadBearerToken(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return token.Marshal()
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleError(err error) error {
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if errors.Is(err, treepool.ErrNodeNotFound) {
|
|
||||||
return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
|
|
||||||
}
|
|
||||||
if errors.Is(err, treepool.ErrNodeAccessDenied) {
|
|
||||||
return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
|
|
||||||
ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetSubTree")
|
|
||||||
defer span.End()
|
|
||||||
|
|
||||||
order := treepool.NoneOrder
|
|
||||||
if sort {
|
|
||||||
order = treepool.AscendingOrder
|
|
||||||
}
|
|
||||||
poolPrm := treepool.GetSubTreeParams{
|
|
||||||
CID: bktInfo.CID,
|
|
||||||
TreeID: treeID,
|
|
||||||
RootID: rootID,
|
|
||||||
Depth: depth,
|
|
||||||
BearerToken: getBearer(ctx),
|
|
||||||
Order: order,
|
|
||||||
}
|
|
||||||
if len(rootID) == 1 && rootID[0] == 0 {
|
|
||||||
// storage node interprets 'nil' value as []uint64{0}
|
|
||||||
// gate wants to send 'nil' value instead of []uint64{0}, because
|
|
||||||
// it provides compatibility with previous tree service api where
|
|
||||||
// single uint64(0) value is dropped from signature
|
|
||||||
poolPrm.RootID = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var subtree []tree.NodeResponse
|
|
||||||
|
|
||||||
node, err := subTreeReader.Next()
|
|
||||||
for err == nil {
|
|
||||||
subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
|
|
||||||
node, err = subTreeReader.Next()
|
|
||||||
}
|
|
||||||
if err != io.EOF {
|
|
||||||
return nil, handleError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return subtree, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type GetSubTreeResponseBodyWrapper struct {
|
|
||||||
response *apitree.GetSubTreeResponseBody
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
|
|
||||||
return n.response.GetNodeID()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
|
|
||||||
resp := n.response.GetParentID()
|
|
||||||
if resp == nil {
|
|
||||||
// storage sends nil that should be interpreted as []uint64{0}
|
|
||||||
// due to protobuf compatibility, see 'GetSubTree' function
|
|
||||||
return []uint64{0}
|
|
||||||
}
|
|
||||||
return resp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
|
|
||||||
return n.response.GetTimestamp()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
|
|
||||||
res := make([]tree.Meta, len(n.response.GetMeta()))
|
|
||||||
for i, value := range n.response.GetMeta() {
|
|
||||||
res[i] = value
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
|
@ -1,112 +0,0 @@
|
||||||
{{$container := .Container}}
|
|
||||||
{{ $prefix := trimPrefix .Prefix }}
|
|
||||||
<!DOCTYPE html>
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="UTF-8"/>
|
|
||||||
    <title>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</title>
|
|
||||||
<style>
|
|
||||||
.alert {
|
|
||||||
width: 80%;
|
|
||||||
box-sizing: border-box;
|
|
||||||
padding: 20px;
|
|
||||||
background-color: #f44336;
|
|
||||||
color: white;
|
|
||||||
margin-bottom: 15px;
|
|
||||||
}
|
|
||||||
table {
|
|
||||||
width: 80%;
|
|
||||||
border-collapse: collapse;
|
|
||||||
}
|
|
||||||
body {
|
|
||||||
background: #f2f2f2;
|
|
||||||
}
|
|
||||||
table, th, td {
|
|
||||||
border: 0 solid transparent;
|
|
||||||
}
|
|
||||||
th, td {
|
|
||||||
padding: 10px;
|
|
||||||
text-align: left;
|
|
||||||
}
|
|
||||||
th {
|
|
||||||
background-color: #c3bcbc;
|
|
||||||
}
|
|
||||||
|
|
||||||
h1 {
|
|
||||||
font-size: 1.5em;
|
|
||||||
}
|
|
||||||
tr:nth-child(even) {background-color: #ebe7e7;}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
|
|
||||||
{{ if .HasErrors }}
|
|
||||||
<div class="alert">
|
|
||||||
Errors occurred while processing the request. Perhaps some objects are missing
|
|
||||||
</div>
|
|
||||||
{{ end }}
|
|
||||||
<table>
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Filename</th>
|
|
||||||
<th>OID</th>
|
|
||||||
<th>Size</th>
|
|
||||||
<th>Created</th>
|
|
||||||
<th>Download</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{ $trimmedPrefix := trimPrefix $prefix }}
|
|
||||||
{{if $trimmedPrefix }}
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
|
|
||||||
</td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
</tr>
|
|
||||||
{{else}}
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
⮐<a href="/get/{{$container}}/">..</a>
|
|
||||||
</td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
<td></td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{range .Objects}}
|
|
||||||
<tr>
|
|
||||||
<td>
|
|
||||||
{{if .IsDir}}
|
|
||||||
🗀
|
|
||||||
<a href="{{.GetURL}}/">
|
|
||||||
{{.FileName}}/
|
|
||||||
</a>
|
|
||||||
{{else}}
|
|
||||||
🗎
|
|
||||||
<a href="{{ .GetURL }}">
|
|
||||||
{{.FileName}}
|
|
||||||
</a>
|
|
||||||
{{end}}
|
|
||||||
</td>
|
|
||||||
<td>{{.OID}}</td>
|
|
||||||
<td>{{if not .IsDir}}{{ formatSize .Size }}{{end}}</td>
|
|
||||||
<td>{{ .Created }}</td>
|
|
||||||
<td>
|
|
||||||
{{ if .OID }}
|
|
||||||
<a href="{{ .GetURL }}?download=true">
|
|
||||||
Link
|
|
||||||
</a>
|
|
||||||
{{ end }}
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</body>
|
|
||||||
</html>
|
|
|
@ -1,6 +0,0 @@
|
||||||
package templates
|
|
||||||
|
|
||||||
import _ "embed"
|
|
||||||
|
|
||||||
//go:embed index.gotmpl
|
|
||||||
var DefaultIndexTemplate string
|
|
168
metrics/desc.go
168
metrics/desc.go
|
@ -1,168 +0,0 @@
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
)
|
|
||||||
|
|
||||||
var appMetricsDesc = map[string]map[string]Description{
|
|
||||||
poolSubsystem: {
|
|
||||||
overallErrorsMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: overallErrorsMetric,
|
|
||||||
Help: "Total number of errors in pool",
|
|
||||||
},
|
|
||||||
overallNodeErrorsMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: overallNodeErrorsMetric,
|
|
||||||
Help: "Total number of errors for connection in pool",
|
|
||||||
VariableLabels: []string{"node"},
|
|
||||||
},
|
|
||||||
overallNodeRequestsMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: overallNodeRequestsMetric,
|
|
||||||
Help: "Total number of requests to specific node in pool",
|
|
||||||
VariableLabels: []string{"node"},
|
|
||||||
},
|
|
||||||
currentErrorMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: currentErrorMetric,
|
|
||||||
Help: "Number of errors on current connections that will be reset after the threshold",
|
|
||||||
VariableLabels: []string{"node"},
|
|
||||||
},
|
|
||||||
avgRequestDurationMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: poolSubsystem,
|
|
||||||
Name: avgRequestDurationMetric,
|
|
||||||
Help: "Average request duration (in milliseconds) for specific method on node in pool",
|
|
||||||
VariableLabels: []string{"node", "method"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
stateSubsystem: {
|
|
||||||
healthMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: stateSubsystem,
|
|
||||||
Name: healthMetric,
|
|
||||||
Help: "Current HTTP gateway state",
|
|
||||||
},
|
|
||||||
versionInfoMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: stateSubsystem,
|
|
||||||
Name: versionInfoMetric,
|
|
||||||
Help: "Version of current FrostFS HTTP Gate instance",
|
|
||||||
VariableLabels: []string{"version"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
serverSubsystem: {
|
|
||||||
healthMetric: Description{
|
|
||||||
Type: dto.MetricType_GAUGE,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: serverSubsystem,
|
|
||||||
Name: healthMetric,
|
|
||||||
Help: "HTTP Server endpoint health",
|
|
||||||
VariableLabels: []string{"endpoint"},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
statisticSubsystem: {
|
|
||||||
droppedLogs: Description{
|
|
||||||
Type: dto.MetricType_COUNTER,
|
|
||||||
Namespace: namespace,
|
|
||||||
Subsystem: statisticSubsystem,
|
|
||||||
Name: droppedLogs,
|
|
||||||
Help: "Dropped logs (by sampling) count",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type Description struct {
|
|
||||||
Type dto.MetricType
|
|
||||||
Namespace string
|
|
||||||
Subsystem string
|
|
||||||
Name string
|
|
||||||
Help string
|
|
||||||
ConstantLabels prometheus.Labels
|
|
||||||
VariableLabels []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Description) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(&struct {
|
|
||||||
Type string `json:"type"`
|
|
||||||
FQName string `json:"name"`
|
|
||||||
Help string `json:"help"`
|
|
||||||
ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"`
|
|
||||||
VariableLabels []string `json:"variable_labels,omitempty"`
|
|
||||||
}{
|
|
||||||
Type: d.Type.String(),
|
|
||||||
FQName: d.BuildFQName(),
|
|
||||||
Help: d.Help,
|
|
||||||
ConstantLabels: d.ConstantLabels,
|
|
||||||
VariableLabels: d.VariableLabels,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Description) BuildFQName() string {
|
|
||||||
return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DescribeAll returns descriptions for metrics.
|
|
||||||
func DescribeAll() []Description {
|
|
||||||
var list []Description
|
|
||||||
for _, m := range appMetricsDesc {
|
|
||||||
for _, description := range m {
|
|
||||||
list = append(list, description)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
func newOpts(description Description) prometheus.Opts {
|
|
||||||
return prometheus.Opts{
|
|
||||||
Namespace: description.Namespace,
|
|
||||||
Subsystem: description.Subsystem,
|
|
||||||
Name: description.Name,
|
|
||||||
Help: description.Help,
|
|
||||||
ConstLabels: description.ConstantLabels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustNewGauge(description Description) prometheus.Gauge {
|
|
||||||
if description.Type != dto.MetricType_GAUGE {
|
|
||||||
panic("invalid metric type")
|
|
||||||
}
|
|
||||||
return prometheus.NewGauge(
|
|
||||||
prometheus.GaugeOpts(newOpts(description)),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
|
|
||||||
if description.Type != dto.MetricType_GAUGE {
|
|
||||||
panic("invalid metric type")
|
|
||||||
}
|
|
||||||
return prometheus.NewGaugeVec(
|
|
||||||
prometheus.GaugeOpts(newOpts(description)),
|
|
||||||
description.VariableLabels,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustNewCounter(description Description) prometheus.Counter {
|
|
||||||
if description.Type != dto.MetricType_COUNTER {
|
|
||||||
panic("invalid metric type")
|
|
||||||
}
|
|
||||||
return prometheus.NewCounter(
|
|
||||||
prometheus.CounterOpts(newOpts(description)),
|
|
||||||
)
|
|
||||||
}
|
|
|
@ -1,37 +0,0 @@
|
||||||
//go:build dump_metrics
|
|
||||||
|
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// mock is a no-op StatisticScraper used only to instantiate GateMetrics
// in this dump test.
type mock struct{}

// Statistic returns an empty pool statistic.
func (m mock) Statistic() pool.Statistic {
	return pool.Statistic{}
}
|
|
||||||
|
|
||||||
// metricsPath is the destination file for the exported metrics description.
var metricsPath = flag.String("out", "", "File to export http gateway metrics to.")
|
|
||||||
|
|
||||||
func TestDescribeAll(t *testing.T) {
|
|
||||||
// to check correct metrics type mapping
|
|
||||||
_ = NewGateMetrics(mock{})
|
|
||||||
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description")
|
|
||||||
|
|
||||||
desc := DescribeAll()
|
|
||||||
data, err := json.Marshal(desc)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = os.WriteFile(*metricsPath, data, 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
|
@ -1,272 +0,0 @@
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
|
||||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// namespace is the common prefix of every gateway metric name.
	namespace          = "frostfs_http_gw"
	stateSubsystem     = "state"     // application state metrics
	poolSubsystem      = "pool"      // frostfs pool metrics
	serverSubsystem    = "server"    // HTTP server metrics
	statisticSubsystem = "statistic" // internal statistics (e.g. log sampling)
)

// Metric names used by the state, server and statistic subsystems.
const (
	healthMetric      = "health"
	versionInfoMetric = "version_info"
	droppedLogs       = "dropped_logs"
)

// Metric names used by the pool subsystem.
const (
	overallErrorsMetric       = "overall_errors"
	overallNodeErrorsMetric   = "overall_node_errors"
	overallNodeRequestsMetric = "overall_node_requests"
	currentErrorMetric        = "current_errors"
	avgRequestDurationMetric  = "avg_request_duration"
)

// Method label values of the per-method average request duration metric.
const (
	methodGetBalance      = "get_balance"
	methodPutContainer    = "put_container"
	methodGetContainer    = "get_container"
	methodListContainer   = "list_container"
	methodDeleteContainer = "delete_container"
	methodEndpointInfo    = "endpoint_info"
	methodNetworkInfo     = "network_info"
	methodPutObject       = "put_object"
	methodDeleteObject    = "delete_object"
	methodGetObject       = "get_object"
	methodHeadObject      = "head_object"
	methodRangeObject     = "range_object"
	methodCreateSession   = "create_session"
)
|
|
||||||
|
|
||||||
// HealthStatus of the gate application.
type HealthStatus int32

// Health status values reported via the state "health" gauge.
const (
	HealthStatusUndefined    HealthStatus = 0
	HealthStatusStarting     HealthStatus = 1
	HealthStatusReady        HealthStatus = 2
	HealthStatusShuttingDown HealthStatus = 3
)
|
|
||||||
|
|
||||||
// StatisticScraper provides a snapshot of frostfs pool statistics.
type StatisticScraper interface {
	Statistic() pool.Statistic
}

// serverMetrics exposes per-endpoint health of the gateway's HTTP servers.
type serverMetrics struct {
	endpointHealth *prometheus.GaugeVec // 1 healthy / 0 unhealthy, labeled by endpoint
}

// GateMetrics aggregates every metric group of the http gateway.
type GateMetrics struct {
	stateMetrics
	poolMetricsCollector
	serverMetrics
	statisticMetrics
}

// stateMetrics describes the application state (health, version).
type stateMetrics struct {
	healthCheck prometheus.Gauge     // current HealthStatus value
	versionInfo *prometheus.GaugeVec // constant 1, labeled by version string
}

// statisticMetrics holds internal counters of the gateway itself.
type statisticMetrics struct {
	droppedLogs prometheus.Counter // logs dropped by the sampling logger
}

// poolMetricsCollector implements prometheus.Collector over pool statistics;
// its gauges are refreshed from scraper on every Collect call.
type poolMetricsCollector struct {
	scraper             StatisticScraper
	overallErrors       prometheus.Gauge
	overallNodeErrors   *prometheus.GaugeVec
	overallNodeRequests *prometheus.GaugeVec
	currentErrors       *prometheus.GaugeVec
	requestDuration     *prometheus.GaugeVec
}
|
|
||||||
|
|
||||||
// NewGateMetrics creates new metrics for http gate.
|
|
||||||
func NewGateMetrics(p StatisticScraper) *GateMetrics {
|
|
||||||
stateMetric := newStateMetrics()
|
|
||||||
stateMetric.register()
|
|
||||||
|
|
||||||
poolMetric := newPoolMetricsCollector(p)
|
|
||||||
poolMetric.register()
|
|
||||||
|
|
||||||
serverMetric := newServerMetrics()
|
|
||||||
serverMetric.register()
|
|
||||||
|
|
||||||
statsMetric := newStatisticMetrics()
|
|
||||||
statsMetric.register()
|
|
||||||
|
|
||||||
return &GateMetrics{
|
|
||||||
stateMetrics: *stateMetric,
|
|
||||||
poolMetricsCollector: *poolMetric,
|
|
||||||
serverMetrics: *serverMetric,
|
|
||||||
statisticMetrics: *statsMetric,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *GateMetrics) Unregister() {
|
|
||||||
g.stateMetrics.unregister()
|
|
||||||
prometheus.Unregister(&g.poolMetricsCollector)
|
|
||||||
g.serverMetrics.unregister()
|
|
||||||
g.statisticMetrics.unregister()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStateMetrics() *stateMetrics {
|
|
||||||
return &stateMetrics{
|
|
||||||
healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]),
|
|
||||||
versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStatisticMetrics() *statisticMetrics {
|
|
||||||
return &statisticMetrics{
|
|
||||||
droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *statisticMetrics) register() {
|
|
||||||
prometheus.MustRegister(s.droppedLogs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *statisticMetrics) unregister() {
|
|
||||||
prometheus.Unregister(s.droppedLogs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m stateMetrics) register() {
|
|
||||||
prometheus.MustRegister(m.healthCheck)
|
|
||||||
prometheus.MustRegister(m.versionInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m stateMetrics) unregister() {
|
|
||||||
prometheus.Unregister(m.healthCheck)
|
|
||||||
prometheus.Unregister(m.versionInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m stateMetrics) SetHealth(s HealthStatus) {
|
|
||||||
m.healthCheck.Set(float64(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m stateMetrics) SetVersion(ver string) {
|
|
||||||
m.versionInfo.WithLabelValues(ver).Set(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *statisticMetrics) DroppedLogsInc() {
|
|
||||||
if s == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s.droppedLogs.Inc()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
|
|
||||||
return &poolMetricsCollector{
|
|
||||||
scraper: p,
|
|
||||||
overallErrors: mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]),
|
|
||||||
overallNodeErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]),
|
|
||||||
overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]),
|
|
||||||
currentErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]),
|
|
||||||
requestDuration: mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *poolMetricsCollector) Collect(ch chan<- prometheus.Metric) {
|
|
||||||
m.updateStatistic()
|
|
||||||
m.overallErrors.Collect(ch)
|
|
||||||
m.overallNodeErrors.Collect(ch)
|
|
||||||
m.overallNodeRequests.Collect(ch)
|
|
||||||
m.currentErrors.Collect(ch)
|
|
||||||
m.requestDuration.Collect(ch)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
|
|
||||||
m.overallErrors.Describe(descs)
|
|
||||||
m.overallNodeErrors.Describe(descs)
|
|
||||||
m.overallNodeRequests.Describe(descs)
|
|
||||||
m.currentErrors.Describe(descs)
|
|
||||||
m.requestDuration.Describe(descs)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *poolMetricsCollector) register() {
|
|
||||||
prometheus.MustRegister(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *poolMetricsCollector) updateStatistic() {
|
|
||||||
stat := m.scraper.Statistic()
|
|
||||||
|
|
||||||
m.overallNodeErrors.Reset()
|
|
||||||
m.overallNodeRequests.Reset()
|
|
||||||
m.currentErrors.Reset()
|
|
||||||
m.requestDuration.Reset()
|
|
||||||
|
|
||||||
for _, node := range stat.Nodes() {
|
|
||||||
m.overallNodeErrors.WithLabelValues(node.Address()).Set(float64(node.OverallErrors()))
|
|
||||||
m.overallNodeRequests.WithLabelValues(node.Address()).Set(float64(node.Requests()))
|
|
||||||
|
|
||||||
m.currentErrors.WithLabelValues(node.Address()).Set(float64(node.CurrentErrors()))
|
|
||||||
m.updateRequestsDuration(node)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.overallErrors.Set(float64(stat.OverallErrors()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodGetBalance).Set(float64(node.AverageGetBalance().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodPutContainer).Set(float64(node.AveragePutContainer().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodDeleteObject).Set(float64(node.AverageDeleteObject().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodGetObject).Set(float64(node.AverageGetObject().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodHeadObject).Set(float64(node.AverageHeadObject().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodRangeObject).Set(float64(node.AverageRangeObject().Milliseconds()))
|
|
||||||
m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func newServerMetrics() *serverMetrics {
|
|
||||||
return &serverMetrics{
|
|
||||||
endpointHealth: mustNewGaugeVec(appMetricsDesc[serverSubsystem][healthMetric]),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m serverMetrics) register() {
|
|
||||||
prometheus.MustRegister(m.endpointHealth)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m serverMetrics) unregister() {
|
|
||||||
prometheus.Unregister(m.endpointHealth)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m serverMetrics) MarkHealthy(endpoint string) {
|
|
||||||
m.endpointHealth.WithLabelValues(endpoint).Set(float64(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m serverMetrics) MarkUnhealthy(endpoint string) {
|
|
||||||
m.endpointHealth.WithLabelValues(endpoint).Set(float64(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPrometheusService creates a new service for gathering prometheus metrics.
|
|
||||||
func NewPrometheusService(log *zap.Logger, cfg Config) *Service {
|
|
||||||
if log == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Service{
|
|
||||||
Server: &http.Server{
|
|
||||||
Addr: cfg.Address,
|
|
||||||
Handler: promhttp.Handler(),
|
|
||||||
},
|
|
||||||
enabled: cfg.Enabled,
|
|
||||||
serviceType: "Prometheus",
|
|
||||||
log: log.With(zap.String("service", "Prometheus")),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,33 +0,0 @@
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/http/pprof"
|
|
||||||
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewPprofService creates a new service for gathering pprof metrics.
|
|
||||||
func NewPprofService(l *zap.Logger, cfg Config) *Service {
|
|
||||||
handler := http.NewServeMux()
|
|
||||||
handler.HandleFunc("/debug/pprof/", pprof.Index)
|
|
||||||
handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
|
|
||||||
handler.HandleFunc("/debug/pprof/profile", pprof.Profile)
|
|
||||||
handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
|
|
||||||
handler.HandleFunc("/debug/pprof/trace", pprof.Trace)
|
|
||||||
|
|
||||||
// Manually add support for paths linked to by index page at /debug/pprof/
|
|
||||||
for _, item := range []string{"allocs", "block", "heap", "goroutine", "mutex", "threadcreate"} {
|
|
||||||
handler.Handle("/debug/pprof/"+item, pprof.Handler(item))
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Service{
|
|
||||||
Server: &http.Server{
|
|
||||||
Addr: cfg.Address,
|
|
||||||
Handler: handler,
|
|
||||||
},
|
|
||||||
enabled: cfg.Enabled,
|
|
||||||
serviceType: "Pprof",
|
|
||||||
log: l.With(zap.String("service", "Pprof")),
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,48 +0,0 @@
|
||||||
package metrics
|
|
||||||
|
|
||||||
import (
	"context"
	"errors"
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"go.uber.org/zap"
)
|
|
||||||
|
|
||||||
// Service serves metrics over an embedded HTTP server.
type Service struct {
	*http.Server
	enabled     bool        // when false, Start only logs and does not listen
	log         *zap.Logger // logger pre-annotated with the service name
	serviceType string      // human-readable service name (e.g. "Prometheus", "Pprof")
}

// Config is a params to configure service.
type Config struct {
	Address string // address the service listens on
	Enabled bool   // whether the service should actually start
}
|
|
||||||
|
|
||||||
// Start runs http service with the exposed endpoint on the configured port.
|
|
||||||
func (ms *Service) Start() {
|
|
||||||
if ms.enabled {
|
|
||||||
ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
|
|
||||||
err := ms.ListenAndServe()
|
|
||||||
if err != nil && err != http.ErrServerClosed {
|
|
||||||
ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShutDown stops the service.
|
|
||||||
func (ms *Service) ShutDown(ctx context.Context) {
|
|
||||||
ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
|
|
||||||
err := ms.Shutdown(ctx)
|
|
||||||
if err != nil {
|
|
||||||
ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
|
|
||||||
if err = ms.Close(); err != nil {
|
|
||||||
ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue