diff --git a/.docker/Dockerfile b/.docker/Dockerfile deleted file mode 100644 index 8d6f806..0000000 --- a/.docker/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM golang:1.24-alpine AS basebuilder -RUN apk add --update make bash ca-certificates - -FROM basebuilder AS builder -ENV GOGC=off -ENV CGO_ENABLED=0 -ARG BUILD=now -ARG VERSION=dev -ARG REPO=repository -WORKDIR /src -COPY . /src - -RUN make - -# Executable image -FROM scratch - -WORKDIR / - -COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=builder /src/bin/frostfs-http-gw /bin/frostfs-http-gw - -ENTRYPOINT ["/bin/frostfs-http-gw"] diff --git a/.docker/Dockerfile.dirty b/.docker/Dockerfile.dirty deleted file mode 100644 index f733447..0000000 --- a/.docker/Dockerfile.dirty +++ /dev/null @@ -1,8 +0,0 @@ -FROM alpine -RUN apk add --update --no-cache bash ca-certificates - -WORKDIR / - -COPY bin/frostfs-http-gw /bin/frostfs-http-gw - -CMD ["frostfs-http-gw"] diff --git a/.forgejo/ISSUE_TEMPLATE/bug_report.md b/.forgejo/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 2861ed3..0000000 --- a/.forgejo/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: community, triage, bug -assignees: '' - ---- - - - -## Expected Behavior - - - -## Current Behavior - - - -## Possible Solution - - -## Steps to Reproduce (for bugs) - - - -1. 
- -## Context - - - -## Regression - - - -## Your Environment - -* Version used: -* Server setup and configuration: -* Operating System and version (`uname -a`): diff --git a/.forgejo/ISSUE_TEMPLATE/config.yml b/.forgejo/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 3ba13e0..0000000 --- a/.forgejo/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1 +0,0 @@ -blank_issues_enabled: false diff --git a/.forgejo/ISSUE_TEMPLATE/feature_request.md b/.forgejo/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index d6d1162..0000000 --- a/.forgejo/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: community, triage -assignees: '' - ---- - -## Is your feature request related to a problem? Please describe. - - -## Describe the solution you'd like - - -## Describe alternatives you've considered - - -## Additional context - diff --git a/.forgejo/logo.svg b/.forgejo/logo.svg deleted file mode 100644 index 148c359..0000000 --- a/.forgejo/logo.svg +++ /dev/null @@ -1,70 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/.forgejo/workflows/builds.yml b/.forgejo/workflows/builds.yml deleted file mode 100644 index ebb6bcc..0000000 --- a/.forgejo/workflows/builds.yml +++ /dev/null @@ -1,27 +0,0 @@ -on: - pull_request: - push: - branches: - - master - -jobs: - builds: - name: Builds - runs-on: ubuntu-latest - strategy: - matrix: - go_versions: [ '1.23', '1.24' ] - fail-fast: false - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '${{ matrix.go_versions }}' - - - name: Build binary - run: make - - - name: Check dirty suffix - run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi diff --git a/.forgejo/workflows/dco.yml b/.forgejo/workflows/dco.yml deleted file mode 100644 index 4acd633..0000000 --- a/.forgejo/workflows/dco.yml +++ /dev/null @@ 
-1,20 +0,0 @@ -on: [pull_request] - -jobs: - dco: - name: DCO - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.23' - - - name: Run commit format checker - uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3 - with: - from: 'origin/${{ github.event.pull_request.base.ref }}' diff --git a/.forgejo/workflows/oci-image.yml b/.forgejo/workflows/oci-image.yml deleted file mode 100644 index c5c0a2e..0000000 --- a/.forgejo/workflows/oci-image.yml +++ /dev/null @@ -1,27 +0,0 @@ -on: - pull_request: - push: - workflow_dispatch: - -jobs: - image: - name: OCI image - runs-on: docker - container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm - steps: - - name: Clone git repo - uses: actions/checkout@v3 - - - name: Build OCI image - run: make image - - - name: Push image to OCI registry - run: | - echo "$REGISTRY_PASSWORD" \ - | docker login --username truecloudlab --password-stdin git.frostfs.info - make image-push - if: >- - startsWith(github.ref, 'refs/tags/v') && - (github.event_name == 'workflow_dispatch' || github.event_name == 'push') - env: - REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}} diff --git a/.forgejo/workflows/tests.yml b/.forgejo/workflows/tests.yml deleted file mode 100644 index 8fb4c10..0000000 --- a/.forgejo/workflows/tests.yml +++ /dev/null @@ -1,61 +0,0 @@ -on: - pull_request: - push: - branches: - - master - -jobs: - lint: - name: Lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - cache: true - - - name: Install linters - run: make lint-install - - - name: Run linters - run: make lint - - tests: - name: Tests - runs-on: ubuntu-latest - strategy: - matrix: - go_versions: [ '1.23', '1.24' ] - fail-fast: false - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - 
go-version: '${{ matrix.go_versions }}' - - - name: Update Go modules - run: make dep - - - name: Run tests - run: make test - - integration: - name: Integration tests - runs-on: oci-runner - steps: - - uses: actions/checkout@v3 - - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: '1.24' - - - name: Run integration tests - run: |- - podman-service.sh - make integration-test diff --git a/.forgejo/workflows/vulncheck.yml b/.forgejo/workflows/vulncheck.yml deleted file mode 100644 index a58d2df..0000000 --- a/.forgejo/workflows/vulncheck.yml +++ /dev/null @@ -1,26 +0,0 @@ -on: - pull_request: - push: - branches: - - master - -jobs: - vulncheck: - name: Vulncheck - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Setup Go - uses: actions/setup-go@v3 - with: - go-version: '1.23' - check-latest: true - - - name: Install govulncheck - run: go install golang.org/x/vuln/cmd/govulncheck@latest - - - name: Run govulncheck - run: govulncheck ./... diff --git a/.gitignore b/.gitignore deleted file mode 100644 index c4a98d8..0000000 --- a/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -.idea -bin -temp -/plugins/ -/vendor/ - -.test.env -*~ -*.log -test.sh -testfile -.blast.yml -.frostfs-cli.yml - -.cache - -coverage.txt -coverage.html - -# debhelpers -**/.debhelper diff --git a/.gitlint b/.gitlint deleted file mode 100644 index e7218ac..0000000 --- a/.gitlint +++ /dev/null @@ -1,11 +0,0 @@ -[general] -fail-without-commits=True -regex-style-search=True -contrib=CC1 - -[title-match-regex] -regex=^\[\#[0-9Xx]+\]\s - -[ignore-by-title] -regex=^Release(.*) -ignore=title-match-regex diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index 2c754ac..0000000 --- a/.golangci.yml +++ /dev/null @@ -1,65 +0,0 @@ -# This file contains all available configuration options -# with their default values. - -# options for analysis running -run: - # timeout for analysis, e.g. 
30s, 5m, default is 1m - timeout: 15m - - # include test files or not, default is true - tests: true - -# output configuration options -output: - # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number" - formats: - - format: tab - -# all available settings of specific linters -linters-settings: - exhaustive: - # indicates that switch statements are to be considered exhaustive if a - # 'default' case is present, even if all enum members aren't listed in the - # switch - default-signifies-exhaustive: true - custom: - truecloudlab-linters: - path: bin/external_linters.so - original-url: git.frostfs.info/TrueCloudLab/linters.git - settings: - noliteral: - enable: true - target-methods: ["Fatal"] - disable-packages: ["req", "r"] - constants-package: "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - -linters: - enable: - # mandatory linters - - govet - - revive - - # some default golangci-lint linters - - errcheck - - gosimple - - ineffassign - - staticcheck - - typecheck - - unused - - # extra linters - - exhaustive - - godot - - gofmt - - whitespace - - goimports - - truecloudlab-linters - disable-all: true - fast: false - -issues: - include: - - EXC0002 # should have a comment - - EXC0003 # test/Test ... 
consider calling this - - EXC0004 # govet - - EXC0005 # C-style breaks diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 3c963be..0000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,52 +0,0 @@ -ci: - autofix_prs: false - -repos: - - repo: https://github.com/jorisroovers/gitlint - rev: v0.19.1 - hooks: - - id: gitlint - stages: [commit-msg] - - id: gitlint-ci - - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: check-added-large-files - - id: check-case-conflict - - id: check-executables-have-shebangs - - id: check-shebang-scripts-are-executable - - id: check-merge-conflict - - id: check-json - - id: check-xml - - id: check-yaml - - id: trailing-whitespace - args: [--markdown-linebreak-ext=md] - - id: end-of-file-fixer - exclude: ".key$" - - - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.9.0.2 - hooks: - - id: shellcheck - - - repo: local - hooks: - - id: make-lint-install - name: install linters - entry: make lint-install - language: system - pass_filenames: false - - - id: make-lint - name: run linters - entry: make lint - language: system - pass_filenames: false - - - id: go-unit-tests - name: go unit tests - entry: make test - pass_filenames: false - types: [go] - language: system diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 4465d2f..0000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,212 +0,0 @@ -# Changelog - -This document outlines major changes between releases. 
- -## [Unreleased] - -- Update Go to 1.23 (#228) - -### Added -- Add handling quota limit reached error (#187) -- Add slash clipping for FileName attribute (#174) -- Add new format of tag names config - -## [0.32.3] - 2025-02-05 - -### Added -- Add slash clipping for FileName attribute (#174) - -## [0.32.2] - 2025-02-03 - -### Fixed -- Possible memory leak in gRPC client (#202) - -## [0.32.1] - 2025-01-27 - -### Fixed -- SIGHUP panic (#198) - -## [0.32.0] - Khumbu - 2024-12-20 - -### Fixed -- Getting S3 object with FrostFS Object ID-like key (#166) -- Ignore delete marked objects in versioned bucket in index page (#181) - -### Added -- Metric of dropped logs by log sampler (#150) -- Fallback FileName attribute search during FilePath attribute search (#174) - -### Changed -- Updated tree service pool without api-go dependency (#178) - -## [0.31.0] - Rongbuk - 2024-11-20 - -### Fixed -- Docker warnings during image build (#126) -- `trace_id` parameter in logs (#148) -- SIGHUP support for `tracing.enabled` config parameter (#157) - -### Added -- Vulnerability report document (#123) -- Root CA configuration for tracing (#139) -- Log sampling policy configuration (#147) -- Index page support for buckets and containers (#137, #151) -- CORS support (#158) -- Source IP binding configuration for FrostFS requests (#160) -- Tracing attributes (#164) - -### Changed -- Updated Go version to 1.22 (#132) - -### Removed -- Duplicated NNS Resolver code (#129) - -## [0.30.3] - 2024-10-18 - -### Fixed -- Get response on S3 multipart object (#142) - -### Added -- Support percent-encoding for GET queries (#134) - -### Changed -- Split `FrostFS` interface into separate read methods (#127) - -## [0.30.2] - 2024-09-03 - -### Added -- Fuzzing tests (#135) - -## [0.30.1] - 2024-08-20 - -### Fixed -- Error counting in pool component before connection switch (#131) - -### Added -- Log of endpoint address during tree pool errors (#131) - -## [0.30.0] - Kangshung - 2024-07-22 - -### Fixed -- 
Handle query unescape and invalid bearer token errors (#107) -- Fix HTTP/2 requests (#110) - -### Added -- Add new `reconnect_interval` config param (#100) -- Erasure coding support in placement policy (#114) -- HTTP Header canonicalizer for well-known headers (#121) - -### Changed -- Improve test coverage (#112, #117) -- Bumped vulnerable dependencies (#115) -- Replace extended ACL examples with policies in README (#118) - -### Removed - -## [0.29.0] - Zemu - 2024-05-27 - -### Fixed -- Fix possibility of panic during SIGHUP (#99) -- Handle query unescape and invalid bearer token errors (#108) -- Fix log-level change on SIGHUP (#105) - -### Added -- Support client side object cut (#70) - - Add `frostfs.client_cut` config param - - Add `frostfs.buffer_max_size_for_put` config param - - Add bucket/container caching - - Disable homomorphic hash for PUT if it's disabled in container itself -- Add new `logger.destination` config param with journald support (#89, #104) -- Add support namespaces (#91) - -### Changed -- Replace atomics with mutex for reloadable params (#74) - -## [0.28.1] - 2024-01-24 - -### Added -- Tree pool traversal limit (#92) - -### Update from 0.28.0 -See new `frostfs.tree_pool_max_attempts` config parameter. 
- -## [0.28.0] - Academy of Sciences - 2023-12-07 - -### Fixed -- `grpc` schemas in tree configuration (#62) -- `GetSubTree` failures (#67) -- Debian packaging (#69, #90) -- Get latest version of tree node (#85) - -### Added -- Support dump metrics descriptions (#29) -- Support impersonate bearer token (#40, #45) -- Tracing support (#20, #44, #60) -- Object name resolving with tree service (#30) -- Metrics for current endpoint status (#77) -- Soft memory limit with `runtime.soft_memory_limit` (#72) -- Add selection of the node of the latest version of the object (#85) - -### Changed -- Update prometheus to v1.15.0 (#35) -- Update go version to 1.19 (#50) -- Finish rebranding (#2) -- Use gate key to form object owner (#66) -- Move log messages to constants (#36) -- Uploader and downloader refactor (#73) - -### Removed -- Drop `tree.service` param (now endpoints from `peers` section are used) (#59) - -## [0.27.0] - Karpinsky - 2023-07-12 - -This is a first FrostFS HTTP Gateway release named after -[Karpinsky glacier](https://en.wikipedia.org/wiki/Karpinsky_Glacier). 
- -### Fixed -- Require only one healthy storage server to start (#7) -- Enable gate metrics (#38) -- `Too many pings` error (#61) - -### Added -- Multiple configs support (#12) - -### Changed -- Repository rebranding (#1) -- Update neo-go to v0.101.0 (#8) -- Update viper to v1.15.0 (#8) -- Update go version to 1.18 (#9) -- Errors have become more detailed (#18) -- Update system attribute names (#22) -- Separate integration tests with build tags (#24) -- Changed values for `frostfs_http_gw_state_health` metric (#32) - -### Updating from neofs-http-gw v0.26.0 - -To set system attributes use updated headers -(you can use old ones for now, but their support will be dropped in the future releases): - -* `X-Attribute-Neofs-*` -> `X-Attribute-System-*` -* `X-Attribute-NEOFS-*` -> `X-Attribute-SYSTEM-*` -* `X-Attribute-neofs-*` -> `X-Attribute-system-*` - - -## Older versions - -This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neofs-http-gw) from version v0.26.0. -To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md. 
- -[0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0 -[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0 -[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1 -[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0 -[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0 -[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...v0.30.1 -[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2 -[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3 -[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0 -[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0 -[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1 -[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2 -[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...v0.32.3 -[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.3...master \ No newline at end of file diff --git a/CODEOWNERS b/CODEOWNERS deleted file mode 100644 index de5e48e..0000000 --- a/CODEOWNERS +++ /dev/null @@ -1,3 +0,0 @@ -.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers -.forgejo/.* @potyarkin -Makefile @potyarkin diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index ffd587d..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,156 +0,0 @@ -# Contribution guide - -First, thank you for contributing! We love and encourage pull requests from -everyone. 
Please follow the guidelines: - -- Check the open [issues](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/issues) and - [pull requests](https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/pulls) for existing - discussions. - -- Open an issue first, to discuss a new feature or enhancement. - -- Write tests and make sure the test suite passes locally and on CI. - -- Open a pull request and reference the relevant issue(s). - -- Make sure your commits are logically separated and have good comments - explaining the details of your change. - -- After receiving a feedback, amend your commits or add new ones as - appropriate. - -- **Have fun!** - -## Development Workflow - -Start by forking the `frostfs-http-gw` repository, make changes in a branch and then -send a pull request. We encourage pull requests to discuss code changes. Here -are the steps in details: - -### Set up your git repository -Fork [FrostFS HTTP Gateway -upstream](https://git.frostfs.info/repo/fork/8) source repository -to your own personal repository. Copy the URL of your fork (you will need it for -the `git clone` command below). - -```sh -$ git clone https://git.frostfs.info//frostfs-http-gw.git -``` - -### Set up git remote as ``upstream`` -```sh -$ cd frostfs-http-gw -$ git remote add upstream https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git -$ git fetch upstream -$ git merge upstream/master -... -``` - -### Create your feature branch -Before making code changes, make sure you create a separate branch for these -changes. Maybe you will find it convenient to name a branch in -`/-` format. - -``` -$ git checkout -b feature/123-something_awesome -``` - -### Test your changes -After your code changes, make sure - -- To add test cases for the new code. -- To run `make lint` -- To squash your commits into a single commit or a series of logically separated - commits run `git rebase -i`. It's okay to force update your pull request. -- To run `make test` and `make all` completes. 
- -### Commit changes -After verification, commit your changes. This is a [great -post](https://chris.beams.io/posts/git-commit/) on how to write useful commit -messages. Try following this template: - -``` -[#Issue] Summary - -Description - - - - -``` - -``` -$ git commit -am '[#123] Add some feature' -``` - -### Push to the branch -Push your locally committed changes to the remote origin (your fork) -``` -$ git push origin feature/123-something_awesome -``` - -### Create a Pull Request -Pull requests can be created via Forgejo. Refer to [this -document](https://docs.codeberg.org/collaborating/pull-requests-and-git-flow/) for -detailed steps on how to create a pull request. After a Pull Request gets peer -reviewed and approved, it will be merged. - -## DCO Sign off - -All authors to the project retain copyright to their work. However, to ensure -that they are only submitting work that they have rights to, we require -everyone to acknowledge this by signing their work. - -Any copyright notices in this repository should specify the authors as "the -contributors". - -To sign your work, just add a line like this at the end of your commit message: - -``` -Signed-off-by: Samii Sakisaka -``` - -This can be easily done with the `--signoff` option to `git commit`. - -By doing this you state that you can certify the following (from [The Developer -Certificate of Origin](https://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -1 Letterman Drive -Suite D4700 -San Francisco, CA, 94129 - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` diff --git a/CREDITS.md b/CREDITS.md deleted file mode 100644 index e019a0b..0000000 --- a/CREDITS.md +++ /dev/null @@ -1,20 +0,0 @@ -# Credits - -In alphabetical order: - -- Alexey Vanin -- Angira Kekteeva -- Denis Kirillov -- Evgeniy Kulikov -- Pavel Korotkov -- Roman Khimov - -# Contributors - -In chronological order: - -- Anatoly Bogatyrev -- Stanislav Bogatyrev -- Anastasia Prasolova -- Leonard Liubich -- Elizaveta Chichindaeva diff --git a/LICENSE b/LICENSE deleted file mode 100644 index f288702..0000000 --- a/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. 
- - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. 
For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. 
The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. 
The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. 
For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. 
Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. 
Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/Makefile b/Makefile deleted file mode 100755 index 11084f0..0000000 --- a/Makefile +++ /dev/null @@ -1,211 +0,0 @@ -#!/usr/bin/make -f - -REPO ?= $(shell go list -m) -VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop") -GO_VERSION ?= 1.23 -LINT_VERSION ?= 1.64.8 -TRUECLOUDLAB_LINT_VERSION ?= 0.0.10 -BUILD ?= $(shell date -u --iso=seconds) - -HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw -HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')" - -METRICS_DUMP_OUT ?= ./metrics-dump.json - -OUTPUT_LINT_DIR ?= $(shell pwd)/bin -LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION) -TMP_DIR := .cache - -# List of binaries to build. For now just one. -BINDIR = bin -CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*))) -BINS = $(addprefix $(BINDIR)/, $(CMDS)) - -.PHONY: all $(BINS) $(DIRS) dep docker/ test cover fmt image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean - -# .deb package versioning -OS_RELEASE = $(shell lsb_release -cs) -PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \ - sed -E "s/(.*)-(g[a-fA-F0-9]{6,8})(.*)/\1\3~\2/" | \ - sed "s/-/~/")-${OS_RELEASE} -.PHONY: debpackage debclean - -FUZZING_DIR = $(shell pwd)/tests/fuzzing/files -NGFUZZ_REPO = https://gitflic.ru/project/yadro/ngfuzz.git -FUZZ_TIMEOUT ?= 30 -FUZZ_FUNCTIONS ?= "" -FUZZ_AUX ?= "" - -# Make all binaries -all: $(BINS) -$(BINS): $(DIRS) dep - @echo "⇒ Build $@" - CGO_ENABLED=0 \ - go build -v -trimpath \ - -ldflags "-X main.Version=$(VERSION)" \ - -o $@ ./cmd/$(subst frostfs-,,$(notdir $@)) - -$(DIRS): - @echo "⇒ Ensure dir: $@" - @mkdir -p $@ - -# Pull go dependencies -dep: - @printf "⇒ Download requirements: " - @CGO_ENABLED=0 \ - go mod download && echo OK - @printf "⇒ Tidy requirements: " - @CGO_ENABLED=0 \ - go mod tidy -v && echo OK - -# Run `make %` in Golang container, for more information run `make 
help.docker/%` -docker/%: - $(if $(filter $*,all $(BINS)), \ - @echo "=> Running 'make $*' in clean Docker environment" && \ - docker run --rm -t \ - -v `pwd`:/src \ - -w /src \ - -u `stat -c "%u:%g" .` \ - --env HOME=/src \ - golang:$(GO_VERSION) make $*,\ - @echo "supported docker targets: all $(BINS) lint") - -# Run tests -test: - @go test ./... -cover - -# Run integration tests -.PHONY: integration-test -integration-test: - @go test ./... -cover --tags=integration - -# Run tests with race detection and produce coverage output -cover: - @go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic - @go tool cover -html=coverage.txt -o coverage.html - -# Run fuzzing -CLANG := $(shell which clang-17 2>/dev/null) -.PHONY: check-clang all -check-clang: -ifeq ($(CLANG),) - @echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh " - @exit 1 -endif - -.PHONY: check-ngfuzz all -check-ngfuzz: - @if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \ - echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \ - exit 1; \ - fi - -.PHONY: install-ngfuzz -install-ngfuzz: -ifeq (,$(wildcard $(FUZZING_DIR)/ngfuzz)) - @rm -rf $(FUZZING_DIR)/ngfuzz - @git clone $(NGFUZZ_REPO) $(FUZZING_DIR)/ngfuzz - @cd $(FUZZING_DIR)/ngfuzz && make -endif - -.PHONY: fuzz -fuzz: check-clang install-ngfuzz - @START_PATH=$$(pwd); \ - ROOT_PATH=$$(realpath --relative-to=$(FUZZING_DIR)/ngfuzz $$START_PATH) ; \ - cd $(FUZZING_DIR)/ngfuzz && \ - ./bin/ngfuzz clean && \ - env CGO_ENABLED=1 ./bin/ngfuzz fuzz --funcs $(FUZZ_FUNCTIONS) --rootdir $$ROOT_PATH --timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \ - ./bin/ngfuzz coverage --rootdir $$ROOT_PATH - -# Reformat code -fmt: - @echo "⇒ Processing gofmt check" - @gofmt -s -w ./ - -# Build clean Docker image -image: - @echo "⇒ Build FrostFS HTTP Gateway docker image " - @docker build \ - --build-arg REPO=$(REPO) \ - --build-arg VERSION=$(VERSION) \ - --rm \ - -f .docker/Dockerfile \ - -t 
$(HUB_IMAGE):$(HUB_TAG) . - -# Push Docker image to the hub -image-push: - @echo "⇒ Publish image" - @docker push $(HUB_IMAGE):$(HUB_TAG) - -# Build dirty Docker image -dirty-image: - @echo "⇒ Build FrostFS HTTP Gateway dirty docker image " - @docker build \ - --build-arg REPO=$(REPO) \ - --build-arg VERSION=$(VERSION) \ - --rm \ - -f .docker/Dockerfile.dirty \ - -t $(HUB_IMAGE)-dirty:$(HUB_TAG) . - -# Install linters - lint-install: - @mkdir -p $(TMP_DIR) - @rm -rf $(TMP_DIR)/linters - @git -c advice.detachedHead=false clone --branch v$(TRUECLOUDLAB_LINT_VERSION) https://git.frostfs.info/TrueCloudLab/linters.git $(TMP_DIR)/linters - @@make -C $(TMP_DIR)/linters lib CGO_ENABLED=1 OUT_DIR=$(OUTPUT_LINT_DIR) - @rm -rf $(TMP_DIR)/linters - @rmdir $(TMP_DIR) 2>/dev/null || true - @CGO_ENABLED=1 GOBIN=$(LINT_DIR) go install -trimpath github.com/golangci/golangci-lint/cmd/golangci-lint@v$(LINT_VERSION) - -# Run linters -lint: - @if [ ! -d "$(LINT_DIR)" ]; then \ - echo "Run make lint-install"; \ - exit 1; \ - fi - $(LINT_DIR)/golangci-lint --timeout=5m run - -# Run linters in Docker -docker/lint: - docker run --rm -it \ - -v `pwd`:/src \ - -u `stat -c "%u:%g" .` \ - --env HOME=/src \ - golangci/golangci-lint:v$(LINT_VERSION) bash -c 'cd /src/ && make lint' - -# Activate pre-commit hooks -pre-commit: - pre-commit install -t pre-commit -t commit-msg - -# Deactivate pre-commit hooks -unpre-commit: - pre-commit uninstall -t pre-commit -t commit-msg - -# Print version -version: - @echo $(VERSION) - -# Clean up -clean: - rm -rf vendor - rm -rf $(BINDIR) - -# Package for Debian -debpackage: - dch --package frostfs-http-gw \ - --controlmaint \ - --newversion $(PKG_VERSION) \ - --distribution $(OS_RELEASE) \ - "Please see CHANGELOG.md for code changes for $(VERSION)" - dpkg-buildpackage --no-sign -b - -debclean: - dh clean - -# Dump metrics (use METRICS_DUMP_OUT variable to override default out file './metrics-dump.json') -.PHONY: dump-metrics -dump-metrics: - @go test ./metrics 
-run TestDescribeAll --tags=dump_metrics --out=$(abspath $(METRICS_DUMP_OUT)) - - -include help.mk diff --git a/README.md b/README.md index 9c17c2a..7463f9e 100644 --- a/README.md +++ b/README.md @@ -1,465 +1,3 @@ -

-FrostFS logo -

-

- FrostFS is a decentralized distributed object storage integrated with the NEO Blockchain. -

+# WIP area: this repo is just a fork! ---- -[![Report](https://goreportcard.com/badge/git.frostfs.info/TrueCloudLab/frostfs-http-gw)](https://goreportcard.com/report/git.frostfs.info/TrueCloudLab/frostfs-http-gw) -![Release](https://img.shields.io/badge/dynamic/json.svg?label=release&url=https://git.frostfs.info/api/v1/repos/TrueCloudLab/frostfs-http-gw/releases&query=$[0].tag_name&color=orange) -![License](https://img.shields.io/badge/license-GPL--3.0-orange.svg) - -# FrostFS HTTP Gateway - -FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard. -- you can download one file per request from the FrostFS Network -- you can upload one file per request into the FrostFS Network - -See available routes in [specification](./docs/api.md). - -## Installation - -```go install git.frostfs.info/TrueCloudLab/frostfs-http-gw``` - -Or you can call `make` to build it from the cloned repository (the binary will -end up in `bin/frostfs-http-gw`). To build frostfs-http-gw binary in clean docker -environment, call `make docker/bin/frostfs-http-gw`. - -Other notable make targets: - -``` -dep Check and ensure dependencies -image Build clean docker image -dirty-image Build dirty docker image with host-built binaries -fmt Format the code -lint Run linters -version Show current version -``` - -Or you can also use a [Docker -image](https://git.frostfs.info/TrueCloudLab/-/packages/container/frostfs-http-gw) provided for the released -(and occasionally unreleased) versions of the gateway (`:latest` points to the -latest stable release). - -## Execution - -HTTP gateway itself is not a FrostFS node, so to access FrostFS it uses node's -gRPC interface and you need to provide some node that it will connect to. This -can be done either via `-p` parameter or via `HTTP_GW_PEERS__ADDRESS` and -`HTTP_GW_PEERS__WEIGHT` environment variables (the gate supports multiple -FrostFS nodes with weighted load balancing). 
- -If you launch HTTP gateway in bundle with [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env), -you can get the IP address of the node in the output of `make hosts` command -(with s0*.frostfs.devenv name). - -These two commands are functionally equivalent, they run the gate with one -backend node (and otherwise default settings): -``` -$ frostfs-http-gw -p 192.168.130.72:8080 -$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.72:8080 frostfs-http-gw -``` -It's also possible to specify uri scheme (grpc or grpcs) when using `-p`: -``` -$ frostfs-http-gw -p grpc://192.168.130.72:8080 -$ HTTP_GW_PEERS_0_ADDRESS=grpcs://192.168.130.72:8080 frostfs-http-gw -``` - -## Configuration - -In general, everything available as CLI parameter can also be specified via -environment variables (see [example](./config/config.env)), so they're not specifically mentioned in most cases -(see `--help` also). If you prefer a config file you can use it in yaml format. - -### Nodes: weights and priorities - -You can specify multiple `-p` options to add more FrostFS nodes, this will make -gateway spread requests equally among them (using weight 1 and priority 1 for every node): - -``` -$ frostfs-http-gw -p 192.168.130.72:8080 -p 192.168.130.71:8080 -``` -If you want some specific load distribution proportions, use weights and priorities: - -``` -$ HTTP_GW_PEERS_0_ADDRESS=192.168.130.71:8080 HTTP_GW_PEERS_0_WEIGHT=1 HTTP_GW_PEERS_0_PRIORITY=1 \ - HTTP_GW_PEERS_1_ADDRESS=192.168.130.72:8080 HTTP_GW_PEERS_1_WEIGHT=9 HTTP_GW_PEERS_1_PRIORITY=2 \ - HTTP_GW_PEERS_2_ADDRESS=192.168.130.73:8080 HTTP_GW_PEERS_2_WEIGHT=1 HTTP_GW_PEERS_2_PRIORITY=2 \ - frostfs-http-gw -``` -This command will make gateway use 192.168.130.71 while it is healthy. Otherwise, it will make the gateway use -192.168.130.72 for 90% of requests and 192.168.130.73 for remaining 10%. - -### Keys -You can provide a wallet via `--wallet` or `-w` flag. 
You can also specify the account address using `--address` -(if no address provided default one will be used). If wallet is used, you need to set `HTTP_GW_WALLET_PASSPHRASE` variable to decrypt the wallet. -If no wallet provided, the gateway autogenerates a key pair it will use for FrostFS requests. -``` -$ frostfs-http-gw -p $FROSTFS_NODE -w $WALLET_PATH --address $ACCOUNT_ADDRESS -``` -Example: -``` -$ frostfs-http-gw -p 192.168.130.72:8080 -w wallet.json --address NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP -``` - -### Binding and TLS - -You can make the gateway listen on specific address using the `--listen_address` option. - -It can also provide TLS interface for its users, just specify paths to the key and -certificate files via `--tls_key` and `--tls_certificate` parameters. Note -that using these options makes gateway TLS-only. If you need to serve both TLS -and plain text HTTP, you either have to run two gateway instances or use some -external redirecting solution. - -Example to bind to `192.168.130.130:443` and serve TLS there: - -``` -$ frostfs-http-gw -p 192.168.130.72:8080 --listen_address 192.168.130.130:443 \ - --tls_key=key.pem --tls_certificate=cert.pem -``` - -### HTTP parameters - -You can tune HTTP read and write buffer sizes as well as timeouts with -`HTTP_GW_WEB_READ_BUFFER_SIZE`, `HTTP_GW_WEB_READ_TIMEOUT`, -`HTTP_GW_WEB_WRITE_BUFFER_SIZE` and `HTTP_GW_WEB_WRITE_TIMEOUT` environment -variables. - -**Note:** to allow upload and download of big data streams, disable read -and write timeouts correspondingly. To do that, set `HTTP_GW_WEB_READ_TIMEOUT=0` -and `HTTP_GW_WEB_WRITE_TIMEOUT=0`. Otherwise, HTTP Gateway will terminate -request with data stream after timeout. - -`HTTP_GW_WEB_STREAM_REQUEST_BODY` environment variable can be used to disable -request body streaming (effectively it'll make the gateway accept the file completely -first and only then try sending it to FrostFS). 
- -`HTTP_GW_WEB_MAX_REQUEST_BODY_SIZE` controls maximum request body size -limiting uploads to files slightly lower than this limit. - -### FrostFS parameters - -Gateway can automatically set timestamps for uploaded files based on local -time source, use `HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP` environment -variable to control this behavior. - -### Monitoring and metrics - -Pprof and Prometheus are integrated into the gateway. To enable them use `--pprof` and `--metrics` flags or -`HTTP_GW_PPROF`/`HTTP_GW_METRICS` environment variables. - -### Timeouts - -You can tune gRPC interface parameters with `--connect_timeout` (for -connection to a node) and `--request_timeout` (for request processing over -established connection) options. - -gRPC-level checks allow the gateway to detect dead peers, but it declares them -unhealthy at pool level once per `--rebalance_timer` interval, so check for it -if needed. - -All timing options accept values with suffixes, so "15s" is 15 seconds and -"2m" is 2 minutes. - -### Zip streaming -The gateway supports downloading files by common prefix (like dir) in zip format. You can enable compression -using config or `HTTP_GW_ZIP_COMPRESSION=true` environment variable. - -### Logging -You can specify logging level using variable: -``` -HTTP_GW_LOGGER_LEVEL=debug -``` - -### Yaml file -Configuration file is optional and can be used instead of environment variables/other parameters. -It can be specified with `--config` parameter: -``` -$ frostfs-http-gw --config your-config.yaml -``` - -See [config](./config/config.yaml) and [defaults](./docs/gate-configuration.md) for example. - -#### Multiple configs - -You can use several config files when running application. It allows you to split configuration into parts. -For example, you can use separate yaml file for pprof and prometheus section in config (see [config examples](./config)). 
-You can either provide several files with repeating `--config` flag or provide path to the dir that contains all configs using `--config-dir` flag. -Also, you can combine these flags: - -```shell -$ frostfs-http-gw --config ./config/config.yaml --config /your/partial/config.yaml --config-dir ./config/dir -``` - -**Note:** next file in `--config` flag overwrites values from the previous one. -Files from `--config-dir` directory overwrite values from `--config` files. -So the command above run `frostfs-http-gw` to listen on `0.0.0.0:8080` address (value from `./config/config.yaml`), -applies parameters from `/your/partial/config.yaml`, -enable pprof (value from `./config/dir/pprof.yaml`) and prometheus (value from `./config/dir/prometheus.yaml`). - -## HTTP API provided - -This gateway intentionally provides limited feature set and doesn't try to -substitute (or completely wrap) regular gRPC FrostFS interface. You can download -and upload objects with it, but deleting, searching, managing ACLs, creating -containers and other activities are not supported and not planned to be -supported. - -### Preparation - -Before uploading or downloading a file make sure you have a prepared container. -You can create it with instructions below. - -Also, in case of downloading, you need to have a file inside a container. - -### NNS - -In all download/upload routes you can use container name instead of its id (`$CID`). -Read more about it in [docs/nns.md](./docs/nns.md). 
- - -#### Create a container - -You can create a container via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases): -``` -$ frostfs-cli -r $FROSTFS_NODE -w $WALLET container create --policy $POLICY --basic-acl $ACL -``` -where `$WALLET` is a path to user wallet, -`$ACL` -- hex encoded basic ACL value or keywords 'private, 'public-read', 'public-read-write' and -`$POLICY` -- QL-encoded or JSON-encoded placement policy or path to file with it - -For example: -``` -$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json container create --policy "REP 3" --basic-acl public --await -``` - -If you have launched nodes via [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env), -you can get the key value from `wallets/wallet.json` or write the path to -the file `wallets/wallet.key`. - -#### Prepare a file in a container - -To create a file via [frostfs-cli](https://git.frostfs.info/TrueCloudLab/frostfs-node/releases), run a command below: -``` -$ frostfs-cli -r $FROSTFS_NODE -k $KEY object put --file $FILENAME --cid $CID -``` -where -`$KEY` -- the key, please read the information [above](#create-a-container), -`$CID` -- container ID. - -For example: -``` -$ frostfs-cli -r 192.168.130.72:8080 -w ./wallet.json object put --file cat.png --cid Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ --attributes img_type=cat,my_attr=cute -``` - - -### Downloading - -#### Requests - -The following requests support GET/HEAD methods. - -##### By IDs - -Basic downloading involves container ID and object ID and is done via GET -requests to `/get/$CID/$OID` path, where `$CID` is a container ID or its name if NNS is enabled, -`$OID` is an object's (i.e. your file's) ID. 
- -For example: - -```shell -$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY -``` - -or if container has a name: - -```shell -$ wget http://localhost:8082/get/container-name/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY -``` - -##### By attributes -There is also more complex interface provided for attribute-based downloads, -it's usually used to retrieve files by their names, but any other attribute -can be used as well. The generic syntax for it looks like this: - -```/get_by_attribute/$CID/$ATTRIBUTE_NAME/$ATTRIBUTE_VALUE``` - -where -`$CID` is a container ID or its name if NNS is enabled, -`$ATTRIBUTE_NAME` is the name of the attribute we want to use, -`$ATTRIBUTE_VALUE` is the value of this attribute that the target object should have. - -**NB!** The attribute key and value should be url encoded, i.e., if you want to download an object with the attribute value -`a cat`, the value in the request must be `a+cat`. In the same way with the attribute key. If you don't escape such values -everything can still work (for example you can use `d@ta` without encoding) but it's HIGHLY RECOMMENDED to encode all your attributes. - -If multiple objects have specified attribute with specified value, then the -first one of them is returned (and you can't get others via this interface). 
- -Example for file name attribute: - -``` -$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat.jpeg -``` -Or when the filename includes special symbols: -``` -$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat+jpeg # means 'cat jpeg' -$ wget http://localhost:8082/get_by_attribute/88GdaZFTcYJn1dqiSECss8kKPmmun6d6BfvC4zhwfLYM/FileName/cat%25jpeg # means 'cat%jpeg' -``` - -Some other user-defined attributes: - -``` -$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Ololo/100500 -``` - -Or when the attribute includes special symbols: -``` -$ wget http://localhost:8082/get_by_attribute/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/Olo%2Blo/100500 # means Olo+lo -``` - -An optional `download=true` argument for `Content-Disposition` management is -also supported (more on that below): - -``` -$ wget http://localhost:8082/get/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY?download=true - -``` - -##### Zip -You can download some dir (files with the same prefix) in zip (it will be compressed if config contains appropriate param): -``` -$ wget http://localhost:8082/zip/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ/common/prefix -``` - -**Note:** the objects must have a valid `FilePath` attribute (it should not contain trailing `/`), -otherwise they will not be in the zip archive. 
You can upload file with this attribute using `curl`: - -``` -$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H 'X-Attribute-FilePath: common/prefix/cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ -``` - - -#### Replies - -You get object contents in the reply body (if GET method was used), but at the same time you also get a -set of reply headers generated using the following rules: - * `Content-Length` is set to the length of the object - * `Content-Type` is autodetected dynamically by gateway - * `Content-Disposition` is `inline` for regular requests and `attachment` for - requests with `download=true` argument, `filename` is also added if there - is `FileName` attribute set for this object - * `Last-Modified` header is set to `Timestamp` attribute value if it's - present for the object - * `x-container-id` contains container ID - * `x-object-id` contains object ID - * `x-owner-id` contains owner address - * all the other FrostFS attributes are converted to `X-Attribute-*` headers (but only - if they can be safely represented in HTTP header), for example `FileName` - attribute becomes `X-Attribute-FileName` header - -##### Caching strategy - -HTTP Gateway doesn't control caching (doesn't anything with the `Cache-Control` header). Caching strategy strictly -depends on application use case. So it should be carefully done by proxy server. - -### Uploading - -You can POST files to `/upload/$CID` path where `$CID` is a container ID or its name if NNS is enabled. The -request must contain multipart form with mandatory `filename` parameter. Only -one part in multipart form will be processed, so to upload another file just -issue a new POST request. - -Example request: - -``` -$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ -``` - -Chunked encoding is supported by the server (but check for request read -timeouts if you're planning some streaming). 
You can try streaming support -with a large file piped through named FIFO pipe: - -``` -$ mkfifo pipe -$ cat video.mp4 > pipe & -$ curl --no-buffer -F 'file=@pipe;filename=catvideo.mp4' http://localhost:8082/upload/Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ -``` - -You can also add some attributes to your file using the following rules: - * all "X-Attribute-*" headers get converted to object attributes with - "X-Attribute-" prefix stripped, that is if you add "X-Attribute-Ololo: - 100500" header to your request the resulting object will get "Ololo: - 100500" attribute - * "X-Attribute-SYSTEM-*" headers are special - (`-SYSTEM-` part can also be `-system-` or`-System-` (and even legacy `-Neofs-` for some next releases)), they're used to set internal - FrostFS attributes starting with `__SYSTEM__` prefix, for these attributes all - dashes get converted to underscores and all letters are capitalized. For - example, you can use "X-Attribute-SYSTEM-Expiration-Epoch" header to set - `__SYSTEM__EXPIRATION_EPOCH` attribute - * `FileName` attribute is set from multipart's `filename` if not set - explicitly via `X-Attribute-FileName` header - * `Timestamp` attribute can be set using gateway local time if using - HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP option and if request doesn't - provide `X-Attribute-Timestamp` header of its own - ---- -**NOTE** - -There are some reserved headers type of `X-Attribute-SYSTEM-*` (headers are arranged in descending order of priority): -1. `X-Attribute-System-Expiration-Epoch: 100` -2. `X-Attribute-System-Expiration-Duration: 24h30m` -3. `X-Attribute-System-Expiration-Timestamp: 1637574797` -4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z` - -which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way. 
- ---- - -For successful uploads you get JSON data in reply body with a container and -object ID, like this: -``` -{ - "object_id": "9ANhbry2ryjJY1NZbcjryJMRXG5uGNKd73kD3V1sVFsX", - "container_id": "Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ" -} -``` - -#### Authentication - -Read more about request authentication in [docs/authentication.md](./docs/authemtnication.md) - -### Metrics and Pprof - -If enabled, Prometheus metrics are available at `localhost:8084` endpoint -and Pprof at `localhost:8083/debug/pprof` by default. Host and port can be configured. -See [configuration](./docs/gate-configuration.md). - -## Credits - -Please see [CREDITS](CREDITS.md) for details. - -## Fuzzing - -To run fuzzing tests use the following command: - -```shell -$ make fuzz -``` - -This command will install dependencies for the fuzzing process and run existing fuzzing tests. - -You can also use the following arguments: - -``` -FUZZ_TIMEOUT - time to run each fuzzing test (default 30) -FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all") -FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug") -FUZZ_NGFUZZ_DIR - path to ngfuzz tool -```` - -## Credits - -Please see [CREDITS](CREDITS.md) for details. +Useful things may be published only in [other branches](../../../branches) diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 46fe535..0000000 --- a/SECURITY.md +++ /dev/null @@ -1,26 +0,0 @@ -# Security Policy - - -## How To Report a Vulnerability - -If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure. 
- -**Please do not report security vulnerabilities through public issues, discussions, or change requests.** - -Instead, you can report it using one of the following ways: - -* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email - -Please include as much of the information listed below as you can to help us better understand and resolve the issue: - -* The type of issue (e.g., buffer overflow, or cross-site scripting) -* Affected version(s) -* Impact of the issue, including how an attacker might exploit the issue -* Step-by-step instructions to reproduce the issue -* The location of the affected source code (tag/branch/commit or direct URL) -* Full paths of source file(s) related to the manifestation of the issue -* Any special configuration required to reproduce the issue -* Any log files that are related to this issue (if possible) -* Proof-of-concept or exploit code (if possible) - -This information will help us triage your report more quickly. diff --git a/VERSION b/VERSION deleted file mode 100644 index 2c768c5..0000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -v0.32.3 diff --git a/cmd/http-gw/app.go b/cmd/http-gw/app.go deleted file mode 100644 index 4a83caf..0000000 --- a/cmd/http-gw/app.go +++ /dev/null @@ -1,1197 +0,0 @@ -package main - -import ( - "bytes" - "context" - "crypto/x509" - "errors" - "fmt" - "net/http" - "os" - "os/signal" - "runtime/debug" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net" - containerClient "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/container" - 
contractsUtil "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/contracts/util" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/fasthttp/router" - "github.com/nspcc-dev/neo-go/cli/flags" - "github.com/nspcc-dev/neo-go/cli/input" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/panjf2000/ants/v2" - "github.com/spf13/viper" - "github.com/valyala/fasthttp" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "golang.org/x/exp/slices" -) - -type ( - app struct { - ctx context.Context - log *zap.Logger - pool *pool.Pool - treePool *treepool.Pool - key *keys.PrivateKey - owner *user.ID - cfg *appCfg - webServer *fasthttp.Server - webDone chan struct{} - resolver *resolver.ContainerResolver - metrics *gateMetrics - services []*metrics.Service - settings *appSettings - loggerSettings *loggerSettings - bucketCache *cache.BucketCache - handle *handler.Handler - corsCnrID cid.ID - - servers []Server - unbindServers []ServerInfo - mu sync.RWMutex - } - - loggerSettings struct { - mu sync.RWMutex - appMetrics *metrics.GateMetrics - 
} - - // App is an interface for the main gateway function. - App interface { - Wait() - Serve() - } - - gateMetrics struct { - logger *zap.Logger - provider *metrics.GateMetrics - mu sync.RWMutex - enabled bool - } - - // appSettings stores reloading parameters, so it has to provide getters and setters which use RWMutex. - appSettings struct { - reconnectInterval time.Duration - dialerSource *internalnet.DialerSource - workerPoolSize int - logLevelConfig *logLevelConfig - - mu sync.RWMutex - defaultTimestamp bool - archiveCompression bool - clientCut bool - returnIndexPage bool - indexPageTemplate string - bufferMaxSizeForPut uint64 - namespaceHeader string - defaultNamespaces []string - cors *data.CORSRule - enableFilepathFallback bool - enableFilepathSlashFallback bool - } - - tagsConfig struct { - tagLogs sync.Map - defaultLvl zap.AtomicLevel - } - - logLevelConfig struct { - logLevel zap.AtomicLevel - tagsConfig *tagsConfig - } -) - -func newLogLevel(v *viper.Viper) zap.AtomicLevel { - ll, err := getLogLevel(v) - if err != nil { - panic(err.Error()) - } - atomicLogLevel := zap.NewAtomicLevel() - atomicLogLevel.SetLevel(ll) - return atomicLogLevel -} - -func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig { - t := tagsConfig{defaultLvl: zap.NewAtomicLevelAt(ll)} - if err := t.update(v, ll); err != nil { - // panic here is analogue of the similar panic during common log level initialization. 
- panic(err.Error()) - } - - return &t -} - -func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig { - cfg := &logLevelConfig{ - logLevel: lvl, - tagsConfig: tagsConfig, - } - - cfg.setMinLogLevel() - - return cfg -} - -func (l *logLevelConfig) setMinLogLevel() { - l.tagsConfig.tagLogs.Range(func(_, value any) bool { - v := value.(zapcore.Level) - if v < l.logLevel.Level() { - l.logLevel.SetLevel(v) - } - return true - }) -} - -func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) { - if lvl, err := getLogLevel(cfg); err != nil { - log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) - } else { - l.logLevel.SetLevel(lvl) - } - - if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil { - log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) - } - - l.setMinLogLevel() -} - -func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool { - lvl, ok := t.tagLogs.Load(tag) - if !ok { - return t.defaultLvl.Enabled(tgtLevel) - } - - return lvl.(zapcore.Level).Enabled(tgtLevel) -} - -func (t *tagsConfig) DefaultEnabled(lvl zapcore.Level) bool { - return t.defaultLvl.Enabled(lvl) -} - -func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error { - tags, err := fetchLogTagsConfig(cfg, ll) - if err != nil { - return err - } - - t.tagLogs.Range(func(key, _ any) bool { - k := key.(string) - - if _, ok := tags[k]; !ok { - t.tagLogs.Delete(key) - } - return true - }) - - for k, v := range tags { - t.tagLogs.Store(k, v) - } - t.defaultLvl.SetLevel(ll) - - return nil -} - -func newApp(ctx context.Context, cfg *appCfg) App { - logSettings := &loggerSettings{} - logLevel := newLogLevel(cfg.config()) - tagConfig := newTagsConfig(cfg.config(), logLevel.Level()) - logConfig := newLogLevelConfig(logLevel, tagConfig) - log := pickLogger(cfg.config(), logConfig.logLevel, logSettings, tagConfig) - - a := &app{ - ctx: ctx, - log: log.logger, - cfg: cfg, - 
loggerSettings: logSettings, - webServer: new(fasthttp.Server), - webDone: make(chan struct{}), - bucketCache: cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)), - } - - a.initAppSettings(logConfig) - - // -- setup FastHTTP server -- - a.webServer.Name = "frost-http-gw" - a.webServer.ReadBufferSize = a.config().GetInt(cfgWebReadBufferSize) - a.webServer.WriteBufferSize = a.config().GetInt(cfgWebWriteBufferSize) - a.webServer.ReadTimeout = a.config().GetDuration(cfgWebReadTimeout) - a.webServer.WriteTimeout = a.config().GetDuration(cfgWebWriteTimeout) - a.webServer.DisableHeaderNamesNormalizing = true - a.webServer.NoDefaultServerHeader = true - a.webServer.NoDefaultContentType = true - a.webServer.MaxRequestBodySize = a.config().GetInt(cfgWebMaxRequestBodySize) - a.webServer.DisablePreParseMultipartForm = true - a.webServer.StreamRequestBody = a.config().GetBool(cfgWebStreamRequestBody) - // -- -- -- -- -- -- -- -- -- -- -- -- -- -- - a.initPools(ctx) - - var owner user.ID - user.IDFromKey(&owner, a.key.PrivateKey.PublicKey) - a.owner = &owner - - a.setRuntimeParameters() - - a.initResolver() - a.initMetrics() - a.initTracing(ctx) - a.initContainers(ctx) - - return a -} - -func (a *app) config() *viper.Viper { - return a.cfg.config() -} - -func (a *app) initContainers(ctx context.Context) { - corsCnrID, err := a.fetchContainerID(ctx, cfgContainersCORS) - if err != nil { - a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err), logs.TagField(logs.TagApp)) - } - a.corsCnrID = *corsCnrID -} - -func (a *app) initRPCClient(ctx context.Context) *rpcclient.Client { - rpcCli, err := rpcclient.New(ctx, a.config().GetString(cfgRPCEndpoint), rpcclient.Options{}) - if err != nil { - a.log.Fatal(logs.InitRPCClientFailed, zap.Error(err), logs.TagField(logs.TagApp)) - } - return rpcCli -} - -func (a *app) initAppSettings(lc *logLevelConfig) { - a.settings = &appSettings{ - reconnectInterval: 
fetchReconnectInterval(a.config()), - dialerSource: getDialerSource(a.log, a.config()), - workerPoolSize: a.config().GetInt(cfgWorkerPoolSize), - logLevelConfig: lc, - } - a.settings.update(a.config(), a.log) -} - -func (s *appSettings) update(v *viper.Viper, l *zap.Logger) { - defaultTimestamp := v.GetBool(cfgUploaderHeaderEnableDefaultTimestamp) - archiveCompression := fetchArchiveCompression(v) - returnIndexPage := v.GetBool(cfgIndexPageEnabled) - clientCut := v.GetBool(cfgClientCut) - bufferMaxSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut) - namespaceHeader := v.GetString(cfgResolveNamespaceHeader) - defaultNamespaces := fetchDefaultNamespaces(v) - indexPage, indexEnabled := fetchIndexPageTemplate(v, l) - cors := fetchCORSConfig(v) - enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback) - enableFilepathSlashFallback := v.GetBool(cfgFeaturesEnableFilepathSlashFallback) - - s.mu.Lock() - defer s.mu.Unlock() - - s.defaultTimestamp = defaultTimestamp - s.archiveCompression = archiveCompression - s.returnIndexPage = returnIndexPage - s.clientCut = clientCut - s.bufferMaxSizeForPut = bufferMaxSizeForPut - s.namespaceHeader = namespaceHeader - s.defaultNamespaces = defaultNamespaces - s.returnIndexPage = indexEnabled - s.indexPageTemplate = indexPage - s.cors = cors - s.enableFilepathFallback = enableFilepathFallback - s.enableFilepathSlashFallback = enableFilepathSlashFallback -} - -func (s *loggerSettings) DroppedLogsInc() { - s.mu.RLock() - defer s.mu.RUnlock() - - if s.appMetrics != nil { - s.appMetrics.DroppedLogsInc() - } -} - -func (s *loggerSettings) setMetrics(appMetrics *metrics.GateMetrics) { - s.mu.Lock() - defer s.mu.Unlock() - - s.appMetrics = appMetrics -} - -func (s *appSettings) DefaultTimestamp() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.defaultTimestamp -} - -func (s *appSettings) ArchiveCompression() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.archiveCompression -} - -func (s *appSettings) 
IndexPageEnabled() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.returnIndexPage -} - -func (s *appSettings) IndexPageTemplate() string { - s.mu.RLock() - defer s.mu.RUnlock() - if s.indexPageTemplate == "" { - return templates.DefaultIndexTemplate - } - return s.indexPageTemplate -} - -func (s *appSettings) CORS() *data.CORSRule { - s.mu.RLock() - defer s.mu.RUnlock() - - if s.cors == nil { - return nil - } - - allowMethods := make([]string, len(s.cors.AllowedMethods)) - copy(allowMethods, s.cors.AllowedMethods) - - allowHeaders := make([]string, len(s.cors.AllowedHeaders)) - copy(allowHeaders, s.cors.AllowedHeaders) - - exposeHeaders := make([]string, len(s.cors.ExposeHeaders)) - copy(exposeHeaders, s.cors.ExposeHeaders) - - allowOrigins := make([]string, len(s.cors.AllowedOrigins)) - copy(allowOrigins, s.cors.AllowedOrigins) - - return &data.CORSRule{ - AllowedOrigins: allowOrigins, - AllowedMethods: allowMethods, - AllowedHeaders: allowHeaders, - ExposeHeaders: exposeHeaders, - AllowedCredentials: s.cors.AllowedCredentials, - MaxAgeSeconds: s.cors.MaxAgeSeconds, - } -} - -func (s *appSettings) ClientCut() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.clientCut -} - -func (s *appSettings) BufferMaxSizeForPut() uint64 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.bufferMaxSizeForPut -} - -func (s *appSettings) NamespaceHeader() string { - s.mu.RLock() - defer s.mu.RUnlock() - return s.namespaceHeader -} - -func (s *appSettings) FormContainerZone(ns string) string { - s.mu.RLock() - namespaces := s.defaultNamespaces - s.mu.RUnlock() - if slices.Contains(namespaces, ns) { - return v2container.SysAttributeZoneDefault - } - - return ns + ".ns" -} - -func (s *appSettings) EnableFilepathFallback() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.enableFilepathFallback -} - -func (s *appSettings) EnableFilepathSlashFallback() bool { - s.mu.RLock() - defer s.mu.RUnlock() - return s.enableFilepathSlashFallback -} - -func (a *app) 
initResolver() { - var err error - a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig()) - if err != nil { - a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp)) - } -} - -func (a *app) getResolverConfig() ([]string, *resolver.Config) { - resolveCfg := &resolver.Config{ - FrostFS: frostfs.NewResolverFrostFS(a.pool), - RPCAddress: a.config().GetString(cfgRPCEndpoint), - } - - order := a.config().GetStringSlice(cfgResolveOrder) - if resolveCfg.RPCAddress == "" { - order = remove(order, resolver.NNSResolver) - a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp)) - } - - if len(order) == 0 { - a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty, - logs.TagField(logs.TagApp)) - } - - return order, resolveCfg -} - -func (a *app) initMetrics() { - gateMetricsProvider := metrics.NewGateMetrics(a.pool) - a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.config().GetBool(cfgPrometheusEnabled)) - a.metrics.SetHealth(metrics.HealthStatusStarting) - a.loggerSettings.setMetrics(a.metrics.provider) -} - -func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics { - if !enabled { - logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp)) - } - return &gateMetrics{ - logger: logger, - provider: provider, - enabled: enabled, - } -} - -func (m *gateMetrics) isEnabled() bool { - m.mu.RLock() - defer m.mu.RUnlock() - - return m.enabled -} - -func (m *gateMetrics) SetEnabled(enabled bool) { - if !enabled { - m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp)) - } - - m.mu.Lock() - m.enabled = enabled - m.mu.Unlock() -} - -func (m *gateMetrics) SetHealth(status metrics.HealthStatus) { - if !m.isEnabled() { - return - } - - m.provider.SetHealth(status) -} - -func (m *gateMetrics) SetVersion(ver string) { - if !m.isEnabled() { - return - } - - m.provider.SetVersion(ver) -} - -func (m *gateMetrics) 
Shutdown() { - m.mu.Lock() - if m.enabled { - m.provider.SetHealth(metrics.HealthStatusShuttingDown) - m.enabled = false - } - m.provider.Unregister() - m.mu.Unlock() -} - -func (m *gateMetrics) MarkHealthy(endpoint string) { - if !m.isEnabled() { - return - } - - m.provider.MarkHealthy(endpoint) -} - -func (m *gateMetrics) MarkUnhealthy(endpoint string) { - if !m.isEnabled() { - return - } - - m.provider.MarkUnhealthy(endpoint) -} - -func remove(list []string, element string) []string { - for i, item := range list { - if item == element { - return append(list[:i], list[i+1:]...) - } - } - return list -} - -func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error) { - walletPath := cfg.GetString(cfgWalletPath) - - if len(walletPath) == 0 { - log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun, logs.TagField(logs.TagApp)) - key, err := keys.NewPrivateKey() - if err != nil { - return nil, err - } - return key, nil - } - w, err := wallet.NewWalletFromFile(walletPath) - if err != nil { - return nil, err - } - - var password *string - if cfg.IsSet(cfgWalletPassphrase) { - pwd := cfg.GetString(cfgWalletPassphrase) - password = &pwd - } - - address := cfg.GetString(cfgWalletAddress) - - return getKeyFromWallet(w, address, password) -} - -func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys.PrivateKey, error) { - var addr util.Uint160 - var err error - - if addrStr == "" { - addr = w.GetChangeAddress() - } else { - addr, err = flags.ParseAddress(addrStr) - if err != nil { - return nil, fmt.Errorf("invalid address") - } - } - - acc := w.GetAccount(addr) - if acc == nil { - return nil, fmt.Errorf("couldn't find wallet account for %s", addrStr) - } - - if password == nil { - pwd, err := input.ReadPassword("Enter password > ") - if err != nil { - return nil, fmt.Errorf("couldn't read password") - } - password = &pwd - } - - if err := acc.Decrypt(*password, w.Scrypt); err != nil { - return nil, 
fmt.Errorf("couldn't decrypt account: %w", err) - } - - return acc.PrivateKey(), nil -} - -func (a *app) Wait() { - a.log.Info(logs.StartingApplication, - zap.String("app_name", "frostfs-http-gw"), - zap.String("version", Version), - logs.TagField(logs.TagApp)) - - a.metrics.SetVersion(Version) - a.setHealthStatus() - - <-a.webDone // wait for web-server to be stopped -} - -func (a *app) setHealthStatus() { - a.metrics.SetHealth(metrics.HealthStatusReady) -} - -func (a *app) Serve() { - workerPool := a.initWorkerPool() - defer func() { - workerPool.Release() - close(a.webDone) - }() - - // Configure router. - a.configureRouter(workerPool) - - a.startServices() - a.initServers(a.ctx) - - servs := a.getServers() - - for i := range servs { - go func(i int) { - a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp)) - if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed { - a.metrics.MarkUnhealthy(servs[i].Address()) - a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp)) - } - }(i) - } - - if len(a.unbindServers) != 0 { - a.scheduleReconnect(a.ctx, a.webServer) - } - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGHUP) - -LOOP: - for { - select { - case <-a.ctx.Done(): - break LOOP - case <-sigs: - a.configReload(a.ctx) - } - } - - a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()), logs.TagField(logs.TagApp)) - - a.metrics.Shutdown() - a.stopServices() - a.shutdownTracing() -} - -func (a *app) initWorkerPool() *ants.Pool { - workerPool, err := ants.NewPool(a.settings.workerPoolSize) - if err != nil { - a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp)) - } - return workerPool -} - -func (a *app) shutdownTracing() { - const tracingShutdownTimeout = 5 * time.Second - shdnCtx, cancel := context.WithTimeout(context.Background(), tracingShutdownTimeout) - defer cancel() - - if err := 
tracing.Shutdown(shdnCtx); err != nil { - a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp)) - } -} - -func (a *app) configReload(ctx context.Context) { - a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp)) - if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) { - a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp)) - return - } - if err := a.cfg.reload(); err != nil { - a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp)) - return - } - - a.settings.logLevelConfig.update(a.cfg.config(), a.log) - - if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil { - a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp)) - } - - if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil { - a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err), logs.TagField(logs.TagApp)) - } - - if err := a.updateServers(); err != nil { - a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp)) - } - - a.setRuntimeParameters() - - a.stopServices() - a.startServices() - - a.settings.update(a.config(), a.log) - - a.metrics.SetEnabled(a.config().GetBool(cfgPrometheusEnabled)) - a.initTracing(ctx) - a.setHealthStatus() - - a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp)) -} - -func (a *app) startServices() { - a.services = a.services[:0] - - pprofConfig := metrics.Config{Enabled: a.config().GetBool(cfgPprofEnabled), Address: a.config().GetString(cfgPprofAddress)} - pprofService := metrics.NewPprofService(a.log, pprofConfig) - a.services = append(a.services, pprofService) - go pprofService.Start() - - prometheusConfig := metrics.Config{Enabled: a.config().GetBool(cfgPrometheusEnabled), Address: a.config().GetString(cfgPrometheusAddress)} - prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig) - a.services = 
append(a.services, prometheusService) - go prometheusService.Start() -} - -func (a *app) stopServices() { - ctx, cancel := context.WithTimeout(context.Background(), defaultShutdownTimeout) - defer cancel() - - for _, svc := range a.services { - svc.ShutDown(ctx) - } -} - -func (a *app) configureRouter(workerPool *ants.Pool) { - rpcCli := a.initRPCClient(a.ctx) - cnrContractName := a.config().GetString(cfgContractsContainerName) - rpcEndpoint := a.config().GetString(cfgRPCEndpoint) - cnrAddr, err := contractsUtil.ResolveContractHash(cnrContractName, rpcEndpoint) - if err != nil { - a.log.Fatal(logs.FailedToResolveContractHash, zap.Error(err), logs.TagField(logs.TagApp)) - } - cnrClient, err := containerClient.New(containerClient.Config{ - ContractHash: cnrAddr, - Key: a.key, - RPCClient: rpcCli, - }) - if err != nil { - a.log.Fatal(logs.InitContainerContractFailed, zap.Error(err), logs.TagField(logs.TagApp)) - } - a.handle = handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool), a.log), cnrClient, workerPool) - - r := router.New() - r.RedirectTrailingSlash = true - r.NotFound = func(r *fasthttp.RequestCtx) { - handler.ResponseError(r, "Route Not found", fasthttp.StatusNotFound) - } - r.MethodNotAllowed = func(r *fasthttp.RequestCtx) { - handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed) - } - - r.POST("/upload/{cid}", a.addMiddlewares(a.handle.Upload)) - r.OPTIONS("/upload/{cid}", a.addPreflight(a.handle.Preflight)) - a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp)) - r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.DownloadByAddressOrBucketName)) - r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(a.handle.HeadByAddressOrBucketName)) - r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight(a.handle.Preflight)) - a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp)) - r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.DownloadByAttribute)) - 
r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(a.handle.HeadByAttribute)) - r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight(a.handle.Preflight)) - a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp)) - r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadZip)) - r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight)) - r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(a.handle.DownloadTar)) - r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight(a.handle.Preflight)) - a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp)) - - a.webServer.Handler = r.Handler -} - -func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler { - list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{ - a.tracer, - a.logger, - a.canonicalizer, - a.tokenizer, - a.reqNamespace, - a.cors, - } - - for i := len(list) - 1; i >= 0; i-- { - h = list[i](h) - } - - return h -} - -func (a *app) addPreflight(h fasthttp.RequestHandler) fasthttp.RequestHandler { - list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{ - a.tracer, - a.logger, - a.canonicalizer, - a.reqNamespace, - } - - for i := len(list) - 1; i >= 0; i-- { - h = list[i](h) - } - - return h -} - -func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(c *fasthttp.RequestCtx) { - h(c) - code := c.Response.StatusCode() - if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices { - a.handle.SetCORSHeaders(c) - } - } -} - -func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(req *fasthttp.RequestCtx) { - requiredFields := []zap.Field{zap.Uint64("id", req.ID())} - reqCtx := utils.GetContextFromRequest(req) - if traceID := trace.SpanFromContext(reqCtx).SpanContext().TraceID(); traceID.IsValid() { - requiredFields = append(requiredFields, zap.String("trace_id", traceID.String())) - } - log := 
a.log.With(requiredFields...) - - reqCtx = utils.SetReqLog(reqCtx, log) - utils.SetContextToRequest(reqCtx, req) - - log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()), - zap.ByteString("method", req.Method()), - zap.ByteString("path", req.Path()), - zap.ByteString("query", req.QueryArgs().QueryString()), - logs.TagField(logs.TagDatapath)) - h(req) - } -} - -func (a *app) canonicalizer(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(req *fasthttp.RequestCtx) { - // regardless of DisableHeaderNamesNormalizing setting, some headers - // MUST be normalized in order to process execution. They are normalized - // here. - - toAddKeys := make([][]byte, 0, 10) - toAddValues := make([][]byte, 0, 10) - prefix := []byte(utils.UserAttributeHeaderPrefix) - - req.Request.Header.VisitAll(func(k, v []byte) { - if bytes.HasPrefix(k, prefix) { - return - } - toAddKeys = append(toAddKeys, k) - toAddValues = append(toAddValues, v) - }) - - // this is safe to do after all headers were read into header structure - req.Request.Header.EnableNormalizing() - - for i := range toAddKeys { - req.Request.Header.SetBytesKV(toAddKeys[i], toAddValues[i]) - } - - // return normalization setting back - req.Request.Header.DisableNormalizing() - - h(req) - } -} - -func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(req *fasthttp.RequestCtx) { - reqCtx := utils.GetContextFromRequest(req) - appCtx, err := tokens.StoreBearerTokenAppCtx(reqCtx, req) - if err != nil { - log := utils.GetReqLogOrDefault(reqCtx, a.log) - - log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err), logs.TagField(logs.TagDatapath)) - handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest) - return - } - utils.SetContextToRequest(appCtx, req) - h(req) - } -} - -func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(req *fasthttp.RequestCtx) { - appCtx, span := 
utils.StartHTTPServerSpan(a.ctx, req, "REQUEST") - defer func() { - utils.SetHTTPTraceInfo(appCtx, span, req) - span.End() - }() - - appCtx = treepool.SetRequestID(appCtx, strconv.FormatUint(req.ID(), 10)) - - utils.SetContextToRequest(appCtx, req) - h(req) - } -} - -func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler { - return func(req *fasthttp.RequestCtx) { - appCtx := utils.GetContextFromRequest(req) - - nsBytes := req.Request.Header.Peek(a.settings.NamespaceHeader()) - appCtx = middleware.SetNamespace(appCtx, string(nsBytes)) - - utils.SetContextToRequest(appCtx, req) - h(req) - } -} - -func (a *app) AppParams() *handler.AppParams { - return &handler.AppParams{ - Logger: a.log, - FrostFS: frostfs.NewFrostFS(a.pool), - Owner: a.owner, - Resolver: a.resolver, - Cache: a.bucketCache, - CORSCnrID: a.corsCnrID, - CORSCache: cache.NewCORSCache(getCORSCacheOptions(a.config(), a.log)), - } -} - -func (a *app) initServers(ctx context.Context) { - serversInfo := fetchServers(a.config(), a.log) - - a.servers = make([]Server, 0, len(serversInfo)) - for _, serverInfo := range serversInfo { - fields := []zap.Field{ - zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled), - zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile), - } - srv, err := newServer(ctx, serverInfo) - if err != nil { - a.unbindServers = append(a.unbindServers, serverInfo) - a.metrics.MarkUnhealthy(serverInfo.Address) - a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...) - continue - } - a.metrics.MarkHealthy(serverInfo.Address) - - a.servers = append(a.servers, srv) - a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...) 
- } - - if len(a.servers) == 0 { - a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp)) - } -} - -func (a *app) updateServers() error { - serversInfo := fetchServers(a.config(), a.log) - - a.mu.Lock() - defer a.mu.Unlock() - - var found bool - for _, serverInfo := range serversInfo { - ser := a.getServer(serverInfo.Address) - if ser != nil { - if serverInfo.TLS.Enabled { - if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil { - return fmt.Errorf("failed to update tls certs: %w", err) - } - } - found = true - } else if unbind := a.updateUnbindServerInfo(serverInfo); unbind { - found = true - } - } - - if !found { - return fmt.Errorf("invalid servers configuration: no known server found") - } - - return nil -} - -func (a *app) getServers() []Server { - a.mu.RLock() - defer a.mu.RUnlock() - return a.servers -} - -func (a *app) getServer(address string) Server { - for i := range a.servers { - if a.servers[i].Address() == address { - return a.servers[i] - } - } - return nil -} - -func (a *app) updateUnbindServerInfo(info ServerInfo) bool { - for i := range a.unbindServers { - if a.unbindServers[i].Address == info.Address { - a.unbindServers[i] = info - return true - } - } - return false -} - -func (a *app) initTracing(ctx context.Context) { - instanceID := "" - if len(a.servers) > 0 { - instanceID = a.servers[0].Address() - } - cfg := tracing.Config{ - Enabled: a.config().GetBool(cfgTracingEnabled), - Exporter: tracing.Exporter(a.config().GetString(cfgTracingExporter)), - Endpoint: a.config().GetString(cfgTracingEndpoint), - Service: "frostfs-http-gw", - InstanceID: instanceID, - Version: Version, - } - - if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" { - caBytes, err := os.ReadFile(trustedCa) - if err != nil { - a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) - return - } - certPool := x509.NewCertPool() - ok := certPool.AppendCertsFromPEM(caBytes) - if !ok { 
- a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"), - logs.TagField(logs.TagApp)) - return - } - cfg.ServerCaCertPool = certPool - } - - attributes, err := fetchTracingAttributes(a.config()) - if err != nil { - a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) - return - } - cfg.Attributes = attributes - - updated, err := tracing.Setup(ctx, cfg) - if err != nil { - a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp)) - } - if updated { - a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp)) - } -} - -func (a *app) setRuntimeParameters() { - if len(os.Getenv("GOMEMLIMIT")) != 0 { - // default limit < yaml limit < app env limit < GOMEMLIMIT - a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp)) - return - } - - softMemoryLimit := fetchSoftMemoryLimit(a.config()) - previous := debug.SetMemoryLimit(softMemoryLimit) - if softMemoryLimit != previous { - a.log.Info(logs.RuntimeSoftMemoryLimitUpdated, - zap.Int64("new_value", softMemoryLimit), - zap.Int64("old_value", previous), - logs.TagField(logs.TagApp)) - } -} - -func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) { - go func() { - t := time.NewTicker(a.settings.reconnectInterval) - defer t.Stop() - for { - select { - case <-t.C: - if a.tryReconnect(ctx, srv) { - return - } - t.Reset(a.settings.reconnectInterval) - case <-ctx.Done(): - return - } - } - }() -} - -func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool { - a.mu.Lock() - defer a.mu.Unlock() - - a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp)) - var failedServers []ServerInfo - - for _, serverInfo := range a.unbindServers { - srv, err := newServer(ctx, serverInfo) - if err != nil { - a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp)) - failedServers = append(failedServers, serverInfo) - 
a.metrics.MarkUnhealthy(serverInfo.Address) - continue - } - - go func() { - a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp)) - a.metrics.MarkHealthy(serverInfo.Address) - if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) { - a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp)) - a.metrics.MarkUnhealthy(serverInfo.Address) - } - }() - - a.servers = append(a.servers, srv) - a.log.Info(logs.ServerReconnectedSuccessfully, - zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled), - zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile), - logs.TagField(logs.TagApp)) - } - - a.unbindServers = failedServers - - return len(a.unbindServers) == 0 -} - -func (a *app) fetchContainerID(ctx context.Context, cfgKey string) (id *cid.ID, err error) { - cnrID, err := a.resolveContainerID(ctx, cfgKey) - if err != nil { - return nil, err - } - - err = checkContainerExists(ctx, *cnrID, a.pool) - if err != nil { - return nil, err - } - - return cnrID, nil -} - -func (a *app) resolveContainerID(ctx context.Context, cfgKey string) (*cid.ID, error) { - containerString := a.config().GetString(cfgKey) - - id := new(cid.ID) - if err := id.DecodeString(containerString); err != nil { - i := strings.Index(containerString, ".") - if i < 0 { - return nil, fmt.Errorf("invalid container address: %s", containerString) - } - - if id, err = a.resolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil { - return nil, fmt.Errorf("resolve container address %s: %w", containerString, err) - } - } - - return id, nil -} - -func checkContainerExists(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) error { - prm := pool.PrmContainerGet{ - ContainerID: id, - } - - _, err := frostFSPool.GetContainer(ctx, prm) - return err -} diff --git a/cmd/http-gw/integration_test.go b/cmd/http-gw/integration_test.go 
deleted file mode 100644 index 6ab8e99..0000000 --- a/cmd/http-gw/integration_test.go +++ /dev/null @@ -1,686 +0,0 @@ -//go:build integration - -package main - -import ( - "archive/zip" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "mime/multipart" - "net/http" - "os" - "sort" - "strings" - "testing" - "time" - - containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - docker "github.com/docker/docker/api/types/container" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/wallet" - "github.com/stretchr/testify/require" - "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" -) - -type putResponse struct { - CID string `json:"container_id"` - OID string `json:"object_id"` -} - -const ( - testContainerName = "friendly" - testListenAddress = "localhost:8082" - testHost = "http://" + testListenAddress - testCORSContainerName = "cors" -) - -func TestIntegration(t *testing.T) { - rootCtx := context.Background() - aioImage := "git.frostfs.info/truecloudlab/frostfs-aio:" - versions := []string{ - "1.2.7", - "1.3.0", - "1.5.0", - "1.6.5", - } - key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb") - 
require.NoError(t, err) - - file, err := os.CreateTemp("", "wallet") - require.NoError(t, err) - defer os.Remove(file.Name()) - makeTempWallet(t, key, file.Name()) - - var ownerID user.ID - user.IDFromKey(&ownerID, key.PrivateKey.PublicKey) - - for _, version := range versions { - ctx, cancel2 := context.WithCancel(rootCtx) - - aioContainer := createDockerContainer(ctx, t, aioImage+version) - if strings.HasPrefix(version, "1.6") { - registerUser(t, ctx, aioContainer, file.Name()) - } - - // Creating CORS container - clientPool := getPool(ctx, t, key) - _, err = createContainer(ctx, t, clientPool, ownerID, testCORSContainerName) - require.NoError(t, err, version) - - // See the logs from the command execution. - server, cancel := runServer(file.Name()) - CID, err := createContainer(ctx, t, clientPool, ownerID, testContainerName) - require.NoError(t, err, version) - - jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version) - - t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID) }) - t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) }) - t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) }) - t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) }) - t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) }) - t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) }) - t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID) }) - t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) }) - t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, 
ownerID, CID) }) - t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) }) - t.Run("test status codes "+version, func(t *testing.T) { checkStatusCodes(ctx, t, clientPool, ownerID, version) }) - - cancel() - server.Wait() - err = aioContainer.Terminate(ctx) - require.NoError(t, err) - cancel2() - } -} - -func runServer(pathToWallet string) (App, context.CancelFunc) { - cancelCtx, cancel := context.WithCancel(context.Background()) - - v := getDefaultConfig() - v.config().Set(cfgWalletPath, pathToWallet) - v.config().Set(cfgWalletPassphrase, "") - - v.config().Set(cfgContainersCORS, testCORSContainerName+"."+containerv2.SysAttributeZoneDefault) - - application := newApp(cancelCtx, v) - go application.Serve() - - return application, cancel -} - -func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID) { - url := testHost + "/upload/" + CID.String() - makePutRequestAndCheck(ctx, t, p, CID, url) - - url = testHost + "/upload/" + testContainerName - makePutRequestAndCheck(ctx, t, p, CID, url) -} - -func putWithBearerTokenInHeader(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) { - url := testHost + "/upload/" + CID.String() - - request, content, attributes := makePutRequest(t, url) - request.Header.Set("Authorization", "Bearer "+token) - resp, err := http.DefaultClient.Do(request) - require.NoError(t, err) - - checkPutResponse(ctx, t, p, CID, resp, content, attributes) -} - -func putWithBearerTokenInCookie(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) { - url := testHost + "/upload/" + CID.String() - - request, content, attributes := makePutRequest(t, url) - request.AddCookie(&http.Cookie{Name: "Bearer", Value: token}) - resp, err := http.DefaultClient.Do(request) - require.NoError(t, err) - - checkPutResponse(ctx, t, p, CID, resp, content, attributes) -} - -func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, 
url string) { - request, content, attributes := makePutRequest(t, url) - - resp, err := http.DefaultClient.Do(request) - require.NoError(t, err) - - checkPutResponse(ctx, t, p, cnrID, resp, content, attributes) -} - -func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string]string) { - content := "content of file" - keyAttr, valAttr := "User-Attribute", "user value" - attributes := map[string]string{ - object.AttributeFileName: "newFile.txt", - keyAttr: valAttr, - } - - var buff bytes.Buffer - w := multipart.NewWriter(&buff) - fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName]) - require.NoError(t, err) - _, err = io.Copy(fw, bytes.NewBufferString(content)) - require.NoError(t, err) - err = w.Close() - require.NoError(t, err) - - request, err := http.NewRequest(http.MethodPost, url, &buff) - require.NoError(t, err) - request.Header.Set("Content-Type", w.FormDataContentType()) - request.Header.Set("X-Attribute-"+keyAttr, valAttr) - - return request, content, attributes -} - -func checkPutResponse(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, resp *http.Response, content string, attributes map[string]string) { - defer func() { - err := resp.Body.Close() - require.NoError(t, err) - }() - - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - - if resp.StatusCode != http.StatusOK { - fmt.Println(string(body)) - } - require.Equal(t, http.StatusOK, resp.StatusCode) - - addr := &putResponse{} - err = json.Unmarshal(body, addr) - require.NoError(t, err) - - err = cnrID.DecodeString(addr.CID) - require.NoError(t, err) - - var id oid.ID - err = id.DecodeString(addr.OID) - require.NoError(t, err) - - var objectAddress oid.Address - objectAddress.SetContainer(cnrID) - objectAddress.SetObject(id) - - payload := bytes.NewBuffer(nil) - - var prm pool.PrmObjectGet - prm.SetAddress(objectAddress) - - res, err := p.GetObject(ctx, prm) - require.NoError(t, err) - - _, err = io.Copy(payload, res.Payload) - 
require.NoError(t, err) - - require.Equal(t, content, payload.String()) - - for _, attribute := range res.Header.Attributes() { - require.Equal(t, attributes[attribute.Key()], attribute.Value()) - } -} - -func putWithDuplicateKeys(t *testing.T, CID cid.ID) { - url := testHost + "/upload/" + CID.String() - - attr := "X-Attribute-User-Attribute" - content := "content of file" - valOne, valTwo := "first_value", "second_value" - fileName := "newFile.txt" - - var buff bytes.Buffer - w := multipart.NewWriter(&buff) - fw, err := w.CreateFormFile("file", fileName) - require.NoError(t, err) - _, err = io.Copy(fw, bytes.NewBufferString(content)) - require.NoError(t, err) - err = w.Close() - require.NoError(t, err) - - request, err := http.NewRequest(http.MethodPost, url, &buff) - require.NoError(t, err) - request.Header.Set("Content-Type", w.FormDataContentType()) - request.Header.Add(attr, valOne) - request.Header.Add(attr, valTwo) - - resp, err := http.DefaultClient.Do(request) - require.NoError(t, err) - - defer func() { - err := resp.Body.Close() - require.NoError(t, err) - }() - - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.Contains(t, string(body), "key duplication error: "+attr+"\n") - require.Equal(t, http.StatusBadRequest, resp.StatusCode) -} - -func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) { - content := "content of file" - attributes := map[string]string{ - "some-attr": "some-get-value", - } - - id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes) - - resp, err := http.Get(testHost + "/get/" + CID.String() + "/" + id.String()) - require.NoError(t, err) - checkGetResponse(t, resp, content, attributes) - - resp, err = http.Get(testHost + "/get/" + testContainerName + "/" + id.String()) - require.NoError(t, err) - checkGetResponse(t, resp, content, attributes) -} - -func checkGetResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) 
{ - defer func() { - err := resp.Body.Close() - require.NoError(t, err) - }() - - data, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.Equal(t, content, string(data)) - - for k, v := range attributes { - require.Equal(t, v, resp.Header.Get("X-Attribute-"+k)) - } -} - -func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, attributes map[string]string) { - defer func() { - err := resp.Body.Close() - require.NoError(t, err) - }() - - data, err := io.ReadAll(resp.Body) - require.NoError(t, err) - require.Equal(t, content, string(data)) - - for k, v := range attributes { - require.Equal(t, v, resp.Header.Get(k)) - } -} - -func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) { - keyAttr, valAttr := "some-attr", "some-get-by-attr-value" - content := "content of file" - attributes := map[string]string{keyAttr: valAttr} - - id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes) - - expectedAttr := map[string]string{ - "X-Attribute-" + keyAttr: valAttr, - "x-object-id": id.String(), - "x-container-id": CID.String(), - } - - resp, err := http.Get(testHost + "/get_by_attribute/" + CID.String() + "/" + keyAttr + "/" + valAttr) - require.NoError(t, err) - checkGetByAttrResponse(t, resp, content, expectedAttr) - - resp, err = http.Get(testHost + "/get_by_attribute/" + testContainerName + "/" + keyAttr + "/" + valAttr) - require.NoError(t, err) - checkGetByAttrResponse(t, resp, content, expectedAttr) -} - -func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) { - names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"} - contents := []string{"content of file1", "content of file2"} - attributes1 := map[string]string{object.AttributeFilePath: names[0]} - attributes2 := map[string]string{object.AttributeFilePath: names[1]} - - putObject(ctx, t, clientPool, ownerID, CID, contents[0], attributes1) - putObject(ctx, t, 
clientPool, ownerID, CID, contents[1], attributes2) - - baseURL := testHost + "/zip/" + CID.String() - makeZipTest(t, baseURL, names, contents) - - baseURL = testHost + "/zip/" + testContainerName - makeZipTest(t, baseURL, names, contents) -} - -func makeZipTest(t *testing.T, baseURL string, names, contents []string) { - url := baseURL + "/zipfolder" - makeZipRequest(t, url, names, contents) - - // check nested folder - url = baseURL + "/zipfolder/dir" - makeZipRequest(t, url, names[:1], contents[:1]) -} - -func makeZipRequest(t *testing.T, url string, names, contents []string) { - resp, err := http.Get(url) - require.NoError(t, err) - defer func() { - err := resp.Body.Close() - require.NoError(t, err) - }() - - data, err := io.ReadAll(resp.Body) - require.NoError(t, err) - checkZip(t, data, int64(len(data)), names, contents) -} - -func checkZip(t *testing.T, data []byte, length int64, names, contents []string) { - readerAt := bytes.NewReader(data) - - zipReader, err := zip.NewReader(readerAt, length) - require.NoError(t, err) - - require.Equal(t, len(names), len(zipReader.File)) - - sort.Slice(zipReader.File, func(i, j int) bool { - return zipReader.File[i].FileHeader.Name < zipReader.File[j].FileHeader.Name - }) - - for i, f := range zipReader.File { - require.Equal(t, names[i], f.FileHeader.Name) - - rc, err := f.Open() - require.NoError(t, err) - - all, err := io.ReadAll(rc) - require.NoError(t, err) - require.Equal(t, contents[i], string(all)) - - err = rc.Close() - require.NoError(t, err) - } -} - -func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) { - content := "content of file" - attributes := map[string]string{ - "some-attr": "some-get-value", - } - - id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes) - - req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil) - require.NoError(t, err) - req.Header.Set(defaultNamespaceHeader, "") - - 
resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - checkGetResponse(t, resp, content, attributes) - - req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil) - require.NoError(t, err) - req.Header.Set(defaultNamespaceHeader, "root") - - resp, err = http.DefaultClient.Do(req) - require.NoError(t, err) - checkGetResponse(t, resp, content, attributes) - - req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil) - require.NoError(t, err) - req.Header.Set(defaultNamespaceHeader, "root2") - - resp, err = http.DefaultClient.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, resp.StatusCode) -} - -func checkStatusCodes(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) { - cli := http.Client{Timeout: 30 * time.Second} - - t.Run("container not found by name", func(t *testing.T) { - resp, err := cli.Get(testHost + "/get/unknown/object") - require.NoError(t, err) - require.Equal(t, http.StatusNotFound, resp.StatusCode) - requireBodyContains(t, resp, "container not found") - }) - - t.Run("container not found by cid", func(t *testing.T) { - cnrIDTest := cidtest.ID() - resp, err := cli.Get(testHost + "/get/" + cnrIDTest.EncodeToString() + "/object") - require.NoError(t, err) - requireBodyContains(t, resp, "container not found") - require.Equal(t, http.StatusNotFound, resp.StatusCode) - }) - - t.Run("object not found in storage", func(t *testing.T) { - resp, err := cli.Get(testHost + "/get_by_attribute/" + testContainerName + "/FilePath/object2") - require.NoError(t, err) - requireBodyContains(t, resp, "object not found") - require.Equal(t, http.StatusNotFound, resp.StatusCode) - }) - - t.Run("access denied", func(t *testing.T) { - basicACL := acl.Private - var recs []*eacl.Record - if version == "1.2.7" { - basicACL = acl.PublicRWExtended - rec := eacl.NewRecord() - rec.SetAction(eacl.ActionDeny) - 
rec.SetOperation(eacl.OperationGet) - recs = append(recs, rec) - } - - cnrID, err := createContainerBase(ctx, t, clientPool, ownerID, basicACL, "") - require.NoError(t, err) - - key, err := keys.NewPrivateKey() - require.NoError(t, err) - jsonToken, _ := makeBearerTokens(t, key, ownerID, version, recs...) - - t.Run("get", func(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, testHost+"/get/"+cnrID.EncodeToString()+"/object", nil) - require.NoError(t, err) - request.Header.Set("Authorization", "Bearer "+jsonToken) - - resp, err := cli.Do(request) - require.NoError(t, err) - requireBodyContains(t, resp, "access denied") - require.Equal(t, http.StatusForbidden, resp.StatusCode) - }) - - t.Run("upload", func(t *testing.T) { - request, _, _ := makePutRequest(t, testHost+"/upload/"+cnrID.EncodeToString()) - request.Header.Set("Authorization", "Bearer "+jsonToken) - - resp, err := cli.Do(request) - require.NoError(t, err) - requireBodyContains(t, resp, "access denied") - require.Equal(t, http.StatusForbidden, resp.StatusCode) - }) - }) -} - -func requireBodyContains(t *testing.T, resp *http.Response, msg string) { - data, err := io.ReadAll(resp.Body) - require.NoError(t, err) - defer resp.Body.Close() - - require.Contains(t, strings.ToLower(string(data)), strings.ToLower(msg)) -} - -func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container { - req := testcontainers.ContainerRequest{ - Image: image, - WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(2 * time.Minute), - Name: "aio", - Hostname: "aio", - HostConfigModifier: func(hc *docker.HostConfig) { - hc.NetworkMode = "host" - }, - } - aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ - ContainerRequest: req, - Started: true, - }) - require.NoError(t, err) - - return aioC -} - -func getDefaultConfig() *appCfg { - v := settings() - v.config().SetDefault(cfgPeers+".0.address", "localhost:8080") - 
v.config().SetDefault(cfgPeers+".0.weight", 1) - v.config().SetDefault(cfgPeers+".0.priority", 1) - - v.config().SetDefault(cfgRPCEndpoint, "http://localhost:30333") - v.config().SetDefault("server.0.address", testListenAddress) - - return v -} - -func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool { - var prm pool.InitParameters - prm.SetKey(&key.PrivateKey) - prm.SetNodeDialTimeout(5 * time.Second) - prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1)) - - clientPool, err := pool.NewPool(prm) - require.NoError(t, err) - - err = clientPool.Dial(ctx) - require.NoError(t, err) - return clientPool -} - -func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, name string) (cid.ID, error) { - return createContainerBase(ctx, t, clientPool, ownerID, acl.PublicRWExtended, name) -} - -func createContainerBase(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, basicACL acl.Basic, name string) (cid.ID, error) { - var policy netmap.PlacementPolicy - err := policy.DecodeString("REP 1") - require.NoError(t, err) - - var cnr container.Container - cnr.Init() - cnr.SetPlacementPolicy(policy) - cnr.SetBasicACL(basicACL) - cnr.SetOwner(ownerID) - - container.SetCreationTime(&cnr, time.Now()) - - if name != "" { - var domain container.Domain - domain.SetName(name) - - cnr.SetAttribute(containerv2.SysAttributeName, domain.Name()) - cnr.SetAttribute(containerv2.SysAttributeZone, domain.Zone()) - } - - prm := pool.PrmContainerPut{ - ClientParams: client.PrmContainerPut{ - Container: &cnr, - }, - WaitParams: &pool.WaitParams{ - Timeout: 15 * time.Second, - PollInterval: 3 * time.Second, - }, - } - - CID, err := clientPool.PutContainer(ctx, prm) - if err != nil { - return cid.ID{}, err - } - fmt.Println(CID.String()) - - return CID, err -} - -func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID 
{ - obj := object.New() - obj.SetContainerID(CID) - obj.SetOwnerID(ownerID) - - var attrs []object.Attribute - for key, val := range attributes { - attr := object.NewAttribute() - attr.SetKey(key) - attr.SetValue(val) - attrs = append(attrs, *attr) - } - obj.SetAttributes(attrs...) - - var prm pool.PrmObjectPut - prm.SetHeader(*obj) - prm.SetPayload(bytes.NewBufferString(content)) - - id, err := clientPool.PutObject(ctx, prm) - require.NoError(t, err) - - return id.ObjectID -} - -func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers.Container, pathToWallet string) { - err := aioContainer.CopyFileToContainer(ctx, pathToWallet, "/usr/wallet.json", 644) - require.NoError(t, err) - - _, _, err = aioContainer.Exec(ctx, []string{ - "/usr/bin/frostfs-s3-authmate", "register-user", - "--wallet", "/usr/wallet.json", - "--rpc-endpoint", "http://localhost:30333", - "--contract-wallet", "/config/s3-gw-wallet.json"}) - require.NoError(t, err) -} - -func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string, records ...*eacl.Record) (jsonTokenBase64, binaryTokenBase64 string) { - tkn := new(bearer.Token) - tkn.ForUser(ownerID) - tkn.SetExp(10000) - - if version == "1.2.7" { - table := eacl.NewTable() - for i := range records { - table.AddRecord(records[i]) - } - - tkn.SetEACLTable(*table) - } else { - tkn.SetImpersonate(true) - } - - err := tkn.Sign(key.PrivateKey) - require.NoError(t, err) - - jsonToken, err := tkn.MarshalJSON() - require.NoError(t, err) - - jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken) - binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal()) - - require.NotEmpty(t, jsonTokenBase64) - require.NotEmpty(t, binaryTokenBase64) - - return -} - -func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) { - w, err := wallet.NewWallet(path) - require.NoError(t, err) - - acc := wallet.NewAccountFromPrivateKey(key) - err = acc.Encrypt("", w.Scrypt) - require.NoError(t, 
err) - - w.AddAccount(acc) - - err = w.Save() - require.NoError(t, err) -} diff --git a/cmd/http-gw/logger.go b/cmd/http-gw/logger.go deleted file mode 100644 index 196cff3..0000000 --- a/cmd/http-gw/logger.go +++ /dev/null @@ -1,175 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/zapjournald" - "github.com/spf13/viper" - "github.com/ssgreg/journald" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -func getLogLevel(v *viper.Viper) (zapcore.Level, error) { - var lvl zapcore.Level - lvlStr := v.GetString(cfgLoggerLevel) - err := lvl.UnmarshalText([]byte(lvlStr)) - if err != nil { - return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+ - "value should be one of %v", lvlStr, err, [...]zapcore.Level{ - zapcore.DebugLevel, - zapcore.InfoLevel, - zapcore.WarnLevel, - zapcore.ErrorLevel, - zapcore.DPanicLevel, - zapcore.PanicLevel, - zapcore.FatalLevel, - }) - } - return lvl, nil -} - -var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil) - -type zapCoreTagFilterWrapper struct { - core zapcore.Core - settings TagFilterSettings - extra []zap.Field -} - -type TagFilterSettings interface { - LevelEnabled(tag string, lvl zapcore.Level) bool - DefaultEnabled(lvl zapcore.Level) bool -} - -func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool { - return c.core.Enabled(level) -} - -func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core { - return &zapCoreTagFilterWrapper{ - core: c.core.With(fields), - settings: c.settings, - extra: append(c.extra, fields...), - } -} - -func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry { - if c.core.Enabled(entry.Level) { - return checked.AddCore(entry, c) - } - return checked -} - -func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error { - if c.shouldSkip(entry, fields, c.extra) { - return nil 
- } - - return c.core.Write(entry, fields) -} - -func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field, extra []zap.Field) bool { - for _, field := range fields { - if field.Key == logs.TagFieldName && field.Type == zapcore.StringType { - return !c.settings.LevelEnabled(field.String, entry.Level) - } - } - for _, field := range extra { - if field.Key == logs.TagFieldName && field.Type == zapcore.StringType { - return !c.settings.LevelEnabled(field.String, entry.Level) - } - } - - return !c.settings.DefaultEnabled(entry.Level) -} - -func (c *zapCoreTagFilterWrapper) Sync() error { - return c.core.Sync() -} - -func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core { - core = &zapCoreTagFilterWrapper{ - core: core, - settings: tagSetting, - } - - if v.GetBool(cfgLoggerSamplingEnabled) { - core = zapcore.NewSamplerWithOptions(core, - v.GetDuration(cfgLoggerSamplingInterval), - v.GetInt(cfgLoggerSamplingInitial), - v.GetInt(cfgLoggerSamplingThereafter), - zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) { - if dec&zapcore.LogDropped > 0 { - loggerSettings.DroppedLogsInc() - } - })) - } - - return core -} - -func newLogEncoder() zapcore.Encoder { - c := zap.NewProductionEncoderConfig() - c.EncodeTime = zapcore.ISO8601TimeEncoder - - return zapcore.NewConsoleEncoder(c) -} - -// newStdoutLogger constructs a zap.Logger instance for current application. -// Panics on failure. -// -// Logger is built from zap's production logging configuration with: -// - parameterized level (debug by default) -// - console encoding -// - ISO8601 time encoding -// -// Logger records a stack trace for all messages at or above fatal level. -// -// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace. 
-func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger { - stdout := zapcore.AddSync(os.Stdout) - - consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl) - consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting) - - return &Logger{ - logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))), - } -} - -func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger { - encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields) - - core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields) - coreWithContext := core.With([]zapcore.Field{ - zapjournald.SyslogFacility(zapjournald.LogDaemon), - zapjournald.SyslogIdentifier(), - zapjournald.SyslogPid(), - }) - - coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting) - - return &Logger{ - logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))), - } -} - -type LoggerAppSettings interface { - DroppedLogsInc() -} - -func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger { - dest := v.GetString(cfgLoggerDestination) - - switch dest { - case destinationStdout: - return newStdoutLogger(v, lvl, loggerSettings, tagSettings) - case destinationJournald: - return newJournaldLogger(v, lvl, loggerSettings, tagSettings) - default: - panic(fmt.Sprintf("wrong destination for logger: %s", dest)) - } -} diff --git a/cmd/http-gw/main.go b/cmd/http-gw/main.go deleted file mode 100644 index 002f190..0000000 --- a/cmd/http-gw/main.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -import ( - "context" - "os/signal" - "syscall" -) - -func main() { - globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, 
syscall.SIGTERM) - cfg := settings() - - application := newApp(globalContext, cfg) - go application.Serve() - application.Wait() -} diff --git a/cmd/http-gw/misc.go b/cmd/http-gw/misc.go deleted file mode 100644 index 1edee97..0000000 --- a/cmd/http-gw/misc.go +++ /dev/null @@ -1,10 +0,0 @@ -package main - -// Prefix is a prefix used for environment variables containing gateway -// configuration. -const Prefix = "HTTP_GW" - -var ( - // Version is the gateway version. - Version = "dev" -) diff --git a/cmd/http-gw/server.go b/cmd/http-gw/server.go deleted file mode 100644 index f8a20d9..0000000 --- a/cmd/http-gw/server.go +++ /dev/null @@ -1,123 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "sync" -) - -type ( - ServerInfo struct { - Address string - TLS ServerTLSInfo - } - - ServerTLSInfo struct { - Enabled bool - CertFile string - KeyFile string - } - - Server interface { - Address() string - Listener() net.Listener - UpdateCert(certFile, keyFile string) error - } - - server struct { - address string - listener net.Listener - tlsProvider *certProvider - } - - certProvider struct { - Enabled bool - - mu sync.RWMutex - certPath string - keyPath string - cert *tls.Certificate - } -) - -func (s *server) Address() string { - return s.address -} - -func (s *server) Listener() net.Listener { - return s.listener -} - -func (s *server) UpdateCert(certFile, keyFile string) error { - return s.tlsProvider.UpdateCert(certFile, keyFile) -} - -func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) { - var lic net.ListenConfig - ln, err := lic.Listen(ctx, "tcp", serverInfo.Address) - if err != nil { - return nil, fmt.Errorf("could not prepare listener: %w", err) - } - - tlsProvider := &certProvider{ - Enabled: serverInfo.TLS.Enabled, - } - - if serverInfo.TLS.Enabled { - if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil { - lnErr := ln.Close() - return nil, 
fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err) - } - - ln = tls.NewListener(ln, &tls.Config{ - GetCertificate: tlsProvider.GetCertificate, - }) - } - - return &server{ - address: serverInfo.Address, - listener: ln, - tlsProvider: tlsProvider, - }, nil -} - -func (p *certProvider) GetCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) { - if !p.Enabled { - return nil, errors.New("cert provider: disabled") - } - - p.mu.RLock() - defer p.mu.RUnlock() - return p.cert, nil -} - -func (p *certProvider) UpdateCert(certPath, keyPath string) error { - if !p.Enabled { - return fmt.Errorf("tls disabled") - } - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return fmt.Errorf("cannot load TLS key pair from certFile '%s' and keyFile '%s': %w", certPath, keyPath, err) - } - - p.mu.Lock() - p.certPath = certPath - p.keyPath = keyPath - p.cert = &cert - p.mu.Unlock() - return nil -} - -func (p *certProvider) FilePaths() (string, string) { - if !p.Enabled { - return "", "" - } - - p.mu.RLock() - defer p.mu.RUnlock() - return p.certPath, p.keyPath -} diff --git a/cmd/http-gw/server_test.go b/cmd/http-gw/server_test.go deleted file mode 100644 index 6f92f17..0000000 --- a/cmd/http-gw/server_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package main - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "net" - "net/http" - "os" - "path" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/valyala/fasthttp" -) - -const ( - expHeaderKey = "Foo" - expHeaderValue = "Bar" -) - -func TestHTTP_TLS(t *testing.T) { - ctx := context.Background() - certPath, keyPath := prepareTestCerts(t) - - tlsListener, err := newServer(ctx, ServerInfo{ - Address: ":0", - TLS: ServerTLSInfo{ - Enabled: true, - CertFile: certPath, - KeyFile: keyPath, - }, - }) - require.NoError(t, err) - port := 
tlsListener.Listener().Addr().(*net.TCPAddr).Port - addr := fmt.Sprintf("https://localhost:%d", port) - - go func() { - _ = fasthttp.Serve(tlsListener.Listener(), testHandler) - }() - - tlsClientConfig := &tls.Config{ - InsecureSkipVerify: true, - } - - cliHTTP := http.Client{Transport: &http.Transport{}} - cliHTTPS := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}} - - req, err := http.NewRequest("GET", addr, nil) - require.NoError(t, err) - req.Header[expHeaderKey] = []string{expHeaderValue} - - resp, err := cliHTTPS.Do(req) - require.NoError(t, err) - require.Equal(t, http.StatusOK, resp.StatusCode) - - _, err = cliHTTP.Do(req) - require.ErrorContains(t, err, "failed to verify certificate") -} - -func testHandler(ctx *fasthttp.RequestCtx) { - hdr := ctx.Request.Header.Peek(expHeaderKey) - if len(hdr) == 0 || string(hdr) != expHeaderValue { - ctx.Response.SetStatusCode(http.StatusBadRequest) - } else { - ctx.Response.SetStatusCode(http.StatusOK) - } -} - -func prepareTestCerts(t *testing.T) (certPath, keyPath string) { - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - template := x509.Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{CommonName: "localhost"}, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * 365), - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) - require.NoError(t, err) - - dir := t.TempDir() - certPath = path.Join(dir, "cert.pem") - keyPath = path.Join(dir, "key.pem") - - certFile, err := os.Create(certPath) - require.NoError(t, err) - defer certFile.Close() - - keyFile, err := os.Create(keyPath) - require.NoError(t, err) - defer keyFile.Close() - - err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - require.NoError(t, err) - - err = pem.Encode(keyFile, 
&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}) - require.NoError(t, err) - - return certPath, keyPath -} diff --git a/cmd/http-gw/settings.go b/cmd/http-gw/settings.go deleted file mode 100644 index 4071969..0000000 --- a/cmd/http-gw/settings.go +++ /dev/null @@ -1,900 +0,0 @@ -package main - -import ( - "context" - "encoding/hex" - "fmt" - "io" - "math" - "os" - "path" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" - grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc" - qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree" - "github.com/spf13/pflag" - "github.com/spf13/viper" - "github.com/valyala/fasthttp" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -const ( - destinationStdout = "stdout" - destinationJournald = "journald" -) - -const ( - defaultRebalanceTimer = 60 * time.Second - defaultRequestTimeout = 15 * time.Second - defaultConnectTimeout = 10 * time.Second - defaultStreamTimeout = 10 * time.Second - - defaultLoggerSamplerInterval = 1 * time.Second - - defaultShutdownTimeout = 15 * time.Second - - defaultPoolErrorThreshold uint32 = 100 - - defaultSoftMemoryLimit = math.MaxInt64 - - defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb - - defaultNamespaceHeader = "X-Frostfs-Namespace" - - defaultReconnectInterval = time.Minute - - defaultCORSMaxAge = 600 // seconds - - defaultMultinetFallbackDelay = 300 * time.Millisecond - 
- defaultContainerContractName = "container.frostfs" - - cfgServer = "server" - cfgTLSEnabled = "tls.enabled" - cfgTLSCertFile = "tls.cert_file" - cfgTLSKeyFile = "tls.key_file" - - cfgReconnectInterval = "reconnect_interval" - - cfgIndexPageEnabled = "index_page.enabled" - cfgIndexPageTemplatePath = "index_page.template_path" - - cfgWorkerPoolSize = "worker_pool_size" - - // Web. - cfgWebReadBufferSize = "web.read_buffer_size" - cfgWebWriteBufferSize = "web.write_buffer_size" - cfgWebReadTimeout = "web.read_timeout" - cfgWebWriteTimeout = "web.write_timeout" - cfgWebStreamRequestBody = "web.stream_request_body" - cfgWebMaxRequestBodySize = "web.max_request_body_size" - - // Metrics / Profiler. - cfgPrometheusEnabled = "prometheus.enabled" - cfgPrometheusAddress = "prometheus.address" - cfgPprofEnabled = "pprof.enabled" - cfgPprofAddress = "pprof.address" - - // Tracing ... - cfgTracingEnabled = "tracing.enabled" - cfgTracingExporter = "tracing.exporter" - cfgTracingEndpoint = "tracing.endpoint" - cfgTracingTrustedCa = "tracing.trusted_ca" - cfgTracingAttributes = "tracing.attributes" - - // Pool config. - cfgConTimeout = "connect_timeout" - cfgStreamTimeout = "stream_timeout" - cfgReqTimeout = "request_timeout" - cfgRebalance = "rebalance_timer" - cfgPoolErrorThreshold = "pool_error_threshold" - - // Logger. - cfgLoggerLevel = "logger.level" - cfgLoggerDestination = "logger.destination" - - cfgLoggerSamplingEnabled = "logger.sampling.enabled" - cfgLoggerSamplingInitial = "logger.sampling.initial" - cfgLoggerSamplingThereafter = "logger.sampling.thereafter" - cfgLoggerSamplingInterval = "logger.sampling.interval" - - cfgLoggerTags = "logger.tags" - cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d." - cfgLoggerTagsNameTmpl = cfgLoggerTagsPrefixTmpl + "names" - cfgLoggerTagsLevelTmpl = cfgLoggerTagsPrefixTmpl + "level" - - // Wallet. - cfgWalletPassphrase = "wallet.passphrase" - cfgWalletPath = "wallet.path" - cfgWalletAddress = "wallet.address" - - // Uploader Header. 
- cfgUploaderHeaderEnableDefaultTimestamp = "upload_header.use_default_timestamp" - - // Peers. - cfgPeers = "peers" - - // NeoGo. - cfgRPCEndpoint = "rpc_endpoint" - - // Resolving. - cfgResolveOrder = "resolve_order" - - // Zip compression. - // - // Deprecated: Use cfgArchiveCompression instead. - cfgZipCompression = "zip.compression" - - // Archive compression. - cfgArchiveCompression = "archive.compression" - - // Runtime. - cfgSoftMemoryLimit = "runtime.soft_memory_limit" - - // Enabling client side object preparing for PUT operations. - cfgClientCut = "frostfs.client_cut" - // Sets max buffer size for read payload in put operations. - cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put" - // Configuration of parameters of requests to FrostFS. - // Sets max attempt to make successful tree request. - cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts" - - // Caching. - cfgBucketsCacheLifetime = "cache.buckets.lifetime" - cfgBucketsCacheSize = "cache.buckets.size" - cfgNetmapCacheLifetime = "cache.netmap.lifetime" - cfgCORSCacheLifetime = "cache.cors.lifetime" - cfgCORSCacheSize = "cache.cors.size" - - // Bucket resolving options. - cfgResolveNamespaceHeader = "resolve_bucket.namespace_header" - cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces" - - // CORS. - cfgCORS = "cors" - cfgCORSAllowOrigin = cfgCORS + ".allow_origin" - cfgCORSAllowMethods = cfgCORS + ".allow_methods" - cfgCORSAllowHeaders = cfgCORS + ".allow_headers" - cfgCORSExposeHeaders = cfgCORS + ".expose_headers" - cfgCORSAllowCredentials = cfgCORS + ".allow_credentials" - cfgCORSMaxAge = cfgCORS + ".max_age" - - // Multinet. - cfgMultinetEnabled = "multinet.enabled" - cfgMultinetBalancer = "multinet.balancer" - cfgMultinetRestrict = "multinet.restrict" - cfgMultinetFallbackDelay = "multinet.fallback_delay" - cfgMultinetSubnets = "multinet.subnets" - - // Feature. 
- cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback" - cfgFeaturesEnableFilepathSlashFallback = "features.enable_filepath_slash_fallback" - cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support" - - // Containers. - cfgContainersCORS = "containers.cors" - - // Command line args. - cmdHelp = "help" - cmdVersion = "version" - cmdPprof = "pprof" - cmdMetrics = "metrics" - cmdWallet = "wallet" - cmdAddress = "address" - cmdConfig = "config" - cmdConfigDir = "config-dir" - cmdListenAddress = "listen_address" - - // Contracts. - cfgContractsContainerName = "contracts.container.name" -) - -var ignore = map[string]struct{}{ - cfgPeers: {}, - cmdHelp: {}, - cmdVersion: {}, -} - -var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree} - -type Logger struct { - logger *zap.Logger -} - -type appCfg struct { - flags *pflag.FlagSet - - mu sync.RWMutex - settings *viper.Viper -} - -func (a *appCfg) reload() error { - old := a.config() - - v, err := newViper(a.flags) - if err != nil { - return err - } - - if old.IsSet(cmdConfig) { - v.Set(cmdConfig, old.Get(cmdConfig)) - } - if old.IsSet(cmdConfigDir) { - v.Set(cmdConfigDir, old.Get(cmdConfigDir)) - } - - if err = readInConfig(v); err != nil { - return err - } - - a.setConfig(v) - return nil -} - -func (a *appCfg) config() *viper.Viper { - a.mu.RLock() - defer a.mu.RUnlock() - - return a.settings -} - -func (a *appCfg) setConfig(v *viper.Viper) { - a.mu.Lock() - a.settings = v - a.mu.Unlock() -} - -func newViper(flags *pflag.FlagSet) (*viper.Viper, error) { - v := viper.New() - - v.AutomaticEnv() - v.SetEnvPrefix(Prefix) - v.AllowEmptyEnv(true) - v.SetConfigType("yaml") - v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) - - if err := bindFlags(v, flags); err != nil { - return nil, err - } - - setDefaults(v, flags) - - if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) { - 
v.Set(cfgServer+".0."+cfgTLSEnabled, true) - } - - return v, nil -} - -func settings() *appCfg { - // flags setup: - flags := pflag.NewFlagSet("commandline", pflag.ExitOnError) - flags.SetOutput(os.Stdout) - flags.SortFlags = false - - flags.Bool(cmdPprof, false, "enable pprof") - flags.Bool(cmdMetrics, false, "enable prometheus") - - help := flags.BoolP(cmdHelp, "h", false, "show help") - version := flags.BoolP(cmdVersion, "v", false, "show version") - - flags.StringP(cmdWallet, "w", "", `path to the wallet`) - flags.String(cmdAddress, "", `address of wallet account`) - flags.StringArray(cmdConfig, nil, "config paths") - flags.String(cmdConfigDir, "", "config dir path") - flags.Duration(cfgConTimeout, defaultConnectTimeout, "gRPC connect timeout") - flags.Duration(cfgStreamTimeout, defaultStreamTimeout, "gRPC individual message timeout") - flags.Duration(cfgReqTimeout, defaultRequestTimeout, "gRPC request timeout") - flags.Duration(cfgRebalance, defaultRebalanceTimer, "gRPC connection rebalance timer") - - flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen") - flags.String(cfgTLSCertFile, "", "TLS certificate path") - flags.String(cfgTLSKeyFile, "", "TLS key path") - flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes") - - flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order") - - if err := flags.Parse(os.Args); err != nil { - panic(err) - } - - v, err := newViper(flags) - if err != nil { - panic(fmt.Errorf("bind flags: %w", err)) - } - - switch { - case help != nil && *help: - fmt.Printf("FrostFS HTTP Gateway %s\n", Version) - flags.PrintDefaults() - - fmt.Println() - fmt.Println("Default environments:") - fmt.Println() - keys := v.AllKeys() - sort.Strings(keys) - - for i := range keys { - if _, ok := ignore[keys[i]]; ok { - continue - } - - defaultValue := v.GetString(keys[i]) - if len(defaultValue) == 0 { - continue - } - - k := strings.Replace(keys[i], ".", "_", -1) - 
fmt.Printf("%s_%s = %s\n", Prefix, strings.ToUpper(k), defaultValue) - } - - fmt.Println() - fmt.Println("Peers preset:") - fmt.Println() - - fmt.Printf("%s_%s_[N]_ADDRESS = string\n", Prefix, strings.ToUpper(cfgPeers)) - fmt.Printf("%s_%s_[N]_WEIGHT = float\n", Prefix, strings.ToUpper(cfgPeers)) - - os.Exit(0) - case version != nil && *version: - fmt.Printf("FrostFS HTTP Gateway\nVersion: %s\nGoVersion: %s\n", Version, runtime.Version()) - os.Exit(0) - } - - if err := readInConfig(v); err != nil { - panic(err) - } - - return &appCfg{ - flags: flags, - settings: v, - } -} - -func setDefaults(v *viper.Viper, flags *pflag.FlagSet) { - // set defaults: - - // logger: - v.SetDefault(cfgLoggerLevel, "debug") - v.SetDefault(cfgLoggerDestination, "stdout") - v.SetDefault(cfgLoggerSamplingEnabled, false) - v.SetDefault(cfgLoggerSamplingThereafter, 100) - v.SetDefault(cfgLoggerSamplingInitial, 100) - v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval) - - // pool: - v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold) - - // frostfs: - v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut) - - // web-server: - v.SetDefault(cfgWebReadBufferSize, 4096) - v.SetDefault(cfgWebWriteBufferSize, 4096) - v.SetDefault(cfgWebReadTimeout, time.Minute*10) - v.SetDefault(cfgWebWriteTimeout, time.Minute*5) - v.SetDefault(cfgWebStreamRequestBody, true) - v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize) - - v.SetDefault(cfgWorkerPoolSize, 1000) - // upload header - v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false) - - // metrics - v.SetDefault(cfgPprofAddress, "localhost:8083") - v.SetDefault(cfgPrometheusAddress, "localhost:8084") - - // resolve bucket - v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader) - v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"}) - - // multinet - v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay) - - // contracts - 
v.SetDefault(cfgContractsContainerName, defaultContainerContractName) - - if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil { - v.SetDefault(cfgResolveOrder, resolveMethods) - } - - if peers, err := flags.GetStringArray(cfgPeers); err == nil { - for i := range peers { - v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i]) - v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1) - v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1) - } - } -} - -func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error { - // Binding flags - if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil { - return err - } - if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil { - return err - } - - if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil { - return err - } - - if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil { - return err - } - - if err := v.BindPFlags(flags); err != nil { - return err - } - - if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil { - return err - } - if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil { - return err - } - if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil { - return err - } - - return nil -} - -func readInConfig(v *viper.Viper) error { - if v.IsSet(cmdConfig) { - if err := readConfig(v); err != nil { - return err - } - } - - if v.IsSet(cmdConfigDir) { - if err := readConfigDir(v); err != nil { - return err - } - } - - return nil -} - -func readConfigDir(v *viper.Viper) error { - cfgSubConfigDir := v.GetString(cmdConfigDir) - entries, err := os.ReadDir(cfgSubConfigDir) - if err != nil { - return err - } - - for _, entry := range entries { - if entry.IsDir() { - continue - } - ext := path.Ext(entry.Name()) - if ext != ".yaml" && ext != ".yml" { - continue - } - - if err = 
mergeConfig(v, path.Join(cfgSubConfigDir, entry.Name())); err != nil { - return err - } - } - - return nil -} - -func readConfig(v *viper.Viper) error { - for _, fileName := range v.GetStringSlice(cmdConfig) { - if err := mergeConfig(v, fileName); err != nil { - return err - } - } - return nil -} - -func mergeConfig(v *viper.Viper, fileName string) error { - cfgFile, err := os.Open(fileName) - if err != nil { - return err - } - - defer func() { - if errClose := cfgFile.Close(); errClose != nil { - panic(errClose) - } - }() - - return v.MergeConfig(cfgFile) -} - -func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) { - res := make(map[string]zapcore.Level) - - for i := 0; ; i++ { - tagNames := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i)) - if tagNames == "" { - break - } - - lvl := defaultLvl - level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i)) - if level != "" { - if err := lvl.Set(level); err != nil { - return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level) - } - } - - for _, tagName := range strings.Split(tagNames, ",") { - tagName = strings.TrimSpace(tagName) - if len(tagName) != 0 { - res[tagName] = lvl - } - } - } - - if len(res) == 0 && !v.IsSet(cfgLoggerTags) { - for _, tag := range defaultTags { - res[tag] = defaultLvl - } - } - - return res, nil -} - -func fetchReconnectInterval(cfg *viper.Viper) time.Duration { - reconnect := cfg.GetDuration(cfgReconnectInterval) - if reconnect <= 0 { - reconnect = defaultReconnectInterval - } - - return reconnect -} - -func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) { - if !v.GetBool(cfgIndexPageEnabled) { - return "", false - } - reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath)) - if err != nil { - l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp)) - return "", true - } - - tmpl, err := io.ReadAll(reader) - if err != nil { - 
l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp)) - return "", true - } - - l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp)) - return string(tmpl), true -} - -func fetchDefaultNamespaces(v *viper.Viper) []string { - namespaces := v.GetStringSlice(cfgResolveDefaultNamespaces) - - for i := range namespaces { // to be set namespaces in env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"` - namespaces[i] = strings.Trim(namespaces[i], "\"") - } - - return namespaces -} - -func fetchCORSMaxAge(v *viper.Viper) int { - maxAge := v.GetInt(cfgCORSMaxAge) - if maxAge <= 0 { - maxAge = defaultCORSMaxAge - } - - return maxAge -} - -func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo { - var servers []ServerInfo - seen := make(map[string]struct{}) - - for i := 0; ; i++ { - key := cfgServer + "." + strconv.Itoa(i) + "." - - var serverInfo ServerInfo - serverInfo.Address = v.GetString(key + "address") - serverInfo.TLS.Enabled = v.GetBool(key + cfgTLSEnabled) - serverInfo.TLS.KeyFile = v.GetString(key + cfgTLSKeyFile) - serverInfo.TLS.CertFile = v.GetString(key + cfgTLSCertFile) - - if serverInfo.Address == "" { - break - } - - if _, ok := seen[serverInfo.Address]; ok { - log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp)) - continue - } - seen[serverInfo.Address] = struct{}{} - servers = append(servers, serverInfo) - } - - return servers -} - -func (a *app) initPools(ctx context.Context) { - key, err := getFrostFSKey(a.config(), a.log) - if err != nil { - a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp)) - } - - var prm pool.InitParameters - var prmTree treepool.InitParameters - - prm.SetKey(&key.PrivateKey) - prmTree.SetKey(key) - a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())), - logs.TagField(logs.TagApp)) - - for _, peer := range 
fetchPeers(a.log, a.config()) { - prm.AddNode(peer) - prmTree.AddNode(peer) - } - - connTimeout := a.config().GetDuration(cfgConTimeout) - if connTimeout <= 0 { - connTimeout = defaultConnectTimeout - } - prm.SetNodeDialTimeout(connTimeout) - prmTree.SetNodeDialTimeout(connTimeout) - - streamTimeout := a.config().GetDuration(cfgStreamTimeout) - if streamTimeout <= 0 { - streamTimeout = defaultStreamTimeout - } - prm.SetNodeStreamTimeout(streamTimeout) - prmTree.SetNodeStreamTimeout(streamTimeout) - - healthCheckTimeout := a.config().GetDuration(cfgReqTimeout) - if healthCheckTimeout <= 0 { - healthCheckTimeout = defaultRequestTimeout - } - prm.SetHealthcheckTimeout(healthCheckTimeout) - prmTree.SetHealthcheckTimeout(healthCheckTimeout) - - rebalanceInterval := a.config().GetDuration(cfgRebalance) - if rebalanceInterval <= 0 { - rebalanceInterval = defaultRebalanceTimer - } - prm.SetClientRebalanceInterval(rebalanceInterval) - prmTree.SetClientRebalanceInterval(rebalanceInterval) - - errorThreshold := a.config().GetUint32(cfgPoolErrorThreshold) - if errorThreshold <= 0 { - errorThreshold = defaultPoolErrorThreshold - } - prm.SetErrorThreshold(errorThreshold) - prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath))) - prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath))) - - prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts)) - - interceptors := []grpc.DialOption{ - grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()), - grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()), - grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()), - grpc.WithChainUnaryInterceptor(qostagging.NewUnaryClientInteceptor()), - grpc.WithChainStreamInterceptor(qostagging.NewStreamClientInterceptor()), - } - prm.SetGRPCDialOptions(interceptors...) - prmTree.SetGRPCDialOptions(interceptors...) 
- - p, err := pool.NewPool(prm) - if err != nil { - a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp)) - } - - if err = p.Dial(ctx); err != nil { - a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp)) - } - - if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) { - prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p), cache.NewNetmapCache(getNetmapCacheOptions(a.config(), a.log)), a.bucketCache, a.log)) - } - - treePool, err := treepool.NewPool(prmTree) - if err != nil { - a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp)) - } - if err = treePool.Dial(ctx); err != nil { - a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp)) - } - - a.pool = p - a.treePool = treePool - a.key = key -} - -func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam { - var nodes []pool.NodeParam - for i := 0; ; i++ { - key := cfgPeers + "." + strconv.Itoa(i) + "." 
- address := v.GetString(key + "address") - weight := v.GetFloat64(key + "weight") - priority := v.GetInt(key + "priority") - - if address == "" { - break - } - if weight <= 0 { // unspecified or wrong - weight = 1 - } - if priority <= 0 { // unspecified or wrong - priority = 1 - } - - nodes = append(nodes, pool.NewNodeParam(priority, address, weight)) - - l.Info(logs.AddedStoragePeer, - zap.Int("priority", priority), - zap.String("address", address), - zap.Float64("weight", weight), - logs.TagField(logs.TagApp)) - } - - return nodes -} - -func fetchSoftMemoryLimit(cfg *viper.Viper) int64 { - softMemoryLimit := cfg.GetSizeInBytes(cfgSoftMemoryLimit) - if softMemoryLimit <= 0 { - softMemoryLimit = defaultSoftMemoryLimit - } - - return int64(softMemoryLimit) -} - -func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config { - cacheCfg := cache.DefaultBucketConfig(l) - - cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime) - cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size) - - return cacheCfg -} - -func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConfig { - cacheCfg := cache.DefaultNetmapConfig(l) - - cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgNetmapCacheLifetime, cacheCfg.Lifetime) - - return cacheCfg -} - -func getCORSCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config { - cacheCfg := cache.DefaultCORSConfig(l) - - cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgCORSCacheLifetime, cacheCfg.Lifetime) - cacheCfg.Size = fetchCacheSize(v, l, cfgCORSCacheSize, cacheCfg.Size) - - return cacheCfg -} - -func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration { - if v.IsSet(cfgEntry) { - lifetime := v.GetDuration(cfgEntry) - if lifetime <= 0 { - l.Error(logs.InvalidLifetimeUsingDefaultValue, - zap.String("parameter", cfgEntry), - zap.Duration("value in config", lifetime), - zap.Duration("default", 
defaultValue), - logs.TagField(logs.TagApp)) - } else { - return lifetime - } - } - - return defaultValue -} - -func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int { - if v.IsSet(cfgEntry) { - size := v.GetInt(cfgEntry) - if size <= 0 { - l.Error(logs.InvalidCacheSizeUsingDefaultValue, - zap.String("parameter", cfgEntry), - zap.Int("value in config", size), - zap.Int("default", defaultValue), - logs.TagField(logs.TagApp)) - } else { - return size - } - } - - return defaultValue -} - -func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource { - source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger)) - if err != nil { - logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp)) - } - return source -} - -func fetchMultinetConfig(v *viper.Viper, l *zap.Logger) (cfg internalnet.Config) { - cfg.Enabled = v.GetBool(cfgMultinetEnabled) - cfg.Balancer = v.GetString(cfgMultinetBalancer) - cfg.Restrict = v.GetBool(cfgMultinetRestrict) - cfg.FallbackDelay = v.GetDuration(cfgMultinetFallbackDelay) - cfg.Subnets = make([]internalnet.Subnet, 0, 5) - cfg.EventHandler = internalnet.NewLogEventHandler(l) - - for i := 0; ; i++ { - key := cfgMultinetSubnets + "." + strconv.Itoa(i) + "." - subnet := internalnet.Subnet{} - - subnet.Prefix = v.GetString(key + "mask") - if subnet.Prefix == "" { - break - } - subnet.SourceIPs = v.GetStringSlice(key + "source_ips") - cfg.Subnets = append(cfg.Subnets, subnet) - } - - return -} - -func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) { - attributes := make(map[string]string) - for i := 0; ; i++ { - key := cfgTracingAttributes + "." + strconv.Itoa(i) + "." 
- attrKey := v.GetString(key + "key") - attrValue := v.GetString(key + "value") - if attrKey == "" { - break - } - - if _, ok := attributes[attrKey]; ok { - return nil, fmt.Errorf("tracing attribute key %s defined more than once", attrKey) - } - - if attrValue == "" { - return nil, fmt.Errorf("empty tracing attribute value for key %s", attrKey) - } - - attributes[attrKey] = attrValue - } - - return attributes, nil -} - -func fetchArchiveCompression(v *viper.Viper) bool { - if v.IsSet(cfgZipCompression) { - return v.GetBool(cfgZipCompression) - } - return v.GetBool(cfgArchiveCompression) -} - -func fetchCORSConfig(v *viper.Viper) *data.CORSRule { - if !v.IsSet(cfgCORS) { - return nil - } - - return &data.CORSRule{ - AllowedOrigins: []string{v.GetString(cfgCORSAllowOrigin)}, - AllowedMethods: v.GetStringSlice(cfgCORSAllowMethods), - AllowedHeaders: v.GetStringSlice(cfgCORSAllowHeaders), - ExposeHeaders: v.GetStringSlice(cfgCORSExposeHeaders), - AllowedCredentials: v.GetBool(cfgCORSAllowCredentials), - MaxAgeSeconds: fetchCORSMaxAge(v), - } -} diff --git a/cmd/http-gw/settings_test.go b/cmd/http-gw/settings_test.go deleted file mode 100644 index 13bd50d..0000000 --- a/cmd/http-gw/settings_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package main - -import ( - "os" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" - "github.com/stretchr/testify/require" -) - -func TestConfigReload(t *testing.T) { - f, err := os.CreateTemp("", "conf") - require.NoError(t, err) - defer func() { - require.NoError(t, os.Remove(f.Name())) - }() - - confData := ` -pprof: - enabled: true - -resolve_bucket: - default_namespaces: [""] - -resolve_order: - - nns -` - - _, err = f.WriteString(confData) - require.NoError(t, err) - require.NoError(t, f.Close()) - - cfg := settings() - - require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--connect_timeout", "15s"})) - require.NoError(t, cfg.reload()) - - require.True(t, cfg.config().GetBool(cfgPprofEnabled)) 
- require.Equal(t, []string{""}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces)) - require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder)) - require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout)) - - require.NoError(t, os.Truncate(f.Name(), 0)) - require.NoError(t, cfg.reload()) - - require.False(t, cfg.config().GetBool(cfgPprofEnabled)) - require.Equal(t, []string{"", "root"}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces)) - require.Equal(t, []string{resolver.NNSResolver, resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder)) - require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout)) -} - -func TestSetTLSEnabled(t *testing.T) { - cfg := settings() - - require.NoError(t, cfg.flags.Parse([]string{"--" + cfgTLSCertFile, "tls.crt", "--" + cfgTLSKeyFile, "tls.key"})) - require.NoError(t, cfg.reload()) - - require.True(t, cfg.config().GetBool(cfgServer+".0."+cfgTLSEnabled)) -} diff --git a/config/config.env b/config/config.env deleted file mode 100644 index ff880d5..0000000 --- a/config/config.env +++ /dev/null @@ -1,186 +0,0 @@ -# Wallet section. - -# Path to wallet. -HTTP_GW_WALLET_PATH=/path/to/wallet.json -# Account address. If omitted default one will be used. -HTTP_GW_WALLET_ADDRESS=NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP -# Passphrase to decrypt wallet. If you're using a wallet without a password, place '' here. -HTTP_GW_WALLET_PASSPHRASE=pwd - -# Enable metrics. -HTTP_GW_PPROF_ENABLED=true -HTTP_GW_PPROF_ADDRESS=localhost:8083 - -HTTP_GW_PROMETHEUS_ENABLED=true -HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084 - -# Logger. 
-HTTP_GW_LOGGER_LEVEL=debug -HTTP_GW_LOGGER_SAMPLING_ENABLED=false -HTTP_GW_LOGGER_SAMPLING_INITIAL=100 -HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100 -HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s -HTTP_GW_LOGGER_TAGS_0_NAMES=app,datapath -HTTP_GW_LOGGER_TAGS_0_LEVEL=level -HTTP_GW_LOGGER_TAGS_1_NAME=external_storage_tree - -HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443 -HTTP_GW_SERVER_0_TLS_ENABLED=false -HTTP_GW_SERVER_0_TLS_CERT_FILE=/path/to/tls/cert -HTTP_GW_SERVER_0_TLS_KEY_FILE=/path/to/tls/key -HTTP_GW_SERVER_1_ADDRESS=0.0.0.0:444 -HTTP_GW_SERVER_1_TLS_ENABLED=true -HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert -HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key - -# How often to reconnect to the servers -HTTP_GW_RECONNECT_INTERVAL: 1m - -# Nodes configuration. -# This configuration make the gateway use the first node (grpc://s01.frostfs.devenv:8080) -# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.frostfs.devenv:8080) -# for 10% of requests and the third node for 90% of requests. - -# Peer 1. -# Endpoint. -HTTP_GW_PEERS_0_ADDRESS=grpc://s01.frostfs.devenv:8080 -# Until nodes with the same priority level are healthy -# nodes with other priority are not used. -# The lower the value, the higher the priority. -HTTP_GW_PEERS_0_PRIORITY=1 -# Load distribution proportion for nodes with the same priority. -HTTP_GW_PEERS_0_WEIGHT=1 -# Peer 2. -HTTP_GW_PEERS_1_ADDRESS=grpc://s02.frostfs.devenv:8080 -HTTP_GW_PEERS_1_PRIORITY=2 -HTTP_GW_PEERS_1_WEIGHT=1 -# Peer 3. -HTTP_GW_PEERS_2_ADDRESS=grpc://s03.frostfs.devenv:8080 -HTTP_GW_PEERS_2_PRIORITY=2 -HTTP_GW_PEERS_2_WEIGHT=9 - -# Per-connection buffer size for requests' reading. -# This also limits the maximum header size. -HTTP_GW_WEB_READ_BUFFER_SIZE=4096 -# Per-connection buffer size for responses' writing. -HTTP_GW_WRITE_BUFFER_SIZE=4096 -# ReadTimeout is the amount of time allowed to read -# the full request including body. 
The connection's read -# deadline is reset when the connection opens, or for -# keep-alive connections after the first byte has been read. -HTTP_GW_READ_TIMEOUT=10m -# WriteTimeout is the maximum duration before timing out -# writes of the response. It is reset after the request handler -# has returned. -HTTP_GW_WRITE_TIMEOUT=5m -# StreamRequestBody enables request body streaming, -# and calls the handler sooner when given body is -# larger then the current limit. -HTTP_GW_STREAM_REQUEST_BODY=true -# Maximum request body size. -# The server rejects requests with bodies exceeding this limit. -HTTP_GW_MAX_REQUEST_BODY_SIZE=4194304 - -# RPC endpoint to be able to use nns container resolving. -HTTP_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333 -# The order in which resolvers are used to find an container id by name. -HTTP_GW_RESOLVE_ORDER="nns dns" - -# Create timestamp for object if it isn't provided by header. -HTTP_GW_UPLOAD_HEADER_USE_DEFAULT_TIMESTAMP=false - -# Timeout to dial node. -HTTP_GW_CONNECT_TIMEOUT=5s -# Timeout for individual operations in streaming RPC. -HTTP_GW_STREAM_TIMEOUT=10s -# Timeout to check node health during rebalance. -HTTP_GW_REQUEST_TIMEOUT=5s -# Interval to check nodes health. -HTTP_GW_REBALANCE_TIMER=30s -# The number of errors on connection after which node is considered as unhealthy -HTTP_GW_POOL_ERROR_THRESHOLD=100 - -# Enable archive compression to download files by common prefix. -# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead. -HTTP_GW_ZIP_COMPRESSION=false - -# Enable archive compression to download files by common prefix. 
-HTTP_GW_ARCHIVE_COMPRESSION=false - -HTTP_GW_TRACING_ENABLED=true -HTTP_GW_TRACING_ENDPOINT="localhost:4317" -HTTP_GW_TRACING_EXPORTER="otlp_grpc" -HTTP_GW_TRACING_TRUSTED_CA="" -HTTP_GW_TRACING_ATTRIBUTES_0_KEY=key0 -HTTP_GW_TRACING_ATTRIBUTES_0_VALUE=value -HTTP_GW_TRACING_ATTRIBUTES_1_KEY=key1 -HTTP_GW_TRACING_ATTRIBUTES_1_VALUE=value - -HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824 - -# Parameters of requests to FrostFS -# This flag enables client side object preparing. -HTTP_GW_FROSTFS_CLIENT_CUT=false -# Sets max buffer size for read payload in put operations. -HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576 - -# Caching -# Cache which contains mapping of bucket name to bucket info -HTTP_GW_CACHE_BUCKETS_LIFETIME=1m -HTTP_GW_CACHE_BUCKETS_SIZE=1000 -# Cache which stores netmap -HTTP_GW_CACHE_NETMAP_LIFETIME=1m -# Cache which stores container CORS configurations -HTTP_GW_CACHE_CORS_LIFETIME=5m -HTTP_GW_CACHE_CORS_SIZE=1000 - -# Header to determine zone to resolve bucket name -HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace -# Namespaces that should be handled as default -HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root" - -# Max attempt to make successful tree request. -# default value is 0 that means the number of attempts equals to number of nodes in pool. 
-HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0 - -HTTP_GW_CORS_ALLOW_ORIGIN="*" -HTTP_GW_CORS_ALLOW_METHODS="GET" "POST" -HTTP_GW_CORS_ALLOW_HEADERS="*" -HTTP_GW_CORS_EXPOSE_HEADERS="*" -HTTP_GW_CORS_ALLOW_CREDENTIALS=false -HTTP_GW_CORS_MAX_AGE=600 - -# Multinet properties -# Enable multinet support -HTTP_GW_MULTINET_ENABLED=false -# Strategy to pick source IP address -HTTP_GW_MULTINET_BALANCER=roundrobin -# Restrict requests with unknown destination subnet -HTTP_GW_MULTINET_RESTRICT=false -# Delay between ipv6 to ipv4 fallback switch -HTTP_GW_MULTINET_FALLBACK_DELAY=300ms -# List of subnets and IP addresses to use as source for those subnets -HTTP_GW_MULTINET_SUBNETS_1_MASK=1.2.3.4/24 -HTTP_GW_MULTINET_SUBNETS_1_SOURCE_IPS=1.2.3.4 1.2.3.5 - -# Number of workers in handler's worker pool -HTTP_GW_WORKER_POOL_SIZE=1000 - -# Index page -# Enable index page support -HTTP_GW_INDEX_PAGE_ENABLED=false -# Index page template path -HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl - -# Enable using fallback path to search for a object by attribute -HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false -# See description in docs/gate-configuration.md -HTTP_GW_FEATURES_ENABLE_FILEPATH_SLASH_FALLBACK=false -# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service -HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true - -# Containers properties -HTTP_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj - -# Container contract hash (LE) or name in NNS. -HTTP_GW_CONTRACTS_CONTAINER_NAME=container.frostfs diff --git a/config/config.yaml b/config/config.yaml deleted file mode 100644 index 9b4b3c9..0000000 --- a/config/config.yaml +++ /dev/null @@ -1,206 +0,0 @@ -wallet: - path: /path/to/wallet.json # Path to wallet. - address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP # Account address. If omitted default one will be used. - passphrase: pwd # Passphrase to decrypt wallet. 
If you're using a wallet without a password, place '' here. - -pprof: - enabled: false # Enable pprof. - address: localhost:8083 -prometheus: - enabled: false # Enable metrics. - address: localhost:8084 - -tracing: - enabled: true - exporter: "otlp_grpc" - endpoint: "localhost:4317" - trusted_ca: "" - attributes: - - key: key0 - value: value - - key: key1 - value: value - -logger: - level: debug # Log level. - destination: stdout - sampling: - enabled: false - initial: 100 - thereafter: 100 - interval: 1s - tags: - - names: app,datapath - level: debug - -server: - - address: 0.0.0.0:8080 - tls: - enabled: false - cert_file: /path/to/cert - key_file: /path/to/key - - address: 0.0.0.0:8081 - tls: - enabled: false - cert_file: /path/to/cert - key_file: /path/to/key - -# Nodes configuration. -# This configuration make the gateway use the first node (grpc://s01.frostfs.devenv:8080) -# while it's healthy. Otherwise, the gateway use the second node (grpc://s01.frostfs.devenv:8080) -# for 10% of requests and the third node for 90% of requests. -peers: - 0: - # Endpoint. - address: grpc://s01.frostfs.devenv:8080 - - # Until nodes with the same priority level are healthy - # nodes with other priority are not used. - # The lower the value, the higher the priority. - priority: 1 - - # Load distribution proportion for nodes with the same priority. - weight: 1 - 1: - address: grpc://s02.frostfs.devenv:8080 - priority: 2 - weight: 1 - 2: - address: grpc://s03.frostfs.devenv:8080 - priority: 2 - weight: 9 - -reconnect_interval: 1m - -web: - # Per-connection buffer size for requests' reading. - # This also limits the maximum header size. - read_buffer_size: 4096 - - # Per-connection buffer size for responses' writing. - write_buffer_size: 4096 - - # ReadTimeout is the amount of time allowed to read - # the full request including body. The connection's read - # deadline is reset when the connection opens, or for - # keep-alive connections after the first byte has been read. 
- read_timeout: 10m - - # WriteTimeout is the maximum duration before timing out - # writes of the response. It is reset after the request handler - # has returned. - write_timeout: 5m - - # StreamRequestBody enables request body streaming, - # and calls the handler sooner when given body is - # larger then the current limit. - stream_request_body: true - - # Maximum request body size. - # The server rejects requests with bodies exceeding this limit. - max_request_body_size: 4194304 - -# RPC endpoint to be able to use nns container resolving. -rpc_endpoint: http://morph-chain.frostfs.devenv:30333 -# The order in which resolvers are used to find an container id by name. -resolve_order: - - nns - - dns - -upload_header: - use_default_timestamp: false # Create timestamp for object if it isn't provided by header. - -connect_timeout: 5s # Timeout to dial node. -stream_timeout: 10s # Timeout for individual operations in streaming RPC. -request_timeout: 5s # Timeout to check node health during rebalance. -rebalance_timer: 30s # Interval to check nodes health. -pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy. - -# Number of workers in handler's worker pool -worker_pool_size: 1000 - -# Enables index page to see objects list for specified container and prefix -index_page: - enabled: false - template_path: internal/handler/templates/index.gotmpl - -# Deprecated: Use archive.compression instead -zip: - # Enables zip compression to download files by common prefix. - compression: false - -archive: - # Enables archive compression to download files by common prefix. - compression: false - -runtime: - soft_memory_limit: 1gb - -# Parameters of requests to FrostFS -frostfs: - # This flag enables client side object preparing. - client_cut: false - # Sets max buffer size for read payload in put operations. - buffer_max_size_for_put: 1048576 - # Max attempt to make successful tree request. 
- # default value is 0 that means the number of attempts equals to number of nodes in pool. - tree_pool_max_attempts: 0 - -# Caching -cache: - # Cache which contains mapping of bucket name to bucket info - buckets: - lifetime: 1m - size: 1000 - # Cache which stores netmap - netmap: - lifetime: 1m - # Cache which stores container CORS configurations - cors: - lifetime: 5m - size: 1000 - -resolve_bucket: - namespace_header: X-Frostfs-Namespace - default_namespaces: [ "", "root" ] - -cors: - allow_origin: "" - allow_methods: [] - allow_headers: [] - expose_headers: [] - allow_credentials: false - max_age: 600 - -# Multinet properties -multinet: - # Enable multinet support - enabled: false - # Strategy to pick source IP address - balancer: roundrobin - # Restrict requests with unknown destination subnet - restrict: false - # Delay between ipv6 to ipv4 fallback switch - fallback_delay: 300ms - # List of subnets and IP addresses to use as source for those subnets - subnets: - - mask: 1.2.3.4/24 - source_ips: - - 1.2.3.4 - - 1.2.3.5 - -features: - # Enable using fallback path to search for a object by attribute - enable_filepath_fallback: false - # See description in docs/gate-configuration.md - enable_filepath_slash_fallback: false - # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service - tree_pool_netmap_support: true - -containers: - cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj - -contracts: - container: - # Container contract hash (LE) or name in NNS. 
- name: container.frostfs diff --git a/config/dir/pprof.yaml b/config/dir/pprof.yaml deleted file mode 100644 index 44f93ca..0000000 --- a/config/dir/pprof.yaml +++ /dev/null @@ -1,3 +0,0 @@ -pprof: - enabled: true - address: localhost:8083 diff --git a/config/dir/prometheus.yaml b/config/dir/prometheus.yaml deleted file mode 100644 index f29db69..0000000 --- a/config/dir/prometheus.yaml +++ /dev/null @@ -1,3 +0,0 @@ -prometheus: - enabled: true - address: localhost:8084 diff --git a/debian/changelog b/debian/changelog deleted file mode 100644 index 257bf0f..0000000 --- a/debian/changelog +++ /dev/null @@ -1,5 +0,0 @@ -frostfs-http-gw (0.0.0) stable; urgency=medium - - * Please see CHANGELOG.md - - -- TrueCloudLab Wed, 24 Aug 2022 18:29:49 +0300 diff --git a/debian/control b/debian/control deleted file mode 100644 index 7924bc6..0000000 --- a/debian/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: frostfs-http-gw -Section: frostfs -Priority: optional -Maintainer: TrueCloudLab -Build-Depends: debhelper-compat (= 13), dh-sysuser, git, devscripts -Standards-Version: 4.5.1 -Homepage: https://frostfs.info/ -Vcs-Git: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw.git -Vcs-Browser: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw - -Package: frostfs-http-gw -Architecture: any -Depends: ${misc:Depends} -Description: FrostFS HTTP Gateway bridges FrostFS internal protocol and HTTP standard. 
diff --git a/debian/copyright b/debian/copyright deleted file mode 100644 index 9ab3cb9..0000000 --- a/debian/copyright +++ /dev/null @@ -1,25 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: frostfs-http-gw -Upstream-Contact: tech@frostfs.info -Source: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw - -Files: * -Copyright: 2018-2022 NeoSPCC (@nspcc-dev), contributors of neofs-http-gw project - (https://github.com/nspcc-dev/neofs-http-gw/blob/master/CREDITS.md) - 2022 True Cloud Lab (@TrueCloudLab), contributors of frostfs-http-gw project - (https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/src/branch/master/CREDITS.md) - - -License: GPL-3 - This program is free software: you can redistribute it and/or modify it - under the terms of the GNU General Public License as published - by the Free Software Foundation; version 3. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program or at /usr/share/common-licenses/GPL-3. - If not, see . 
diff --git a/debian/frostfs-http-gw.dirs b/debian/frostfs-http-gw.dirs deleted file mode 100644 index f51b198..0000000 --- a/debian/frostfs-http-gw.dirs +++ /dev/null @@ -1,2 +0,0 @@ -etc/frostfs -srv/frostfs_cache diff --git a/debian/frostfs-http-gw.docs b/debian/frostfs-http-gw.docs deleted file mode 100644 index 884d34d..0000000 --- a/debian/frostfs-http-gw.docs +++ /dev/null @@ -1,4 +0,0 @@ -docs/gate-configuration.md -README.md -CREDITS.md -CONTRIBUTING.md diff --git a/debian/frostfs-http-gw.examples b/debian/frostfs-http-gw.examples deleted file mode 100644 index dd04e98..0000000 --- a/debian/frostfs-http-gw.examples +++ /dev/null @@ -1 +0,0 @@ -config/* diff --git a/debian/frostfs-http-gw.install b/debian/frostfs-http-gw.install deleted file mode 100644 index 2f71be4..0000000 --- a/debian/frostfs-http-gw.install +++ /dev/null @@ -1,2 +0,0 @@ -bin/frostfs-http-gw usr/bin -config/config.yaml etc/frostfs/http diff --git a/debian/frostfs-http-gw.postinst b/debian/frostfs-http-gw.postinst deleted file mode 100755 index 1f25055..0000000 --- a/debian/frostfs-http-gw.postinst +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh -# postinst script for frostfs-http-gw -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - USERNAME=http - id -u frostfs-$USERNAME >/dev/null 2>&1 || useradd -s /usr/sbin/nologin -d /var/lib/frostfs/$USERNAME --system -m -U -c "FrostFS HTTP gateway" frostfs-$USERNAME - if ! 
dpkg-statoverride --list /etc/frostfs/$USERNAME >/dev/null; then - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME - chown -f root:frostfs-$USERNAME /etc/frostfs/$USERNAME/config.yaml || true - chmod -f 0750 /etc/frostfs/$USERNAME - chmod -f 0640 /etc/frostfs/$USERNAME/config.yaml || true - fi - USERDIR=$(getent passwd "frostfs-$USERNAME" | cut -d: -f6) - if ! dpkg-statoverride --list "$USERDIR" >/dev/null; then - chown -f frostfs-$USERNAME: "$USERDIR" - fi - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-http-gw.postrm b/debian/frostfs-http-gw.postrm deleted file mode 100755 index ebb2dec..0000000 --- a/debian/frostfs-http-gw.postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for frostfs-http-gw -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - purge) - rm -rf /srv/frostfs_cache - ;; - - remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-http-gw.preinst b/debian/frostfs-http-gw.preinst deleted file mode 100755 index c18093b..0000000 --- a/debian/frostfs-http-gw.preinst +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/sh -# preinst script for frostfs-http-gw -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-http-gw.prerm b/debian/frostfs-http-gw.prerm deleted file mode 100755 index 7623f7f..0000000 --- a/debian/frostfs-http-gw.prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for frostfs-http-gw -# -# see: dh_installdeb(1) - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/debian/frostfs-http-gw.service b/debian/frostfs-http-gw.service deleted file mode 100644 index 4851a3f..0000000 --- a/debian/frostfs-http-gw.service +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=FrostFS HTTP Gateway -Requires=network.target - -[Service] -Type=simple -ExecStart=/usr/bin/frostfs-http-gw --config /etc/frostfs/http/config.yaml -User=frostfs-http -Group=frostfs-http -WorkingDirectory=/srv/frostfs_cache -Restart=always -RestartSec=5 -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/debian/rules b/debian/rules deleted file mode 100755 index 0554034..0000000 --- a/debian/rules +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/make -f - -# Do not try to strip Go binaries and do not run test -export DEB_BUILD_OPTIONS := nostrip nocheck -SERVICE = frostfs-http-gw - -%: - dh $@ - -override_dh_installsystemd: - dh_installsystemd --no-enable --no-start $(SERVICE).service - -override_dh_installchangelogs: - dh_installchangelogs -k CHANGELOG.md diff --git a/debian/source/format b/debian/source/format deleted file mode 100644 index 163aaf8..0000000 --- a/debian/source/format +++ /dev/null @@ -1 +0,0 @@ -3.0 (quilt) diff --git a/docs/api.md b/docs/api.md deleted file mode 100644 index 698e9b1..0000000 --- a/docs/api.md +++ /dev/null @@ -1,325 +0,0 @@ -# HTTP Gateway Specification - -| Route | Description | -|-------------------------------------------------|--------------------------------------------------| -| `/upload/{cid}` | [Put object](#put-object) | -| `/get/{cid}/{oid}` | [Get object](#get-object) | -| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) | -| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` | [Download objects in archive](#download-archive) | - -**Note:** `cid` parameter can be base58 encoded container ID or container name -(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)). 
- -Route parameters can be: - -* `Single` - match a single path segment (cannot contain `/` and be empty) -* `Catch-All` - match everything (such parameter usually the last one in routes) -* `Query` - regular query parameter - -### Bearer token - -All routes can accept [bearer token](./authentication.md) from: - -* `Authorization` header with `Bearer` type and base64-encoded token in - credentials field -* `Bearer` cookie with base64-encoded token contents - -Example: - -Header: - -``` -Authorization: Bearer ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB -``` - -Cookie: - -``` -cookie: Bearer=ChA5Gev0d8JI26tAtWyyQA3WEhsKGTVxfQ56a0uQeFmOO63mqykBS1HNpw1rxSgaBgiyEBjODyIhAyxcn89Bj5fwCfXlj5HjSYjonHSErZoXiSqeyh0ZQSb2MgQIARAB -``` - -## Put object - -Route: `/upload/{cid}` - -| Route parameter | Type | Description | -|-----------------|--------|---------------------------------------------------------| -| `cid` | Single | Base58 encoded container ID or container name from NNS. | - -### Methods - -#### POST - -Upload file as object with attributes to FrostFS. - -##### Request - -###### Headers - -| Header | Description | -|------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Common headers | See [bearer token](#bearer-token). | -| `X-Attribute-System-*` | Used to set system FrostFS object attributes
(e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). | -| `X-Attribute-*` | Used to set regular object attributes
(e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). | -| `X-Explode-Archive` | If set, gate tries to read files from uploading `tar` archive and creates an object for each file in it. Uploading `tar` could be compressed via Gzip by setting a `Content-Encoding` header. Sets a `FilePath` attribute as a relative path from archive root and a `FileName` as the last path element of the `FilePath`. | -| `Content-Encoding` | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. | -| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. | - -There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority): - -1. `X-Attribute-System-Expiration-Epoch: 100` -2. `X-Attribute-System-Expiration-Duration: 24h30m` -3. `X-Attribute-System-Expiration-Timestamp: 1637574797` -4. `X-Attribute-System-Expiration-RFC3339: 2021-11-22T09:55:49Z` - -which transforms to `X-Attribute-System-Expiration-Epoch`. So you can provide expiration any convenient way. - -If you don't specify the `X-Attribute-Timestamp` header the `Timestamp` attribute can be set anyway -(see http-gw [configuration](gate-configuration.md#upload-header-section)). - -The `X-Attribute-*` headers must be unique. If you provide several the same headers only one will be used. -Attribute key and value must be valid utf8 string. All attributes in sum must not be greater than 3mb. - -###### Body - -Body must contain multipart form with file. -The `filename` field from the multipart form will be set as `FileName` attribute of object -(can be overriden by `X-Attribute-FileName` header). - -##### Response - -###### Status codes - -| Status | Description | -|--------|----------------------------------------------| -| 200 | Object created successfully. | -| 400 | Some error occurred during object uploading. | -| 403 | Access denied. 
| -| 409 | Can not upload object due to quota reached. | - -## Get object - -Route: `/get/{cid}/{oid}?[download=false]` - -| Route parameter | Type | Description | -|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. | -| `oid` | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. | -| `download` | Query | Set the `Content-Disposition` header as `attachment` in response.
This make the browser to download object as file instead of showing it on the page. | - -### Methods - -#### GET - -Get an object (payload and attributes) by an address. - -##### Request - -###### Headers - -| Header | Description | -|----------------|------------------------------------| -| Common headers | See [bearer token](#bearer-token). | - -##### Response - -###### Headers - -| Header | Description | -|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| `X-Attribute-System-*` | System FrostFS object attributes
(e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). | -| `X-Attribute-*` | Regular object attributes
(e.g. `My-Tag` set "X-Attribute-My-Tag" header). | -| `Content-Disposition` | Indicate how to browsers should treat file.
Set `filename` as base part of `FileName` object attribute (if it's set, empty otherwise). | -| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. | -| `Content-Length` | Size of object payload. | -| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). | -| `X-Owner-Id` | Base58 encoded owner ID. | -| `X-Container-Id` | Base58 encoded container ID. | -| `X-Object-Id` | Base58 encoded object ID. | - -###### Status codes - -| Status | Description | -|--------|------------------------------------------------| -| 200 | Object got successfully. | -| 400 | Some error occurred during object downloading. | -| 403 | Access denied. | -| 404 | Container or object not found. | - -###### Body - -Returns object data. If request performed from browser, either displays raw data or downloads it as -attachment if `download` query parameter is set to `true`. -If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified -S3-name was found. - -#### HEAD - -Get an object attributes by an address. - -##### Request - -###### Headers - -| Header | Description | -|----------------|------------------------------------| -| Common headers | See [bearer token](#bearer-token). | - -##### Response - -###### Headers - -| Header | Description | -|------------------------|------------------------------------------------------------------------------------------------------------------------------| -| `X-Attribute-System-*` | System FrostFS object attributes
(e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). | -| `X-Attribute-*` | Regular object attributes
(e.g. `My-Tag` set "X-Attribute-My-Tag" header). | -| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. | -| `Content-Length` | Size of object payload. | -| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). | -| `X-Owner-Id` | Base58 encoded owner ID. | -| `X-Container-Id` | Base58 encoded container ID. | -| `X-Object-Id` | Base58 encoded object ID. | - -###### Status codes - -| Status | Description | -|--------|---------------------------------------------------| -| 200 | Object head successfully. | -| 400 | Some error occurred during object HEAD operation. | -| 403 | Access denied. | -| 404 | Container or object not found. | - -## Search object - -Route: `/get_by_attribute/{cid}/{attr_key}/{attr_val}?[download=true]` - -| Route parameter | Type | Description | -|-----------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------| -| `cid` | Single | Base58 encoded container ID or container name from NNS. | -| `attr_key` | Single | Object attribute key to search. | -| `attr_val` | Catch-All | Object attribute value to match. | -| `download` | Query | Set the `Content-Disposition` header as `attachment` in response. This make the browser to download object as file instead of showing it on the page. | - -### Methods - -#### GET - -Find and get an object (payload and attributes) by a specific attribute. -If more than one object is found, an arbitrary one will be returned. - -##### Request - -###### Headers - -| Header | Description | -|----------------|------------------------------------| -| Common headers | See [bearer token](#bearer-token). 
| - -##### Response - -###### Headers - -| Header | Description | -|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| `X-Attribute-System-*` | System FrostFS object attributes
(e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). | -| `X-Attribute-*` | Regular object attributes
(e.g. `My-Tag` set "X-Attribute-My-Tag" header). | -| `Content-Disposition` | Indicate how to browsers should treat file.
Set `filename` as base part of `FileName` object attribute (if it's set, empty otherwise). | -| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. | -| `Content-Length` | Size of object payload. | -| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). | -| `X-Owner-Id` | Base58 encoded owner ID. | -| `X-Container-Id` | Base58 encoded container ID. | -| `X-Object-Id` | Base58 encoded object ID. | - -###### Status codes - -| Status | Description | -|--------|------------------------------------------------| -| 200 | Object got successfully. | -| 400 | Some error occurred during object downloading. | -| 403 | Access denied. | -| 404 | Container or object not found. | - -#### HEAD - -Get object attributes by a specific attribute. -If more than one object is found, an arbitrary one will be used to get attributes. - -##### Request - -###### Headers - -| Header | Description | -|----------------|------------------------------------| -| Common headers | See [bearer token](#bearer-token). | - -##### Response - -###### Headers - -| Header | Description | -|------------------------|------------------------------------------------------------------------------------------------------------------------------| -| `X-Attribute-System-*` | System FrostFS object attributes
(e.g. `__SYSTEM__EXPIRATION_EPOCH` set "X-Attribute-System-Expiration-Epoch" header). | -| `X-Attribute-*` | Regular object attributes
(e.g. `My-Tag` set "X-Attribute-My-Tag" header). | -| `Content-Type` | Indicate content type of object. Set from `Content-Type` attribute or detected using payload. | -| `Content-Length` | Size of object payload. | -| `Last-Modified` | Contains the `Timestamp` attribute (if exists) formatted as HTTP time (RFC7231,RFC1123). | -| `X-Owner-Id` | Base58 encoded owner ID. | -| `X-Container-Id` | Base58 encoded container ID. | -| `X-Object-Id` | Base58 encoded object ID. | - -###### Status codes - -| Status | Description | -|--------|---------------------------------------| -| 200 | Object head successfully. | -| 400 | Some error occurred during operation. | -| 403 | Access denied. | -| 404 | Container or object not found. | - -## Download archive - -Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` - -| Route parameter | Type | Description | -|-----------------|-----------|---------------------------------------------------------| -| `cid` | Single | Base58 encoded container ID or container name from NNS. | -| `prefix` | Catch-All | Prefix for object attribute `FilePath` to match. | - -### Methods - -#### GET - -Find objects by prefix for `FilePath` attributes. Return found objects in zip or tar archive. -Name of files in archive sets to `FilePath` attribute of objects. -Time of files sets to time when object has started downloading. -You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` or -`/tar/{cid}/` route. - -Archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)). - -##### Request - -###### Headers - -| Header | Description | -|----------------|------------------------------------| -| Common headers | See [bearer token](#bearer-token). 
| - -##### Response - -###### Headers - -| Header | Description | -|-----------------------|---------------------------------------------------------------------------------------------| -| `Content-Disposition` | Indicate how to browsers should treat file (`attachment`). Set `filename` as `archive.zip`. | -| `Content-Type` | Indicate content type of object. Set to `application/zip` | - -###### Status codes - -| Status | Description | -|--------|------------------------------------------------| -| 200 | Object got successfully. | -| 400 | Some error occurred during object downloading. | -| 403 | Access denied. | -| 404 | Container or objects not found. | diff --git a/docs/authentication.md b/docs/authentication.md deleted file mode 100644 index d8bb235..0000000 --- a/docs/authentication.md +++ /dev/null @@ -1,108 +0,0 @@ -# Request authentication - -HTTP Gateway does not authorize requests. Gateway converts HTTP request to a -FrostFS request and signs it with its own private key. - -You can always upload files to public containers (open for anyone to put -objects into), but for restricted containers you need to explicitly allow PUT -operations for a request signed with your HTTP Gateway keys. - -If you don't want to manage gateway's secret keys and adjust policies when -gateway configuration changes (new gate, key rotation, etc) or you plan to use -public services, there is an option to let your application backend (or you) to -issue Bearer Tokens and pass them from the client via gate down to FrostFS level -to grant access. - -FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS -documentation for more details). There are two options to pass them to gateway: -* "Authorization" header with "Bearer" type and base64-encoded token in - credentials field -* "Bearer" cookie with base64-encoded token contents - -For example, you have a mobile application frontend with a backend part storing -data in FrostFS. 
When a user authorizes in the mobile app, the backend issues a FrostFS -Bearer token and provides it to the frontend. Then, the mobile app may generate -some data and upload it via any available FrostFS HTTP Gateway by adding -the corresponding header to the upload request. Accessing policy protected data -works the same way. - -##### Example -In order to generate a bearer token, you need to have wallet (which will be used to sign the token) - -1. Suppose you have a container with private policy for wallet key - -``` -$ frostfs-cli container create -r --wallet -policy --basic-acl 0 --await -CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z - -$ frostfs-cli ape-manager add -r --wallet \ - --target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \ - --rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \ - --chain-id -``` - - -2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate - HTTP Gateway request as wallet signed request and save it to **bearer.json**: -``` -{ - "body": { - "allowImpersonate": true, - "lifetime": { - "exp": "10000", - "nbf": "0", - "iat": "0" - } - }, - "signature": null -} -``` - -3. Sign it with the wallet: -``` -$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w -``` - -4. 
Encode to base64 to use in header: -``` -$ base64 -w 0 signed.json -# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw== -``` - -After that, the Bearer token can be used: - -``` -$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \ - http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K -# output: -# { -# "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X", -# "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K" -# } -``` - -##### Note: Bearer Token owner - -You can specify exact key who can use Bearer Token (gateway wallet address). -To do this, encode wallet address in base64 format - -``` -$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64 -# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg== -``` - -Then specify this value in Bearer Token Json -``` -{ - "body": { - "ownerID": { - "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==" - }, - ... -``` - -##### Note: Policy override - -Instead of impersonation, you can define the set of policies that will be applied -to the request sender. This allows to restrict access to specific operation and -specific objects without giving full impersonation control to the token user. diff --git a/docs/building-deb-package.md b/docs/building-deb-package.md deleted file mode 100644 index 26a77a2..0000000 --- a/docs/building-deb-package.md +++ /dev/null @@ -1,46 +0,0 @@ -# Building Debian package on host - -## Prerequisites - -For now, we're assuming building for Debian 11 (stable) x86_64. 
- -Go version 1.18.4 or later should already be installed, i.e. this runs -successfully: - -* `make all` - -## Installing packaging dependencies - -```shell -$ sudo apt install debhelper-compat dh-sequence-bash-completion devscripts -``` - -Warning: the number of packages installed is pretty large considering dependencies. - -## Package building - -```shell -$ make debpackage -``` - -## Leftovers cleaning - -```shell -$ make debclean -``` -or -```shell -$ dh clean -``` - -# Package versioning - -By default, package version is based on product version and may also contain git -tags and hashes. - -Package version could be overwritten by setting `PKG_VERSION` variable before -build, Debian package versioning rules should be respected. - -```shell -$ PKG_VERSION=0.32.0 make debpackage -``` diff --git a/docs/gate-configuration.md b/docs/gate-configuration.md deleted file mode 100644 index 7f3c4ef..0000000 --- a/docs/gate-configuration.md +++ /dev/null @@ -1,542 +0,0 @@ -# FrostFS HTTP Gateway configuration file - -This section contains detailed FrostFS HTTP Gateway configuration file description -including default config values and some tips to set up configurable values. - -There are some custom types used for brevity: - -* `duration` -- string consisting of a number and a suffix. Suffix examples include `s` (seconds), `m` (minutes), `ms` ( - milliseconds). - -# Reload on SIGHUP - -Some config values can be reloaded on SIGHUP signal. -Such parameters have special mark in tables below. - -You can send SIGHUP signal to app using the following command: - -```shell -$ kill -s SIGHUP 
- -Example: - -```shell -$ ./bin/frostfs-http-gw --config config.yaml &> http.log & -[1] 998346 - -$ cat http.log -# ... -2022-10-03T09:37:25.826+0300 info frostfs-http-gw/app.go:332 starting application {"app_name": "frostfs-http-gw", "version": "v0.24.0"} -# ... - -$ kill -s SIGHUP 998346 - -$ cat http.log -# ... 
-2022-10-03T09:38:16.205+0300 info frostfs-http-gw/app.go:470 SIGHUP config reload completed -``` - -# Structure - -| Section | Description | -|------------------|----------------------------------------------------------------| -| no section | [General parameters](#general-section) | -| `wallet` | [Wallet configuration](#wallet-section) | -| `peers` | [Nodes configuration](#peers-section) | -| `logger` | [Logger configuration](#logger-section) | -| `web` | [Web configuration](#web-section) | -| `server` | [Server configuration](#server-section) | -| `upload-header` | [Upload header configuration](#upload-header-section) | -| `zip` | [ZIP configuration](#zip-section) | -| `pprof` | [Pprof configuration](#pprof-section) | -| `prometheus` | [Prometheus configuration](#prometheus-section) | -| `tracing` | [Tracing configuration](#tracing-section) | -| `runtime` | [Runtime configuration](#runtime-section) | -| `frostfs` | [Frostfs configuration](#frostfs-section) | -| `cache` | [Cache configuration](#cache-section) | -| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) | -| `index_page` | [Index page configuration](#index_page-section) | -| `multinet` | [Multinet configuration](#multinet-section) | -| `features` | [Features configuration](#features-section) | -| `containers` | [Containers configuration](#containers-section) | -| `contracts` | [Contracts configuration](#contracts-section) | - -# General section - -```yaml -rpc_endpoint: http://morph-chain.frostfs.devenv:30333 -resolve_order: - - nns - - dns - -connect_timeout: 5s -stream_timeout: 10s -request_timeout: 5s -rebalance_timer: 30s -pool_error_threshold: 100 -reconnect_interval: 1m -worker_pool_size: 1000 - -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|------------------------|------------|---------------|---------------|------------------------------------------------------------------------------------| -| `rpc_endpoint` | `string` | yes | | The 
address of the RPC host to which the gateway connects to resolve bucket names. | -| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. | -| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. | -| `stream_timeout` | `duration` | | `10s` | Timeout for individual operations in streaming RPC. | -| `request_timeout` | `duration` | | `15s` | Timeout to check node health during rebalance. | -| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. | -| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. | -| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. | -| `worker_pool_size` | `int` | no | `1000` | Maximum worker count in handler's worker pool. | - -# `wallet` section - -```yaml -wallet: - path: /path/to/wallet.json - address: NfgHwwTi3wHAS8aFAN243C5vGbkYDpqLHP - passphrase: pwd -``` - -| Parameter | Type | Default value | Description | -|--------------|----------|---------------|--------------------------------------------------------------------------| -| `path` | `string` | | Path to the wallet. | -| `address` | `string` | | Account address to get from wallet. If omitted default one will be used. | -| `passphrase` | `string` | | Passphrase to decrypt wallet. | - -# `peers` section - -```yaml -# Nodes configuration -# This configuration makes the gateway use the first node (node1.frostfs:8080) -# while it's healthy. Otherwise, gateway uses the second node (node2.frostfs:8080) -# for 10% of requests and the third node (node3.frostfs:8080) for 90% of requests. -# Until nodes with the same priority level are healthy -# nodes with other priority are not used. -# The lower the value, the higher the priority. 
-peers: - 0: - address: node1.frostfs:8080 - priority: 1 - weight: 1 - 1: - address: node2.frostfs:8080 - priority: 2 - weight: 0.1 - 2: - address: node3.frostfs:8080 - priority: 2 - weight: 0.9 -``` - -| Parameter | Type | Default value | Description | -|------------|----------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| -| `address` | `string` | | Address of storage node. | -| `priority` | `int` | `1` | It allows to group nodes and don't switch group until all nodes with the same priority will be unhealthy. The lower the value, the higher the priority. | -| `weight` | `float` | `1` | Weight of node in the group with the same priority. Distribute requests to nodes proportionally to these values. | - -# `server` section - -You can specify several listeners for server. For example, for `http` and `https`. - -```yaml -server: - - address: 0.0.0.0:8080 - tls: - enabled: false - cert_file: /path/to/cert - key_file: /path/to/key - - address: 0.0.0.0:8081 - tls: - enabled: true - cert_file: /path/to/another/cert - key_file: /path/to/another/key -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------------|----------|---------------|----------------|-----------------------------------------------| -| `address` | `string` | | `0.0.0.0:8080` | The address that the gateway is listening on. | -| `tls.enabled` | `bool` | | false | Enable TLS or not. | -| `tls.cert_file` | `string` | yes | | Path to the TLS certificate. | -| `tls.key_file` | `string` | yes | | Path to the key. 
| - -# `logger` section - -```yaml -logger: - level: debug - destination: stdout - sampling: - enabled: false - initial: 100 - thereafter: 100 - interval: 1s - tags: - - names: "app,datapath" - level: info - - names: "external_storage_tree" -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------| -| `level` | `string` | yes | `debug` | Logging level.
Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | -| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` | -| `sampling.enabled` | `bool` | no | false | Sampling enabling flag. | -| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. | -| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. | -| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. | -| `sampling.tags` | `[]Tag` | yes | | Tagged log entries that should be additionally logged (available tags see in the next section). | - -## Tags - -There are additional log entries that can hurt performance and can be additionally logged by using `logger.tags` -parameter. Available tags: - -```yaml -tags: - - names: "app,datapath" - level: info -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------|------------|---------------|---------------------------|-------------------------------------------------------------------------------------------------------| -| `names` | `[]string` | yes | | Tag names separated by `,`. Possible values see below in `Tag values` section. | -| `level` | `string` | yes | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. | - -### Tag values - -* `app` - common application logs (enabled by default). -* `datapath` - main logic of application (enabled by default). -* `external_storage` - external interaction with storage node (enabled by default). -* `external_storage_tree` - external interaction with tree service in storage node (enabled by default). 
- -# `web` section - -```yaml -web: - read_buffer_size: 4096 - write_buffer_size: 4096 - read_timeout: 10m - write_timeout: 5m - stream_request_body: true - max_request_body_size: 4194304 -``` - -| Parameter | Type | Default value | Description | -|-------------------------|------------|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `read_buffer_size` | `int` | `4096` | Per-connection buffer size for requests' reading. This also limits the maximum header size. | -| `write_buffer_size` | `int` | `4096` | Per-connection buffer size for responses' writing. | -| `read_timeout` | `duration` | `10m` | The amount of time allowed to read the full request including body. The connection's read deadline is reset when the connection opens, or for keep-alive connections after the first byte has been read. | -| `write_timeout` | `duration` | `5m` | The maximum duration before timing out writes of the response. It is reset after the request handler has returned. | -| `stream_request_body` | `bool` | `true` | Enables request body streaming, and calls the handler sooner when given body is larger than the current limit. | -| `max_request_body_size` | `int` | `4194304` | Maximum request body size. The server rejects requests with bodies exceeding this limit. | - -# `upload-header` section - -```yaml -upload_header: - use_default_timestamp: false -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-------------------------|--------|---------------|---------------|-------------------------------------------------------------| -| `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. 
| - -# `zip` section - -> **_DEPRECATED:_** Use archive section instead - -```yaml -zip: - compression: false -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|---------------|--------|---------------|---------------|--------------------------------------------------------------| -| `compression` | `bool` | yes | `false` | Enable zip compression when download files by common prefix. | - -# `archive` section - -```yaml -archive: - compression: false -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|---------------|--------|---------------|---------------|------------------------------------------------------------------| -| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. | - -# `pprof` section - -Contains configuration for the `pprof` profiler. - -```yaml -pprof: - enabled: true - address: localhost:8083 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------|----------|---------------|------------------|-----------------------------------------| -| `enabled` | `bool` | yes | `false` | Flag to enable the service. | -| `address` | `string` | yes | `localhost:8083` | Address that service listener binds to. | - -# `prometheus` section - -Contains configuration for the `prometheus` metrics service. - -```yaml -prometheus: - enabled: true - address: localhost:8084 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------|----------|---------------|------------------|-----------------------------------------| -| `enabled` | `bool` | yes | `false` | Flag to enable the service. | -| `address` | `string` | yes | `localhost:8084` | Address that service listener binds to. | - -# `tracing` section - -Contains configuration for the `tracing` service. 
- -```yaml -tracing: - enabled: true - exporter: "otlp_grpc" - endpoint: "localhost:4317" - trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem" - attributes: - - key: key0 - value: value - - key: key1 - value: value -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|--------------|----------------------------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------| -| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. | -| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). | -| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. | -| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. | -| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. | - -#### `attributes` subsection - -```yaml - attributes: - - key: key0 - value: value - - key: key1 - value: value -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------|----------|---------------|---------------|------------------| -| `key` | `string` | yes | | Attribute key. | -| `value` | `string` | yes | | Attribute value. | - -# `runtime` section - -Contains runtime parameters. - -```yaml -runtime: - soft_memory_limit: 1gb -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `soft_memory_limit` | `size` | yes | maxint64 | Soft memory limit for the runtime. Zero or no value stands for no limit. 
If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. | - -# `frostfs` section - -Contains parameters of requests to FrostFS. - -```yaml -frostfs: - client_cut: false - buffer_max_size_for_put: 1048576 # 1mb - tree_pool_max_attempts: 0 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|---------------------------|----------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------| -| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. | -| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. | -| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. | - -### `cache` section - -```yaml -cache: - buckets: - lifetime: 1m - size: 1000 - netmap: - lifetime: 1m - cors: - lifetime: 5m - size: 1000 -``` - -| Parameter | Type | Default value | Description | -|-----------|-----------------------------------|---------------------------------|---------------------------------------------------------------------------| -| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`
`size: 1000` | Cache which contains mapping of bucket name to bucket info. | -| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. | -| `cors` | [Cache config](#cache-subsection) | `lifetime: 5m`
`size: 1000` | Cache which stores container CORS configurations. | - -#### `cache` subsection - -```yaml -lifetime: 1m -size: 1000 -``` - -| Parameter | Type | Default value | Description | -|------------|------------|------------------|-------------------------------| -| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. | -| `size` | `int` | depends on cache | LRU cache size. | - -# `resolve_bucket` section - -Bucket name resolving parameters from and to container ID. - -```yaml -resolve_bucket: - namespace_header: X-Frostfs-Namespace - default_namespaces: [ "", "root" ] -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|----------------------|------------|---------------|-----------------------|--------------------------------------------------| -| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. | -| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. | - -# `index_page` section - -Parameters for index HTML-page output. Activates if `GetObject` request returns `not found`. Two -index page modes available: - -* `s3` mode uses tree service for listing objects, -* `native` sends requests to nodes via native protocol. - If request pass S3-bucket name instead of CID, `s3` mode will be used, otherwise `native`. - -```yaml -index_page: - enabled: false - template_path: "" -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------------|----------|---------------|---------------|---------------------------------------------------------------------------------| -| `enabled` | `bool` | yes | `false` | Flag to enable index_page return if no object with specified S3-name was found. | -| `template_path` | `string` | yes | `""` | Path to .gotmpl file with html template for index_page. | - -# `cors` section - -Parameters for CORS (used in OPTIONS requests and responses in all handlers). 
-If values are not set, settings from CORS container will be used. - -```yaml -cors: - allow_origin: "*" - allow_methods: [ "GET", "HEAD" ] - allow_headers: [ "Authorization" ] - expose_headers: [ "*" ] - allow_credentials: false - max_age: 600 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|---------------------|------------|---------------|---------------|--------------------------------------------------------| -| `allow_origin` | `string` | yes | | Values for `Access-Control-Allow-Origin` headers. | -| `allow_methods` | `[]string` | yes | | Values for `Access-Control-Allow-Methods` headers. | -| `allow_headers` | `[]string` | yes | | Values for `Access-Control-Allow-Headers` headers. | -| `expose_headers` | `[]string` | yes | | Values for `Access-Control-Expose-Headers` headers. | -| `allow_credentials` | `bool` | yes | `false` | Values for `Access-Control-Allow-Credentials` headers. | -| `max_age` | `int` | yes | `600` | Values for `Access-Control-Max-Age ` headers. | - -# `multinet` section - -Configuration of multinet support. - -```yaml -multinet: - enabled: false - balancer: roundrobin - restrict: false - fallback_delay: 300ms - subnets: - - mask: 1.2.3.4/24 - source_ips: - - 1.2.3.4 - - 1.2.3.5 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|------------------|--------------------------------|---------------|---------------|--------------------------------------------------------------------------------------------| -| `enabled` | `bool` | yes | `false` | Enables multinet setting to manage source ip of outcoming requests. | -| `balancer` | `string` | yes | `""` | Strategy to pick source IP. By default picks first address. Supports `roundrobin` setting. | -| `restrict` | `bool` | yes | `false` | Restricts requests to an undefined subnets. | -| `fallback_delay` | `duration` | yes | `300ms` | Delay between IPv6 and IPv4 fallback stack switch. 
| -| `subnets` | [[]Subnet](#subnet-subsection) | yes | | Set of subnets to apply multinet dial settings. | - -#### `subnet` subsection - -```yaml -- mask: 1.2.3.4/24 - source_ips: - - 1.2.3.4 - - 1.2.3.5 -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|--------------|------------|---------------|---------------|----------------------------------------------------------------------| -| `mask` | `string` | yes | | Destination subnet. | -| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. | - -# `features` section - -Contains parameters for enabling features. - -```yaml -features: - enable_filepath_fallback: true - enable_filepath_slash_fallback: false - tree_pool_netmap_support: true -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-------------------------------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FileName` attribute if object with `FilePath` attribute wasn't found. | -| `features.enable_filepath_slash_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by `FilePath`/`FileName` with/without (depends on provided value in `FilePath`/`FileName`) leading slash if object with provided `FilePath`/`FileName` wasn't found. This fallback goes before `enable_filepath_fallback`. | -| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. 
| - -# `containers` section - -Section for well-known containers to store data and settings. - -```yaml -containers: - cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|-----------|----------|---------------|---------------|-----------------------------------------| -| `cors` | `string` | no | | Container name for CORS configurations. | - -# `contracts` section - -```yaml -contracts: - container: - name: container.frostfs -``` - -| Parameter | Type | SIGHUP reload | Default value | Description | -|------------------|----------|---------------|---------------------|----------------------------------------------| -| `container.name` | `string` | no | `container.frostfs` | Container contract hash (LE) or name in NNS. | diff --git a/docs/nns.md b/docs/nns.md deleted file mode 100644 index acb9f21..0000000 --- a/docs/nns.md +++ /dev/null @@ -1,36 +0,0 @@ -# Nicename Resolving with NNS - -Steps to start using name resolving: - -1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples): - -```yaml -rpc_endpoint: http://morph-chain.frostfs.devenv:30333 -resolve_order: - - nns -``` - -2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env) - you can check if your container (e.g. with `container-name` name) is registered in NNS: - -```shell -$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \ - http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash' - -0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 - -$ docker exec -it morph_chain neo-go \ - contract testinvokefunction \ - -r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \ - resolve string:container-name.container int:16 \ - | jq -r '.stack[0].value | if type=="array" then .[0].value else . 
end' \ - | base64 -d && echo - -7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL -``` - -3. Use container name instead of its `$CID`. For example: - -```shell -$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name -``` diff --git a/go.mod b/go.mod deleted file mode 100644 index 6082ef6..0000000 --- a/go.mod +++ /dev/null @@ -1,139 +0,0 @@ -module git.frostfs.info/TrueCloudLab/frostfs-http-gw - -go 1.23 - -require ( - git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e - git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 - git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe - git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc - git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 - git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 - github.com/bluele/gcache v0.0.2 - github.com/docker/docker v27.1.1+incompatible - github.com/docker/go-units v0.5.0 - github.com/fasthttp/router v1.4.1 - github.com/nspcc-dev/neo-go v0.106.2 - github.com/panjf2000/ants/v2 v2.5.0 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.5.0 - github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.15.0 - github.com/ssgreg/journald v1.0.0 - github.com/stretchr/testify v1.9.0 - github.com/testcontainers/testcontainers-go v0.35.0 - github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4 - github.com/valyala/fasthttp v1.34.0 - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 - go.uber.org/zap v1.27.0 - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 - golang.org/x/sys v0.28.0 - google.golang.org/grpc v1.69.2 -) - -require ( - dario.cat/mergo v1.0.0 // indirect - git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect - git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect - git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // 
indirect - git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/Microsoft/go-winio v0.6.2 // indirect - github.com/VictoriaMetrics/easyproto v0.1.4 // indirect - github.com/andybalholm/brotli v1.0.4 // indirect - github.com/antlr4-go/antlr/v4 v4.13.1 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/containerd v1.7.18 // indirect - github.com/containerd/log v0.1.0 // indirect - github.com/containerd/platforms v0.2.1 // indirect - github.com/cpuguy83/dockercfg v0.3.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/go-connections v0.5.0 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/ipfs/go-cid v0.0.7 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/docker-image-spec v1.3.1 
// indirect - github.com/moby/patternmatcher v0.6.0 // indirect - github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.1.0 // indirect - github.com/moby/term v0.5.0 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/mr-tron/base58 v1.2.0 // indirect - github.com/multiformats/go-base32 v0.1.0 // indirect - github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.14.0 // indirect - github.com/multiformats/go-multibase v0.2.0 // indirect - github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-varint v0.0.7 // indirect - github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect - github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect - github.com/nspcc-dev/rfc6979 v0.2.1 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.3 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 
// indirect - github.com/twmb/murmur3 v1.1.8 // indirect - github.com/urfave/cli v1.22.12 // indirect - github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.etcd.io/bbolt v1.3.9 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.31.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect - google.golang.org/protobuf v1.36.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index 6050ad6..0000000 --- a/go.sum +++ /dev/null @@ -1,822 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= 
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4= -git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc= -git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk= -git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121 h1:/Z8DfbLZXp7exUQWUKoG/9tbFdI9d5lV1qSReaYoG8I= -git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121/go.mod h1:kbwB4v2o6RyOfCo9kEFeUDZIX3LKhmS0yXPrtvzkQ1g= -git.frostfs.info/TrueCloudLab/frostfs-qos v0.0.0-20250128150313-cfbca7fa1dfe h1:81gDNdWNLP24oMQukRiCE9R1wGSh0l0dRq3F1W+Oesc= -git.frostfs.info/TrueCloudLab/frostfs-qos 
v0.0.0-20250128150313-cfbca7fa1dfe/go.mod h1:PCijYq4oa8vKtIEcUX6jRiszI6XAW+nBwU+T1kB4d1U= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc h1:fS6Yp4GvI+C22UrWz9oqJXwvQw5Q6SmADIY4H9eIQsc= -git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250317082814-87bb55f992dc/go.mod h1:aQpPWfG8oyfJ2X+FenPTJpSRWZjwcP5/RAtkW+/VEX8= -git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc= -git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM= -git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972 h1:/960fWeyn2AFHwQUwDsWB3sbP6lTEnFnMzLMM6tx6N8= -git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972/go.mod h1:2hM42MBrlhvN6XToaW6OWNk5ZLcu1FhaukGgxtfpDDI= -git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA= -git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc= -git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA= -git.frostfs.info/TrueCloudLab/tzhash v1.8.0/go.mod h1:dhY+oy274hV8wGvGL4MwwMpdL3GYvaX1a8GQZQHvlF8= -git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02 h1:HeY8n27VyPRQe49l/fzyVMkWEB2fsLJYKp64pwA7tz4= -git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.mod h1:rQFJJdEOV7KbbMtQYR2lNfiZk+ONRDJSbMCTWxKt8Fw= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= -github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc= -github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= -github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= -github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb h1:f0BMgIjhZy4lSRHCXFbQst85f5agZAjtDMixQqBWNpc= -github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= -github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= -github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= -github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= -github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= 
-github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= 
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fasthttp/router v1.4.1 h1:3xPUO+hy/HAkgGDSd5sX5w18cyGDIFbC7vip8KwPDk8= -github.com/fasthttp/router v1.4.1/go.mod h1:4P0Kq4C882tA2evBKDW7De7hGfWmvV8FN+zqt8Lu49Q= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= 
-github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= -github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= -github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= -github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential 
v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= -github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= -github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= -github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= -github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU= -github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= -github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= 
-github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= -github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= -github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk= -github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc= -github.com/nspcc-dev/neo-go v0.106.2 h1:KXSJ2J5Oacc7LrX3r4jvnC8ihKqHs5NB21q4f2S3r9o= -github.com/nspcc-dev/neo-go v0.106.2/go.mod h1:Ojwfx3/lv0VTeEHMpQ17g0wTnXcCSoFQVq5GEeCZmGo= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k= -github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY= -github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM= -github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/panjf2000/ants/v2 v2.5.0 h1:1rWGWSnxCsQBga+nQbA4/iY6VMeNoOIAM0ZWh9u3q2Q= -github.com/panjf2000/ants/v2 v2.5.0/go.mod h1:cU93usDlihJZ5CfRGNDYsiBYvoilLvBF5Qp/BT2GNRE= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 
h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 h1:N3Af8f13ooDKcIhsmFT7Z05CStZWu4C7Md0uDEy4q6o= -github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873/go.mod h1:dmPawKuiAeG/aFYVs2i+Dyosoo7FNcm+Pi8iK6ZUrX8= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/ssgreg/journald v1.0.0 h1:0YmTDPJXxcWDPba12qNMdO6TxvfkFSYpFIJ31CwmLcU= -github.com/ssgreg/journald v1.0.0/go.mod h1:RUckwmTM8ghGWPslq2+ZBZzbb9/2KgjzYZ4JEP+oRt0= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo= -github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4= -github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4 h1:GpfJ7OdNjS7BFTVwNCUI9L4aCJOFRbr5fdHqjdhoYE8= -github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4/go.mod h1:f3jBhpWvuZmue0HZK52GzRHJOYHYSILs/c8+K2S/J+o= -github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= -github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= -github.com/urfave/cli 
v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.28.0/go.mod h1:cmWIqlu99AO/RKcp1HWaViTqc57FswJOfYYdPJBl8BA= -github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= -github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod 
h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= 
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 h1:fVoAXEKA4+yufmbdVYv+SE73+cPZbbbe8paLsHfkK+U= -google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53/go.mod h1:riSXTwQ4+nqmPGtobMFyW5FqVAmIs0St6VPp4Ug7CE4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 h1:X58yt85/IXCx0Y3ZwN6sEIKZzQtDEYaBWrDvErdXrRE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod 
h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= -google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= -gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/help.mk b/help.mk deleted file mode 100644 index b5fbbc9..0000000 --- a/help.mk +++ /dev/null @@ -1,22 +0,0 @@ -.PHONY: help - -# Show this help prompt -help: - @echo ' Usage:' - @echo '' - @echo ' make ' - @echo '' - @echo ' Targets:' - @echo '' - @awk '/^#/{ comment = substr($$0,3) } comment && /^[a-zA-Z][a-zA-Z0-9.%_/-]+ ?:/{ print " ", $$1, comment }' $(MAKEFILE_LIST) | column -t -s ':' | grep -v 'IGNORE' | sort | uniq - -# Show help for docker/% IGNORE -help.docker/%: - $(eval TARGETS:=$(notdir all lint) ${BINS}) - @echo ' Usage:' - @echo '' - @echo ' make docker/% -- Run `make %` in Golang container' - @echo '' - @echo ' Supported docker targets:' - @echo '' - @$(foreach bin, $(TARGETS), echo ' ' $(bin);) diff --git a/internal/cache/buckets.go b/internal/cache/buckets.go deleted file mode 100644 index 91ae5b2..0000000 --- 
a/internal/cache/buckets.go +++ /dev/null @@ -1,111 +0,0 @@ -package cache - -import ( - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/bluele/gcache" - "go.uber.org/zap" -) - -// BucketCache contains cache with objects and the lifetime of cache entries. -type BucketCache struct { - cache gcache.Cache - cidCache gcache.Cache - logger *zap.Logger -} - -// Config stores expiration params for cache. -type Config struct { - Size int - Lifetime time.Duration - Logger *zap.Logger -} - -const ( - // DefaultBucketCacheSize is a default maximum number of entries in cache. - DefaultBucketCacheSize = 1e3 - // DefaultBucketCacheLifetime is a default lifetime of entries in cache. - DefaultBucketCacheLifetime = time.Minute -) - -// DefaultBucketConfig returns new default cache expiration values. -func DefaultBucketConfig(logger *zap.Logger) *Config { - return &Config{ - Size: DefaultBucketCacheSize, - Lifetime: DefaultBucketCacheLifetime, - Logger: logger, - } -} - -// NewBucketCache creates an object of BucketCache. -func NewBucketCache(config *Config, cidCache bool) *BucketCache { - cache := &BucketCache{ - cache: gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(), - logger: config.Logger, - } - - if cidCache { - cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build() - } - return cache -} - -// Get returns a cached object. 
-func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo { - return o.get(formKey(ns, bktName)) -} - -func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo { - if o.cidCache == nil { - return nil - } - - entry, err := o.cidCache.Get(cnrID) - if err != nil { - return nil - } - - key, ok := entry.(string) - if !ok { - o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)), - zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath)) - return nil - } - - return o.get(key) -} - -func (o *BucketCache) get(key string) *data.BucketInfo { - entry, err := o.cache.Get(key) - if err != nil { - return nil - } - - result, ok := entry.(*data.BucketInfo) - if !ok { - o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)), - zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath)) - return nil - } - - return result -} - -// Put puts an object to cache. -func (o *BucketCache) Put(bkt *data.BucketInfo) error { - if o.cidCache != nil { - if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil { - return err - } - } - - return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt) -} - -func formKey(ns, name string) string { - return name + "." + ns -} diff --git a/internal/cache/cors.go b/internal/cache/cors.go deleted file mode 100644 index 24465b8..0000000 --- a/internal/cache/cors.go +++ /dev/null @@ -1,62 +0,0 @@ -package cache - -import ( - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/bluele/gcache" - "go.uber.org/zap" -) - -// CORSCache contains cache with CORS objects. -type CORSCache struct { - cache gcache.Cache - logger *zap.Logger -} - -const ( - // DefaultCORSCacheSize is a default maximum number of entries in cache. 
- DefaultCORSCacheSize = 1e3 - // DefaultCORSCacheLifetime is a default lifetime of entries in cache. - DefaultCORSCacheLifetime = 5 * time.Minute -) - -// DefaultCORSConfig returns new default cache expiration values. -func DefaultCORSConfig(logger *zap.Logger) *Config { - return &Config{ - Size: DefaultCORSCacheSize, - Lifetime: DefaultCORSCacheLifetime, - Logger: logger, - } -} - -// NewCORSCache creates an object of CORSCache. -func NewCORSCache(config *Config) *CORSCache { - gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build() - return &CORSCache{cache: gc, logger: config.Logger} -} - -// Get returns a cached object. -func (o *CORSCache) Get(cnrID cid.ID) *data.CORSConfiguration { - entry, err := o.cache.Get(cnrID) - if err != nil { - return nil - } - - result, ok := entry.(*data.CORSConfiguration) - if !ok { - o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)), - zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath)) - return nil - } - - return result -} - -// Put puts an object to cache. -func (o *CORSCache) Put(cnrID cid.ID, cors *data.CORSConfiguration) error { - return o.cache.Set(cnrID, cors) -} diff --git a/internal/cache/netmap.go b/internal/cache/netmap.go deleted file mode 100644 index ce01b47..0000000 --- a/internal/cache/netmap.go +++ /dev/null @@ -1,65 +0,0 @@ -package cache - -import ( - "fmt" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "github.com/bluele/gcache" - "go.uber.org/zap" -) - -type ( - // NetmapCache provides cache for netmap. - NetmapCache struct { - cache gcache.Cache - logger *zap.Logger - } - - // NetmapCacheConfig stores expiration params for cache. 
- NetmapCacheConfig struct { - Lifetime time.Duration - Logger *zap.Logger - } -) - -const ( - DefaultNetmapCacheLifetime = time.Minute - netmapCacheSize = 1 - netmapKey = "netmap" -) - -// DefaultNetmapConfig returns new default cache expiration values. -func DefaultNetmapConfig(logger *zap.Logger) *NetmapCacheConfig { - return &NetmapCacheConfig{ - Lifetime: DefaultNetmapCacheLifetime, - Logger: logger, - } -} - -// NewNetmapCache creates an object of NetmapCache. -func NewNetmapCache(config *NetmapCacheConfig) *NetmapCache { - gc := gcache.New(netmapCacheSize).LRU().Expiration(config.Lifetime).Build() - return &NetmapCache{cache: gc, logger: config.Logger} -} - -func (c *NetmapCache) Get() *netmap.NetMap { - entry, err := c.cache.Get(netmapKey) - if err != nil { - return nil - } - - result, ok := entry.(netmap.NetMap) - if !ok { - c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)), - zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath)) - return nil - } - - return &result -} - -func (c *NetmapCache) Put(nm netmap.NetMap) error { - return c.cache.Set(netmapKey, nm) -} diff --git a/internal/data/cors.go b/internal/data/cors.go deleted file mode 100644 index d1b1106..0000000 --- a/internal/data/cors.go +++ /dev/null @@ -1,18 +0,0 @@ -package data - -type ( - // CORSConfiguration stores CORS configuration of a request. - CORSConfiguration struct { - CORSRules []CORSRule `xml:"CORSRule" json:"CORSRules"` - } - - // CORSRule stores rules for CORS configuration. 
- CORSRule struct { - AllowedHeaders []string `xml:"AllowedHeader" json:"AllowedHeaders"` - AllowedMethods []string `xml:"AllowedMethod" json:"AllowedMethods"` - AllowedOrigins []string `xml:"AllowedOrigin" json:"AllowedOrigins"` - ExposeHeaders []string `xml:"ExposeHeader" json:"ExposeHeaders"` - MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty" json:"MaxAgeSeconds,omitempty"` - AllowedCredentials bool `xml:"AllowedCredentials,omitempty" json:"AllowedCredentials,omitempty"` - } -) diff --git a/internal/data/info.go b/internal/data/info.go deleted file mode 100644 index f5c80d6..0000000 --- a/internal/data/info.go +++ /dev/null @@ -1,14 +0,0 @@ -package data - -import ( - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" -) - -type BucketInfo struct { - Name string // container name from system attribute - Zone string // container zone from system attribute - CID cid.ID - HomomorphicHashDisabled bool - PlacementPolicy netmap.PlacementPolicy -} diff --git a/internal/data/tree.go b/internal/data/tree.go deleted file mode 100644 index fcf8add..0000000 --- a/internal/data/tree.go +++ /dev/null @@ -1,27 +0,0 @@ -package data - -import ( - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// NodeVersion represent node from tree service. -type NodeVersion struct { - BaseNodeVersion -} - -// BaseNodeVersion is minimal node info from tree service. -// Basically used for "system" object. 
-type BaseNodeVersion struct { - ID uint64 - OID oid.ID - IsDeleteMarker bool -} - -type NodeInfo struct { - Meta []NodeMeta -} - -type NodeMeta interface { - GetKey() string - GetValue() []byte -} diff --git a/internal/handler/browse.go b/internal/handler/browse.go deleted file mode 100644 index d9e6625..0000000 --- a/internal/handler/browse.go +++ /dev/null @@ -1,382 +0,0 @@ -package handler - -import ( - "context" - "html/template" - "net/url" - "sort" - "strconv" - "strings" - "sync" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/docker/go-units" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -const ( - dateFormat = "02-01-2006 15:04" - attrOID = "OID" - attrCreated = "Created" - attrFileName = "FileName" - attrFilePath = "FilePath" - attrSize = "Size" - attrDeleteMarker = "IsDeleteMarker" -) - -type ( - BrowsePageData struct { - HasErrors bool - Container string - Prefix string - Protocol string - Objects []ResponseObject - } - ResponseObject struct { - OID string - Created string - FileName string - FilePath string - Size string - IsDir bool - GetURL string - IsDeleteMarker bool - } -) - -func newListObjectsResponseS3(attrs map[string]string) ResponseObject { - return ResponseObject{ - Created: formatTimestamp(attrs[attrCreated]), - OID: attrs[attrOID], - FileName: attrs[attrFileName], - Size: attrs[attrSize], - IsDir: attrs[attrOID] == "", - IsDeleteMarker: attrs[attrDeleteMarker] == "true", - } -} - -func newListObjectsResponseNative(attrs map[string]string) ResponseObject { - filename := lastPathElement(attrs[object.AttributeFilePath]) - if filename == "" { - filename = attrs[attrFileName] - } - return ResponseObject{ - OID: attrs[attrOID], - Created: 
formatTimestamp(attrs[object.AttributeTimestamp] + "000"), - FileName: filename, - FilePath: attrs[object.AttributeFilePath], - Size: attrs[attrSize], - IsDir: false, - } -} - -func getNextDir(filepath, prefix string) string { - restPath := strings.Replace(filepath, prefix, "", 1) - index := strings.Index(restPath, "/") - if index == -1 { - return "" - } - return restPath[:index] -} - -func lastPathElement(path string) string { - if path == "" { - return path - } - index := strings.LastIndex(path, "/") - if index == len(path)-1 { - index = strings.LastIndex(path[:index], "/") - } - return path[index+1:] -} - -func parseTimestamp(tstamp string) (time.Time, error) { - millis, err := strconv.ParseInt(tstamp, 10, 64) - if err != nil { - return time.Time{}, err - } - - return time.UnixMilli(millis), nil -} - -func formatTimestamp(strdate string) string { - date, err := parseTimestamp(strdate) - if err != nil || date.IsZero() { - return "" - } - - return date.Format(dateFormat) -} - -func formatSize(strsize string) string { - size, err := strconv.ParseFloat(strsize, 64) - if err != nil { - return "0B" - } - return units.HumanSize(size) -} - -func parentDir(prefix string) string { - index := strings.LastIndex(prefix, "/") - if index == -1 { - return prefix - } - return prefix[index:] -} - -func getParent(encPrefix string) string { - prefix, err := url.PathUnescape(encPrefix) - if err != nil { - return "" - } - if prefix != "" && prefix[len(prefix)-1] == '/' { - prefix = prefix[:len(prefix)-1] - } - - slashIndex := strings.LastIndex(prefix, "/") - if slashIndex == -1 { - return "" - } - return prefix[:slashIndex] -} - -func urlencode(path string) string { - var res strings.Builder - - prefixParts := strings.Split(path, "/") - for _, prefixPart := range prefixParts { - prefixPart = "/" + url.PathEscape(prefixPart) - if prefixPart == "/." || prefixPart == "/.." 
{ - prefixPart = url.PathEscape(prefixPart) - } - res.WriteString(prefixPart) - } - - return res.String() -} - -type GetObjectsResponse struct { - objects []ResponseObject - hasErrors bool - isNative bool -} - -func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) { - if prefix != "" && prefix[len(prefix)-1] == '/' { - prefix = prefix[:len(prefix)-1] - } - - nodes, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true) - if err != nil { - return nil, err - } - - result := &GetObjectsResponse{ - objects: make([]ResponseObject, 0, len(nodes)), - } - for _, node := range nodes { - meta := node.Meta - if meta == nil { - continue - } - var attrs = make(map[string]string, len(meta)) - for _, m := range meta { - attrs[m.GetKey()] = string(m.GetValue()) - } - obj := newListObjectsResponseS3(attrs) - if obj.IsDeleteMarker { - continue - } - obj.FilePath = prefix + "/" + obj.FileName - obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath) - result.objects = append(result.objects, obj) - } - - return result, nil -} - -func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) { - basePath := prefix - if basePath != "" && basePath[len(basePath)-1] != '/' { - basePath += "/" - } - - filters := object.NewSearchFilters() - filters.AddRootFilter() - if prefix != "" { - filters.AddFilter(object.AttributeFilePath, prefix, object.MatchCommonPrefix) - } - - prm := PrmObjectSearch{ - PrmAuth: PrmAuth{ - BearerToken: bearerToken(ctx), - }, - Container: bucketInfo.CID, - Filters: filters, - } - objectIDs, err := h.frostfs.SearchObjects(ctx, prm) - if err != nil { - return nil, err - } - defer objectIDs.Close() - - resp, err := h.headDirObjects(ctx, bucketInfo.CID, objectIDs, basePath) - if err != nil { - return nil, err - } - - log := h.reqLogger(ctx) - dirs := make(map[string]struct{}) - result := &GetObjectsResponse{ - 
objects: make([]ResponseObject, 0, 100), - isNative: true, - } - for objExt := range resp { - if objExt.Error != nil { - log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage)) - result.hasErrors = true - continue - } - if objExt.Object.IsDir { - if _, ok := dirs[objExt.Object.FileName]; ok { - continue - } - objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + urlencode(objExt.Object.FilePath) - dirs[objExt.Object.FileName] = struct{}{} - } else { - objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + "/" + objExt.Object.OID - } - result.objects = append(result.objects, objExt.Object) - } - return result, nil -} - -type ResponseObjectExtended struct { - Object ResponseObject - Error error -} - -func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs ResObjectSearch, basePath string) (<-chan ResponseObjectExtended, error) { - res := make(chan ResponseObjectExtended) - - go func() { - defer close(res) - log := h.reqLogger(ctx).With( - zap.String("cid", cnrID.EncodeToString()), - zap.String("path", basePath), - ) - var wg sync.WaitGroup - err := objectIDs.Iterate(func(id oid.ID) bool { - wg.Add(1) - err := h.workerPool.Submit(func() { - defer wg.Done() - var obj ResponseObjectExtended - obj.Object, obj.Error = h.headDirObject(ctx, cnrID, id, basePath) - res <- obj - }) - if err != nil { - wg.Done() - log.Warn(logs.FailedToSubmitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - select { - case <-ctx.Done(): - return true - default: - return false - } - }) - if err != nil { - log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - wg.Wait() - }() - - return res, nil -} - -func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID, basePath string) (ResponseObject, error) { - addr := newAddress(cnrID, objID) - obj, err := h.frostfs.HeadObject(ctx, PrmObjectHead{ - PrmAuth: PrmAuth{BearerToken: 
bearerToken(ctx)}, - Address: addr, - }) - if err != nil { - return ResponseObject{}, err - } - - attrs := loadAttributes(obj.Attributes()) - attrs[attrOID] = objID.EncodeToString() - if multipartSize, ok := attrs[attributeMultipartObjectSize]; ok { - attrs[attrSize] = multipartSize - } else { - attrs[attrSize] = strconv.FormatUint(obj.PayloadSize(), 10) - } - - dirname := getNextDir(attrs[object.AttributeFilePath], basePath) - if dirname == "" { - return newListObjectsResponseNative(attrs), nil - } - - return ResponseObject{ - FileName: dirname, - FilePath: basePath + dirname, - IsDir: true, - }, nil -} - -type browseParams struct { - bucketInfo *data.BucketInfo - prefix string - objects *GetObjectsResponse -} - -func (h *Handler) browseObjects(ctx context.Context, req *fasthttp.RequestCtx, p browseParams) { - const S3Protocol = "s3" - const FrostfsProtocol = "frostfs" - - objects := p.objects.objects - sort.Slice(objects, func(i, j int) bool { - if objects[i].IsDir == objects[j].IsDir { - return objects[i].FileName < objects[j].FileName - } - return objects[i].IsDir - }) - - tmpl, err := template.New("index").Funcs(template.FuncMap{ - "formatSize": formatSize, - "getParent": getParent, - "urlencode": urlencode, - "parentDir": parentDir, - }).Parse(h.config.IndexPageTemplate()) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToParseTemplate, err) - return - } - bucketName := p.bucketInfo.Name - protocol := S3Protocol - if p.objects.isNative { - bucketName = p.bucketInfo.CID.EncodeToString() - protocol = FrostfsProtocol - } - prefix := p.prefix - if prefix != "" && prefix[len(prefix)-1] != '/' { - prefix += "/" - } - - if err = tmpl.Execute(req, &BrowsePageData{ - Container: bucketName, - Prefix: prefix, - Objects: objects, - Protocol: protocol, - HasErrors: p.objects.hasErrors, - }); err != nil { - h.logAndSendError(ctx, req, logs.FailedToExecuteTemplate, err) - return - } -} diff --git a/internal/handler/container.go b/internal/handler/container.go 
deleted file mode 100644 index 3c7bec8..0000000 --- a/internal/handler/container.go +++ /dev/null @@ -1,42 +0,0 @@ -package handler - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "go.uber.org/zap" -) - -func (h *Handler) containerInfo(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) { - info := &data.BucketInfo{ - CID: cnrID, - Name: cnrID.EncodeToString(), - } - res, err := h.cnrContract.GetContainerByID(cnrID) - if err != nil { - return nil, fmt.Errorf("get frostfs container: %w", err) - } - - cnr := *res - - if domain := container.ReadDomain(cnr); domain.Name() != "" { - info.Name = domain.Name() - info.Zone = domain.Zone() - } - info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr) - info.PlacementPolicy = cnr.PlacementPolicy() - - if err = h.cache.Put(info); err != nil { - h.reqLogger(ctx).Warn(logs.CouldntPutBucketIntoCache, - zap.String("bucket name", info.Name), - zap.Stringer("cid", info.CID), - zap.Error(err), - logs.TagField(logs.TagDatapath)) - } - - return info, nil -} diff --git a/internal/handler/cors.go b/internal/handler/cors.go deleted file mode 100644 index 7e8db93..0000000 --- a/internal/handler/cors.go +++ /dev/null @@ -1,345 +0,0 @@ -package handler - -import ( - "context" - "encoding/xml" - "errors" - "fmt" - "regexp" - "slices" - "sort" - "strconv" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - qostagging "git.frostfs.info/TrueCloudLab/frostfs-qos/tagging" - cid 
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -const ( - internalIOTag = "internal" - corsFilePathTemplate = "/%s.cors" - wildcard = "*" -) - -var errNoCORS = errors.New("no CORS objects found") - -func (h *Handler) Preflight(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Preflight") - defer span.End() - - ctx = qostagging.ContextWithIOTag(ctx, internalIOTag) - cidParam, _ := req.UserValue("cid").(string) - reqLog := h.reqLogger(ctx) - log := reqLog.With(zap.String("cid", cidParam)) - - origin := req.Request.Header.Peek(fasthttp.HeaderOrigin) - if len(origin) == 0 { - log.Error(logs.EmptyOriginRequestHeader, logs.TagField(logs.TagDatapath)) - ResponseError(req, "Origin request header needed", fasthttp.StatusBadRequest) - return - } - - method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod) - if len(method) == 0 { - log.Error(logs.EmptyAccessControlRequestMethodHeader, logs.TagField(logs.TagDatapath)) - ResponseError(req, "Access-Control-Request-Method request header needed", fasthttp.StatusBadRequest) - return - } - - corsRule := h.config.CORS() - if corsRule != nil { - setCORSHeadersFromRule(req, corsRule) - return - } - - corsConfig, err := h.getCORSConfig(ctx, log, cidParam) - if err != nil { - log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath)) - status := fasthttp.StatusInternalServerError - if errors.Is(err, errNoCORS) { - status = fasthttp.StatusNotFound - } - ResponseError(req, "could not get CORS configuration: "+err.Error(), status) - return - } - - var headers []string - requestHeaders := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestHeaders) - if len(requestHeaders) > 0 { - headers = strings.Split(string(requestHeaders), ", 
") - } - - for _, rule := range corsConfig.CORSRules { - for _, o := range rule.AllowedOrigins { - if o == string(origin) || o == wildcard || (strings.Contains(o, "*") && match(o, string(origin))) { - for _, m := range rule.AllowedMethods { - if m == string(method) { - if !checkSubslice(rule.AllowedHeaders, headers) { - continue - } - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin)) - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", ")) - if headers != nil { - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, string(requestHeaders)) - } - if rule.ExposeHeaders != nil { - req.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(rule.ExposeHeaders, ", ")) - } - if rule.MaxAgeSeconds > 0 || rule.MaxAgeSeconds == -1 { - req.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(rule.MaxAgeSeconds)) - } - if o != wildcard { - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") - } - return - } - } - } - } - } - log.Error(logs.CORSRuleWasNotMatched, logs.TagField(logs.TagDatapath)) - ResponseError(req, "Forbidden", fasthttp.StatusForbidden) -} - -func (h *Handler) SetCORSHeaders(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.SetCORSHeaders") - defer span.End() - - origin := req.Request.Header.Peek(fasthttp.HeaderOrigin) - if len(origin) == 0 { - return - } - - method := req.Request.Header.Peek(fasthttp.HeaderAccessControlRequestMethod) - if len(method) == 0 { - method = req.Method() - } - - ctx = qostagging.ContextWithIOTag(ctx, internalIOTag) - cidParam, _ := req.UserValue("cid").(string) - reqLog := h.reqLogger(ctx) - log := reqLog.With(zap.String("cid", cidParam)) - - corsRule := h.config.CORS() - if corsRule != nil { - setCORSHeadersFromRule(req, corsRule) - return - } - - corsConfig, err := h.getCORSConfig(ctx, log, cidParam) - if err != 
nil { - log.Error(logs.CouldNotGetCORSConfiguration, zap.Error(err), logs.TagField(logs.TagDatapath)) - return - } - - var withCredentials bool - if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil { - withCredentials = true - } - - for _, rule := range corsConfig.CORSRules { - for _, o := range rule.AllowedOrigins { - if o == string(origin) || (strings.Contains(o, "*") && len(o) > 1 && match(o, string(origin))) { - for _, m := range rule.AllowedMethods { - if m == string(method) { - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin)) - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", ")) - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") - req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin) - return - } - } - } - if o == wildcard { - for _, m := range rule.AllowedMethods { - if m == string(method) { - if withCredentials { - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, string(origin)) - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") - req.Response.Header.Set(fasthttp.HeaderVary, fasthttp.HeaderOrigin) - } else { - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, o) - } - req.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(rule.AllowedMethods, ", ")) - return - } - } - } - } - } -} - -func (h *Handler) getCORSConfig(ctx context.Context, log *zap.Logger, cidStr string) (*data.CORSConfiguration, error) { - cnrID, err := h.resolveContainer(ctx, cidStr) - if err != nil { - return nil, fmt.Errorf("resolve container '%s': %w", cidStr, err) - } - - if cors := h.corsCache.Get(*cnrID); cors != nil { - return cors, nil - } - - objID, err := h.getLastCORSObject(ctx, *cnrID) - if err != nil { - return nil, fmt.Errorf("get last cors object: %w", err) - } - - var addr oid.Address - addr.SetContainer(h.corsCnrID) - addr.SetObject(objID) - 
corsObj, err := h.frostfs.GetObject(ctx, PrmObjectGet{ - Address: addr, - }) - if err != nil { - return nil, fmt.Errorf("get cors object '%s': %w", addr.EncodeToString(), err) - } - - corsConfig := &data.CORSConfiguration{} - if err = xml.NewDecoder(corsObj.Payload).Decode(corsConfig); err != nil { - return nil, fmt.Errorf("decode cors object: %w", err) - } - - if err = h.corsCache.Put(*cnrID, corsConfig); err != nil { - log.Warn(logs.CouldntCacheCors, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - - return corsConfig, nil -} - -func (h *Handler) getLastCORSObject(ctx context.Context, cnrID cid.ID) (oid.ID, error) { - filters := object.NewSearchFilters() - filters.AddRootFilter() - filters.AddFilter(object.AttributeFilePath, fmt.Sprintf(corsFilePathTemplate, cnrID), object.MatchStringEqual) - - res, err := h.frostfs.SearchObjects(ctx, PrmObjectSearch{ - Container: h.corsCnrID, - Filters: filters, - }) - if err != nil { - return oid.ID{}, fmt.Errorf("search cors versions: %w", err) - } - defer res.Close() - - var ( - addr oid.Address - obj *object.Object - headErr error - objs = make([]*object.Object, 0) - ) - addr.SetContainer(h.corsCnrID) - err = res.Iterate(func(id oid.ID) bool { - addr.SetObject(id) - obj, headErr = h.frostfs.HeadObject(ctx, PrmObjectHead{ - Address: addr, - }) - if headErr != nil { - headErr = fmt.Errorf("head cors object '%s': %w", addr.EncodeToString(), headErr) - return true - } - - objs = append(objs, obj) - return false - }) - if err != nil { - return oid.ID{}, fmt.Errorf("iterate cors objects: %w", err) - } - - if headErr != nil { - return oid.ID{}, headErr - } - - if len(objs) == 0 { - return oid.ID{}, errNoCORS - } - - sort.Slice(objs, func(i, j int) bool { - versionID1, _ := objs[i].ID() - versionID2, _ := objs[j].ID() - timestamp1 := utils.GetAttributeValue(objs[i].Attributes(), object.AttributeTimestamp) - timestamp2 := utils.GetAttributeValue(objs[j].Attributes(), object.AttributeTimestamp) - - if objs[i].CreationEpoch() != 
objs[j].CreationEpoch() { - return objs[i].CreationEpoch() < objs[j].CreationEpoch() - } - - if len(timestamp1) > 0 && len(timestamp2) > 0 && timestamp1 != timestamp2 { - unixTime1, err := strconv.ParseInt(timestamp1, 10, 64) - if err != nil { - return versionID1.EncodeToString() < versionID2.EncodeToString() - } - - unixTime2, err := strconv.ParseInt(timestamp2, 10, 64) - if err != nil { - return versionID1.EncodeToString() < versionID2.EncodeToString() - } - - return unixTime1 < unixTime2 - } - - return versionID1.EncodeToString() < versionID2.EncodeToString() - }) - - objID, _ := objs[len(objs)-1].ID() - return objID, nil -} - -func setCORSHeadersFromRule(c *fasthttp.RequestCtx, cors *data.CORSRule) { - c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAgeSeconds)) - - if len(cors.AllowedOrigins) != 0 { - c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowedOrigins[0]) - } - - if len(cors.AllowedMethods) != 0 { - c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowedMethods, ", ")) - } - - if len(cors.AllowedHeaders) != 0 { - c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowedHeaders, ", ")) - } - - if len(cors.ExposeHeaders) != 0 { - c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ", ")) - } - - if cors.AllowedCredentials { - c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true") - } -} - -func checkSubslice(slice []string, subSlice []string) bool { - if slices.Contains(slice, wildcard) { - return true - } - for _, r := range subSlice { - if !sliceContains(slice, r) { - return false - } - } - return true -} - -func sliceContains(slice []string, str string) bool { - for _, s := range slice { - if s == str || (strings.Contains(s, "*") && match(s, str)) { - return true - } - } - return false -} - -func match(tmpl, str string) bool { - regexpStr := "^" + 
regexp.QuoteMeta(tmpl) + "$" - regexpStr = regexpStr[:strings.Index(regexpStr, "*")-1] + "." + regexpStr[strings.Index(regexpStr, "*"):] - reg := regexp.MustCompile(regexpStr) - return reg.Match([]byte(str)) -} diff --git a/internal/handler/cors_test.go b/internal/handler/cors_test.go deleted file mode 100644 index 1ac07d7..0000000 --- a/internal/handler/cors_test.go +++ /dev/null @@ -1,930 +0,0 @@ -package handler - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "net/http" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" - "github.com/valyala/fasthttp" -) - -func TestPreflight(t *testing.T) { - hc := prepareHandlerContext(t) - - bktName := "bucket-preflight" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) - require.NoError(t, err) - hc.frostfs.SetContainer(cnrID, cnr) - - var epoch uint64 - - t.Run("CORS object", func(t *testing.T) { - for _, tc := range []struct { - name string - corsConfig *data.CORSConfiguration - requestHeaders map[string]string - expectedHeaders map[string]string - status int - }{ - { - name: "no CORS configuration", - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - fasthttp.HeaderAccessControlExposeHeaders: "", - fasthttp.HeaderAccessControlMaxAge: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - }, - 
status: fasthttp.StatusNotFound, - }, - { - name: "specific allowed origin", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"http://example.com"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedHeaders: []string{"Content-Type"}, - ExposeHeaders: []string{"x-amz-*", "X-Amz-*"}, - MaxAgeSeconds: 900, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "Content-Type", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD", - fasthttp.HeaderAccessControlAllowHeaders: "Content-Type", - fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*", - fasthttp.HeaderAccessControlMaxAge: "900", - fasthttp.HeaderAccessControlAllowCredentials: "true", - }, - status: fasthttp.StatusOK, - }, - { - name: "wildcard allowed origin", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedHeaders: []string{"Content-Type"}, - ExposeHeaders: []string{"x-amz-*", "X-Amz-*"}, - MaxAgeSeconds: 900, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD", - fasthttp.HeaderAccessControlAllowHeaders: "", - fasthttp.HeaderAccessControlExposeHeaders: "x-amz-*, X-Amz-*", - fasthttp.HeaderAccessControlMaxAge: "900", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - status: fasthttp.StatusOK, - }, - { - name: "not allowed header", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: 
[]string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedHeaders: []string{"Content-Type"}, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - fasthttp.HeaderAccessControlRequestHeaders: "Authorization", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - fasthttp.HeaderAccessControlExposeHeaders: "", - fasthttp.HeaderAccessControlMaxAge: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - status: fasthttp.StatusForbidden, - }, - { - name: "empty Origin header", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - }, - }, - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - fasthttp.HeaderAccessControlExposeHeaders: "", - fasthttp.HeaderAccessControlMaxAge: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - status: fasthttp.StatusBadRequest, - }, - { - name: "empty Access-Control-Request-Method header", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - fasthttp.HeaderAccessControlExposeHeaders: "", - fasthttp.HeaderAccessControlMaxAge: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - status: fasthttp.StatusBadRequest, - }, - } { - t.Run(tc.name, func(t *testing.T) { - if 
tc.corsConfig != nil { - epoch++ - setCORSObject(t, hc, cnrID, tc.corsConfig, epoch) - } - - r := prepareCORSRequest(t, bktName, tc.requestHeaders) - hc.Handler().Preflight(r) - - require.Equal(t, tc.status, r.Response.StatusCode()) - for k, v := range tc.expectedHeaders { - require.Equal(t, v, string(r.Response.Header.Peek(k))) - } - }) - } - }) - - t.Run("CORS config", func(t *testing.T) { - hc.cfg.cors = &data.CORSRule{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedHeaders: []string{"Content-Type", "Content-Encoding"}, - ExposeHeaders: []string{"x-amz-*", "X-Amz-*"}, - MaxAgeSeconds: 900, - AllowedCredentials: true, - } - - r := prepareCORSRequest(t, bktName, map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }) - hc.Handler().Preflight(r) - - require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode()) - require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge))) - require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin))) - require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods))) - require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders))) - require.Equal(t, "x-amz-*, X-Amz-*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders))) - require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials))) - }) -} - -func TestSetCORSHeaders(t *testing.T) { - hc := prepareHandlerContext(t) - - bktName := "bucket-set-cors-headers" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) - require.NoError(t, err) - hc.frostfs.SetContainer(cnrID, cnr) - - var epoch uint64 - - t.Run("CORS object", func(t *testing.T) { - for _, tc := range []struct { - name string - corsConfig *data.CORSConfiguration - requestHeaders 
map[string]string - expectedHeaders map[string]string - }{ - { - name: "empty Origin header", - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderVary: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - }, - { - name: "no CORS configuration", - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderVary: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - }, - }, - { - name: "specific allowed origin", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"http://example.com"}, - AllowedMethods: []string{"GET", "HEAD"}, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD", - fasthttp.HeaderVary: fasthttp.HeaderOrigin, - fasthttp.HeaderAccessControlAllowCredentials: "true", - }, - }, - { - name: "wildcard allowed origin, with credentials", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - }, - }, - }, - requestHeaders: func() map[string]string { - tkn := new(bearer.Token) - err = tkn.Sign(hc.key.PrivateKey) - require.NoError(t, err) - - t64 := base64.StdEncoding.EncodeToString(tkn.Marshal()) - require.NotEmpty(t, t64) - - return map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - fasthttp.HeaderAuthorization: "Bearer " + t64, - } - }(), - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD", - 
fasthttp.HeaderVary: fasthttp.HeaderOrigin, - fasthttp.HeaderAccessControlAllowCredentials: "true", - }, - }, - { - name: "wildcard allowed origin, without credentials", - corsConfig: &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - }, - }, - }, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://example.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "*", - fasthttp.HeaderAccessControlAllowMethods: "GET, HEAD", - fasthttp.HeaderVary: "", - fasthttp.HeaderAccessControlAllowCredentials: "", - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - epoch++ - setCORSObject(t, hc, cnrID, tc.corsConfig, epoch) - r := prepareCORSRequest(t, bktName, tc.requestHeaders) - hc.Handler().SetCORSHeaders(r) - - require.Equal(t, fasthttp.StatusOK, r.Response.StatusCode()) - for k, v := range tc.expectedHeaders { - require.Equal(t, v, string(r.Response.Header.Peek(k))) - } - }) - } - }) - - t.Run("CORS config", func(t *testing.T) { - hc.cfg.cors = &data.CORSRule{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "HEAD"}, - AllowedHeaders: []string{"Content-Type", "Content-Encoding"}, - ExposeHeaders: []string{"x-amz-*", "X-Amz-*"}, - MaxAgeSeconds: 900, - AllowedCredentials: true, - } - - r := prepareCORSRequest(t, bktName, map[string]string{fasthttp.HeaderOrigin: "http://example.com"}) - hc.Handler().SetCORSHeaders(r) - - require.Equal(t, "900", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlMaxAge))) - require.Equal(t, "*", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowOrigin))) - require.Equal(t, "GET, HEAD", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowMethods))) - require.Equal(t, "Content-Type, Content-Encoding", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowHeaders))) - require.Equal(t, "x-amz-*, X-Amz-*", 
string(r.Response.Header.Peek(fasthttp.HeaderAccessControlExposeHeaders))) - require.Equal(t, "true", string(r.Response.Header.Peek(fasthttp.HeaderAccessControlAllowCredentials))) - }) -} - -func TestCheckSubslice(t *testing.T) { - for _, tc := range []struct { - name string - allowed []string - actual []string - expected bool - }{ - { - name: "empty allowed slice", - allowed: []string{}, - actual: []string{"str1", "str2", "str3"}, - expected: false, - }, - { - name: "empty actual slice", - allowed: []string{"str1", "str2", "str3"}, - actual: []string{}, - expected: true, - }, - { - name: "allowed wildcard", - allowed: []string{"str", "*"}, - actual: []string{"str1", "str2", "str3"}, - expected: true, - }, - { - name: "similar allowed and actual", - allowed: []string{"str1", "str2", "str3"}, - actual: []string{"str1", "str2", "str3"}, - expected: true, - }, - { - name: "allowed actual", - allowed: []string{"str", "str1", "str2", "str4"}, - actual: []string{"str1", "str2"}, - expected: true, - }, - { - name: "not allowed actual", - allowed: []string{"str", "str1", "str2", "str4"}, - actual: []string{"str1", "str5"}, - expected: false, - }, - { - name: "wildcard in allowed", - allowed: []string{"str*"}, - actual: []string{"str", "str5"}, - expected: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - require.Equal(t, tc.expected, checkSubslice(tc.allowed, tc.actual)) - }) - } -} - -func TestAllowedOriginWildcards(t *testing.T) { - hc := prepareHandlerContext(t) - bktName := "bucket-allowed-origin-wildcards" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) - require.NoError(t, err) - hc.frostfs.SetContainer(cnrID, cnr) - - cfg := &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"*suffix.example"}, - AllowedMethods: []string{"GET"}, - }, - { - AllowedOrigins: []string{"https://*example"}, - AllowedMethods: []string{"GET"}, - }, - { - AllowedOrigins: []string{"prefix.example*"}, - AllowedMethods: []string{"GET"}, - 
}, - }, - } - setCORSObject(t, hc, cnrID, cfg, 1) - - for _, tc := range []struct { - name string - handler func(*fasthttp.RequestCtx) - requestHeaders map[string]string - expectedHeaders map[string]string - expectedStatus int - }{ - { - name: "set cors headers, empty request cors headers", - handler: hc.Handler().SetCORSHeaders, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, invalid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://origin.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, first rule, no symbols in place of wildcard", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "suffix.example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "suffix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, first rule, valid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://suffix.example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, first rule, invalid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://suffix-example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, second rule, no symbols in place of wildcard", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: 
map[string]string{ - fasthttp.HeaderOrigin: "https://example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, second rule, valid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://www.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, second rule, invalid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, third rule, no symbols in place of wildcard", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "prefix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, third rule, valid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example.com", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "set cors headers, third rule, invalid origin", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "www.prefix.example", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, 
third rule, invalid request method in header", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlRequestMethod: "PUT", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - }, - { - name: "set cors headers, third rule, valid request method in header", - handler: hc.Handler().SetCORSHeaders, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, empty request cors headers", - handler: hc.Handler().Preflight, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusBadRequest, - }, - { - name: "preflight, invalid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://origin.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "preflight, first rule, no symbols in place of wildcard", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "suffix.example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "suffix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "prelight, first rule, valid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - 
fasthttp.HeaderOrigin: "http://suffix.example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "http://suffix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, first rule, invalid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "http://suffix-example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "preflight, second rule, no symbols in place of wildcard", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, second rule, valid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://www.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, second rule, invalid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "preflight, third rule, no symbols in place of wildcard", - handler: 
hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "prefix.example", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, third rule, valid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlAllowMethods: "GET", - }, - }, - { - name: "preflight, third rule, invalid origin", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "www.prefix.example", - fasthttp.HeaderAccessControlRequestMethod: "GET", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "preflight, third rule, invalid request method in header", - handler: hc.Handler().Preflight, - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "prefix.example.com", - fasthttp.HeaderAccessControlRequestMethod: "PUT", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - }, - expectedStatus: http.StatusForbidden, - }, - } { - t.Run(tc.name, func(t *testing.T) { - r := prepareCORSRequest(t, bktName, tc.requestHeaders) - tc.handler(r) - - expectedStatus := fasthttp.StatusOK - if tc.expectedStatus != 0 { - expectedStatus = tc.expectedStatus - } - require.Equal(t, expectedStatus, r.Response.StatusCode()) - for k, v := range tc.expectedHeaders { - require.Equal(t, v, string(r.Response.Header.Peek(k))) - } - }) - } -} - -func TestAllowedHeaderWildcards(t 
*testing.T) { - hc := prepareHandlerContext(t) - bktName := "bucket-allowed-header-wildcards" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.Private) - require.NoError(t, err) - hc.frostfs.SetContainer(cnrID, cnr) - - cfg := &data.CORSConfiguration{ - CORSRules: []data.CORSRule{ - { - AllowedOrigins: []string{"https://www.example.com"}, - AllowedMethods: []string{"HEAD"}, - AllowedHeaders: []string{"*-suffix"}, - }, - { - AllowedOrigins: []string{"https://www.example.com"}, - AllowedMethods: []string{"HEAD"}, - AllowedHeaders: []string{"start-*-end"}, - }, - { - AllowedOrigins: []string{"https://www.example.com"}, - AllowedMethods: []string{"HEAD"}, - AllowedHeaders: []string{"X-Amz-*"}, - }, - }, - } - setCORSObject(t, hc, cnrID, cfg, 1) - - for _, tc := range []struct { - name string - requestHeaders map[string]string - expectedHeaders map[string]string - expectedStatus int - }{ - { - name: "first rule, valid headers", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "header-suffix, -suffix", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlAllowMethods: "HEAD", - fasthttp.HeaderAccessControlAllowHeaders: "header-suffix, -suffix", - }, - }, - { - name: "first rule, invalid headers", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "header-suffix-*", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "second rule, valid headers", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: 
"https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "start--end, start-header-end", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlAllowMethods: "HEAD", - fasthttp.HeaderAccessControlAllowHeaders: "start--end, start-header-end", - }, - }, - { - name: "second rule, invalid header ending", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "start-header-end-*", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "second rule, invalid header beginning", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "*-start-header-end", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - }, - expectedStatus: http.StatusForbidden, - }, - { - name: "third rule, valid headers", - requestHeaders: map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "X-Amz-Date, X-Amz-Content-Sha256", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlAllowMethods: "HEAD", - fasthttp.HeaderAccessControlAllowHeaders: "X-Amz-Date, X-Amz-Content-Sha256", - }, - }, - { - name: "third rule, invalid headers", - requestHeaders: 
map[string]string{ - fasthttp.HeaderOrigin: "https://www.example.com", - fasthttp.HeaderAccessControlRequestMethod: "HEAD", - fasthttp.HeaderAccessControlRequestHeaders: "Authorization", - }, - expectedHeaders: map[string]string{ - fasthttp.HeaderAccessControlAllowOrigin: "", - fasthttp.HeaderAccessControlAllowMethods: "", - fasthttp.HeaderAccessControlAllowHeaders: "", - }, - expectedStatus: http.StatusForbidden, - }, - } { - t.Run(tc.name, func(t *testing.T) { - r := prepareCORSRequest(t, bktName, tc.requestHeaders) - hc.Handler().Preflight(r) - - expectedStatus := http.StatusOK - if tc.expectedStatus != 0 { - expectedStatus = tc.expectedStatus - } - require.Equal(t, expectedStatus, r.Response.StatusCode()) - for k, v := range tc.expectedHeaders { - require.Equal(t, v, string(r.Response.Header.Peek(k))) - } - }) - } -} - -func setCORSObject(t *testing.T, hc *handlerContext, cnrID cid.ID, corsConfig *data.CORSConfiguration, epoch uint64) { - payload, err := xml.Marshal(corsConfig) - require.NoError(t, err) - - a := object.NewAttribute() - a.SetKey(object.AttributeFilePath) - a.SetValue(fmt.Sprintf(corsFilePathTemplate, cnrID)) - - objID := oidtest.ID() - obj := object.New() - obj.SetAttributes(*a) - obj.SetOwnerID(hc.owner) - obj.SetPayload(payload) - obj.SetPayloadSize(uint64(len(payload))) - obj.SetContainerID(hc.corsCnr) - obj.SetID(objID) - obj.SetCreationEpoch(epoch) - - var addr oid.Address - addr.SetObject(objID) - addr.SetContainer(hc.corsCnr) - - hc.frostfs.SetObject(addr, obj) -} diff --git a/internal/handler/download.go b/internal/handler/download.go deleted file mode 100644 index 15fb886..0000000 --- a/internal/handler/download.go +++ /dev/null @@ -1,457 +0,0 @@ -package handler - -import ( - "archive/tar" - "archive/zip" - "bufio" - "compress/gzip" - "context" - "errors" - "fmt" - "io" - "net/url" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - 
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -// DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format. -func (h *Handler) DownloadByAddressOrBucketName(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAddressOrBucketName") - defer span.End() - - cidParam := req.UserValue("cid").(string) - oidParam := req.UserValue("oid").(string) - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( - zap.String("cid", cidParam), - zap.String("oid", oidParam), - )) - - path, err := url.QueryUnescape(oidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err) - return - } - - bktInfo, err := h.getBucketInfo(ctx, cidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) - if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) { - h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err) - return - } - - prm := MiddlewareParam{ - Context: ctx, - Request: req, - BktInfo: bktInfo, - Path: path, - } - - indexPageEnabled := h.config.IndexPageEnabled() - - if checkS3Err == nil { - run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), - Middleware{Func: h.byS3PathMiddleware(h.receiveFile, noopFormer), Enabled: true}, - Middleware{Func: h.byS3PathMiddleware(h.receiveFile, indexFormer), Enabled: indexPageEnabled}, - Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsS3), Enabled: indexPageEnabled}, - ) - } else { - 
slashFallbackEnabled := h.config.EnableFilepathSlashFallback() - fileNameFallbackEnabled := h.config.EnableFilepathFallback() - - run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), - Middleware{Func: h.byAddressMiddleware(h.receiveFile), Enabled: true}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, noopFormer), Enabled: true}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.receiveFile, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled}, - Middleware{Func: h.browseIndexMiddleware(h.getDirObjectsNative), Enabled: indexPageEnabled}, - ) - } -} - -type ObjectHandlerFunc func(context.Context, *fasthttp.RequestCtx, oid.Address) - -type MiddlewareFunc func(param MiddlewareParam) bool - -type MiddlewareParam struct { - Context context.Context - Request *fasthttp.RequestCtx - BktInfo *data.BucketInfo - Path string -} - -type Middleware struct { - Func MiddlewareFunc - Enabled bool -} - -func run(prm MiddlewareParam, defaultMiddleware MiddlewareFunc, middlewares ...Middleware) { - for _, m := range middlewares { - if m.Enabled && !m.Func(prm) { - return - } - } - - defaultMiddleware(prm) -} - -func indexFormer(path string) string { - indexPath := path - if indexPath != "" && !strings.HasSuffix(indexPath, "/") { - indexPath += "/" - } - - return indexPath + "index.html" -} - -func 
reverseLeadingSlash(path string) string { - if path == "" || path == "/" { - return path - } - - if path[0] == '/' { - return path[1:] - } - - return "/" + path -} - -func noopFormer(path string) string { - return path -} - -func (h *Handler) byS3PathMiddleware(handler func(context.Context, *fasthttp.RequestCtx, oid.Address), pathFormer func(string) string) MiddlewareFunc { - return func(prm MiddlewareParam) bool { - ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byS3Path") - defer span.End() - - path := pathFormer(prm.Path) - - foundOID, err := h.tree.GetLatestVersion(ctx, &prm.BktInfo.CID, path) - if err == nil { - if foundOID.IsDeleteMarker { - h.logAndSendError(ctx, prm.Request, logs.IndexWasDeleted, ErrObjectNotFound) - return false - } - - addr := newAddress(prm.BktInfo.CID, foundOID.OID) - handler(ctx, prm.Request, addr) - return false - } - - if !errors.Is(err, tree.ErrNodeNotFound) { - h.logAndSendError(ctx, prm.Request, logs.FailedToGetLatestVersionOfIndexObject, err, zap.String("path", path)) - return false - } - - return true - } -} - -func (h *Handler) byAttributeSearchMiddleware(handler ObjectHandlerFunc, attr string, pathFormer func(string) string) MiddlewareFunc { - return func(prm MiddlewareParam) bool { - ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAttributeSearch") - defer span.End() - - path := pathFormer(prm.Path) - - res, err := h.search(ctx, prm.BktInfo.CID, attr, path, object.MatchStringEqual) - if err != nil { - h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err) - return false - } - defer res.Close() - - buf := make([]oid.ID, 1) - n, err := res.Read(buf) - if err == nil && n > 0 { - addr := newAddress(prm.BktInfo.CID, buf[0]) - handler(ctx, prm.Request, addr) - return false - } - - if !errors.Is(err, io.EOF) { - h.logAndSendError(ctx, prm.Request, logs.FailedToFindObjectByAttribute, err) - return false - } - - return true - } -} - -func (h *Handler) byAddressMiddleware(handler 
ObjectHandlerFunc) MiddlewareFunc { - return func(prm MiddlewareParam) bool { - ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.byAddress") - defer span.End() - - var objID oid.ID - if objID.DecodeString(prm.Path) == nil { - handler(ctx, prm.Request, newAddress(prm.BktInfo.CID, objID)) - return false - } - - return true - } -} - -// DownloadByAttribute handles attribute-based download requests. -func (h *Handler) DownloadByAttribute(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadByAttribute") - defer span.End() - - h.byAttribute(ctx, req, h.receiveFile) -} - -func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) { - filters := object.NewSearchFilters() - filters.AddRootFilter() - filters.AddFilter(key, val, op) - - prm := PrmObjectSearch{ - PrmAuth: PrmAuth{ - BearerToken: bearerToken(ctx), - }, - Container: cnrID, - Filters: filters, - } - - return h.frostfs.SearchObjects(ctx, prm) -} - -// DownloadZip handles zip by prefix requests. 
-func (h *Handler) DownloadZip(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadZip") - defer span.End() - - scid, _ := req.UserValue("cid").(string) - prefix, _ := req.UserValue("prefix").(string) - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix))) - - bktInfo, err := h.getBucketInfo(ctx, scid) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix) - if err != nil { - return - } - - req.Response.Header.Set(fasthttp.HeaderContentType, "application/zip") - req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"") - - req.SetBodyStreamWriter(h.getZipResponseWriter(ctx, resSearch, bktInfo)) -} - -func (h *Handler) getZipResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) { - return func(w *bufio.Writer) { - defer resSearch.Close() - - buf := make([]byte, 3<<20) - zipWriter := zip.NewWriter(w) - var objectsWritten int - - errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf, - func(obj *object.Object) (io.Writer, error) { - objectsWritten++ - return h.createZipFile(zipWriter, obj) - }), - ) - if errIter != nil { - h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath)) - return - } else if objectsWritten == 0 { - h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath)) - } - if err := zipWriter.Close(); err != nil { - h.reqLogger(ctx).Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - } -} - -func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) { - method := zip.Store - if h.config.ArchiveCompression() { - method = zip.Deflate - } - - filePath := getFilePath(obj) - if 
len(filePath) == 0 || filePath[len(filePath)-1] == '/' { - return nil, fmt.Errorf("invalid filepath '%s'", filePath) - } - - return zw.CreateHeader(&zip.FileHeader{ - Name: filePath, - Method: method, - Modified: time.Now(), - }) -} - -// DownloadTar forms tar.gz from objects by prefix. -func (h *Handler) DownloadTar(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.DownloadTar") - defer span.End() - - scid, _ := req.UserValue("cid").(string) - prefix, _ := req.UserValue("prefix").(string) - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", scid), zap.String("prefix", prefix))) - - bktInfo, err := h.getBucketInfo(ctx, scid) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - resSearch, err := h.searchObjectsByPrefix(ctx, bktInfo.CID, prefix) - if err != nil { - return - } - - req.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip") - req.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"") - - req.SetBodyStreamWriter(h.getTarResponseWriter(ctx, resSearch, bktInfo)) -} - -func (h *Handler) getTarResponseWriter(ctx context.Context, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) { - return func(w *bufio.Writer) { - defer resSearch.Close() - - compressionLevel := gzip.NoCompression - if h.config.ArchiveCompression() { - compressionLevel = gzip.DefaultCompression - } - - // ignore error because it's not nil only if compressionLevel argument is invalid - gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel) - tarWriter := tar.NewWriter(gzipWriter) - - defer func() { - if err := tarWriter.Close(); err != nil { - h.reqLogger(ctx).Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - if err := gzipWriter.Close(); err != nil { - h.reqLogger(ctx).Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - 
}() - - var objectsWritten int - buf := make([]byte, 3<<20) // the same as for upload - - errIter := resSearch.Iterate(h.putObjectToArchive(ctx, bktInfo.CID, buf, - func(obj *object.Object) (io.Writer, error) { - objectsWritten++ - return h.createTarFile(tarWriter, obj) - }), - ) - if errIter != nil { - h.reqLogger(ctx).Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath)) - } else if objectsWritten == 0 { - h.reqLogger(ctx).Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath)) - } - } -} - -func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) { - filePath := getFilePath(obj) - if len(filePath) == 0 || filePath[len(filePath)-1] == '/' { - return nil, fmt.Errorf("invalid filepath '%s'", filePath) - } - - return tw, tw.WriteHeader(&tar.Header{ - Name: filePath, - Mode: 0655, - Size: int64(obj.PayloadSize()), - }) -} - -func (h *Handler) putObjectToArchive(ctx context.Context, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool { - return func(id oid.ID) bool { - logger := h.reqLogger(ctx).With(zap.String("oid", id.EncodeToString())) - - prm := PrmObjectGet{ - PrmAuth: PrmAuth{ - BearerToken: bearerToken(ctx), - }, - Address: newAddress(cnrID, id), - } - - resGet, err := h.frostfs.GetObject(ctx, prm) - if err != nil { - logger.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage)) - return false - } - - fileWriter, err := createArchiveHeader(&resGet.Header) - if err != nil { - logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath)) - return false - } - - if err = writeToArchive(resGet, fileWriter, buf); err != nil { - logger.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath)) - return false - } - - return false - } -} - -func (h *Handler) searchObjectsByPrefix(ctx context.Context, cnrID cid.ID, prefix string) 
(ResObjectSearch, error) { - prefix, err := url.QueryUnescape(prefix) - if err != nil { - return nil, fmt.Errorf("unescape prefix: %w", err) - } - - resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix) - if err != nil { - return nil, fmt.Errorf("search objects by prefix: %w", err) - } - - return resSearch, nil -} - -func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error { - var err error - if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil { - return fmt.Errorf("copy object payload to zip file: %v", err) - } - - if err = resGet.Payload.Close(); err != nil { - return fmt.Errorf("object body close error: %w", err) - } - - return nil -} - -func getFilePath(obj *object.Object) string { - for _, attr := range obj.Attributes() { - if attr.Key() == object.AttributeFilePath { - return attr.Value() - } - } - - return "" -} diff --git a/internal/handler/filter.go b/internal/handler/filter.go deleted file mode 100644 index da99db7..0000000 --- a/internal/handler/filter.go +++ /dev/null @@ -1,58 +0,0 @@ -package handler - -import ( - "bytes" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]string, error) { - var err error - result := make(map[string]string) - prefix := []byte(utils.UserAttributeHeaderPrefix) - - header.VisitAll(func(key, val []byte) { - // checks that the key and the val not empty - if len(key) == 0 || len(val) == 0 { - return - } - - // checks that the key has attribute prefix - if !bytes.HasPrefix(key, prefix) { - return - } - - // removing attribute prefix - clearKey := bytes.TrimPrefix(key, prefix) - - clearKey = utils.TransformIfSystem(clearKey) - - // checks that the attribute key is not empty - if len(clearKey) == 0 { - return - } - - // check if key gets 
duplicated - // return error containing full key name (with prefix) - if _, ok := result[string(clearKey)]; ok { - err = fmt.Errorf("key duplication error: %s", string(key)) - return - } - - // make string representation of key / val - k, v := string(clearKey), string(val) - - result[k] = v - - l.Debug(logs.AddAttributeToResultObject, - zap.String("key", k), - zap.String("val", v), - logs.TagField(logs.TagDatapath)) - }) - - return result, err -} diff --git a/internal/handler/filter_test.go b/internal/handler/filter_test.go deleted file mode 100644 index 0322952..0000000 --- a/internal/handler/filter_test.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !integration - -package handler - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -func TestFilter(t *testing.T) { - log := zap.NewNop() - - t.Run("duplicate keys error", func(t *testing.T) { - req := &fasthttp.RequestHeader{} - req.DisableNormalizing() - req.Add("X-Attribute-DupKey", "first-value") - req.Add("X-Attribute-DupKey", "second-value") - _, err := filterHeaders(log, req) - require.Error(t, err) - }) - - t.Run("duplicate system keys error", func(t *testing.T) { - req := &fasthttp.RequestHeader{} - req.DisableNormalizing() - req.Add("X-Attribute-System-DupKey", "first-value") - req.Add("X-Attribute-System-DupKey", "second-value") - _, err := filterHeaders(log, req) - require.Error(t, err) - }) - - req := &fasthttp.RequestHeader{} - req.DisableNormalizing() - - req.Set("X-Attribute-System-Expiration-Epoch1", "101") - req.Set("X-Attribute-SYSTEM-Expiration-Epoch2", "102") - req.Set("X-Attribute-system-Expiration-Epoch3", "103") - req.Set("X-Attribute-MyAttribute", "value") - - expected := map[string]string{ - "__SYSTEM__EXPIRATION_EPOCH1": "101", - "MyAttribute": "value", - "__SYSTEM__EXPIRATION_EPOCH3": "103", - "__SYSTEM__EXPIRATION_EPOCH2": "102", - } - - result, err := filterHeaders(log, req) - require.NoError(t, err) - - 
require.Equal(t, expected, result) -} diff --git a/internal/handler/frostfs_mock.go b/internal/handler/frostfs_mock.go deleted file mode 100644 index 540697f..0000000 --- a/internal/handler/frostfs_mock.go +++ /dev/null @@ -1,289 +0,0 @@ -package handler - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/sha256" - "fmt" - "io" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" -) - -type TestFrostFS struct { - objects map[string]*object.Object - containers map[string]*container.Container - accessList map[string]bool - key *keys.PrivateKey -} - -func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS { - return &TestFrostFS{ - objects: make(map[string]*object.Object), - containers: make(map[string]*container.Container), - accessList: make(map[string]bool), - key: key, - } -} - -func (t *TestFrostFS) ContainerID(name string) (*cid.ID, error) { - for id, cnr := range t.containers { - if container.Name(*cnr) == name { - var cnrID cid.ID - return &cnrID, cnrID.DecodeString(id) - } - } - return nil, fmt.Errorf("not found") -} - -func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) { - t.containers[cnrID.EncodeToString()] = cnr -} - -func (t *TestFrostFS) SetObject(addr oid.Address, obj *object.Object) { - t.objects[addr.EncodeToString()] = obj -} - -// AllowUserOperation grants access to object operations. 
-// Empty userID and objID means any user and object respectively. -func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) { - t.accessList[fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID)] = true -} - -func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) { - for k, v := range t.containers { - if k == prm.ContainerID.EncodeToString() { - return v, nil - } - } - - return nil, fmt.Errorf("container not found %s", prm.ContainerID) -} - -func (t *TestFrostFS) requestOwner(btoken *bearer.Token) user.ID { - if btoken != nil { - return bearer.ResolveIssuer(*btoken) - } - - var owner user.ID - user.IDFromKey(&owner, t.key.PrivateKey.PublicKey) - return owner -} - -func (t *TestFrostFS) retrieveObject(addr oid.Address, btoken *bearer.Token) (*object.Object, error) { - sAddr := addr.EncodeToString() - - if obj, ok := t.objects[sAddr]; ok { - owner := t.requestOwner(btoken) - - if !t.isAllowed(addr.Container(), owner, acl.OpObjectGet, addr.Object()) { - return nil, ErrAccessDenied - } - - return obj, nil - } - - return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr) -} - -func (t *TestFrostFS) HeadObject(_ context.Context, prm PrmObjectHead) (*object.Object, error) { - return t.retrieveObject(prm.Address, prm.BearerToken) -} - -func (t *TestFrostFS) GetObject(_ context.Context, prm PrmObjectGet) (*Object, error) { - obj, err := t.retrieveObject(prm.Address, prm.BearerToken) - if err != nil { - return nil, err - } - - return &Object{ - Header: *obj, - Payload: io.NopCloser(bytes.NewReader(obj.Payload())), - }, nil -} - -func (t *TestFrostFS) RangeObject(_ context.Context, prm PrmObjectRange) (io.ReadCloser, error) { - obj, err := t.retrieveObject(prm.Address, prm.BearerToken) - if err != nil { - return nil, err - } - - off := prm.PayloadRange[0] - payload := obj.Payload()[off : off+prm.PayloadRange[1]] - return io.NopCloser(bytes.NewReader(payload)), nil -} - -func (t 
*TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) { - b := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, b); err != nil { - return oid.ID{}, err - } - var id oid.ID - id.SetSHA256(sha256.Sum256(b)) - prm.Object.SetID(id) - - attrs := prm.Object.Attributes() - if prm.ClientCut { - a := object.NewAttribute() - a.SetKey("s3-client-cut") - a.SetValue("true") - attrs = append(attrs, *a) - } - - prm.Object.SetAttributes(attrs...) - - if prm.Payload != nil { - all, err := io.ReadAll(prm.Payload) - if err != nil { - return oid.ID{}, err - } - prm.Object.SetPayload(all) - prm.Object.SetPayloadSize(uint64(len(all))) - var hash checksum.Checksum - checksum.Calculate(&hash, checksum.SHA256, all) - prm.Object.SetPayloadChecksum(hash) - } - - cnrID, _ := prm.Object.ContainerID() - objID, _ := prm.Object.ID() - - owner := t.requestOwner(prm.BearerToken) - - if !t.isAllowed(cnrID, owner, acl.OpObjectPut, objID) { - return oid.ID{}, ErrAccessDenied - } - - addr := newAddress(cnrID, objID) - t.objects[addr.EncodeToString()] = prm.Object - return objID, nil -} - -type resObjectSearchMock struct { - res []oid.ID -} - -func (r *resObjectSearchMock) Read(buf []oid.ID) (int, error) { - for i := range buf { - if i > len(r.res)-1 { - return len(r.res), io.EOF - } - buf[i] = r.res[i] - } - - r.res = r.res[len(buf):] - - return len(buf), nil -} - -func (r *resObjectSearchMock) Iterate(f func(oid.ID) bool) error { - for _, id := range r.res { - if f(id) { - return nil - } - } - - return nil -} - -func (r *resObjectSearchMock) Close() {} - -func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (ResObjectSearch, error) { - if !t.isAllowed(prm.Container, t.requestOwner(prm.BearerToken), acl.OpObjectSearch, oid.ID{}) { - return nil, ErrAccessDenied - } - - cidStr := prm.Container.EncodeToString() - var res []oid.ID - - if len(prm.Filters) == 1 { // match root filter - for k, v := range t.objects { - if strings.Contains(k, cidStr) { 
- id, _ := v.ID() - res = append(res, id) - } - } - return &resObjectSearchMock{res: res}, nil - } - - filter := prm.Filters[1] - if len(prm.Filters) != 2 || - filter.Operation() != object.MatchCommonPrefix && filter.Operation() != object.MatchStringEqual { - return nil, fmt.Errorf("usupported filters") - } - - for k, v := range t.objects { - if strings.Contains(k, cidStr) && isMatched(v.Attributes(), filter) { - id, _ := v.ID() - res = append(res, id) - } - } - - return &resObjectSearchMock{res: res}, nil -} - -func (t *TestFrostFS) GetContainerByID(cid cid.ID) (*container.Container, error) { - for k, v := range t.containers { - if k == cid.EncodeToString() { - return v, nil - } - } - - return nil, fmt.Errorf("container does not exist %s", cid) -} - -func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) { - return nil, nil -} - -func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool { - for _, attr := range attributes { - if attr.Key() == filter.Header() { - switch filter.Operation() { - case object.MatchStringEqual: - return attr.Value() == filter.Value() - case object.MatchCommonPrefix: - return strings.HasPrefix(attr.Value(), filter.Value()) - default: - return false - } - } - } - - return false -} - -func (t *TestFrostFS) GetEpochDurations(context.Context) (*utils.EpochDurations, error) { - return &utils.EpochDurations{ - CurrentEpoch: 10, - MsPerBlock: 1000, - BlockPerEpoch: 100, - }, nil -} - -func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) bool { - keysToCheck := []string{ - fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID), - fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, oid.ID{}), - fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, objID), - fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, oid.ID{}), - } - - for _, key := range keysToCheck { - if t.accessList[key] { - return true - } - } - return false -} diff --git 
a/internal/handler/handler.go b/internal/handler/handler.go deleted file mode 100644 index 2efd71d..0000000 --- a/internal/handler/handler.go +++ /dev/null @@ -1,348 +0,0 @@ -package handler - -import ( - "context" - "errors" - "fmt" - "io" - "net/url" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/panjf2000/ants/v2" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -type Config interface { - DefaultTimestamp() bool - ArchiveCompression() bool - ClientCut() bool - IndexPageEnabled() bool - IndexPageTemplate() string - BufferMaxSizeForPut() uint64 - NamespaceHeader() string - EnableFilepathFallback() bool - EnableFilepathSlashFallback() bool - FormContainerZone(string) string - CORS() *data.CORSRule -} - -// PrmContainer groups parameters of FrostFS.Container operation. -type PrmContainer struct { - // Container identifier. - ContainerID cid.ID -} - -// PrmAuth groups authentication parameters for the FrostFS operation. -type PrmAuth struct { - // Bearer token to be used for the operation. Overlaps PrivateKey. Optional. - BearerToken *bearer.Token -} - -// PrmObjectHead groups parameters of FrostFS.HeadObject operation. -type PrmObjectHead struct { - // Authentication parameters. 
- PrmAuth - - // Address to read the object header from. - Address oid.Address -} - -// PrmObjectGet groups parameters of FrostFS.GetObject operation. -type PrmObjectGet struct { - // Authentication parameters. - PrmAuth - - // Address to read the object header from. - Address oid.Address -} - -// PrmObjectRange groups parameters of FrostFS.RangeObject operation. -type PrmObjectRange struct { - // Authentication parameters. - PrmAuth - - // Address to read the object header from. - Address oid.Address - - // Offset-length range of the object payload to be read. - PayloadRange [2]uint64 -} - -// Object represents FrostFS object. -type Object struct { - // Object header (doesn't contain payload). - Header object.Object - - // Object payload part encapsulated in io.Reader primitive. - // Returns ErrAccessDenied on read access violation. - Payload io.ReadCloser -} - -// PrmObjectCreate groups parameters of FrostFS.CreateObject operation. -type PrmObjectCreate struct { - // Authentication parameters. - PrmAuth - - Object *object.Object - - // Object payload encapsulated in io.Reader primitive. - Payload io.Reader - - // Enables client side object preparing. - ClientCut bool - - // Disables using Tillich-Zémor hash for payload. - WithoutHomomorphicHash bool - - // Sets max buffer size to read payload. - BufferMaxSize uint64 -} - -// PrmObjectSearch groups parameters of FrostFS.sear SearchObjects operation. -type PrmObjectSearch struct { - // Authentication parameters. - PrmAuth - - // Container to select the objects from. - Container cid.ID - - Filters object.SearchFilters -} - -type PrmInitMultiObjectReader struct { - // payload range - Off, Ln uint64 - - Addr oid.Address - Bearer *bearer.Token -} - -type ResObjectSearch interface { - Read(buf []oid.ID) (int, error) - Iterate(f func(oid.ID) bool) error - Close() -} - -var ( - // ErrAccessDenied is returned from FrostFS in case of access violation. 
- ErrAccessDenied = errors.New("access denied") - // ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc. - ErrGatewayTimeout = errors.New("gateway timeout") - // ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded. - ErrQuotaLimitReached = errors.New("quota limit reached") - // ErrContainerNotFound is returned from FrostFS in case of container was not found. - ErrContainerNotFound = errors.New("container not found") - // ErrObjectNotFound is returned from FrostFS in case of object was not found. - ErrObjectNotFound = errors.New("object not found") -) - -// FrostFS represents virtual connection to FrostFS network. -type FrostFS interface { - Container(context.Context, PrmContainer) (*container.Container, error) - HeadObject(context.Context, PrmObjectHead) (*object.Object, error) - GetObject(context.Context, PrmObjectGet) (*Object, error) - RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error) - CreateObject(context.Context, PrmObjectCreate) (oid.ID, error) - SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error) - InitMultiObjectReader(ctx context.Context, p PrmInitMultiObjectReader) (io.Reader, error) - - utils.EpochInfoFetcher -} - -type ContainerResolver interface { - Resolve(ctx context.Context, zone, name string) (*cid.ID, error) -} - -type ContainerContract interface { - // GetContainerByID reads a container from contract by ID. 
- GetContainerByID(cid.ID) (*container.Container, error) -} - -type Handler struct { - log *zap.Logger - frostfs FrostFS - ownerID *user.ID - config Config - containerResolver ContainerResolver - cnrContract ContainerContract - tree *tree.Tree - cache *cache.BucketCache - workerPool *ants.Pool - corsCnrID cid.ID - corsCache *cache.CORSCache -} - -type AppParams struct { - Logger *zap.Logger - FrostFS FrostFS - Owner *user.ID - Resolver ContainerResolver - Cache *cache.BucketCache - CORSCnrID cid.ID - CORSCache *cache.CORSCache -} - -func New(params *AppParams, config Config, tree *tree.Tree, rpcCli ContainerContract, workerPool *ants.Pool) *Handler { - return &Handler{ - log: params.Logger, - frostfs: params.FrostFS, - ownerID: params.Owner, - config: config, - containerResolver: params.Resolver, - tree: tree, - cache: params.Cache, - workerPool: workerPool, - corsCnrID: params.CORSCnrID, - corsCache: params.CORSCache, - cnrContract: rpcCli, - } -} - -// byAttribute is a wrapper similar to byNativeAddress. 
-func (h *Handler) byAttribute(ctx context.Context, req *fasthttp.RequestCtx, handler func(context.Context, *fasthttp.RequestCtx, oid.Address)) { - cidParam, _ := req.UserValue("cid").(string) - key, _ := req.UserValue("attr_key").(string) - val, _ := req.UserValue("attr_val").(string) - - key, err := url.QueryUnescape(key) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_key", key)) - return - } - - val, err = url.QueryUnescape(val) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUnescapeQuery, err, zap.String("cid", cidParam), zap.String("attr_val", key)) - return - } - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With(zap.String("cid", cidParam), - zap.String("attr_key", key), zap.String("attr_val", val))) - - bktInfo, err := h.getBucketInfo(ctx, cidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - objID, err := h.findObjectByAttribute(ctx, bktInfo.CID, key, val) - if err != nil { - if errors.Is(err, io.EOF) { - err = fmt.Errorf("%w: %s", ErrObjectNotFound, err.Error()) - } - h.logAndSendError(ctx, req, logs.FailedToFindObjectByAttribute, err) - return - } - - var addr oid.Address - addr.SetContainer(bktInfo.CID) - addr.SetObject(objID) - - handler(ctx, req, addr) -} - -func (h *Handler) findObjectByAttribute(ctx context.Context, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) { - res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual) - if err != nil { - return oid.ID{}, fmt.Errorf("search objects: %w", err) - } - defer res.Close() - - buf := make([]oid.ID, 1) - - n, err := res.Read(buf) - if n == 0 { - switch { - case errors.Is(err, io.EOF): - h.reqLogger(ctx).Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage)) - return oid.ID{}, fmt.Errorf("object not found: %w", err) - default: - h.reqLogger(ctx).Error(logs.ReadObjectListFailed, zap.Error(err), 
logs.TagField(logs.TagExternalStorage)) - return oid.ID{}, fmt.Errorf("read object list failed: %w", err) - } - } - - return buf[0], nil -} - -// resolveContainer decode container id, if it's not a valid container id -// then trey to resolve name using provided resolver. -func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) { - cnrID := new(cid.ID) - err := cnrID.DecodeString(containerID) - if err != nil { - var namespace string - namespace, err = middleware.GetNamespace(ctx) - if err != nil { - return nil, err - } - - zone := h.config.FormContainerZone(namespace) - cnrID, err = h.containerResolver.Resolve(ctx, zone, containerID) - if err != nil && strings.Contains(err.Error(), "not found") { - err = fmt.Errorf("%w: %s", ErrContainerNotFound, err.Error()) - } - } - return cnrID, err -} - -func (h *Handler) getBucketInfo(ctx context.Context, containerName string) (*data.BucketInfo, error) { - ns, err := middleware.GetNamespace(ctx) - if err != nil { - return nil, err - } - - if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil { - return bktInfo, nil - } - - cnrID, err := h.resolveContainer(ctx, containerName) - if err != nil { - return nil, fmt.Errorf("resolve container: %w", err) - } - - return h.containerInfo(ctx, *cnrID) -} - -type ListFunc func(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) - -func (h *Handler) browseIndexMiddleware(fn ListFunc) MiddlewareFunc { - return func(prm MiddlewareParam) bool { - ctx, span := tracing.StartSpanFromContext(prm.Context, "handler.browseIndex") - defer span.End() - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( - zap.String("bucket", prm.BktInfo.Name), - zap.String("container", prm.BktInfo.CID.EncodeToString()), - zap.String("prefix", prm.Path), - )) - - objects, err := fn(ctx, prm.BktInfo, prm.Path) - if err != nil { - h.logAndSendError(ctx, prm.Request, logs.FailedToListObjects, err) - return false - } - - 
h.browseObjects(ctx, prm.Request, browseParams{ - bucketInfo: prm.BktInfo, - prefix: prm.Path, - objects: objects, - }) - - return false - } -} diff --git a/internal/handler/handler_fuzz_test.go b/internal/handler/handler_fuzz_test.go deleted file mode 100644 index ff38b11..0000000 --- a/internal/handler/handler_fuzz_test.go +++ /dev/null @@ -1,581 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package handler - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "mime/multipart" - "net/http" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - go_fuzz_utils "github.com/trailofbits/go-fuzz-utils" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -const ( - fuzzSuccessExitCode = 0 - fuzzFailExitCode = -1 -) - -func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) { - array := make([]string, count) - var err error - - for i := 0; i < count; i++ { - err = tp.Reset() - if err != nil { - return nil, err - } - - array[i], err = tp.GetString() - if err != nil { - return nil, err - } - } - - return array, nil -} - -func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) { - array := make([]bool, count) - var err error - - for i := 0; i < count; i++ { - err = tp.Reset() - if err != nil { - return nil, err - } - - array[i], err = tp.GetBool() - if err != nil { - return nil, err - } - } - - return array, nil -} - -func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) { - count, err := tp.GetInt() - if err != nil { - return -1, err - } - count = count % max - if count < 0 { - count += max - } - return count, nil -} - -func 
generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error { - count, err := tp.GetInt() - if err != nil { - return err - } - count = count % len(params) - if count < 0 { - count += len(params) - } - - for i := 0; i < count; i++ { - position, err := tp.GetInt() - if err != nil { - return err - } - position = position % len(params) - if position < 0 { - position += len(params) - } - - v, err := tp.GetString() - if err != nil { - return err - } - - r.Header.Set(params[position], v) - - } - - return nil -} - -func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) { - rnd, err := tp.GetBool() - if err != nil { - return "", err - } - if rnd == true { - initValue, err = tp.GetString() - if err != nil { - return "", err - } - } - return initValue, nil -} - -func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) { - hc, err := prepareHandlerContextBase(zap.NewExample()) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - aclList := []acl.Basic{ - acl.Private, - acl.PrivateExtended, - acl.PublicRO, - acl.PublicROExtended, - acl.PublicRW, - acl.PublicRWExtended, - acl.PublicAppend, - acl.PublicAppendExtended, - } - - pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList)) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - acl := aclList[pos] - - strings, err := prepareStrings(tp, 6) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - bktName := strings[0] - objFileName := strings[1] - valAttr := strings[2] - keyAttr := strings[3] - - if len(bktName) == 0 { - return nil, nil, cid.ID{}, nil, "", "", "", errors.New("not enought buckets") - } - - cnrID, cnr, err := hc.prepareContainer(bktName, acl) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - hc.frostfs.SetContainer(cnrID, cnr) - - ctx := context.Background() - ctx = 
middleware.SetNamespace(ctx, "") - - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", cnrID.EncodeToString()) - - attributes := map[string]string{ - object.AttributeFileName: objFileName, - keyAttr: valAttr, - } - - var buff bytes.Buffer - w := multipart.NewWriter(&buff) - fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName]) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - content, err := tp.GetBytes() - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - if err = w.Close(); err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - r.Request.SetBodyStream(&buff, buff.Len()) - r.Request.Header.Set("Content-Type", w.FormDataContentType()) - r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr) - - err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]}) - if err != nil { - return nil, nil, cid.ID{}, nil, "", "", "", err - } - - hc.Handler().Upload(r) - - if r.Response.StatusCode() != http.StatusOK { - return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload") - } - - return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil -} - -func InitFuzzUpload() { - -} - -func DoFuzzUpload(input []byte) int { - // FUZZER INIT - if 
len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - _, _, _, _, _, _, _, err = upload(tp) - if err != nil { - return fuzzFailExitCode - } - - return fuzzSuccessExitCode -} - -func FuzzUpload(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzUpload(data) - }) -} - -func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) { - - var putRes putResponse - - defer func() { - if r := recover(); r != nil { - panic(resp) - } - }() - - data := resp.Response.Body() - err := json.Unmarshal(data, &putRes) - - if err != nil { - return nil, err - } - - obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID] - attr := object.NewAttribute() - attr.SetKey(object.AttributeFilePath) - - filename, err = maybeFillRandom(tp, filename) - if err != nil { - return nil, err - } - - attr.SetValue(filename) - obj.SetAttributes(append(obj.Attributes(), *attr)...) 
- - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - - cid := cnrID.EncodeToString() - cid, err = maybeFillRandom(tp, cid) - if err != nil { - return nil, err - } - oid := putRes.ObjectID - oid, err = maybeFillRandom(tp, oid) - if err != nil { - return nil, err - } - r.SetUserValue("cid", cid) - r.SetUserValue("oid", oid) - - rnd, err := tp.GetBool() - if err != nil { - return nil, err - } - if rnd == true { - r.SetUserValue("download", "true") - } - - return r, nil -} - -func InitFuzzGet() { - -} - -func DoFuzzGet(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - ctx, hc, cnrID, resp, filename, _, _, err := upload(tp) - if err != nil { - return fuzzFailExitCode - } - - r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename) - if err != nil { - return fuzzFailExitCode - } - - hc.Handler().DownloadByAddressOrBucketName(r) - - return fuzzSuccessExitCode -} - -func FuzzGet(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzUpload(data) - }) -} - -func InitFuzzHead() { - -} - -func DoFuzzHead(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - ctx, hc, cnrID, resp, filename, _, _, err := upload(tp) - if err != nil { - return fuzzFailExitCode - } - - r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename) - if err != nil { - return fuzzFailExitCode - } - - hc.Handler().HeadByAddressOrBucketName(r) - - return fuzzSuccessExitCode -} - -func FuzzHead(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzHead(data) - }) -} - -func InitFuzzDownloadByAttribute() { - -} - -func DoFuzzDownloadByAttribute(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := 
go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp) - if err != nil { - return fuzzFailExitCode - } - - cid := cnrID.EncodeToString() - cid, err = maybeFillRandom(tp, cid) - if err != nil { - return fuzzFailExitCode - } - - attrKey, err = maybeFillRandom(tp, attrKey) - if err != nil { - return fuzzFailExitCode - } - - attrVal, err = maybeFillRandom(tp, attrVal) - if err != nil { - return fuzzFailExitCode - } - - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", cid) - r.SetUserValue("attr_key", attrKey) - r.SetUserValue("attr_val", attrVal) - - hc.Handler().DownloadByAttribute(r) - - return fuzzSuccessExitCode -} - -func FuzzDownloadByAttribute(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzDownloadByAttribute(data) - }) -} - -func InitFuzzHeadByAttribute() { - -} - -func DoFuzzHeadByAttribute(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp) - if err != nil { - return fuzzFailExitCode - } - - cid := cnrID.EncodeToString() - cid, err = maybeFillRandom(tp, cid) - if err != nil { - return fuzzFailExitCode - } - - attrKey, err = maybeFillRandom(tp, attrKey) - if err != nil { - return fuzzFailExitCode - } - - attrVal, err = maybeFillRandom(tp, attrVal) - if err != nil { - return fuzzFailExitCode - } - - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", cid) - r.SetUserValue("attr_key", attrKey) - r.SetUserValue("attr_val", attrVal) - - hc.Handler().HeadByAttribute(r) - - return fuzzSuccessExitCode -} - -func FuzzHeadByAttribute(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzHeadByAttribute(data) - }) -} - -func InitFuzzDownloadZipped() { - -} - -func 
DoFuzzDownloadZipped(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - ctx, hc, cnrID, _, _, _, _, err := upload(tp) - if err != nil { - return fuzzFailExitCode - } - - cid := cnrID.EncodeToString() - cid, err = maybeFillRandom(tp, cid) - if err != nil { - return fuzzFailExitCode - } - - prefix := "" - prefix, err = maybeFillRandom(tp, prefix) - if err != nil { - return fuzzFailExitCode - } - - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", cid) - r.SetUserValue("prefix", prefix) - - hc.Handler().DownloadZip(r) - - return fuzzSuccessExitCode -} - -func FuzzDownloadZipped(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - DoFuzzDownloadZipped(data) - }) -} - -func InitFuzzStoreBearerTokenAppCtx() { - -} - -func DoFuzzStoreBearerTokenAppCtx(input []byte) int { - // FUZZER INIT - if len(input) < 100 { - return fuzzFailExitCode - } - - tp, err := go_fuzz_utils.NewTypeProvider(input) - if err != nil { - return fuzzFailExitCode - } - - prefix := "" - prefix, err = maybeFillRandom(tp, prefix) - if err != nil { - return fuzzFailExitCode - } - - ctx := context.Background() - ctx = middleware.SetNamespace(ctx, "") - - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - - strings, err := prepareStrings(tp, 3) - - rand, err := prepareBools(tp, 2) - - if rand[0] == true { - r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0]) - } else if rand[1] == true { - r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1]) - } else { - r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0]) - r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1]) - } - - tokens.StoreBearerTokenAppCtx(ctx, r) - - return fuzzSuccessExitCode -} - -func FuzzStoreBearerTokenAppCtx(f *testing.F) { - f.Fuzz(func(t 
*testing.T, data []byte) { - DoFuzzStoreBearerTokenAppCtx(data) - }) -} diff --git a/internal/handler/handler_test.go b/internal/handler/handler_test.go deleted file mode 100644 index 6c715fe..0000000 --- a/internal/handler/handler_test.go +++ /dev/null @@ -1,695 +0,0 @@ -package handler - -import ( - "archive/zip" - "bytes" - "context" - "encoding/json" - "io" - "mime/multipart" - "net/http" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/panjf2000/ants/v2" - "github.com/stretchr/testify/require" - "github.com/valyala/fasthttp" - "go.uber.org/zap" - "go.uber.org/zap/zaptest" -) - -type configMock struct { - additionalFilenameSearch bool - additionalSlashSearch bool - indexEnabled bool - cors *data.CORSRule -} - -func (c *configMock) DefaultTimestamp() bool { - return false -} - -func (c *configMock) 
ArchiveCompression() bool { - return false -} - -func (c *configMock) IndexPageEnabled() bool { - return c.indexEnabled -} - -func (c *configMock) IndexPageTemplate() string { - return templates.DefaultIndexTemplate -} - -func (c *configMock) IndexPageNativeTemplate() string { - return "" -} - -func (c *configMock) ClientCut() bool { - return false -} - -func (c *configMock) BufferMaxSizeForPut() uint64 { - return 0 -} - -func (c *configMock) NamespaceHeader() string { - return "" -} - -func (c *configMock) EnableFilepathFallback() bool { - return c.additionalFilenameSearch -} - -func (c *configMock) EnableFilepathSlashFallback() bool { - return c.additionalSlashSearch -} - -func (c *configMock) FormContainerZone(string) string { - return v2container.SysAttributeZoneDefault -} - -func (c *configMock) CORS() *data.CORSRule { - return c.cors -} - -type handlerContext struct { - key *keys.PrivateKey - owner user.ID - corsCnr cid.ID - - h *Handler - frostfs *TestFrostFS - tree *treeServiceClientMock - cfg *configMock -} - -func (hc *handlerContext) Handler() *Handler { - return hc.h -} - -func prepareHandlerContext(t *testing.T) *handlerContext { - hc, err := prepareHandlerContextBase(zaptest.NewLogger(t)) - require.NoError(t, err) - return hc -} - -func prepareHandlerContextBase(logger *zap.Logger) (*handlerContext, error) { - key, err := keys.NewPrivateKey() - if err != nil { - return nil, err - } - - var owner user.ID - user.IDFromKey(&owner, key.PrivateKey.PublicKey) - - testFrostFS := NewTestFrostFS(key) - - testResolver := &resolver.Resolver{Name: "test_resolver"} - testResolver.SetResolveFunc(func(_ context.Context, _, name string) (*cid.ID, error) { - return testFrostFS.ContainerID(name) - }) - - cnrID := createCORSContainer(owner, testFrostFS) - - params := &AppParams{ - Logger: logger, - FrostFS: testFrostFS, - Owner: &owner, - Resolver: testResolver, - Cache: cache.NewBucketCache(&cache.Config{ - Size: 1, - Lifetime: 1, - Logger: logger, - }, false), - 
CORSCnrID: cnrID, - CORSCache: cache.NewCORSCache(&cache.Config{ - Size: 1, - Lifetime: 1, - Logger: logger, - }), - } - - treeMock := newTreeServiceClientMock() - cfgMock := &configMock{} - - workerPool, err := ants.NewPool(1) - if err != nil { - return nil, err - } - handler := New(params, cfgMock, tree.NewTree(treeMock, logger), testFrostFS, workerPool) - - return &handlerContext{ - key: key, - owner: owner, - corsCnr: cnrID, - h: handler, - frostfs: testFrostFS, - tree: treeMock, - cfg: cfgMock, - }, nil -} - -func createCORSContainer(owner user.ID, frostfs *TestFrostFS) cid.ID { - var cnr container.Container - cnr.Init() - cnr.SetOwner(owner) - - cnrID := cidtest.ID() - frostfs.SetContainer(cnrID, &cnr) - frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectSearch, oid.ID{}) - frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectHead, oid.ID{}) - frostfs.AllowUserOperation(cnrID, owner, acl.OpObjectGet, oid.ID{}) - - return cnrID -} - -func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) { - var pp netmap.PlacementPolicy - err := pp.DecodeString("REP 1") - if err != nil { - return cid.ID{}, nil, err - } - - var cnr container.Container - cnr.Init() - cnr.SetOwner(hc.owner) - cnr.SetPlacementPolicy(pp) - cnr.SetBasicACL(basicACL) - - var domain container.Domain - domain.SetName(name) - container.WriteDomain(&cnr, domain) - container.SetName(&cnr, name) - container.SetCreationTime(&cnr, time.Now()) - - cnrID := cidtest.ID() - - for op := acl.OpObjectGet; op < acl.OpObjectHash; op++ { - hc.frostfs.AllowUserOperation(cnrID, hc.owner, op, oid.ID{}) - if basicACL.IsOpAllowed(op, acl.RoleOthers) { - hc.frostfs.AllowUserOperation(cnrID, user.ID{}, op, oid.ID{}) - } - } - - return cnrID, &cnr, nil -} - -func TestBasic(t *testing.T) { - hc := prepareHandlerContext(t) - - bktName := "bucket" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended) - require.NoError(t, err) - 
hc.frostfs.SetContainer(cnrID, cnr) - - ctx := context.Background() - ctx = middleware.SetNamespace(ctx, "") - - content := "hello" - r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content) - require.NoError(t, err) - - hc.Handler().Upload(r) - require.Equal(t, r.Response.StatusCode(), http.StatusOK) - - var putRes putResponse - err = json.Unmarshal(r.Response.Body(), &putRes) - require.NoError(t, err) - - hc.cfg.additionalFilenameSearch = true - obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID] - fileName := prepareObjectAttributes(object.AttributeFileName, objFileName) - filePath := prepareObjectAttributes(object.AttributeFilePath, objFilePath) - obj.SetAttributes(append(obj.Attributes(), fileName)...) - obj.SetAttributes(append(obj.Attributes(), filePath)...) - - t.Run("get", func(t *testing.T) { - r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID) - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, content, string(r.Response.Body())) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath) - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, content, string(r.Response.Body())) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFileName) - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, content, string(r.Response.Body())) - }) - - t.Run("head", func(t *testing.T) { - r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID) - hc.Handler().HeadByAddressOrBucketName(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), objFilePath) - hc.Handler().HeadByAddressOrBucketName(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) - - r = prepareGetRequest(ctx, 
cnrID.EncodeToString(), objFileName) - hc.Handler().HeadByAddressOrBucketName(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) - }) - - t.Run("get by attribute", func(t *testing.T) { - r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr) - hc.Handler().DownloadByAttribute(r) - require.Equal(t, content, string(r.Response.Body())) - - r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath) - hc.Handler().DownloadByAttribute(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName) - hc.Handler().DownloadByAttribute(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - }) - - t.Run("head by attribute", func(t *testing.T) { - r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr) - hc.Handler().HeadByAttribute(r) - require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID))) - require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID))) - - r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, objFilePath) - hc.Handler().HeadByAttribute(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetByAttributeRequest(ctx, bktName, attrFilePath, objFileName) - hc.Handler().HeadByAttribute(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - }) - - t.Run("zip", func(t *testing.T) { - r = prepareGetZipped(ctx, bktName, "") - hc.Handler().DownloadZip(r) - - readerAt := bytes.NewReader(r.Response.Body()) - zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body()))) - require.NoError(t, err) - require.Len(t, zipReader.File, 1) - require.Equal(t, objFilePath, zipReader.File[0].Name) - f, err := zipReader.File[0].Open() - require.NoError(t, err) - defer func() { - inErr := f.Close() - 
require.NoError(t, inErr) - }() - data, err := io.ReadAll(f) - require.NoError(t, err) - require.Equal(t, content, string(data)) - }) -} - -func prepareHandlerAndBucket(t *testing.T) (*handlerContext, cid.ID) { - hc := prepareHandlerContext(t) - - bktName := "bucket" - cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended) - require.NoError(t, err) - hc.frostfs.SetContainer(cnrID, cnr) - - return hc, cnrID -} - -func TestGetObjectWithFallback(t *testing.T) { - ctx := middleware.SetNamespace(context.Background(), "") - - t.Run("by oid", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), obj1ID.String()) - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) - - t.Run("by filepath as it is", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - obj2ID := oidtest.ID() - obj2 := object.New() - obj2.SetID(obj2ID) - obj2.SetPayload([]byte("obj2")) - obj2.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "/filepath/obj2")) - hc.frostfs.objects[cnrID.String()+"/"+obj2ID.String()] = obj2 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj2") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj2.Payload()), string(r.Response.Body())) - }) - - t.Run("by 
filepath slash fallback", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.additionalSlashSearch = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filepath/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) - - t.Run("by filename fallback", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.additionalFilenameSearch = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) - - t.Run("by filename and slash fallback", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, "filename/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1") - 
hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.additionalFilenameSearch = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.additionalSlashSearch = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "/filename/obj1") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) - - t.Run("index fallback", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "filepath/index.html")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.indexEnabled = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filepath/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) - - t.Run("index filename fallback", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFileName, 
"filename/index.html")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.indexEnabled = true - hc.cfg.additionalFilenameSearch = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "filename/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, string(obj1.Payload()), string(r.Response.Body())) - }) -} - -func TestIndex(t *testing.T) { - ctx := middleware.SetNamespace(context.Background(), "") - - t.Run("s3", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - hc.tree.containers[cnrID.String()] = containerInfo{ - trees: map[string]map[string]nodeResponse{ - "system": {"bucket-settings": nodeResponse{nodeID: 1}}, - "version": { - "": nodeResponse{}, //root - "prefix": nodeResponse{ - nodeID: 1, - meta: []nodeMeta{{key: tree.FileNameKey, value: []byte("prefix")}}}, - "obj1": nodeResponse{ - parentID: 1, - nodeID: 2, - meta: []nodeMeta{ - {key: tree.FileNameKey, value: []byte("obj1")}, - {key: "OID", value: []byte(obj1ID.String())}, - }, - }, - }, - }, - } - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, 
fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.indexEnabled = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix") - require.Contains(t, string(r.Response.Body()), obj1ID.String()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/prefix") - require.Contains(t, string(r.Response.Body()), obj1ID.String()) - - r = prepareGetRequest(ctx, "bucket", "dummy") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of s3://bucket/dummy") - }) - - t.Run("native", func(t *testing.T) { - hc, cnrID := prepareHandlerAndBucket(t) - - obj1ID := oidtest.ID() - obj1 := object.New() - obj1.SetID(obj1ID) - obj1.SetPayload([]byte("obj1")) - obj1.SetAttributes(prepareObjectAttributes(object.AttributeFilePath, "prefix/obj1")) - hc.frostfs.objects[cnrID.String()+"/"+obj1ID.String()] = obj1 - - r := prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Equal(t, fasthttp.StatusNotFound, r.Response.StatusCode()) - - hc.cfg.indexEnabled = true - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix") - require.Contains(t, string(r.Response.Body()), obj1ID.String()) - - r = 
prepareGetRequest(ctx, cnrID.EncodeToString(), "prefix/") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/prefix") - require.Contains(t, string(r.Response.Body()), obj1ID.String()) - - r = prepareGetRequest(ctx, cnrID.EncodeToString(), "dummy") - hc.Handler().DownloadByAddressOrBucketName(r) - require.Contains(t, string(r.Response.Body()), "Index of frostfs://"+cnrID.String()+"/dummy") - }) -} - -func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) { - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", bucket) - return r, fillMultipartBody(r, content) -} - -func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.RequestCtx { - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", bucket) - r.SetUserValue("oid", objID) - return r -} - -func prepareCORSRequest(t *testing.T, bucket string, headers map[string]string) *fasthttp.RequestCtx { - ctx := context.Background() - ctx = middleware.SetNamespace(ctx, "") - - r := new(fasthttp.RequestCtx) - r.SetUserValue("cid", bucket) - - for k, v := range headers { - r.Request.Header.Set(k, v) - } - - ctx, err := tokens.StoreBearerTokenAppCtx(ctx, r) - require.NoError(t, err) - - utils.SetContextToRequest(ctx, r) - - return r -} - -func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx { - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", bucket) - r.SetUserValue("attr_key", attrKey) - r.SetUserValue("attr_val", attrVal) - return r -} - -func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.RequestCtx { - r := new(fasthttp.RequestCtx) - utils.SetContextToRequest(ctx, r) - r.SetUserValue("cid", bucket) - r.SetUserValue("prefix", prefix) - return r -} - -func 
prepareObjectAttributes(attrKey, attrValue string) object.Attribute { - attr := object.NewAttribute() - attr.SetKey(attrKey) - attr.SetValue(attrValue) - return *attr -} - -const ( - keyAttr = "User-Attribute" - valAttr = "user value" - objFileName = "newFile.txt" - objFilePath = "/newFile.txt" -) - -func fillMultipartBody(r *fasthttp.RequestCtx, content string) error { - attributes := map[string]string{ - object.AttributeFileName: objFileName, - keyAttr: valAttr, - } - - var buff bytes.Buffer - w := multipart.NewWriter(&buff) - fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName]) - if err != nil { - return err - } - - if _, err = io.Copy(fw, bytes.NewBufferString(content)); err != nil { - return err - } - - if err = w.Close(); err != nil { - return err - } - - r.Request.SetBodyStream(&buff, buff.Len()) - r.Request.Header.Set("Content-Type", w.FormDataContentType()) - r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr) - - return nil -} diff --git a/internal/handler/head.go b/internal/handler/head.go deleted file mode 100644 index 508dc37..0000000 --- a/internal/handler/head.go +++ /dev/null @@ -1,193 +0,0 @@ -package handler - -import ( - "context" - "errors" - "io" - "net/http" - "net/url" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -// max bytes needed to detect content type according to http.DetectContentType docs. 
-const sizeToDetectType = 512 - -const ( - hdrObjectID = "X-Object-Id" - hdrOwnerID = "X-Owner-Id" - hdrContainerID = "X-Container-Id" -) - -func (h *Handler) headObject(ctx context.Context, req *fasthttp.RequestCtx, objectAddress oid.Address) { - var start = time.Now() - - btoken := bearerToken(ctx) - - prm := PrmObjectHead{ - PrmAuth: PrmAuth{ - BearerToken: btoken, - }, - Address: objectAddress, - } - - obj, err := h.frostfs.HeadObject(ctx, prm) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToHeadObject, err, zap.Stringer("elapsed", time.Since(start))) - return - } - - req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10)) - var ( - contentType string - filename string - filepath string - ) - for _, attr := range obj.Attributes() { - key := attr.Key() - val := attr.Value() - if !isValidToken(key) || !isValidValue(val) { - continue - } - - key = utils.BackwardTransformIfSystem(key) - - req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val) - switch key { - case object.AttributeTimestamp: - value, err := strconv.ParseInt(val, 10, 64) - if err != nil { - h.reqLogger(ctx).Info(logs.CouldntParseCreationDate, - zap.String("key", key), - zap.String("val", val), - zap.Error(err), - logs.TagField(logs.TagDatapath)) - continue - } - req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat)) - case object.AttributeContentType: - contentType = val - case object.AttributeFilePath: - filepath = val - case object.AttributeFileName: - filename = val - } - } - if filename == "" { - filename = filepath - } - - idsToResponse(&req.Response, obj) - - if len(contentType) == 0 { - contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) { - prmRange := PrmObjectRange{ - PrmAuth: PrmAuth{ - BearerToken: btoken, - }, - Address: objectAddress, - PayloadRange: [2]uint64{0, sz}, - } - - return h.frostfs.RangeObject(ctx, prmRange) - }, filename) - 
if err != nil && err != io.EOF { - h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start))) - return - } - } - req.SetContentType(contentType) -} - -func idsToResponse(resp *fasthttp.Response, obj *object.Object) { - objID, _ := obj.ID() - cnrID, _ := obj.ContainerID() - resp.Header.Set(hdrObjectID, objID.String()) - resp.Header.Set(hdrOwnerID, obj.OwnerID().String()) - resp.Header.Set(hdrContainerID, cnrID.String()) -} - -// HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format. -func (h *Handler) HeadByAddressOrBucketName(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAddressOrBucketName") - defer span.End() - - cidParam, _ := req.UserValue("cid").(string) - oidParam, _ := req.UserValue("oid").(string) - - ctx = utils.SetReqLog(ctx, h.reqLogger(ctx).With( - zap.String("cid", cidParam), - zap.String("oid", oidParam), - )) - - path, err := url.QueryUnescape(oidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUnescapePath, err) - return - } - - bktInfo, err := h.getBucketInfo(ctx, cidParam) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo) - if checkS3Err != nil && !errors.Is(checkS3Err, tree.ErrNodeNotFound) { - h.logAndSendError(ctx, req, logs.FailedToCheckIfSettingsNodeExist, checkS3Err) - return - } - - prm := MiddlewareParam{ - Context: ctx, - Request: req, - BktInfo: bktInfo, - Path: path, - } - - indexPageEnabled := h.config.IndexPageEnabled() - - if checkS3Err == nil { - run(prm, h.errorMiddleware(logs.ObjectNotFound, tree.ErrNodeNotFound), - Middleware{Func: h.byS3PathMiddleware(h.headObject, noopFormer), Enabled: true}, - Middleware{Func: h.byS3PathMiddleware(h.headObject, indexFormer), Enabled: indexPageEnabled}, - ) - } else { - slashFallbackEnabled := 
h.config.EnableFilepathSlashFallback() - fileNameFallbackEnabled := h.config.EnableFilepathFallback() - - run(prm, h.errorMiddleware(logs.ObjectNotFound, ErrObjectNotFound), - Middleware{Func: h.byAddressMiddleware(h.headObject), Enabled: true}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, noopFormer), Enabled: true}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, reverseLeadingSlash), Enabled: slashFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, noopFormer), Enabled: fileNameFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, reverseLeadingSlash), Enabled: fileNameFallbackEnabled && slashFallbackEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFilePath, indexFormer), Enabled: indexPageEnabled}, - Middleware{Func: h.byAttributeSearchMiddleware(h.headObject, object.AttributeFileName, indexFormer), Enabled: fileNameFallbackEnabled && indexPageEnabled}, - ) - } -} - -// HeadByAttribute handles attribute-based head requests. -func (h *Handler) HeadByAttribute(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.HeadByAttribute") - defer span.End() - - h.byAttribute(ctx, req, h.headObject) -} - -func (h *Handler) errorMiddleware(msg string, err error) MiddlewareFunc { - return func(prm MiddlewareParam) bool { - h.logAndSendError(prm.Context, prm.Request, msg, err) - return false - } -} diff --git a/internal/handler/middleware/util.go b/internal/handler/middleware/util.go deleted file mode 100644 index 284513a..0000000 --- a/internal/handler/middleware/util.go +++ /dev/null @@ -1,26 +0,0 @@ -package middleware - -import ( - "context" - "fmt" -) - -// keyWrapper is wrapper for context keys. 
-type keyWrapper string - -const nsKey = keyWrapper("namespace") - -// GetNamespace extract namespace from context. -func GetNamespace(ctx context.Context) (string, error) { - ns, ok := ctx.Value(nsKey).(string) - if !ok { - return "", fmt.Errorf("couldn't get namespace from context") - } - - return ns, nil -} - -// SetNamespace sets namespace in the context. -func SetNamespace(ctx context.Context, ns string) context.Context { - return context.WithValue(ctx, nsKey, ns) -} diff --git a/internal/handler/multipart.go b/internal/handler/multipart.go deleted file mode 100644 index 5b06882..0000000 --- a/internal/handler/multipart.go +++ /dev/null @@ -1,80 +0,0 @@ -package handler - -import ( - "context" - "errors" - "io" - "strconv" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "go.uber.org/zap" -) - -const attributeMultipartObjectSize = "S3-Multipart-Object-Size" - -// MultipartFile provides standard ReadCloser interface and also allows one to -// get file name, it's used for multipart uploads. -type MultipartFile interface { - io.ReadCloser - FileName() string -} - -func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) { - // To have a custom buffer (3mb) the custom multipart reader is used. 
- // Default reader uses 4KiB chunks, which slow down upload speed up to 400% - // https://github.com/golang/go/blob/91b9915d3f6f8cd2e9e9fda63f67772803adfa03/src/mime/multipart/multipart.go#L32 - reader := multipart.NewReader(r, boundary) - - for { - part, err := reader.NextPart() - if err != nil { - return nil, err - } - - name := part.FormName() - if name == "" { - l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath)) - continue - } - - filename := part.FileName() - - // ignore multipart/form-data values - if filename == "" { - l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath)) - if err = part.Close(); err != nil { - l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - continue - } - - return part, nil - } -} - -// getPayload returns initial payload if object is not multipart else composes new reader with parts data. -func (h *Handler) getPayload(ctx context.Context, p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) { - cid, ok := p.obj.Header.ContainerID() - if !ok { - return nil, 0, errors.New("no container id set") - } - oid, ok := p.obj.Header.ID() - if !ok { - return nil, 0, errors.New("no object id set") - } - size, err := strconv.ParseUint(p.strSize, 10, 64) - if err != nil { - return nil, 0, err - } - params := PrmInitMultiObjectReader{ - Addr: newAddress(cid, oid), - Bearer: bearerToken(ctx), - } - payload, err := h.frostfs.InitMultiObjectReader(ctx, params) - if err != nil { - return nil, 0, err - } - - return io.NopCloser(payload), size, nil -} diff --git a/internal/handler/multipart/multipart.go b/internal/handler/multipart/multipart.go deleted file mode 100644 index 69e6719..0000000 --- a/internal/handler/multipart/multipart.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// - -/* -Package multipart implements MIME multipart parsing, as defined in RFC -2046. - -The implementation is sufficient for HTTP (RFC 2388) and the multipart -bodies generated by popular browsers. -*/ -package multipart - -import ( - "bufio" - "bytes" - "fmt" - "io" - "mime" - "mime/quotedprintable" - "net/textproto" - "strings" -) - -var emptyParams = make(map[string]string) - -// This constant needs to be at least 76 for this package to work correctly. -// This is because \r\n--separator_of_len_70- would fill the buffer and it -// wouldn't be safe to consume a single byte from it. -// This constant is different from the constant in stdlib. The standard value is 4096. -const peekBufferSize = 3 << 20 - -// A Part represents a single part in a multipart body. -type Part struct { - // The headers of the body, if any, with the keys canonicalized - // in the same fashion that the Go http.Request headers are. - // For example, "foo-bar" changes case to "Foo-Bar" - Header textproto.MIMEHeader - - mr *Reader - - disposition string - dispositionParams map[string]string - - // r is either a reader directly reading from mr, or it's a - // wrapper around such a reader, decoding the - // Content-Transfer-Encoding - r io.Reader - - n int // known data bytes waiting in mr.bufReader - total int64 // total data bytes read already - err error // error to return when n == 0 - readErr error // read error observed from mr.bufReader -} - -// FormName returns the name parameter if p has a Content-Disposition -// of type "form-data". Otherwise it returns the empty string. -func (p *Part) FormName() string { - // See https://tools.ietf.org/html/rfc2183 section 2 for EBNF - // of Content-Disposition value format. - if p.dispositionParams == nil { - p.parseContentDisposition() - } - if p.disposition != "form-data" { - return "" - } - return p.dispositionParams["name"] -} - -// FileName returns the filename parameter of the Part's -// Content-Disposition header. 
-func (p *Part) FileName() string { - if p.dispositionParams == nil { - p.parseContentDisposition() - } - return p.dispositionParams["filename"] -} - -func (p *Part) parseContentDisposition() { - v := p.Header.Get("Content-Disposition") - var err error - p.disposition, p.dispositionParams, err = mime.ParseMediaType(v) - if err != nil { - p.dispositionParams = emptyParams - } -} - -// NewReader creates a new multipart Reader reading from r using the -// given MIME boundary. -// -// The boundary is usually obtained from the "boundary" parameter of -// the message's "Content-Type" header. Use mime.ParseMediaType to -// parse such headers. -func NewReader(r io.Reader, boundary string) *Reader { - b := []byte("\r\n--" + boundary + "--") - return &Reader{ - bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize), - nl: b[:2], - nlDashBoundary: b[:len(b)-2], - dashBoundaryDash: b[2:], - dashBoundary: b[2 : len(b)-2], - } -} - -// stickyErrorReader is an io.Reader which never calls Read on its -// underlying Reader once an error has been seen. (the io.Reader -// interface's contract promises nothing about the return values of -// Read calls after an error, yet this package does do multiple Reads -// after error). -type stickyErrorReader struct { - r io.Reader - err error -} - -func (r *stickyErrorReader) Read(p []byte) (n int, _ error) { - if r.err != nil { - return 0, r.err - } - n, r.err = r.r.Read(p) - return n, r.err -} - -func newPart(mr *Reader, rawPart bool) (*Part, error) { - bp := &Part{ - Header: make(map[string][]string), - mr: mr, - } - if err := bp.populateHeaders(); err != nil { - return nil, err - } - bp.r = partReader{bp} - - // rawPart is used to switch between Part.NextPart and Part.NextRawPart. 
- if !rawPart { - const cte = "Content-Transfer-Encoding" - if strings.EqualFold(bp.Header.Get(cte), "quoted-printable") { - bp.Header.Del(cte) - bp.r = quotedprintable.NewReader(bp.r) - } - } - return bp, nil -} - -func (p *Part) populateHeaders() error { - r := textproto.NewReader(p.mr.bufReader) - header, err := r.ReadMIMEHeader() - if err == nil { - p.Header = header - } - return err -} - -// Read reads the body of a part, after its headers and before the -// next part (if any) begins. -func (p *Part) Read(d []byte) (n int, err error) { - return p.r.Read(d) -} - -// partReader implements io.Reader by reading raw bytes directly from the -// wrapped *Part, without doing any Transfer-Encoding decoding. -type partReader struct { - p *Part -} - -func (pr partReader) Read(d []byte) (int, error) { - p := pr.p - br := p.mr.bufReader - - // Read into buffer until we identify some data to return, - // or we find a reason to stop (boundary or read error). - for p.n == 0 && p.err == nil { - peek, _ := br.Peek(br.Buffered()) - p.n, p.err = scanUntilBoundary(peek, p.mr.dashBoundary, p.mr.nlDashBoundary, p.total, p.readErr) - if p.n == 0 && p.err == nil { - // Force buffered I/O to read more into buffer. - _, p.readErr = br.Peek(len(peek) + 1) - if p.readErr == io.EOF { - p.readErr = io.ErrUnexpectedEOF - } - } - } - - // Read out from "data to return" part of buffer. - if p.n == 0 { - return 0, p.err - } - n := len(d) - if n > p.n { - n = p.n - } - n, _ = br.Read(d[:n]) - p.total += int64(n) - p.n -= n - if p.n == 0 { - return n, p.err - } - return n, nil -} - -// scanUntilBoundary scans buf to identify how much of it can be safely -// returned as part of the Part body. -// dashBoundary is "--boundary". -// nlDashBoundary is "\r\n--boundary" or "\n--boundary", depending on what mode we are in. -// The comments below (and the name) assume "\n--boundary", but either is accepted. -// total is the number of bytes read out so far. 
If total == 0, then a leading "--boundary" is recognized. -// readErr is the read error, if any, that followed reading the bytes in buf. -// scanUntilBoundary returns the number of data bytes from buf that can be -// returned as part of the Part body and also the error to return (if any) -// once those data bytes are done. -func scanUntilBoundary(buf, dashBoundary, nlDashBoundary []byte, total int64, readErr error) (int, error) { - if total == 0 { - // At beginning of body, allow dashBoundary. - if bytes.HasPrefix(buf, dashBoundary) { - switch matchAfterPrefix(buf, dashBoundary, readErr) { - case -1: - return len(dashBoundary), nil - case 0: - return 0, nil - case +1: - return 0, io.EOF - } - } - if bytes.HasPrefix(dashBoundary, buf) { - return 0, readErr - } - } - - // Search for "\n--boundary". - if i := bytes.Index(buf, nlDashBoundary); i >= 0 { - switch matchAfterPrefix(buf[i:], nlDashBoundary, readErr) { - case -1: - return i + len(nlDashBoundary), nil - case 0: - return i, nil - case +1: - return i, io.EOF - } - } - if bytes.HasPrefix(nlDashBoundary, buf) { - return 0, readErr - } - - // Otherwise, anything up to the final \n is not part of the boundary - // and so must be part of the body. - // Also if the section from the final \n onward is not a prefix of the boundary, - // it too must be part of the body. - i := bytes.LastIndexByte(buf, nlDashBoundary[0]) - if i >= 0 && bytes.HasPrefix(nlDashBoundary, buf[i:]) { - return i, nil - } - return len(buf), readErr -} - -// matchAfterPrefix checks whether buf should be considered to match the boundary. -// The prefix is "--boundary" or "\r\n--boundary" or "\n--boundary", -// and the caller has verified already that bytes.HasPrefix(buf, prefix) is true. -// -// matchAfterPrefix returns +1 if the buffer does match the boundary, -// meaning the prefix is followed by a dash, space, tab, cr, nl, or end of input. 
-// It returns -1 if the buffer definitely does NOT match the boundary, -// meaning the prefix is followed by some other character. -// For example, "--foobar" does not match "--foo". -// It returns 0 more input needs to be read to make the decision, -// meaning that len(buf) == len(prefix) and readErr == nil. -func matchAfterPrefix(buf, prefix []byte, readErr error) int { - if len(buf) == len(prefix) { - if readErr != nil { - return +1 - } - return 0 - } - c := buf[len(prefix)] - if c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '-' { - return +1 - } - return -1 -} - -func (p *Part) Close() error { - _, _ = io.Copy(io.Discard, p) - return nil -} - -// Reader is an iterator over parts in a MIME multipart body. -// Reader's underlying parser consumes its input as needed. Seeking -// isn't supported. -type Reader struct { - bufReader *bufio.Reader - - currentPart *Part - partsRead int - - nl []byte // "\r\n" or "\n" (set after seeing first boundary line) - nlDashBoundary []byte // nl + "--boundary" - dashBoundaryDash []byte // "--boundary--" - dashBoundary []byte // "--boundary" -} - -// NextPart returns the next part in the multipart or an error. -// When there are no more parts, the error io.EOF is returned. -// -// As a special case, if the "Content-Transfer-Encoding" header -// has a value of "quoted-printable", that header is instead -// hidden and the body is transparently decoded during Read calls. -func (r *Reader) NextPart() (*Part, error) { - return r.nextPart(false) -} - -// NextRawPart returns the next part in the multipart or an error. -// When there are no more parts, the error io.EOF is returned. -// -// Unlike NextPart, it does not have special handling for -// "Content-Transfer-Encoding: quoted-printable". 
-func (r *Reader) NextRawPart() (*Part, error) { - return r.nextPart(true) -} - -func (r *Reader) nextPart(rawPart bool) (*Part, error) { - if r.currentPart != nil { - r.currentPart.Close() - } - if string(r.dashBoundary) == "--" { - return nil, fmt.Errorf("multipart: boundary is empty") - } - expectNewPart := false - for { - line, err := r.bufReader.ReadSlice('\n') - - if err == io.EOF && r.isFinalBoundary(line) { - // If the buffer ends in "--boundary--" without the - // trailing "\r\n", ReadSlice will return an error - // (since it's missing the '\n'), but this is a valid - // multipart EOF so we need to return io.EOF instead of - // a fmt-wrapped one. - return nil, io.EOF - } - if err != nil { - return nil, fmt.Errorf("multipart: NextPart: %v", err) - } - - if r.isBoundaryDelimiterLine(line) { - r.partsRead++ - bp, err := newPart(r, rawPart) - if err != nil { - return nil, err - } - r.currentPart = bp - return bp, nil - } - - if r.isFinalBoundary(line) { - // Expected EOF - return nil, io.EOF - } - - if expectNewPart { - return nil, fmt.Errorf("multipart: expecting a new Part; got line %q", string(line)) - } - - if r.partsRead == 0 { - // skip line - continue - } - - // Consume the "\n" or "\r\n" separator between the - // body of the previous part and the boundary line we - // now expect will follow. (either a new part or the - // end boundary) - if bytes.Equal(line, r.nl) { - expectNewPart = true - continue - } - - return nil, fmt.Errorf("multipart: unexpected line in Next(): %q", line) - } -} - -// isFinalBoundary reports whether line is the final boundary line -// indicating that all parts are over. -// It matches `^--boundary--[ \t]*(\r\n)?$`. 
-func (r *Reader) isFinalBoundary(line []byte) bool { - if !bytes.HasPrefix(line, r.dashBoundaryDash) { - return false - } - rest := line[len(r.dashBoundaryDash):] - rest = skipLWSPChar(rest) - return len(rest) == 0 || bytes.Equal(rest, r.nl) -} - -func (r *Reader) isBoundaryDelimiterLine(line []byte) (ret bool) { - // https://tools.ietf.org/html/rfc2046#section-5.1 - // The boundary delimiter line is then defined as a line - // consisting entirely of two hyphen characters ("-", - // decimal value 45) followed by the boundary parameter - // value from the Content-Type header field, optional linear - // whitespace, and a terminating CRLF. - if !bytes.HasPrefix(line, r.dashBoundary) { - return false - } - rest := line[len(r.dashBoundary):] - rest = skipLWSPChar(rest) - - // On the first part, see our lines are ending in \n instead of \r\n - // and switch into that mode if so. This is a violation of the spec, - // but occurs in practice. - if r.partsRead == 0 && len(rest) == 1 && rest[0] == '\n' { - r.nl = r.nl[1:] - r.nlDashBoundary = r.nlDashBoundary[1:] - } - return bytes.Equal(rest, r.nl) -} - -// skipLWSPChar returns b with leading spaces and tabs removed. 
-// RFC 822 defines: -// -// LWSP-char = SPACE / HTAB -func skipLWSPChar(b []byte) []byte { - for len(b) > 0 && (b[0] == ' ' || b[0] == '\t') { - b = b[1:] - } - return b -} diff --git a/internal/handler/multipart_test.go b/internal/handler/multipart_test.go deleted file mode 100644 index d7f52f4..0000000 --- a/internal/handler/multipart_test.go +++ /dev/null @@ -1,153 +0,0 @@ -//go:build !integration - -package handler - -import ( - "crypto/rand" - "fmt" - "io" - "mime/multipart" - "os" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func generateRandomFile(size int64) (string, error) { - file, err := os.CreateTemp("", "data") - if err != nil { - return "", err - } - - _, err = io.CopyN(file, rand.Reader, size) - if err != nil { - return "", err - } - - return file.Name(), file.Close() -} - -func BenchmarkAll(b *testing.B) { - fileName, err := generateRandomFile(1024 * 1024 * 256) - require.NoError(b, err) - fmt.Println(fileName) - defer os.Remove(fileName) - - b.Run("bare", func(b *testing.B) { - for i := 0; i < b.N; i++ { - err := bareRead(fileName) - require.NoError(b, err) - } - }) - - b.Run("default", func(b *testing.B) { - for i := 0; i < b.N; i++ { - err := defaultMultipart(fileName) - require.NoError(b, err) - } - }) - - b.Run("custom", func(b *testing.B) { - for i := 0; i < b.N; i++ { - err := customMultipart(fileName) - require.NoError(b, err) - } - }) -} - -func defaultMultipart(filename string) error { - r, bound := multipartFile(filename) - - file, err := fetchMultipartFileDefault(zap.NewNop(), r, bound) - if err != nil { - return err - } - - _, err = io.Copy(io.Discard, file) - return err -} - -func TestName(t *testing.T) { - fileName, err := generateRandomFile(1024 * 1024 * 256) - require.NoError(t, err) - fmt.Println(fileName) - defer os.Remove(fileName) - - err = defaultMultipart(fileName) - require.NoError(t, err) -} - -func customMultipart(filename 
string) error { - r, bound := multipartFile(filename) - - file, err := fetchMultipartFile(zap.NewNop(), r, bound) - if err != nil { - return err - } - - _, err = io.Copy(io.Discard, file) - return err -} - -func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (MultipartFile, error) { - reader := multipart.NewReader(r, boundary) - - for { - part, err := reader.NextPart() - if err != nil { - return nil, err - } - - name := part.FormName() - if name == "" { - l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath)) - continue - } - - filename := part.FileName() - - // ignore multipart/form-data values - if filename == "" { - l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath)) - continue - } - - return part, nil - } -} - -func bareRead(filename string) error { - r, _ := multipartFile(filename) - - _, err := io.Copy(io.Discard, r) - return err -} - -func multipartFile(filename string) (*io.PipeReader, string) { - r, w := io.Pipe() - m := multipart.NewWriter(w) - go func() { - defer w.Close() - defer m.Close() - part, err := m.CreateFormFile("myFile", "foo.txt") - if err != nil { - fmt.Println(err) - return - } - - file, err := os.Open(filename) - if err != nil { - fmt.Println(err) - return - } - defer file.Close() - if _, err = io.Copy(part, file); err != nil { - fmt.Println(err) - return - } - }() - - return r, m.Boundary() -} diff --git a/internal/handler/reader.go b/internal/handler/reader.go deleted file mode 100644 index 711bfd2..0000000 --- a/internal/handler/reader.go +++ /dev/null @@ -1,196 +0,0 @@ -package handler - -import ( - "bytes" - "context" - "io" - "mime" - "net/http" - "path" - "strconv" - "strings" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - 
"github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -type readCloser struct { - io.Reader - io.Closer -} - -// initializes io.Reader with the limited size and detects Content-Type from it. -// Returns r's error directly. Also returns the processed data. -func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), filename string) (string, []byte, error) { - if maxSize > sizeToDetectType { - maxSize = sizeToDetectType - } - - buf := make([]byte, maxSize) // maybe sync-pool the slice? - - r, err := rInit(maxSize) - if err != nil { - return "", nil, err - } - - n, err := r.Read(buf) - if err != nil && err != io.EOF { - return "", nil, err - } - - buf = buf[:n] - - contentType := http.DetectContentType(buf) - - // Since the detector detects the "text/plain" content type for various types of text files, - // including CSS, JavaScript, and CSV files, - // we'll determine the final content type based on the file's extension. - if strings.HasPrefix(contentType, "text/plain") { - ext := path.Ext(filename) - // If the file doesn't have a file extension, we'll keep the content type as is. - if len(ext) > 0 { - contentType = mime.TypeByExtension(ext) - } - } - - return contentType, buf, err // to not lose io.EOF -} - -type getMultiobjectBodyParams struct { - obj *Object - strSize string -} - -func (h *Handler) receiveFile(ctx context.Context, req *fasthttp.RequestCtx, objAddress oid.Address) { - var ( - shouldDownload = req.QueryArgs().GetBool("download") - start = time.Now() - filename string - filepath string - contentType string - ) - - prm := PrmObjectGet{ - PrmAuth: PrmAuth{ - BearerToken: bearerToken(ctx), - }, - Address: objAddress, - } - - rObj, err := h.frostfs.GetObject(ctx, prm) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetObject, err, zap.Stringer("elapsed", time.Since(start))) - return - } - - // we can't close reader in this function, so how to do it? 
- setIDs(req, rObj.Header) - payload := rObj.Payload - payloadSize := rObj.Header.PayloadSize() - for _, attr := range rObj.Header.Attributes() { - key := attr.Key() - val := attr.Value() - if !isValidToken(key) || !isValidValue(val) { - continue - } - - key = utils.BackwardTransformIfSystem(key) - - req.Response.Header.Set(utils.UserAttributeHeaderPrefix+key, val) - switch key { - case object.AttributeFileName: - filename = val - case object.AttributeTimestamp: - if err = setTimestamp(req, val); err != nil { - h.reqLogger(ctx).Error(logs.CouldntParseCreationDate, - zap.String("val", val), - zap.Error(err), - logs.TagField(logs.TagDatapath)) - } - case object.AttributeContentType: - contentType = val - case object.AttributeFilePath: - filepath = val - case attributeMultipartObjectSize: - payload, payloadSize, err = h.getPayload(ctx, getMultiobjectBodyParams{ - obj: rObj, - strSize: val, - }) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetObjectPayload, err, zap.Stringer("elapsed", time.Since(start))) - return - } - } - } - if filename == "" { - filename = filepath - } - - setDisposition(req, shouldDownload, filename) - - req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10)) - - if len(contentType) == 0 { - // determine the Content-Type from the payload head - var payloadHead []byte - - contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) { - return payload, nil - }, filename) - if err != nil && err != io.EOF { - h.logAndSendError(ctx, req, logs.FailedToDetectContentTypeFromPayload, err, zap.Stringer("elapsed", time.Since(start))) - return - } - - // reset payload reader since a part of the data has been read - var headReader io.Reader = bytes.NewReader(payloadHead) - - if err != io.EOF { // otherwise, we've already read full payload - headReader = io.MultiReader(headReader, payload) - } - - // note: we could do with io.Reader, but SetBodyStream below closes body stream - 
// if it implements io.Closer and that's useful for us. - payload = readCloser{headReader, payload} - } - req.SetContentType(contentType) - req.Response.SetBodyStream(payload, int(payloadSize)) -} - -func setIDs(r *fasthttp.RequestCtx, obj object.Object) { - objID, _ := obj.ID() - cnrID, _ := obj.ContainerID() - r.Response.Header.Set(hdrObjectID, objID.String()) - r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String()) - r.Response.Header.Set(hdrContainerID, cnrID.String()) -} - -func setDisposition(r *fasthttp.RequestCtx, shouldDownload bool, filename string) { - const ( - inlineDisposition = "inline" - attachmentDisposition = "attachment" - ) - - dis := inlineDisposition - if shouldDownload { - dis = attachmentDisposition - } - - r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename)) -} - -func setTimestamp(r *fasthttp.RequestCtx, timestamp string) error { - value, err := strconv.ParseInt(timestamp, 10, 64) - if err != nil { - return err - } - r.Response.Header.Set(fasthttp.HeaderLastModified, - time.Unix(value, 0).UTC().Format(http.TimeFormat)) - - return nil -} diff --git a/internal/handler/reader_test.go b/internal/handler/reader_test.go deleted file mode 100644 index f867677..0000000 --- a/internal/handler/reader_test.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build !integration - -package handler - -import ( - "io" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -const ( - txtContentType = "text/plain; charset=utf-8" - cssContentType = "text/css; charset=utf-8" - htmlContentType = "text/html; charset=utf-8" - javascriptContentType = "text/javascript; charset=utf-8" - - htmlBody = "Test Html" -) - -func TestDetector(t *testing.T) { - sb := strings.Builder{} - for i := 0; i < 10; i++ { - sb.WriteString("Some txt content. 
Content-Type must be detected properly by detector.") - } - - for _, tc := range []struct { - Name string - ExpectedContentType string - Content string - FileName string - }{ - { - Name: "less than 512b", - ExpectedContentType: txtContentType, - Content: sb.String()[:256], - FileName: "test.txt", - }, - { - Name: "more than 512b", - ExpectedContentType: txtContentType, - Content: sb.String(), - FileName: "test.txt", - }, - { - Name: "css content type", - ExpectedContentType: cssContentType, - Content: sb.String(), - FileName: "test.css", - }, - { - Name: "javascript content type", - ExpectedContentType: javascriptContentType, - Content: sb.String(), - FileName: "test.js", - }, - { - Name: "html content type by file content", - ExpectedContentType: htmlContentType, - Content: htmlBody, - FileName: "test.detect-by-content", - }, - { - Name: "html content type by file extension", - ExpectedContentType: htmlContentType, - Content: sb.String(), - FileName: "test.html", - }, - { - Name: "empty file extension", - ExpectedContentType: txtContentType, - Content: sb.String(), - FileName: "test", - }, - } { - t.Run(tc.Name, func(t *testing.T) { - contentType, data, err := readContentType(uint64(len(tc.Content)), - func(uint64) (io.Reader, error) { - return strings.NewReader(tc.Content), nil - }, tc.FileName, - ) - - require.NoError(t, err) - require.Equal(t, tc.ExpectedContentType, contentType) - require.True(t, strings.HasPrefix(tc.Content, string(data))) - }) - } -} diff --git a/internal/handler/tree_service_client_mock_test.go b/internal/handler/tree_service_client_mock_test.go deleted file mode 100644 index f3af52a..0000000 --- a/internal/handler/tree_service_client_mock_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package handler - -import ( - "context" - "errors" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" -) - -type nodeMeta struct { - key string - value []byte -} - -func (m nodeMeta) 
GetKey() string { - return m.key -} - -func (m nodeMeta) GetValue() []byte { - return m.value -} - -type nodeResponse struct { - meta []nodeMeta - nodeID uint64 - parentID uint64 - timestamp uint64 -} - -func (n nodeResponse) GetNodeID() []uint64 { - return []uint64{n.nodeID} -} - -func (n nodeResponse) GetParentID() []uint64 { - return []uint64{n.parentID} -} - -func (n nodeResponse) GetTimestamp() []uint64 { - return []uint64{n.timestamp} -} - -func (n nodeResponse) GetMeta() []tree.Meta { - res := make([]tree.Meta, len(n.meta)) - for i, value := range n.meta { - res[i] = value - } - return res -} - -type containerInfo struct { - trees map[string]map[string]nodeResponse -} - -type treeServiceClientMock struct { - containers map[string]containerInfo -} - -func newTreeServiceClientMock() *treeServiceClientMock { - return &treeServiceClientMock{ - containers: make(map[string]containerInfo), - } -} - -func (t *treeServiceClientMock) GetNodes(_ context.Context, p *tree.GetNodesParams) ([]tree.NodeResponse, error) { - cnr, ok := t.containers[p.CnrID.EncodeToString()] - if !ok { - return nil, tree.ErrNodeNotFound - } - - tr, ok := cnr.trees[p.TreeID] - if !ok { - return nil, tree.ErrNodeNotFound - } - - node, ok := tr[strings.Join(p.Path, "/")] - if !ok { - return nil, tree.ErrNodeNotFound - } - - return []tree.NodeResponse{node}, nil -} - -func (t *treeServiceClientMock) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, _ bool) ([]tree.NodeResponse, error) { - cnr, ok := t.containers[bktInfo.CID.EncodeToString()] - if !ok { - return nil, tree.ErrNodeNotFound - } - - tr, ok := cnr.trees[treeID] - if !ok { - return nil, tree.ErrNodeNotFound - } - - if len(rootID) != 1 { - return nil, errors.New("invalid rootID") - } - - var root *nodeResponse - for _, v := range tr { - if v.nodeID == rootID[0] { - root = &v - break - } - } - - if root == nil { - return nil, tree.ErrNodeNotFound - } - - var res []nodeResponse - if depth 
== 0 { - for _, v := range tr { - res = append(res, v) - } - } else { - res = append(res, *root) - depthIndex := 0 - for i := uint32(0); i < depth-1; i++ { - childrenCount := 0 - for _, v := range tr { - for j := range res[depthIndex:] { - if v.parentID == res[j].nodeID { - res = append(res, v) - childrenCount++ - break - } - } - } - depthIndex = len(res) - childrenCount - } - } - - res2 := make([]tree.NodeResponse, len(res)) - for i := range res { - res2[i] = res[i] - } - - return res2, nil -} diff --git a/internal/handler/upload.go b/internal/handler/upload.go deleted file mode 100644 index 05f4c97..0000000 --- a/internal/handler/upload.go +++ /dev/null @@ -1,270 +0,0 @@ -package handler - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "encoding/json" - "errors" - "io" - "net/http" - "path/filepath" - "strconv" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -const ( - jsonHeader = "application/json; charset=UTF-8" - drainBufSize = 4096 - explodeArchiveHeader = "X-Explode-Archive" -) - -type putResponse struct { - ObjectID string `json:"object_id"` - ContainerID string `json:"container_id"` -} - -func newPutResponse(addr oid.Address) *putResponse { - return &putResponse{ - ObjectID: addr.Object().EncodeToString(), - ContainerID: addr.Container().EncodeToString(), - } -} - -func (pr *putResponse) encode(w io.Writer) error { - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - return enc.Encode(pr) -} - -// Upload handles multipart upload request. 
-func (h *Handler) Upload(req *fasthttp.RequestCtx) { - ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(req), "handler.Upload") - defer span.End() - - var file MultipartFile - - scid, _ := req.UserValue("cid").(string) - bodyStream := req.RequestBodyStream() - drainBuf := make([]byte, drainBufSize) - - log := h.reqLogger(ctx) - ctx = utils.SetReqLog(ctx, log.With(zap.String("cid", scid))) - - bktInfo, err := h.getBucketInfo(ctx, scid) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetBucketInfo, err) - return - } - - boundary := string(req.Request.Header.MultipartFormBoundary()) - if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil { - h.logAndSendError(ctx, req, logs.CouldNotReceiveMultipartForm, err) - return - } - - filtered, err := filterHeaders(log, &req.Request.Header) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToFilterHeaders, err) - return - } - - if req.Request.Header.Peek(explodeArchiveHeader) != nil { - h.explodeArchive(ctx, req, bktInfo, file, filtered) - } else { - h.uploadSingleObject(ctx, req, bktInfo, file, filtered) - } - - // Multipart is multipart and thus can contain more than one part which - // we ignore at the moment. Also, when dealing with chunked encoding - // the last zero-length chunk might be left unread (because multipart - // reader only cares about its boundary and doesn't look further) and - // it will be (erroneously) interpreted as the start of the next - // pipelined header. Thus, we need to drain the body buffer. 
- for { - _, err = bodyStream.Read(drainBuf) - if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) { - break - } - } -} - -func (h *Handler) uploadSingleObject(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) { - ctx, span := tracing.StartSpanFromContext(ctx, "handler.uploadSingleObject") - defer span.End() - - setIfNotExist(filtered, object.AttributeFileName, file.FileName()) - - attributes, err := h.extractAttributes(ctx, req, filtered) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err) - return - } - - idObj, err := h.uploadObject(ctx, bkt, attributes, file) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUploadObject, err) - return - } - h.reqLogger(ctx).Debug(logs.ObjectUploaded, - zap.String("oid", idObj.EncodeToString()), - zap.String("FileName", file.FileName()), - logs.TagField(logs.TagExternalStorage), - ) - - addr := newAddress(bkt.CID, idObj) - req.Response.Header.SetContentType(jsonHeader) - // Try to return the response, otherwise, if something went wrong, throw an error. - if err = newPutResponse(addr).encode(req); err != nil { - h.logAndSendError(ctx, req, logs.CouldNotEncodeResponse, err) - return - } -} - -func (h *Handler) uploadObject(ctx context.Context, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) { - obj := object.New() - obj.SetContainerID(bkt.CID) - obj.SetOwnerID(*h.ownerID) - obj.SetAttributes(attrs...) 
- - prm := PrmObjectCreate{ - PrmAuth: PrmAuth{ - BearerToken: h.fetchBearerToken(ctx), - }, - Object: obj, - Payload: file, - ClientCut: h.config.ClientCut(), - WithoutHomomorphicHash: bkt.HomomorphicHashDisabled, - BufferMaxSize: h.config.BufferMaxSizeForPut(), - } - - idObj, err := h.frostfs.CreateObject(ctx, prm) - if err != nil { - return oid.ID{}, err - } - - return idObj, nil -} - -func (h *Handler) extractAttributes(ctx context.Context, req *fasthttp.RequestCtx, filtered map[string]string) ([]object.Attribute, error) { - now := time.Now() - if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil { - if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil { - h.reqLogger(ctx).Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err), - logs.TagField(logs.TagDatapath)) - } else { - now = parsed - } - } - if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil { - h.reqLogger(ctx).Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath)) - return nil, err - } - attributes := make([]object.Attribute, 0, len(filtered)) - // prepares attributes from filtered headers - for key, val := range filtered { - attribute := newAttribute(key, val) - attributes = append(attributes, attribute) - } - // sets Timestamp attribute if it wasn't set from header and enabled by settings - if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() { - timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10)) - attributes = append(attributes, timestamp) - } - - return attributes, nil -} - -func newAttribute(key string, val string) object.Attribute { - attr := object.NewAttribute() - attr.SetKey(key) - attr.SetValue(val) - return *attr -} - -// explodeArchive read files from archive and creates objects for each of them. -// Sets FilePath attribute with name from tar.Header. 
-func (h *Handler) explodeArchive(ctx context.Context, req *fasthttp.RequestCtx, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) { - ctx, span := tracing.StartSpanFromContext(ctx, "handler.explodeArchive") - defer span.End() - - // remove user attributes which vary for each file in archive - // to guarantee that they won't appear twice - delete(filtered, object.AttributeFileName) - delete(filtered, object.AttributeFilePath) - - commonAttributes, err := h.extractAttributes(ctx, req, filtered) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToGetAttributes, err) - return - } - attributes := commonAttributes - - reader := file - if bytes.EqualFold(req.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) { - h.reqLogger(ctx).Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath)) - gzipReader, err := gzip.NewReader(file) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToCreateGzipReader, err) - return - } - defer func() { - if err := gzipReader.Close(); err != nil { - h.reqLogger(ctx).Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - }() - reader = gzipReader - } - - tarReader := tar.NewReader(reader) - for { - obj, err := tarReader.Next() - if errors.Is(err, io.EOF) { - break - } else if err != nil { - h.logAndSendError(ctx, req, logs.FailedToReadFileFromTar, err) - return - } - - if isDir(obj.Name) { - continue - } - - // set varying attributes - attributes = attributes[:len(commonAttributes)] - fileName := filepath.Base(obj.Name) - attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name)) - attributes = append(attributes, newAttribute(object.AttributeFileName, fileName)) - - idObj, err := h.uploadObject(ctx, bkt, attributes, tarReader) - if err != nil { - h.logAndSendError(ctx, req, logs.FailedToUploadObject, err) - return - } - - h.reqLogger(ctx).Debug(logs.ObjectUploaded, - zap.String("oid", idObj.EncodeToString()), - 
zap.String("FileName", fileName), - logs.TagField(logs.TagExternalStorage), - ) - } -} - -func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token { - if tkn, err := tokens.LoadBearerToken(ctx); err == nil && tkn != nil { - return tkn - } - return nil -} diff --git a/internal/handler/utils.go b/internal/handler/utils.go deleted file mode 100644 index c17b878..0000000 --- a/internal/handler/utils.go +++ /dev/null @@ -1,111 +0,0 @@ -package handler - -import ( - "context" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -func bearerToken(ctx context.Context) *bearer.Token { - if tkn, err := tokens.LoadBearerToken(ctx); err == nil { - return tkn - } - return nil -} - -func isDir(name string) bool { - return name == "" || strings.HasSuffix(name, "/") -} - -func loadAttributes(attrs []object.Attribute) map[string]string { - result := make(map[string]string) - for _, attr := range attrs { - result[attr.Key()] = attr.Value() - } - return result -} - -func isValidToken(s string) bool { - for _, c := range s { - if c <= ' ' || c > 127 { - return false - } - if strings.ContainsRune("()<>@,;:\\\"/[]?={}", c) { - return false - } - } - return true -} - -func isValidValue(s string) bool { - for _, c := range s { - // HTTP specification allows for more technically, but we don't want to escape things. 
- if c < ' ' || c > 127 || c == '"' { - return false - } - } - return true -} - -func (h *Handler) reqLogger(ctx context.Context) *zap.Logger { - return utils.GetReqLogOrDefault(ctx, h.log) -} - -func (h *Handler) logAndSendError(ctx context.Context, c *fasthttp.RequestCtx, msg string, err error, additional ...zap.Field) { - utils.GetReqLogOrDefault(ctx, h.log).Error(msg, - append([]zap.Field{zap.Error(err), logs.TagField(logs.TagDatapath)}, additional...)...) - - msg, code := formErrorResponse(err) - ResponseError(c, msg, code) -} - -func newAddress(cnr cid.ID, obj oid.ID) oid.Address { - var addr oid.Address - addr.SetContainer(cnr) - addr.SetObject(obj) - return addr -} - -// setIfNotExist sets key value to map if key is not present yet. -func setIfNotExist(m map[string]string, key, value string) { - if _, ok := m[key]; !ok { - m[key] = value - } -} - -func ResponseError(r *fasthttp.RequestCtx, msg string, code int) { - r.Error(msg+"\n", code) -} - -func formErrorResponse(err error) (string, int) { - switch { - case errors.Is(err, ErrAccessDenied): - return fmt.Sprintf("Storage Access Denied:\n%v", err), fasthttp.StatusForbidden - case errors.Is(err, tree.ErrNodeAccessDenied): - return fmt.Sprintf("Tree Access Denied:\n%v", err), fasthttp.StatusForbidden - case errors.Is(err, ErrQuotaLimitReached): - return fmt.Sprintf("Quota Reached:\n%v", err), fasthttp.StatusConflict - case errors.Is(err, ErrContainerNotFound): - return fmt.Sprintf("Container Not Found:\n%v", err), fasthttp.StatusNotFound - case errors.Is(err, ErrObjectNotFound): - return fmt.Sprintf("Object Not Found:\n%v", err), fasthttp.StatusNotFound - case errors.Is(err, tree.ErrNodeNotFound): - return fmt.Sprintf("Tree Node Not Found:\n%v", err), fasthttp.StatusNotFound - case errors.Is(err, ErrGatewayTimeout): - return fmt.Sprintf("Gateway Timeout:\n%v", err), fasthttp.StatusGatewayTimeout - default: - return fmt.Sprintf("Bad Request:\n%v", err), fasthttp.StatusBadRequest - } -} diff --git 
a/internal/logs/logs.go b/internal/logs/logs.go deleted file mode 100644 index 86921dd..0000000 --- a/internal/logs/logs.go +++ /dev/null @@ -1,142 +0,0 @@ -package logs - -import "go.uber.org/zap" - -const ( - TagFieldName = "tag" - - TagApp = "app" - TagDatapath = "datapath" - TagExternalStorage = "external_storage" - TagExternalStorageTree = "external_storage_tree" -) - -func TagField(tag string) zap.Field { - return zap.String(TagFieldName, tag) -} - -// Log messages with the "app" tag. -const ( - ServiceIsRunning = "service is running" - ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" - ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" - ShuttingDownService = "shutting down service" - CantShutDownService = "can't shut down service" - CantGracefullyShutDownService = "can't gracefully shut down service, force stop" - FailedToCreateResolver = "failed to create resolver" - FailedToCreateWorkerPool = "failed to create worker pool" - StartingApplication = "starting application" - StartingServer = "starting server" - ListenAndServe = "listen and serve" - ShuttingDownWebServer = "shutting down web server" - FailedToShutdownTracing = "failed to shutdown tracing" - AddedPathUploadCid = "added path /upload/{cid}" - AddedPathGetCidOid = "added path /get/{cid}/{oid}" - AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" - AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" - FailedToAddServer = "failed to add server" - AddServer = "add server" - NoHealthyServers = "no healthy servers" - FailedToInitializeTracing = "failed to initialize tracing" - RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" - RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" - CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" - UsingCredentials = "using 
credentials" - FailedToCreateConnectionPool = "failed to create connection pool" - FailedToDialConnectionPool = "failed to dial connection pool" - FailedToCreateTreePool = "failed to create tree pool" - FailedToDialTreePool = "failed to dial tree pool" - ServerReconnecting = "reconnecting server..." - ServerReconnectedSuccessfully = "server reconnected successfully" - ServerReconnectFailed = "failed to reconnect server" - MultinetDialSuccess = "multinet dial successful" - MultinetDialFail = "multinet dial failed" - ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" - MetricsAreDisabled = "metrics are disabled" - NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" - SIGHUPConfigReloadStarted = "SIGHUP config reload started" - FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" - FailedToReloadConfig = "failed to reload config" - FailedToUpdateResolvers = "failed to update resolvers" - FailedToReloadServerParameters = "failed to reload server parameters" - SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" - TracingConfigUpdated = "tracing config updated" - ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" - AddedStoragePeer = "added storage peer" - InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" - InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" - WarnDuplicateAddress = "duplicate address" - FailedToLoadMultinetConfig = "failed to load multinet config" - MultinetConfigWontBeUpdated = "multinet config won't be updated" - LogLevelWontBeUpdated = "log level won't be updated" - TagsLogConfigWontBeUpdated = "tags log config won't be updated" - FailedToReadIndexPageTemplate = "failed to read index 
page template" - SetCustomIndexPageTemplate = "set custom index page template" - CouldNotFetchCORSContainerInfo = "couldn't fetch CORS container info" - InitRPCClientFailed = "init rpc client faileds" - InitContainerContractFailed = "init container contract failed" - FailedToResolveContractHash = "failed to resolve contract hash" -) - -// Log messages with the "datapath" tag. -const ( - CouldntParseCreationDate = "couldn't parse creation date" - FailedToDetectContentTypeFromPayload = "failed to detect Content-Type from payload" - FailedToAddObjectToArchive = "failed to add object to archive" - CloseZipWriter = "close zip writer" - IgnorePartEmptyFormName = "ignore part, empty form name" - IgnorePartEmptyFilename = "ignore part, empty filename" - CouldNotParseClientTime = "could not parse client time" - CouldNotPrepareExpirationHeader = "could not prepare expiration header" - CouldNotEncodeResponse = "could not encode response" - AddAttributeToResultObject = "add attribute to result object" - Request = "request" - CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" - CouldntPutBucketIntoCache = "couldn't put bucket info into cache" - FailedToIterateOverResponse = "failed to iterate over search response" - InvalidCacheEntryType = "invalid cache entry type" - FailedToUnescapeQuery = "failed to unescape query" - CouldntCacheNetmap = "couldn't cache netmap" - FailedToCloseReader = "failed to close reader" - FailedToFilterHeaders = "failed to filter headers" - FailedToReadFileFromTar = "failed to read file from tar" - FailedToGetAttributes = "failed to get attributes" - CloseGzipWriter = "close gzip writer" - CloseTarWriter = "close tar writer" - FailedToCreateGzipReader = "failed to create gzip reader" - GzipReaderSelected = "gzip reader selected" - CouldNotReceiveMultipartForm = "could not receive multipart/form" - ObjectsNotFound = "objects not found" - IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" - 
FailedToGetBucketInfo = "could not get bucket info" - FailedToSubmitTaskToPool = "failed to submit task to pool" - IndexWasDeleted = "index was deleted" - FailedToGetLatestVersionOfIndexObject = "failed to get latest version of index object" - FailedToCheckIfSettingsNodeExist = "failed to check if settings node exists" - FailedToListObjects = "failed to list objects" - FailedToParseTemplate = "failed to parse template" - FailedToExecuteTemplate = "failed to execute template" - FailedToUploadObject = "failed to upload object" - FailedToHeadObject = "failed to head object" - FailedToGetObject = "failed to get object" - FailedToGetObjectPayload = "failed to get object payload" - FailedToFindObjectByAttribute = "failed to get find object by attribute" - FailedToUnescapePath = "failed to unescape path" - CouldNotGetCORSConfiguration = "could not get cors configuration" - EmptyOriginRequestHeader = "empty Origin request header" - EmptyAccessControlRequestMethodHeader = "empty Access-Control-Request-Method request header" - CORSRuleWasNotMatched = "cors rule was not matched" - CouldntCacheCors = "couldn't cache cors" -) - -// Log messages with the "external_storage" tag. -const ( - ObjectNotFound = "object not found" - ReadObjectListFailed = "read object list failed" - ObjectUploaded = "object uploaded" -) - -// Log messages with the "external_storage_tree" tag. 
-const ( - FoundSeveralSystemTreeNodes = "found several system tree nodes" -) diff --git a/internal/net/config.go b/internal/net/config.go deleted file mode 100644 index b40e003..0000000 --- a/internal/net/config.go +++ /dev/null @@ -1,68 +0,0 @@ -package net - -import ( - "errors" - "fmt" - "net/netip" - "slices" - "time" - - "git.frostfs.info/TrueCloudLab/multinet" -) - -var errEmptySourceIPList = errors.New("empty source IP list") - -type Subnet struct { - Prefix string - SourceIPs []string -} - -type Config struct { - Enabled bool - Subnets []Subnet - Balancer string - Restrict bool - FallbackDelay time.Duration - EventHandler multinet.EventHandler -} - -func (c Config) toMultinetConfig() (multinet.Config, error) { - var subnets []multinet.Subnet - for _, s := range c.Subnets { - var ms multinet.Subnet - p, err := netip.ParsePrefix(s.Prefix) - if err != nil { - return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err) - } - ms.Prefix = p - for _, ip := range s.SourceIPs { - addr, err := netip.ParseAddr(ip) - if err != nil { - return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err) - } - ms.SourceIPs = append(ms.SourceIPs, addr) - } - if len(ms.SourceIPs) == 0 { - return multinet.Config{}, errEmptySourceIPList - } - subnets = append(subnets, ms) - } - return multinet.Config{ - Subnets: subnets, - Balancer: multinet.BalancerType(c.Balancer), - Restrict: c.Restrict, - FallbackDelay: c.FallbackDelay, - Dialer: newDefaultDialer(), - EventHandler: c.EventHandler, - }, nil -} - -func (c Config) equals(other Config) bool { - return c.Enabled == other.Enabled && - slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool { - return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs) - }) && - c.Balancer == other.Balancer && - c.Restrict == other.Restrict && - c.FallbackDelay == other.FallbackDelay -} diff --git a/internal/net/dial_target.go b/internal/net/dial_target.go deleted file mode 100644 
index 6265f18..0000000 --- a/internal/net/dial_target.go +++ /dev/null @@ -1,54 +0,0 @@ -// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go - -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package net - -import ( - "net/url" - "strings" -) - -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { - net := "tcp" - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - return n, target[m1+1:] - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr := t.Path - if scheme == "unix" { - if addr == "" { - addr = t.Host - } - return scheme, addr - } - } - return net, target -} diff --git a/internal/net/dialer.go b/internal/net/dialer.go deleted file mode 100644 index 8441dd5..0000000 --- a/internal/net/dialer.go +++ /dev/null @@ -1,36 +0,0 @@ -package net - -import ( - "net" - "syscall" - "time" - - "golang.org/x/sys/unix" -) - -func newDefaultDialer() net.Dialer { - // From `grpc.WithContextDialer` comment: - // - // Note: All supported releases of Go (as of December 2023) override the OS - // defaults for TCP keepalive time and interval to 15s. 
To enable TCP keepalive - // with OS defaults for keepalive time and interval, use a net.Dialer that sets - // the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket - // option to true from the Control field. For a concrete example of how to do - // this, see internal.NetDialerWithTCPKeepalive(). - // - // https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432 - // - // From `internal.NetDialerWithTCPKeepalive` comment: - // - // TODO: Once https://github.com/golang/go/issues/62254 lands, and the - // appropriate Go version becomes less than our least supported Go version, we - // should look into using the new API to make things more straightforward. - return net.Dialer{ - KeepAlive: time.Duration(-1), - Control: func(_, _ string, c syscall.RawConn) error { - return c.Control(func(fd uintptr) { - _ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) - }) - }, - } -} diff --git a/internal/net/dialer_source.go b/internal/net/dialer_source.go deleted file mode 100644 index e6a142a..0000000 --- a/internal/net/dialer_source.go +++ /dev/null @@ -1,69 +0,0 @@ -package net - -import ( - "context" - "net" - "sync" - - "git.frostfs.info/TrueCloudLab/multinet" -) - -type DialerSource struct { - guard sync.RWMutex - - c Config - - md multinet.Dialer -} - -func NewDialerSource(c Config) (*DialerSource, error) { - result := &DialerSource{} - if err := result.build(c); err != nil { - return nil, err - } - return result, nil -} - -func (s *DialerSource) build(c Config) error { - if c.Enabled { - mc, err := c.toMultinetConfig() - if err != nil { - return err - } - md, err := multinet.NewDialer(mc) - if err != nil { - return err - } - s.md = md - s.c = c - return nil - } - s.md = nil - s.c = c - return nil -} - -// GrpcContextDialer returns grpc.WithContextDialer func. -// Returns nil if multinet disabled. 
-func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) { - s.guard.RLock() - defer s.guard.RUnlock() - - if s.c.Enabled { - return func(ctx context.Context, address string) (net.Conn, error) { - network, address := parseDialTarget(address) - return s.md.DialContext(ctx, network, address) - } - } - return nil -} - -func (s *DialerSource) Update(c Config) error { - s.guard.Lock() - defer s.guard.Unlock() - - if s.c.equals(c) { - return nil - } - return s.build(c) -} diff --git a/internal/net/event_handler.go b/internal/net/event_handler.go deleted file mode 100644 index 2826d35..0000000 --- a/internal/net/event_handler.go +++ /dev/null @@ -1,30 +0,0 @@ -package net - -import ( - "net" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "go.uber.org/zap" -) - -type LogEventHandler struct { - logger *zap.Logger -} - -func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err error) { - sourceIPString := "undefined" - if sourceIP != nil { - sourceIPString = sourceIP.Network() + "://" + sourceIP.String() - } - if err == nil { - l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), - zap.String("destination", address), logs.TagField(logs.TagApp)) - } else { - l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), - zap.String("destination", address), logs.TagField(logs.TagApp)) - } -} - -func NewLogEventHandler(logger *zap.Logger) LogEventHandler { - return LogEventHandler{logger: logger} -} diff --git a/internal/service/contracts/container/client.go b/internal/service/contracts/container/client.go deleted file mode 100644 index 09455be..0000000 --- a/internal/service/contracts/container/client.go +++ /dev/null @@ -1,73 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - containercontract "git.frostfs.info/TrueCloudLab/frostfs-contract/container" - containerclient "git.frostfs.info/TrueCloudLab/frostfs-contract/rpcclient/container" - 
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/nspcc-dev/neo-go/pkg/rpcclient" - "github.com/nspcc-dev/neo-go/pkg/rpcclient/actor" - "github.com/nspcc-dev/neo-go/pkg/util" - "github.com/nspcc-dev/neo-go/pkg/wallet" -) - -type Client struct { - contract *containerclient.Contract -} - -type Config struct { - ContractHash util.Uint160 - Key *keys.PrivateKey - RPCClient *rpcclient.Client -} - -func New(cfg Config) (*Client, error) { - var err error - key := cfg.Key - if key == nil { - if key, err = keys.NewPrivateKey(); err != nil { - return nil, fmt.Errorf("generate anon private key for container contract: %w", err) - } - } - acc := wallet.NewAccountFromPrivateKey(key) - - act, err := actor.NewSimple(cfg.RPCClient, acc) - if err != nil { - return nil, fmt.Errorf("create new actor: %w", err) - } - - return &Client{ - contract: containerclient.New(act, cfg.ContractHash), - }, nil -} - -func (c *Client) GetContainerByID(cnrID cid.ID) (*container.Container, error) { - items, err := c.contract.Get(cnrID[:]) - if err != nil { - if strings.Contains(err.Error(), containercontract.NotFoundError) { - return nil, fmt.Errorf("%w: %s", handler.ErrContainerNotFound, err) - } - return nil, err - } - - if len(items) != 4 { - return nil, fmt.Errorf("unexpected container stack item count: %d", len(items)) - } - - cnrBytes, err := items[0].TryBytes() - if err != nil { - return nil, fmt.Errorf("could not get byte array of container: %w", err) - } - - var cnr container.Container - if err = cnr.Unmarshal(cnrBytes); err != nil { - return nil, fmt.Errorf("can't unmarshal container: %w", err) - } - - return &cnr, nil -} diff --git a/internal/service/contracts/util/util.go b/internal/service/contracts/util/util.go deleted file mode 100644 index 444504b..0000000 --- 
a/internal/service/contracts/util/util.go +++ /dev/null @@ -1,34 +0,0 @@ -package util - -import ( - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns" - "github.com/nspcc-dev/neo-go/pkg/util" -) - -// ResolveContractHash determine contract hash by resolving NNS name. -func ResolveContractHash(contractHash, rpcAddress string) (util.Uint160, error) { - if hash, err := util.Uint160DecodeStringLE(contractHash); err == nil { - return hash, nil - } - - splitName := strings.Split(contractHash, ".") - if len(splitName) != 2 { - return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", contractHash) - } - - var domain container.Domain - domain.SetName(splitName[0]) - domain.SetZone(splitName[1]) - - var nns ns.NNS - if err := nns.Dial(rpcAddress); err != nil { - return util.Uint160{}, fmt.Errorf("dial nns %s: %w", rpcAddress, err) - } - defer nns.Close() - - return nns.ResolveContractHash(domain) -} diff --git a/internal/service/frostfs/frostfs.go b/internal/service/frostfs/frostfs.go deleted file mode 100644 index 1841446..0000000 --- a/internal/service/frostfs/frostfs.go +++ /dev/null @@ -1,296 +0,0 @@ -package frostfs - -import ( - "context" - "errors" - "fmt" - "io" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// FrostFS represents virtual connection 
to the FrostFS network. -// It is used to provide an interface to dependent packages -// which work with FrostFS. -type FrostFS struct { - pool *pool.Pool -} - -// NewFrostFS creates new FrostFS using provided pool.Pool. -func NewFrostFS(p *pool.Pool) *FrostFS { - return &FrostFS{ - pool: p, - } -} - -// Container implements frostfs.FrostFS interface method. -func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.Container") - defer span.End() - - prm := pool.PrmContainerGet{ - ContainerID: containerPrm.ContainerID, - } - - res, err := x.pool.GetContainer(ctx, prm) - if err != nil { - return nil, handleStorageError("read container via connection pool", err) - } - - return &res, nil -} - -// CreateObject implements frostfs.FrostFS interface method. -func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.CreateObject") - defer span.End() - - var prmPut pool.PrmObjectPut - prmPut.SetHeader(*prm.Object) - prmPut.SetPayload(prm.Payload) - prmPut.SetClientCut(prm.ClientCut) - prmPut.WithoutHomomorphicHash(prm.WithoutHomomorphicHash) - prmPut.SetBufferMaxSize(prm.BufferMaxSize) - - if prm.BearerToken != nil { - prmPut.UseBearer(*prm.BearerToken) - } - - idObj, err := x.pool.PutObject(ctx, prmPut) - if err != nil { - return oid.ID{}, handleStorageError("save object via connection pool", err) - } - return idObj.ObjectID, nil -} - -// wraps io.ReadCloser and transforms Read errors related to access violation -// to frostfs.ErrAccessDenied. -type payloadReader struct { - io.ReadCloser -} - -func (x payloadReader) Read(p []byte) (int, error) { - n, err := x.ReadCloser.Read(p) - if err != nil && errors.Is(err, io.EOF) { - return n, err - } - return n, handleStorageError("read payload", err) -} - -// HeadObject implements frostfs.FrostFS interface method. 
-func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.HeadObject") - defer span.End() - - var prmHead pool.PrmObjectHead - prmHead.SetAddress(prm.Address) - - if prm.BearerToken != nil { - prmHead.UseBearer(*prm.BearerToken) - } - - res, err := x.pool.HeadObject(ctx, prmHead) - if err != nil { - return nil, handleStorageError("read object header via connection pool", err) - } - - return &res, nil -} - -// GetObject implements frostfs.FrostFS interface method. -func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetObject") - defer span.End() - - var prmGet pool.PrmObjectGet - prmGet.SetAddress(prm.Address) - - if prm.BearerToken != nil { - prmGet.UseBearer(*prm.BearerToken) - } - - res, err := x.pool.GetObject(ctx, prmGet) - if err != nil { - return nil, handleStorageError("init full object reading via connection pool", err) - } - - return &handler.Object{ - Header: res.Header, - Payload: res.Payload, - }, nil -} - -// RangeObject implements frostfs.FrostFS interface method. -func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.RangeObject") - defer span.End() - - var prmRange pool.PrmObjectRange - prmRange.SetAddress(prm.Address) - prmRange.SetOffset(prm.PayloadRange[0]) - prmRange.SetLength(prm.PayloadRange[1]) - - if prm.BearerToken != nil { - prmRange.UseBearer(*prm.BearerToken) - } - - res, err := x.pool.ObjectRange(ctx, prmRange) - if err != nil { - return nil, handleStorageError("init payload range reading via connection pool", err) - } - - return payloadReader{&res}, nil -} - -// SearchObjects implements frostfs.FrostFS interface method. 
-func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SearchObjects") - defer span.End() - - var prmSearch pool.PrmObjectSearch - prmSearch.SetContainerID(prm.Container) - prmSearch.SetFilters(prm.Filters) - - if prm.BearerToken != nil { - prmSearch.UseBearer(*prm.BearerToken) - } - - res, err := x.pool.SearchObjects(ctx, prmSearch) - if err != nil { - return nil, handleStorageError("init object search via connection pool", err) - } - - return &res, nil -} - -// GetEpochDurations implements frostfs.FrostFS interface method. -func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetEpochDurations") - defer span.End() - - networkInfo, err := x.pool.NetworkInfo(ctx) - if err != nil { - return nil, err - } - - res := &utils.EpochDurations{ - CurrentEpoch: networkInfo.CurrentEpoch(), - MsPerBlock: networkInfo.MsPerBlock(), - BlockPerEpoch: networkInfo.EpochDuration(), - } - - if res.BlockPerEpoch == 0 { - return nil, fmt.Errorf("EpochDuration is empty") - } - return res, nil -} - -func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.NetmapSnapshot") - defer span.End() - - netmapSnapshot, err := x.pool.NetMapSnapshot(ctx) - if err != nil { - return netmapSnapshot, handleStorageError("get netmap via connection pool", err) - } - - return netmapSnapshot, nil -} - -// ResolverFrostFS represents virtual connection to the FrostFS network. -// It implements resolver.FrostFS. -type ResolverFrostFS struct { - pool *pool.Pool -} - -// NewResolverFrostFS creates new ResolverFrostFS using provided pool.Pool. -func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS { - return &ResolverFrostFS{pool: p} -} - -// SystemDNS implements resolver.FrostFS interface method. 
-func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SystemDNS") - defer span.End() - - networkInfo, err := x.pool.NetworkInfo(ctx) - if err != nil { - return "", handleStorageError("read network info via client", err) - } - - domain := networkInfo.RawNetworkParameter("SystemDNS") - if domain == nil { - return "", errors.New("system DNS parameter not found or empty") - } - - return string(domain), nil -} - -func handleStorageError(msg string, err error) error { - if err == nil { - return nil - } - - if reason, ok := IsErrObjectAccessDenied(err); ok { - if strings.Contains(reason, "limit reached") { - return fmt.Errorf("%s: %w: %s", msg, handler.ErrQuotaLimitReached, reason) - } - - return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason) - } - - if client.IsErrContainerNotFound(err) { - return fmt.Errorf("%s: %w: %s", msg, handler.ErrContainerNotFound, err.Error()) - } - - if client.IsErrObjectNotFound(err) { - return fmt.Errorf("%s: %w: %s", msg, handler.ErrObjectNotFound, err.Error()) - } - - if IsTimeoutError(err) { - return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error()) - } - - return fmt.Errorf("%s: %w", msg, err) -} - -func UnwrapErr(err error) error { - unwrappedErr := errors.Unwrap(err) - for unwrappedErr != nil { - err = unwrappedErr - unwrappedErr = errors.Unwrap(err) - } - - return err -} - -func IsErrObjectAccessDenied(err error) (string, bool) { - err = UnwrapErr(err) - switch err := err.(type) { - default: - return "", false - case *apistatus.ObjectAccessDenied: - return err.Reason(), true - } -} - -func IsTimeoutError(err error) bool { - if strings.Contains(err.Error(), "timeout") || - errors.Is(err, context.DeadlineExceeded) { - return true - } - - return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded -} diff --git a/internal/service/frostfs/frostfs_test.go b/internal/service/frostfs/frostfs_test.go deleted file mode 100644 
index e4344f7..0000000 --- a/internal/service/frostfs/frostfs_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package frostfs - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func TestHandleObjectError(t *testing.T) { - msg := "some msg" - - t.Run("nil error", func(t *testing.T) { - err := handleStorageError(msg, nil) - require.Nil(t, err) - }) - - t.Run("simple access denied", func(t *testing.T) { - reason := "some reason" - inputErr := new(apistatus.ObjectAccessDenied) - inputErr.WriteReason(reason) - - err := handleStorageError(msg, inputErr) - require.ErrorIs(t, err, handler.ErrAccessDenied) - require.Contains(t, err.Error(), reason) - require.Contains(t, err.Error(), msg) - }) - - t.Run("access denied - quota reached", func(t *testing.T) { - reason := "Quota limit reached" - inputErr := new(apistatus.ObjectAccessDenied) - inputErr.WriteReason(reason) - - err := handleStorageError(msg, inputErr) - require.ErrorIs(t, err, handler.ErrQuotaLimitReached) - require.Contains(t, err.Error(), reason) - require.Contains(t, err.Error(), msg) - }) - - t.Run("simple timeout", func(t *testing.T) { - inputErr := errors.New("timeout") - - err := handleStorageError(msg, inputErr) - require.ErrorIs(t, err, handler.ErrGatewayTimeout) - require.Contains(t, err.Error(), inputErr.Error()) - require.Contains(t, err.Error(), msg) - }) - - t.Run("deadline exceeded", func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - defer cancel() - <-ctx.Done() - - err := handleStorageError(msg, ctx.Err()) - require.ErrorIs(t, err, handler.ErrGatewayTimeout) - require.Contains(t, err.Error(), ctx.Err().Error()) - require.Contains(t, err.Error(), msg) - }) - - t.Run("grpc deadline exceeded", 
func(t *testing.T) { - inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error")) - - err := handleStorageError(msg, inputErr) - require.ErrorIs(t, err, handler.ErrGatewayTimeout) - require.Contains(t, err.Error(), inputErr.Error()) - require.Contains(t, err.Error(), msg) - }) - - t.Run("unknown error", func(t *testing.T) { - inputErr := errors.New("unknown error") - - err := handleStorageError(msg, inputErr) - require.ErrorIs(t, err, inputErr) - require.Contains(t, err.Error(), msg) - }) -} diff --git a/internal/service/frostfs/multi_object_reader.go b/internal/service/frostfs/multi_object_reader.go deleted file mode 100644 index b943474..0000000 --- a/internal/service/frostfs/multi_object_reader.go +++ /dev/null @@ -1,248 +0,0 @@ -package frostfs - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "time" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" -) - -// PartInfo is upload information about part. -type PartInfo struct { - Key string `json:"key"` - UploadID string `json:"uploadId"` - Number int `json:"number"` - OID oid.ID `json:"oid"` - Size uint64 `json:"size"` - ETag string `json:"etag"` - MD5 string `json:"md5"` - Created time.Time `json:"created"` -} - -type GetFrostFSParams struct { - // payload range - Off, Ln uint64 - Addr oid.Address -} - -type PartObj struct { - OID oid.ID - Size uint64 -} - -type readerInitiator interface { - InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) -} - -// MultiObjectReader implements io.Reader of payloads of the object list stored in the FrostFS network. 
-type MultiObjectReader struct { - ctx context.Context - - layer readerInitiator - - startPartOffset uint64 - endPartLength uint64 - - prm GetFrostFSParams - - curIndex int - curReader io.ReadCloser - - parts []PartObj -} - -type MultiObjectReaderConfig struct { - Initiator readerInitiator - - // the offset of complete object and total size to read - Off, Ln uint64 - - Addr oid.Address - Parts []PartObj -} - -var ( - errOffsetIsOutOfRange = errors.New("offset is out of payload range") - errLengthIsOutOfRange = errors.New("length is out of payload range") - errEmptyPartsList = errors.New("empty parts list") - errorZeroRangeLength = errors.New("zero range length") -) - -func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitMultiObjectReader") - defer span.End() - - combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{ - PrmAuth: handler.PrmAuth{BearerToken: p.Bearer}, - Address: p.Addr, - }) - if err != nil { - return nil, fmt.Errorf("get combined object '%s': %w", p.Addr.Object().EncodeToString(), err) - } - - var parts []*PartInfo - if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil { - return nil, fmt.Errorf("unmarshal combined object parts: %w", err) - } - - objParts := make([]PartObj, len(parts)) - for i, part := range parts { - objParts[i] = PartObj{ - OID: part.OID, - Size: part.Size, - } - } - - return NewMultiObjectReader(ctx, MultiObjectReaderConfig{ - Initiator: x, - Off: p.Off, - Ln: p.Ln, - Parts: objParts, - Addr: p.Addr, - }) -} - -func NewMultiObjectReader(ctx context.Context, cfg MultiObjectReaderConfig) (*MultiObjectReader, error) { - if len(cfg.Parts) == 0 { - return nil, errEmptyPartsList - } - - r := &MultiObjectReader{ - ctx: ctx, - layer: cfg.Initiator, - prm: GetFrostFSParams{ - Addr: cfg.Addr, - }, - parts: cfg.Parts, - } - - if cfg.Off+cfg.Ln == 0 { - return r, nil - } - - if cfg.Off > 0 && 
cfg.Ln == 0 { - return nil, errorZeroRangeLength - } - - startPartIndex, startPartOffset := findStartPart(cfg) - if startPartIndex == -1 { - return nil, errOffsetIsOutOfRange - } - r.startPartOffset = startPartOffset - - endPartIndex, endPartLength := findEndPart(cfg) - if endPartIndex == -1 { - return nil, errLengthIsOutOfRange - } - r.endPartLength = endPartLength - - r.parts = cfg.Parts[startPartIndex : endPartIndex+1] - - return r, nil -} - -func findStartPart(cfg MultiObjectReaderConfig) (index int, offset uint64) { - position := cfg.Off - for i, part := range cfg.Parts { - // Strict inequality when searching for start position to avoid reading zero length part. - if position < part.Size { - return i, position - } - position -= part.Size - } - - return -1, 0 -} - -func findEndPart(cfg MultiObjectReaderConfig) (index int, length uint64) { - position := cfg.Off + cfg.Ln - for i, part := range cfg.Parts { - // Non-strict inequality when searching for end position to avoid out of payload range error. 
- if position <= part.Size { - return i, position - } - position -= part.Size - } - - return -1, 0 -} - -func (x *MultiObjectReader) Read(p []byte) (n int, err error) { - if x.curReader != nil { - n, err = x.curReader.Read(p) - if err != nil { - if closeErr := x.curReader.Close(); closeErr != nil { - return n, fmt.Errorf("%w (close err: %v)", err, closeErr) - } - } - if !errors.Is(err, io.EOF) { - return n, err - } - - x.curIndex++ - } - - if x.curIndex == len(x.parts) { - return n, io.EOF - } - - x.prm.Addr.SetObject(x.parts[x.curIndex].OID) - - if x.curIndex == 0 { - x.prm.Off = x.startPartOffset - x.prm.Ln = x.parts[x.curIndex].Size - x.startPartOffset - } - - if x.curIndex == len(x.parts)-1 { - x.prm.Ln = x.endPartLength - x.prm.Off - } - - x.curReader, err = x.layer.InitFrostFSObjectPayloadReader(x.ctx, x.prm) - if err != nil { - return n, fmt.Errorf("init payload reader for the next part: %w", err) - } - - x.prm.Off = 0 - x.prm.Ln = 0 - - next, err := x.Read(p[n:]) - - return n + next, err -} - -// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object. -// Zero range corresponds to full payload (panics if only offset is set). 
-func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitFrostFSObjectPayloadReader") - defer span.End() - - var prmAuth handler.PrmAuth - - if p.Off+p.Ln != 0 { - prm := handler.PrmObjectRange{ - PrmAuth: prmAuth, - PayloadRange: [2]uint64{p.Off, p.Ln}, - Address: p.Addr, - } - - return x.RangeObject(ctx, prm) - } - - prm := handler.PrmObjectGet{ - PrmAuth: prmAuth, - Address: p.Addr, - } - - res, err := x.GetObject(ctx, prm) - if err != nil { - return nil, err - } - - return res.Payload, nil -} diff --git a/internal/service/frostfs/multi_object_reader_test.go b/internal/service/frostfs/multi_object_reader_test.go deleted file mode 100644 index 4127cdc..0000000 --- a/internal/service/frostfs/multi_object_reader_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package frostfs - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "testing" - - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test" - "github.com/stretchr/testify/require" -) - -type readerInitiatorMock struct { - parts map[oid.ID][]byte -} - -func (r *readerInitiatorMock) InitFrostFSObjectPayloadReader(_ context.Context, p GetFrostFSParams) (io.ReadCloser, error) { - partPayload, ok := r.parts[p.Addr.Object()] - if !ok { - return nil, errors.New("part not found") - } - - if p.Off+p.Ln == 0 { - return io.NopCloser(bytes.NewReader(partPayload)), nil - } - - if p.Off > uint64(len(partPayload)-1) { - return nil, fmt.Errorf("invalid offset: %d/%d", p.Off, len(partPayload)) - } - - if p.Off+p.Ln > uint64(len(partPayload)) { - return nil, fmt.Errorf("invalid range: %d-%d/%d", p.Off, p.Off+p.Ln, len(partPayload)) - } - - return io.NopCloser(bytes.NewReader(partPayload[p.Off : p.Off+p.Ln])), nil -} - -func prepareDataReader() ([]byte, []PartObj, *readerInitiatorMock) { - mockInitReader := &readerInitiatorMock{ - 
parts: map[oid.ID][]byte{ - oidtest.ID(): []byte("first part 1"), - oidtest.ID(): []byte("second part 2"), - oidtest.ID(): []byte("third part 3"), - }, - } - - var fullPayload []byte - parts := make([]PartObj, 0, len(mockInitReader.parts)) - for id, payload := range mockInitReader.parts { - parts = append(parts, PartObj{OID: id, Size: uint64(len(payload))}) - fullPayload = append(fullPayload, payload...) - } - - return fullPayload, parts, mockInitReader -} - -func TestMultiReader(t *testing.T) { - ctx := context.Background() - - fullPayload, parts, mockInitReader := prepareDataReader() - - for _, tc := range []struct { - name string - off uint64 - ln uint64 - err error - }{ - { - name: "simple read all", - }, - { - name: "simple read with length", - ln: uint64(len(fullPayload)), - }, - { - name: "middle of parts", - off: parts[0].Size + 2, - ln: 4, - }, - { - name: "first and second", - off: parts[0].Size - 4, - ln: 8, - }, - { - name: "first and third", - off: parts[0].Size - 4, - ln: parts[1].Size + 8, - }, - { - name: "second part", - off: parts[0].Size, - ln: parts[1].Size, - }, - { - name: "second and third", - off: parts[0].Size, - ln: parts[1].Size + parts[2].Size, - }, - { - name: "offset out of range", - off: uint64(len(fullPayload) + 1), - ln: 1, - err: errOffsetIsOutOfRange, - }, - { - name: "zero length", - off: parts[1].Size + 1, - ln: 0, - err: errorZeroRangeLength, - }, - } { - t.Run(tc.name, func(t *testing.T) { - multiReader, err := NewMultiObjectReader(ctx, MultiObjectReaderConfig{ - Initiator: mockInitReader, - Parts: parts, - Off: tc.off, - Ln: tc.ln, - }) - require.ErrorIs(t, err, tc.err) - - if tc.err == nil { - off := tc.off - ln := tc.ln - if off+ln == 0 { - ln = uint64(len(fullPayload)) - } - data, err := io.ReadAll(multiReader) - require.NoError(t, err) - require.Equal(t, fullPayload[off:off+ln], data) - } - }) - } -} diff --git a/internal/service/frostfs/source.go b/internal/service/frostfs/source.go deleted file mode 100644 index 
84f7b74..0000000 --- a/internal/service/frostfs/source.go +++ /dev/null @@ -1,69 +0,0 @@ -package frostfs - -import ( - "context" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap" - "go.uber.org/zap" -) - -type Source struct { - frostFS *FrostFS - netmapCache *cache.NetmapCache - bucketCache *cache.BucketCache - log *zap.Logger -} - -func NewSource(frostFS *FrostFS, netmapCache *cache.NetmapCache, bucketCache *cache.BucketCache, log *zap.Logger) *Source { - return &Source{ - frostFS: frostFS, - netmapCache: netmapCache, - bucketCache: bucketCache, - log: log, - } -} - -func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) { - cachedNetmap := s.netmapCache.Get() - if cachedNetmap != nil { - return *cachedNetmap, nil - } - - netmapSnapshot, err := s.frostFS.NetmapSnapshot(ctx) - if err != nil { - return netmap.NetMap{}, fmt.Errorf("get netmap: %w", err) - } - - if err = s.netmapCache.Put(netmapSnapshot); err != nil { - s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath)) - } - - return netmapSnapshot, nil -} - -func (s *Source) PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error) { - info := s.bucketCache.GetByCID(cnrID) - if info != nil { - return info.PlacementPolicy, nil - } - - prm := handler.PrmContainer{ - ContainerID: cnrID, - } - res, err := s.frostFS.Container(ctx, prm) - if err != nil { - return netmap.PlacementPolicy{}, fmt.Errorf("get container: %w", err) - } - - // We don't put container back to the cache to keep cache - // coherent to the requests made by users. FrostFS Source - // is being used by SDK Tree Pool and it should not fill cache - // with possibly irrelevant container values. 
- - return res.PlacementPolicy(), nil -} diff --git a/internal/service/frostfs/tree_pool_wrapper.go b/internal/service/frostfs/tree_pool_wrapper.go deleted file mode 100644 index d0b5501..0000000 --- a/internal/service/frostfs/tree_pool_wrapper.go +++ /dev/null @@ -1,170 +0,0 @@ -package frostfs - -import ( - "context" - "errors" - "fmt" - "io" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree" - treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree" -) - -type GetNodeByPathResponseInfoWrapper struct { - response *apitree.GetNodeByPathResponseInfo -} - -func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 { - return []uint64{n.response.GetNodeID()} -} - -func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 { - return []uint64{n.response.GetParentID()} -} - -func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 { - return []uint64{n.response.GetTimestamp()} -} - -func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta { - res := make([]tree.Meta, len(n.response.GetMeta())) - for i, value := range n.response.GetMeta() { - res[i] = value - } - return res -} - -type PoolWrapper struct { - p *treepool.Pool -} - -func NewPoolWrapper(p *treepool.Pool) *PoolWrapper { - return &PoolWrapper{p: p} -} - -func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetNodes") - defer span.End() - - poolPrm := treepool.GetNodesParams{ - CID: prm.CnrID, - TreeID: prm.TreeID, - Path: prm.Path, - Meta: prm.Meta, - PathAttribute: tree.FileNameKey, - LatestOnly: prm.LatestOnly, - AllAttrs: prm.AllAttrs, - BearerToken: getBearer(ctx), - } - - nodes, err := w.p.GetNodes(ctx, 
poolPrm) - if err != nil { - return nil, handleTreeError(err) - } - - res := make([]tree.NodeResponse, len(nodes)) - for i, info := range nodes { - res[i] = GetNodeByPathResponseInfoWrapper{info} - } - - return res, nil -} - -func getBearer(ctx context.Context) []byte { - token, err := tokens.LoadBearerToken(ctx) - if err != nil { - return nil - } - return token.Marshal() -} - -func handleTreeError(err error) error { - if err == nil { - return nil - } - if errors.Is(err, treepool.ErrNodeNotFound) { - return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error()) - } - if errors.Is(err, treepool.ErrNodeAccessDenied) { - return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error()) - } - - return err -} - -func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetSubTree") - defer span.End() - - order := treepool.NoneOrder - if sort { - order = treepool.AscendingOrder - } - poolPrm := treepool.GetSubTreeParams{ - CID: bktInfo.CID, - TreeID: treeID, - RootID: rootID, - Depth: depth, - BearerToken: getBearer(ctx), - Order: order, - } - if len(rootID) == 1 && rootID[0] == 0 { - // storage node interprets 'nil' value as []uint64{0} - // gate wants to send 'nil' value instead of []uint64{0}, because - // it provides compatibility with previous tree service api where - // single uint64(0) value is dropped from signature - poolPrm.RootID = nil - } - - subTreeReader, err := w.p.GetSubTree(ctx, poolPrm) - if err != nil { - return nil, handleTreeError(err) - } - - var subtree []tree.NodeResponse - - node, err := subTreeReader.Next() - for err == nil { - subtree = append(subtree, GetSubTreeResponseBodyWrapper{node}) - node, err = subTreeReader.Next() - } - if err != io.EOF { - return nil, handleTreeError(err) - } - - return subtree, nil -} - -type GetSubTreeResponseBodyWrapper struct { - response 
*apitree.GetSubTreeResponseBody -} - -func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 { - return n.response.GetNodeID() -} - -func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 { - resp := n.response.GetParentID() - if resp == nil { - // storage sends nil that should be interpreted as []uint64{0} - // due to protobuf compatibility, see 'GetSubTree' function - return []uint64{0} - } - return resp -} - -func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 { - return n.response.GetTimestamp() -} - -func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta { - res := make([]tree.Meta, len(n.response.GetMeta())) - for i, value := range n.response.GetMeta() { - res[i] = value - } - return res -} diff --git a/internal/templates/index.gotmpl b/internal/templates/index.gotmpl deleted file mode 100644 index 4c03404..0000000 --- a/internal/templates/index.gotmpl +++ /dev/null @@ -1,110 +0,0 @@ -{{$container := .Container}} - - - - - Index of {{.Protocol}}://{{$container}}/{{.Prefix}} - - - -

Index of {{.Protocol}}://{{$container}}/{{.Prefix}}

-{{ if .HasErrors }} -
- Errors occurred while processing the request. Perhaps some objects are missing -
-{{ end }} - - - - - - - - - - - - {{ $parentPrefix := getParent .Prefix }} - {{if $parentPrefix }} - - - - - - - - {{else}} - - - - - - - - {{end}} - {{range .Objects}} - - - - - - - - {{end}} - -
FilenameOIDSizeCreatedDownload
- ⮐.. -
- ⮐.. -
- {{if .IsDir}} - 🗀 - - {{.FileName}}/ - - {{else}} - 🗎 - - {{.FileName}} - - {{end}} - {{.OID}}{{if not .IsDir}}{{ formatSize .Size }}{{end}}{{ .Created }} - {{ if .OID }} - - Link - - {{ end }} -
- - diff --git a/internal/templates/template.go b/internal/templates/template.go deleted file mode 100644 index b9885e6..0000000 --- a/internal/templates/template.go +++ /dev/null @@ -1,6 +0,0 @@ -package templates - -import _ "embed" - -//go:embed index.gotmpl -var DefaultIndexTemplate string diff --git a/metrics/desc.go b/metrics/desc.go deleted file mode 100644 index a00ab3e..0000000 --- a/metrics/desc.go +++ /dev/null @@ -1,168 +0,0 @@ -package metrics - -import ( - "encoding/json" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" -) - -var appMetricsDesc = map[string]map[string]Description{ - poolSubsystem: { - overallErrorsMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: poolSubsystem, - Name: overallErrorsMetric, - Help: "Total number of errors in pool", - }, - overallNodeErrorsMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: poolSubsystem, - Name: overallNodeErrorsMetric, - Help: "Total number of errors for connection in pool", - VariableLabels: []string{"node"}, - }, - overallNodeRequestsMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: poolSubsystem, - Name: overallNodeRequestsMetric, - Help: "Total number of requests to specific node in pool", - VariableLabels: []string{"node"}, - }, - currentErrorMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: poolSubsystem, - Name: currentErrorMetric, - Help: "Number of errors on current connections that will be reset after the threshold", - VariableLabels: []string{"node"}, - }, - avgRequestDurationMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: poolSubsystem, - Name: avgRequestDurationMetric, - Help: "Average request duration (in milliseconds) for specific method on node in pool", - VariableLabels: []string{"node", "method"}, - }, - }, - stateSubsystem: { - healthMetric: 
Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: stateSubsystem, - Name: healthMetric, - Help: "Current HTTP gateway state", - }, - versionInfoMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: stateSubsystem, - Name: versionInfoMetric, - Help: "Version of current FrostFS HTTP Gate instance", - VariableLabels: []string{"version"}, - }, - }, - serverSubsystem: { - healthMetric: Description{ - Type: dto.MetricType_GAUGE, - Namespace: namespace, - Subsystem: serverSubsystem, - Name: healthMetric, - Help: "HTTP Server endpoint health", - VariableLabels: []string{"endpoint"}, - }, - }, - statisticSubsystem: { - droppedLogs: Description{ - Type: dto.MetricType_COUNTER, - Namespace: namespace, - Subsystem: statisticSubsystem, - Name: droppedLogs, - Help: "Dropped logs (by sampling) count", - }, - }, -} - -type Description struct { - Type dto.MetricType - Namespace string - Subsystem string - Name string - Help string - ConstantLabels prometheus.Labels - VariableLabels []string -} - -func (d *Description) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Type string `json:"type"` - FQName string `json:"name"` - Help string `json:"help"` - ConstantLabels prometheus.Labels `json:"constant_labels,omitempty"` - VariableLabels []string `json:"variable_labels,omitempty"` - }{ - Type: d.Type.String(), - FQName: d.BuildFQName(), - Help: d.Help, - ConstantLabels: d.ConstantLabels, - VariableLabels: d.VariableLabels, - }) -} - -func (d *Description) BuildFQName() string { - return prometheus.BuildFQName(d.Namespace, d.Subsystem, d.Name) -} - -// DescribeAll returns descriptions for metrics. 
-func DescribeAll() []Description { - var list []Description - for _, m := range appMetricsDesc { - for _, description := range m { - list = append(list, description) - } - } - - return list -} - -func newOpts(description Description) prometheus.Opts { - return prometheus.Opts{ - Namespace: description.Namespace, - Subsystem: description.Subsystem, - Name: description.Name, - Help: description.Help, - ConstLabels: description.ConstantLabels, - } -} - -func mustNewGauge(description Description) prometheus.Gauge { - if description.Type != dto.MetricType_GAUGE { - panic("invalid metric type") - } - return prometheus.NewGauge( - prometheus.GaugeOpts(newOpts(description)), - ) -} - -func mustNewGaugeVec(description Description) *prometheus.GaugeVec { - if description.Type != dto.MetricType_GAUGE { - panic("invalid metric type") - } - return prometheus.NewGaugeVec( - prometheus.GaugeOpts(newOpts(description)), - description.VariableLabels, - ) -} - -func mustNewCounter(description Description) prometheus.Counter { - if description.Type != dto.MetricType_COUNTER { - panic("invalid metric type") - } - return prometheus.NewCounter( - prometheus.CounterOpts(newOpts(description)), - ) -} diff --git a/metrics/desc_test.go b/metrics/desc_test.go deleted file mode 100644 index b3e98ae..0000000 --- a/metrics/desc_test.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build dump_metrics - -package metrics - -import ( - "encoding/json" - "flag" - "os" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - "github.com/stretchr/testify/require" -) - -type mock struct{} - -func (m mock) Statistic() pool.Statistic { - return pool.Statistic{} -} - -var metricsPath = flag.String("out", "", "File to export http gateway metrics to.") - -func TestDescribeAll(t *testing.T) { - // to check correct metrics type mapping - _ = NewGateMetrics(mock{}) - - flag.Parse() - - require.NotEmpty(t, metricsPath, "flag 'out' must be provided to dump metrics description") - - desc := DescribeAll() - 
data, err := json.Marshal(desc) - require.NoError(t, err) - - err = os.WriteFile(*metricsPath, data, 0644) - require.NoError(t, err) -} diff --git a/metrics/metrics.go b/metrics/metrics.go deleted file mode 100644 index 1c06868..0000000 --- a/metrics/metrics.go +++ /dev/null @@ -1,272 +0,0 @@ -package metrics - -import ( - "net/http" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.uber.org/zap" -) - -const ( - namespace = "frostfs_http_gw" - stateSubsystem = "state" - poolSubsystem = "pool" - serverSubsystem = "server" - statisticSubsystem = "statistic" -) - -const ( - healthMetric = "health" - versionInfoMetric = "version_info" - droppedLogs = "dropped_logs" -) - -const ( - overallErrorsMetric = "overall_errors" - overallNodeErrorsMetric = "overall_node_errors" - overallNodeRequestsMetric = "overall_node_requests" - currentErrorMetric = "current_errors" - avgRequestDurationMetric = "avg_request_duration" -) - -const ( - methodGetBalance = "get_balance" - methodPutContainer = "put_container" - methodGetContainer = "get_container" - methodListContainer = "list_container" - methodDeleteContainer = "delete_container" - methodEndpointInfo = "endpoint_info" - methodNetworkInfo = "network_info" - methodPutObject = "put_object" - methodDeleteObject = "delete_object" - methodGetObject = "get_object" - methodHeadObject = "head_object" - methodRangeObject = "range_object" - methodCreateSession = "create_session" -) - -// HealthStatus of the gate application. 
-type HealthStatus int32 - -const ( - HealthStatusUndefined HealthStatus = 0 - HealthStatusStarting HealthStatus = 1 - HealthStatusReady HealthStatus = 2 - HealthStatusShuttingDown HealthStatus = 3 -) - -type StatisticScraper interface { - Statistic() pool.Statistic -} - -type serverMetrics struct { - endpointHealth *prometheus.GaugeVec -} - -type GateMetrics struct { - stateMetrics - poolMetricsCollector - serverMetrics - statisticMetrics -} - -type stateMetrics struct { - healthCheck prometheus.Gauge - versionInfo *prometheus.GaugeVec -} - -type statisticMetrics struct { - droppedLogs prometheus.Counter -} - -type poolMetricsCollector struct { - scraper StatisticScraper - overallErrors prometheus.Gauge - overallNodeErrors *prometheus.GaugeVec - overallNodeRequests *prometheus.GaugeVec - currentErrors *prometheus.GaugeVec - requestDuration *prometheus.GaugeVec -} - -// NewGateMetrics creates new metrics for http gate. -func NewGateMetrics(p StatisticScraper) *GateMetrics { - stateMetric := newStateMetrics() - stateMetric.register() - - poolMetric := newPoolMetricsCollector(p) - poolMetric.register() - - serverMetric := newServerMetrics() - serverMetric.register() - - statsMetric := newStatisticMetrics() - statsMetric.register() - - return &GateMetrics{ - stateMetrics: *stateMetric, - poolMetricsCollector: *poolMetric, - serverMetrics: *serverMetric, - statisticMetrics: *statsMetric, - } -} - -func (g *GateMetrics) Unregister() { - g.stateMetrics.unregister() - prometheus.Unregister(&g.poolMetricsCollector) - g.serverMetrics.unregister() - g.statisticMetrics.unregister() -} - -func newStateMetrics() *stateMetrics { - return &stateMetrics{ - healthCheck: mustNewGauge(appMetricsDesc[stateSubsystem][healthMetric]), - versionInfo: mustNewGaugeVec(appMetricsDesc[stateSubsystem][versionInfoMetric]), - } -} - -func newStatisticMetrics() *statisticMetrics { - return &statisticMetrics{ - droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]), - } -} - 
-func (s *statisticMetrics) register() { - prometheus.MustRegister(s.droppedLogs) -} - -func (s *statisticMetrics) unregister() { - prometheus.Unregister(s.droppedLogs) -} - -func (m stateMetrics) register() { - prometheus.MustRegister(m.healthCheck) - prometheus.MustRegister(m.versionInfo) -} - -func (m stateMetrics) unregister() { - prometheus.Unregister(m.healthCheck) - prometheus.Unregister(m.versionInfo) -} - -func (m stateMetrics) SetHealth(s HealthStatus) { - m.healthCheck.Set(float64(s)) -} - -func (m stateMetrics) SetVersion(ver string) { - m.versionInfo.WithLabelValues(ver).Set(1) -} - -func (s *statisticMetrics) DroppedLogsInc() { - if s == nil { - return - } - s.droppedLogs.Inc() -} - -func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector { - return &poolMetricsCollector{ - scraper: p, - overallErrors: mustNewGauge(appMetricsDesc[poolSubsystem][overallErrorsMetric]), - overallNodeErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeErrorsMetric]), - overallNodeRequests: mustNewGaugeVec(appMetricsDesc[poolSubsystem][overallNodeRequestsMetric]), - currentErrors: mustNewGaugeVec(appMetricsDesc[poolSubsystem][currentErrorMetric]), - requestDuration: mustNewGaugeVec(appMetricsDesc[poolSubsystem][avgRequestDurationMetric]), - } -} - -func (m *poolMetricsCollector) Collect(ch chan<- prometheus.Metric) { - m.updateStatistic() - m.overallErrors.Collect(ch) - m.overallNodeErrors.Collect(ch) - m.overallNodeRequests.Collect(ch) - m.currentErrors.Collect(ch) - m.requestDuration.Collect(ch) -} - -func (m *poolMetricsCollector) Describe(descs chan<- *prometheus.Desc) { - m.overallErrors.Describe(descs) - m.overallNodeErrors.Describe(descs) - m.overallNodeRequests.Describe(descs) - m.currentErrors.Describe(descs) - m.requestDuration.Describe(descs) -} - -func (m *poolMetricsCollector) register() { - prometheus.MustRegister(m) -} - -func (m *poolMetricsCollector) updateStatistic() { - stat := m.scraper.Statistic() - - 
m.overallNodeErrors.Reset() - m.overallNodeRequests.Reset() - m.currentErrors.Reset() - m.requestDuration.Reset() - - for _, node := range stat.Nodes() { - m.overallNodeErrors.WithLabelValues(node.Address()).Set(float64(node.OverallErrors())) - m.overallNodeRequests.WithLabelValues(node.Address()).Set(float64(node.Requests())) - - m.currentErrors.WithLabelValues(node.Address()).Set(float64(node.CurrentErrors())) - m.updateRequestsDuration(node) - } - - m.overallErrors.Set(float64(stat.OverallErrors())) -} - -func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) { - m.requestDuration.WithLabelValues(node.Address(), methodGetBalance).Set(float64(node.AverageGetBalance().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodPutContainer).Set(float64(node.AveragePutContainer().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodDeleteObject).Set(float64(node.AverageDeleteObject().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodGetObject).Set(float64(node.AverageGetObject().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodHeadObject).Set(float64(node.AverageHeadObject().Milliseconds())) - 
m.requestDuration.WithLabelValues(node.Address(), methodRangeObject).Set(float64(node.AverageRangeObject().Milliseconds())) - m.requestDuration.WithLabelValues(node.Address(), methodCreateSession).Set(float64(node.AverageCreateSession().Milliseconds())) -} - -func newServerMetrics() *serverMetrics { - return &serverMetrics{ - endpointHealth: mustNewGaugeVec(appMetricsDesc[serverSubsystem][healthMetric]), - } -} - -func (m serverMetrics) register() { - prometheus.MustRegister(m.endpointHealth) -} - -func (m serverMetrics) unregister() { - prometheus.Unregister(m.endpointHealth) -} - -func (m serverMetrics) MarkHealthy(endpoint string) { - m.endpointHealth.WithLabelValues(endpoint).Set(float64(1)) -} - -func (m serverMetrics) MarkUnhealthy(endpoint string) { - m.endpointHealth.WithLabelValues(endpoint).Set(float64(0)) -} - -// NewPrometheusService creates a new service for gathering prometheus metrics. -func NewPrometheusService(log *zap.Logger, cfg Config) *Service { - if log == nil { - return nil - } - - return &Service{ - Server: &http.Server{ - Addr: cfg.Address, - Handler: promhttp.Handler(), - }, - enabled: cfg.Enabled, - serviceType: "Prometheus", - log: log.With(zap.String("service", "Prometheus")), - } -} diff --git a/metrics/pprof.go b/metrics/pprof.go deleted file mode 100644 index 4719a69..0000000 --- a/metrics/pprof.go +++ /dev/null @@ -1,33 +0,0 @@ -package metrics - -import ( - "net/http" - "net/http/pprof" - - "go.uber.org/zap" -) - -// NewPprofService creates a new service for gathering pprof metrics. 
-func NewPprofService(l *zap.Logger, cfg Config) *Service { - handler := http.NewServeMux() - handler.HandleFunc("/debug/pprof/", pprof.Index) - handler.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - handler.HandleFunc("/debug/pprof/profile", pprof.Profile) - handler.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - handler.HandleFunc("/debug/pprof/trace", pprof.Trace) - - // Manually add support for paths linked to by index page at /debug/pprof/ - for _, item := range []string{"allocs", "block", "heap", "goroutine", "mutex", "threadcreate"} { - handler.Handle("/debug/pprof/"+item, pprof.Handler(item)) - } - - return &Service{ - Server: &http.Server{ - Addr: cfg.Address, - Handler: handler, - }, - enabled: cfg.Enabled, - serviceType: "Pprof", - log: l.With(zap.String("service", "Pprof")), - } -} diff --git a/metrics/service.go b/metrics/service.go deleted file mode 100644 index e6b803b..0000000 --- a/metrics/service.go +++ /dev/null @@ -1,48 +0,0 @@ -package metrics - -import ( - "context" - "net/http" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "go.uber.org/zap" -) - -// Service serves metrics. -type Service struct { - *http.Server - enabled bool - log *zap.Logger - serviceType string -} - -// Config is a params to configure service. -type Config struct { - Address string - Enabled bool -} - -// Start runs http service with the exposed endpoint on the configured port. -func (ms *Service) Start() { - if ms.enabled { - ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp)) - err := ms.ListenAndServe() - if err != nil && err != http.ErrServerClosed { - ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp)) - } - } else { - ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp)) - } -} - -// ShutDown stops the service. 
-func (ms *Service) ShutDown(ctx context.Context) { - ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp)) - err := ms.Shutdown(ctx) - if err != nil { - ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp)) - if err = ms.Close(); err != nil { - ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp)) - } - } -} diff --git a/resolver/resolver.go b/resolver/resolver.go deleted file mode 100644 index 6d7c5d5..0000000 --- a/resolver/resolver.go +++ /dev/null @@ -1,206 +0,0 @@ -package resolver - -import ( - "context" - "errors" - "fmt" - "sync" - - v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns" -) - -const ( - NNSResolver = "nns" - DNSResolver = "dns" -) - -// ErrNoResolvers returns when trying to resolve container without any resolver. -var ErrNoResolvers = errors.New("no resolvers") - -// FrostFS represents virtual connection to the FrostFS network. -type FrostFS interface { - // SystemDNS reads system DNS network parameters of the FrostFS. - // - // Returns exactly on non-zero value. Returns any error encountered - // which prevented the parameter to be read. 
- SystemDNS(context.Context) (string, error) -} - -type Config struct { - FrostFS FrostFS - RPCAddress string -} - -type ContainerResolver struct { - mu sync.RWMutex - resolvers []*Resolver -} - -type Resolver struct { - Name string - resolve func(context.Context, string, string) (*cid.ID, error) -} - -func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (*cid.ID, error)) { - r.resolve = fn -} - -func (r *Resolver) Resolve(ctx context.Context, zone, name string) (*cid.ID, error) { - return r.resolve(ctx, zone, name) -} - -func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolver, error) { - resolvers, err := createResolvers(resolverNames, cfg) - if err != nil { - return nil, err - } - - return &ContainerResolver{ - resolvers: resolvers, - }, nil -} - -func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) { - resolvers := make([]*Resolver, len(resolverNames)) - for i, name := range resolverNames { - cnrResolver, err := newResolver(name, cfg) - if err != nil { - return nil, err - } - resolvers[i] = cnrResolver - } - - return resolvers, nil -} - -func (r *ContainerResolver) Resolve(ctx context.Context, cnrZone, cnrName string) (*cid.ID, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - var err error - for _, resolver := range r.resolvers { - cnrID, resolverErr := resolver.Resolve(ctx, cnrZone, cnrName) - if resolverErr != nil { - resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr) - if err == nil { - err = resolverErr - } else { - err = fmt.Errorf("%s: %w", err.Error(), resolverErr) - } - continue - } - return cnrID, nil - } - - if err != nil { - return nil, err - } - - return nil, ErrNoResolvers -} - -func (r *ContainerResolver) UpdateResolvers(resolverNames []string, cfg *Config) error { - r.mu.Lock() - defer r.mu.Unlock() - - if r.equals(resolverNames) { - return nil - } - - resolvers, err := createResolvers(resolverNames, cfg) - if err != nil { - return err - } - - r.resolvers = 
resolvers - - return nil -} - -func (r *ContainerResolver) equals(resolverNames []string) bool { - if len(r.resolvers) != len(resolverNames) { - return false - } - - for i := 0; i < len(resolverNames); i++ { - if r.resolvers[i].Name != resolverNames[i] { - return false - } - } - return true -} - -func newResolver(name string, cfg *Config) (*Resolver, error) { - switch name { - case DNSResolver: - return NewDNSResolver(cfg.FrostFS) - case NNSResolver: - return NewNNSResolver(cfg.RPCAddress) - default: - return nil, fmt.Errorf("unknown resolver: %s", name) - } -} - -func NewDNSResolver(frostFS FrostFS) (*Resolver, error) { - if frostFS == nil { - return nil, fmt.Errorf("pool must not be nil for DNS resolver") - } - - var dns ns.DNS - - resolveFunc := func(ctx context.Context, zone, name string) (*cid.ID, error) { - var err error - - if zone == v2container.SysAttributeZoneDefault { - zone, err = frostFS.SystemDNS(ctx) - if err != nil { - return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err) - } - } - - domain := name + "." 
+ zone - cnrID, err := dns.ResolveContainerName(domain) - - if err != nil { - return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err) - } - return &cnrID, nil - } - - return &Resolver{ - Name: DNSResolver, - resolve: resolveFunc, - }, nil -} - -func NewNNSResolver(rpcAddress string) (*Resolver, error) { - if rpcAddress == "" { - return nil, fmt.Errorf("rpc address must not be empty for NNS resolver") - } - - var nns ns.NNS - - if err := nns.Dial(rpcAddress); err != nil { - return nil, fmt.Errorf("could not dial nns: %w", err) - } - - resolveFunc := func(_ context.Context, zone, name string) (*cid.ID, error) { - var d container.Domain - d.SetName(name) - d.SetZone(zone) - - cnrID, err := nns.ResolveContainerDomain(d) - if err != nil { - return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err) - } - return &cnrID, nil - } - - return &Resolver{ - Name: NNSResolver, - resolve: resolveFunc, - }, nil -} diff --git a/tokens/bearer-token.go b/tokens/bearer-token.go deleted file mode 100644 index 24ffcbe..0000000 --- a/tokens/bearer-token.go +++ /dev/null @@ -1,107 +0,0 @@ -package tokens - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "github.com/valyala/fasthttp" -) - -type fromHandler = func(h *fasthttp.RequestHeader) []byte - -type ctxKey string - -const ( - bearerTokenHdr = "Bearer" - bearerTokenKey ctxKey = "__context_bearer_token_key" -) - -// BearerToken usage: -// -// if err = storeBearerToken(ctx); err != nil { -// log.Error("could not fetch bearer token", zap.Error(err)) -// c.Error("could not fetch bearer token", fasthttp.StatusBadRequest) -// return -// } - -// BearerTokenFromHeader extracts a bearer token from Authorization request header. 
-func BearerTokenFromHeader(h *fasthttp.RequestHeader) []byte { - auth := h.Peek(fasthttp.HeaderAuthorization) - if auth == nil || !bytes.HasPrefix(auth, []byte(bearerTokenHdr)) { - return nil - } - if auth = bytes.TrimPrefix(auth, []byte(bearerTokenHdr+" ")); len(auth) == 0 { - return nil - } - return auth -} - -// BearerTokenFromCookie extracts a bearer token from cookies. -func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte { - auth := h.Cookie(bearerTokenHdr) - if len(auth) == 0 { - return nil - } - - return auth -} - -// StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores -// it in the application context. -func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) { - tkn, err := fetchBearerToken(c) - if err != nil { - return nil, err - } - newCtx := context.WithValue(ctx, bearerTokenKey, tkn) - return newCtx, nil -} - -// LoadBearerToken returns a bearer token stored in the context given (if it's -// present there). 
-func LoadBearerToken(ctx context.Context) (*bearer.Token, error) { - if tkn, ok := ctx.Value(bearerTokenKey).(*bearer.Token); ok && tkn != nil { - return tkn, nil - } - return nil, errors.New("found empty bearer token") -} - -func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) { - // ignore empty value - if ctx == nil { - return nil, nil - } - var ( - lastErr error - - buf []byte - tkn = new(bearer.Token) - ) - for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} { - buf = parse(&ctx.Request.Header) - if buf == nil { - continue - } - - data, err := base64.StdEncoding.DecodeString(string(buf)) - if err != nil { - lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err) - continue - } - - if err = tkn.Unmarshal(data); err != nil { - if err = tkn.UnmarshalJSON(data); err != nil { - lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err) - continue - } - } - - return tkn, nil - } - - return nil, lastErr -} diff --git a/tokens/bearer-token_test.go b/tokens/bearer-token_test.go deleted file mode 100644 index 60e9ea2..0000000 --- a/tokens/bearer-token_test.go +++ /dev/null @@ -1,335 +0,0 @@ -//go:build !integration - -package tokens - -import ( - "context" - "encoding/base64" - "testing" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" - "github.com/nspcc-dev/neo-go/pkg/crypto/keys" - "github.com/stretchr/testify/require" - "github.com/valyala/fasthttp" -) - -func makeTestCookie(value []byte) *fasthttp.RequestHeader { - header := new(fasthttp.RequestHeader) - header.SetCookie(bearerTokenHdr, string(value)) - return header -} - -func makeTestHeader(value []byte) *fasthttp.RequestHeader { - header := new(fasthttp.RequestHeader) - if value != nil { - header.Set(fasthttp.HeaderAuthorization, string(value)) - } - return header -} - -func makeBearer(value string) string { - return bearerTokenHdr + " " + value -} - -func TestBearerTokenFromCookie(t 
*testing.T) { - cases := []struct { - name string - actual []byte - expect []byte - }{ - { - name: "empty", - }, - { - name: "normal", - actual: []byte("TOKEN"), - expect: []byte("TOKEN"), - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expect, BearerTokenFromCookie(makeTestCookie(tt.actual))) - }) - } -} - -func TestBearerTokenFromHeader(t *testing.T) { - validToken := "token" - tokenWithoutPrefix := "invalid-token" - - cases := []struct { - name string - actual []byte - expect []byte - }{ - { - name: "empty", - }, - { - name: "token without the bearer prefix", - actual: []byte(tokenWithoutPrefix), - }, - { - name: "token without payload", - actual: []byte(makeBearer("")), - }, - { - name: "normal", - actual: []byte(makeBearer(validToken)), - expect: []byte(validToken), - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expect, BearerTokenFromHeader(makeTestHeader(tt.actual))) - }) - } -} - -func TestFetchBearerToken(t *testing.T) { - key, err := keys.NewPrivateKey() - require.NoError(t, err) - var uid user.ID - user.IDFromKey(&uid, key.PrivateKey.PublicKey) - - tkn := new(bearer.Token) - tkn.ForUser(uid) - - jsonToken, err := tkn.MarshalJSON() - require.NoError(t, err) - - jsonTokenBase64 := base64.StdEncoding.EncodeToString(jsonToken) - binaryTokenBase64 := base64.StdEncoding.EncodeToString(tkn.Marshal()) - - require.NotEmpty(t, jsonTokenBase64) - require.NotEmpty(t, binaryTokenBase64) - - cases := []struct { - name string - cookie string - header string - error string - nilCtx bool - expect *bearer.Token - }{ - { - name: "empty", - }, - { - name: "nil context", - nilCtx: true, - }, - { - name: "bad base64 header", - header: "WRONG BASE64", - error: "can't base64-decode bearer token", - }, - { - name: "bad base64 cookie", - cookie: "WRONG BASE64", - error: "can't base64-decode bearer token", - }, - { - name: "header token unmarshal error", - header: "dGVzdAo=", - 
error: "can't unmarshal bearer token", - }, - { - name: "cookie token unmarshal error", - cookie: "dGVzdAo=", - error: "can't unmarshal bearer token", - }, - { - name: "bad header and cookie", - header: "WRONG BASE64", - cookie: "dGVzdAo=", - error: "can't unmarshal bearer token", - }, - { - name: "bad header, but good cookie with binary token", - header: "dGVzdAo=", - cookie: binaryTokenBase64, - expect: tkn, - }, - { - name: "bad cookie, but good header with binary token", - header: binaryTokenBase64, - cookie: "dGVzdAo=", - expect: tkn, - }, - { - name: "bad header, but good cookie with json token", - header: "dGVzdAo=", - cookie: jsonTokenBase64, - expect: tkn, - }, - { - name: "bad cookie, but good header with json token", - header: jsonTokenBase64, - cookie: "dGVzdAo=", - expect: tkn, - }, - { - name: "ok for header with binary token", - header: binaryTokenBase64, - expect: tkn, - }, - { - name: "ok for cookie with binary token", - cookie: binaryTokenBase64, - expect: tkn, - }, - { - name: "ok for header with json token", - header: jsonTokenBase64, - expect: tkn, - }, - { - name: "ok for cookie with json token", - cookie: jsonTokenBase64, - expect: tkn, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - var ctx *fasthttp.RequestCtx - if !tt.nilCtx { - ctx = makeTestRequest(tt.cookie, tt.header) - } - - actual, err := fetchBearerToken(ctx) - - if tt.error == "" { - require.NoError(t, err) - require.Equal(t, tt.expect, actual) - - return - } - - require.Contains(t, err.Error(), tt.error) - }) - } -} - -func makeTestRequest(cookie, header string) *fasthttp.RequestCtx { - ctx := new(fasthttp.RequestCtx) - - if cookie != "" { - ctx.Request.Header.SetCookie(bearerTokenHdr, cookie) - } - - if header != "" { - ctx.Request.Header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+header) - } - return ctx -} - -func TestCheckAndPropagateBearerToken(t *testing.T) { - key, err := keys.NewPrivateKey() - require.NoError(t, err) - var uid 
user.ID - user.IDFromKey(&uid, key.PrivateKey.PublicKey) - - tkn := new(bearer.Token) - tkn.ForUser(uid) - - t64 := base64.StdEncoding.EncodeToString(tkn.Marshal()) - require.NotEmpty(t, t64) - - req := makeTestRequest(t64, "") - - // Expect to see the token within the context. - appCtx, err := StoreBearerTokenAppCtx(context.Background(), req) - require.NoError(t, err) - - // Expect to see the same token without errors. - actual, err := LoadBearerToken(appCtx) - require.NoError(t, err) - require.Equal(t, tkn, actual) -} - -func TestLoadBearerToken(t *testing.T) { - ctx := context.Background() - token := new(bearer.Token) - - cases := []struct { - name string - appCtx context.Context - error string - }{ - { - name: "token is missing in the context", - appCtx: ctx, - error: "found empty bearer token", - }, - { - name: "normal", - appCtx: context.WithValue(ctx, bearerTokenKey, token), - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - tkn, err := LoadBearerToken(tt.appCtx) - - if tt.error == "" { - require.NoError(t, err) - require.Equal(t, token, tkn) - - return - } - - require.Contains(t, err.Error(), tt.error) - }) - } -} - -func TestStoreBearerTokenAppCtx(t *testing.T) { - key, err := keys.NewPrivateKey() - require.NoError(t, err) - var uid user.ID - user.IDFromKey(&uid, key.PrivateKey.PublicKey) - - tkn := new(bearer.Token) - tkn.ForUser(uid) - - t64 := base64.StdEncoding.EncodeToString(tkn.Marshal()) - require.NotEmpty(t, t64) - - cases := []struct { - name string - req *fasthttp.RequestCtx - error string - }{ - { - name: "invalid token", - req: makeTestRequest("dGVzdAo=", ""), - error: "can't unmarshal bearer token", - }, - { - name: "normal", - req: makeTestRequest(t64, ""), - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - ctx, err := StoreBearerTokenAppCtx(context.Background(), tt.req) - - if tt.error == "" { - require.NoError(t, err) - actualToken, ok := ctx.Value(bearerTokenKey).(*bearer.Token) - 
require.True(t, ok) - require.Equal(t, tkn, actualToken) - - return - } - - require.Contains(t, err.Error(), tt.error) - }) - } -} diff --git a/tree/tree.go b/tree/tree.go deleted file mode 100644 index d99e24b..0000000 --- a/tree/tree.go +++ /dev/null @@ -1,465 +0,0 @@ -package tree - -import ( - "context" - "errors" - "fmt" - "strings" - - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs" - "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils" - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" - oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" - "go.uber.org/zap" -) - -type ( - Tree struct { - service ServiceClient - log *zap.Logger - } - - // ServiceClient is a client to interact with tree service. - // Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant. - ServiceClient interface { - GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error) - GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error) - } - - treeNode struct { - ObjID oid.ID - Meta map[string]string - } - - multiSystemNode struct { - // the first element is latest - nodes []*treeNode - } - - GetNodesParams struct { - CnrID cid.ID - BktInfo *data.BucketInfo - TreeID string - Path []string - Meta []string - LatestOnly bool - AllAttrs bool - } -) - -var ( - // ErrNodeNotFound is returned from ServiceClient in case of not found error. - ErrNodeNotFound = errors.New("not found") - - // ErrNodeAccessDenied is returned from ServiceClient service in case of access denied error. - ErrNodeAccessDenied = errors.New("access denied") -) - -const ( - FileNameKey = "FileName" - settingsFileName = "bucket-settings" - - oidKV = "OID" - uploadIDKV = "UploadId" - sizeKV = "Size" - - // keys for delete marker nodes. 
- isDeleteMarkerKV = "IsDeleteMarker" - - // versionTree -- ID of a tree with object versions. - versionTree = "version" - systemTree = "system" - - separator = "/" -) - -// NewTree creates instance of Tree using provided address and create grpc connection. -func NewTree(service ServiceClient, log *zap.Logger) *Tree { - return &Tree{service: service, log: log} -} - -type Meta interface { - GetKey() string - GetValue() []byte -} - -type NodeResponse interface { - GetMeta() []Meta - GetTimestamp() []uint64 - GetNodeID() []uint64 - GetParentID() []uint64 -} - -func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) { - tNode := &treeNode{ - Meta: make(map[string]string, len(nodeInfo.GetMeta())), - } - - for _, kv := range nodeInfo.GetMeta() { - switch kv.GetKey() { - case oidKV: - if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil { - return nil, err - } - default: - tNode.Meta[kv.GetKey()] = string(kv.GetValue()) - } - } - - return tNode, nil -} - -func (n *treeNode) Get(key string) (string, bool) { - value, ok := n.Meta[key] - return value, ok -} - -func (n *treeNode) FileName() (string, bool) { - value, ok := n.Meta[FileNameKey] - return value, ok -} - -func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) { - tNode, err := newTreeNode(node) - if err != nil { - return nil, fmt.Errorf("invalid tree node: %w", err) - } - - return newNodeVersionFromTreeNode(tNode), nil -} - -func newNodeVersionFromTreeNode(treeNode *treeNode) *data.NodeVersion { - _, isDeleteMarker := treeNode.Get(isDeleteMarkerKV) - version := &data.NodeVersion{ - BaseNodeVersion: data.BaseNodeVersion{ - OID: treeNode.ObjID, - IsDeleteMarker: isDeleteMarker, - }, - } - - return version -} - -func newNodeInfo(node NodeResponse) data.NodeInfo { - nodeMeta := node.GetMeta() - nodeInfo := data.NodeInfo{ - Meta: make([]data.NodeMeta, 0, len(nodeMeta)), - } - for _, meta := range nodeMeta { - nodeInfo.Meta = append(nodeInfo.Meta, meta) - } - - return nodeInfo -} - -func 
newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) { - var ( - err error - index int - maxTimestamp uint64 - ) - - if len(nodes) == 0 { - return nil, errors.New("multi node must have at least one node") - } - - treeNodes := make([]*treeNode, len(nodes)) - - for i, node := range nodes { - if treeNodes[i], err = newTreeNode(node); err != nil { - return nil, fmt.Errorf("parse system node response: %w", err) - } - - if timestamp := getMaxTimestamp(node); timestamp > maxTimestamp { - index = i - maxTimestamp = timestamp - } - } - - treeNodes[0], treeNodes[index] = treeNodes[index], treeNodes[0] - - return &multiSystemNode{ - nodes: treeNodes, - }, nil -} - -func (m *multiSystemNode) Latest() *treeNode { - return m.nodes[0] -} - -func (m *multiSystemNode) Old() []*treeNode { - return m.nodes[1:] -} - -func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetLatestVersion") - defer span.End() - - nodes, err := c.GetVersions(ctx, cnrID, objectName) - if err != nil { - return nil, err - } - - latestNode, err := getLatestVersionNode(nodes) - if err != nil { - return nil, err - } - - return newNodeVersion(latestNode) -} - -func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetVersions") - defer span.End() - - meta := []string{oidKV, isDeleteMarkerKV, sizeKV} - path := pathFromName(objectName) - - p := &GetNodesParams{ - CnrID: *cnrID, - TreeID: versionTree, - Path: path, - Meta: meta, - LatestOnly: false, - AllAttrs: false, - } - - return c.service.GetNodes(ctx, p) -} - -func (c *Tree) CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error { - ctx, span := tracing.StartSpanFromContext(ctx, "tree.CheckSettingsNodeExists") - defer span.End() - - _, err := c.getSystemNode(ctx, bktInfo, settingsFileName) - if err != nil { - 
return err - } - - return nil -} - -func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name string) (*multiSystemNode, error) { - p := &GetNodesParams{ - CnrID: bktInfo.CID, - BktInfo: bktInfo, - TreeID: systemTree, - Path: []string{name}, - LatestOnly: false, - AllAttrs: true, - } - nodes, err := c.service.GetNodes(ctx, p) - if err != nil { - return nil, err - } - - nodes = filterMultipartNodes(nodes) - - if len(nodes) == 0 { - return nil, ErrNodeNotFound - } - if len(nodes) != 1 { - c.reqLogger(ctx).Warn(logs.FoundSeveralSystemTreeNodes, zap.String("name", name), logs.TagField(logs.TagExternalStorageTree)) - } - - return newMultiNode(nodes) -} - -func filterMultipartNodes(nodes []NodeResponse) []NodeResponse { - res := make([]NodeResponse, 0, len(nodes)) - -LOOP: - for _, node := range nodes { - for _, meta := range node.GetMeta() { - if meta.GetKey() == uploadIDKV { - continue LOOP - } - } - - res = append(res, node) - } - - return res -} - -func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) { - var ( - maxCreationTime uint64 - targetIndexNode = -1 - ) - - for i, node := range nodes { - if !checkExistOID(node.GetMeta()) { - continue - } - - if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime { - targetIndexNode = i - maxCreationTime = currentCreationTime - } - } - - if targetIndexNode == -1 { - return nil, fmt.Errorf("latest version: %w", ErrNodeNotFound) - } - - return nodes[targetIndexNode], nil -} - -func checkExistOID(meta []Meta) bool { - for _, kv := range meta { - if kv.GetKey() == "OID" { - return true - } - } - - return false -} - -// pathFromName splits name by '/'. 
-func pathFromName(objectName string) []string { - return strings.Split(objectName, separator) -} - -func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, error) { - ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix") - defer span.End() - - rootID, err := c.getPrefixNodeID(ctx, bktInfo, versionTree, strings.Split(prefix, separator)) - if err != nil { - if errors.Is(err, ErrNodeNotFound) { - return nil, nil - } - return nil, err - } - subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false) - if err != nil { - if errors.Is(err, ErrNodeNotFound) { - return nil, nil - } - return nil, err - } - - nodesMap := make(map[string][]NodeResponse, len(subTree)) - for _, node := range subTree { - if MultiID(rootID).Equal(node.GetNodeID()) { - continue - } - - fileName := GetFilename(node) - nodes := nodesMap[fileName] - - // Add all nodes if flag latestOnly is false. - // Add all intermediate nodes - // and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0] - if len(nodes) == 0 { - nodes = []NodeResponse{node} - } else if !latestOnly || isIntermediate(node) { - nodes = append(nodes, node) - } else if isIntermediate(nodes[0]) { - nodes = append([]NodeResponse{node}, nodes...) - } else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) { - nodes[0] = node - } - - nodesMap[fileName] = nodes - } - - result := make([]data.NodeInfo, 0, len(subTree)) - for _, nodes := range nodesMap { - result = append(result, nodeResponseToNodeInfo(nodes)...) 
- } - - return result, nil -} - -func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo { - nodesInfo := make([]data.NodeInfo, 0, len(nodes)) - for _, node := range nodes { - nodesInfo = append(nodesInfo, newNodeInfo(node)) - } - - return nodesInfo -} - -func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) { - p := &GetNodesParams{ - CnrID: bktInfo.CID, - BktInfo: bktInfo, - TreeID: treeID, - Path: prefixPath, - LatestOnly: false, - AllAttrs: true, - } - nodes, err := c.service.GetNodes(ctx, p) - if err != nil { - return nil, err - } - - var intermediateNodes []uint64 - for _, node := range nodes { - if isIntermediate(node) { - intermediateNodes = append(intermediateNodes, node.GetNodeID()...) - } - } - - if len(intermediateNodes) == 0 { - return nil, ErrNodeNotFound - } - - return intermediateNodes, nil -} - -func (c *Tree) reqLogger(ctx context.Context) *zap.Logger { - return utils.GetReqLogOrDefault(ctx, c.log) -} - -func GetFilename(node NodeResponse) string { - for _, kv := range node.GetMeta() { - if kv.GetKey() == FileNameKey { - return string(kv.GetValue()) - } - } - - return "" -} - -func isIntermediate(node NodeResponse) bool { - if len(node.GetMeta()) != 1 { - return false - } - - return node.GetMeta()[0].GetKey() == FileNameKey -} - -func getMaxTimestamp(node NodeResponse) uint64 { - var maxTimestamp uint64 - - for _, timestamp := range node.GetTimestamp() { - if timestamp > maxTimestamp { - maxTimestamp = timestamp - } - } - - return maxTimestamp -} - -type MultiID []uint64 - -func (m MultiID) Equal(id MultiID) bool { - seen := make(map[uint64]struct{}, len(m)) - - for i := range m { - seen[m[i]] = struct{}{} - } - - for i := range id { - if _, ok := seen[id[i]]; !ok { - return false - } - } - - return true -} diff --git a/tree/tree_test.go b/tree/tree_test.go deleted file mode 100644 index 62f9914..0000000 --- a/tree/tree_test.go +++ /dev/null @@ -1,150 +0,0 @@ 
-package tree - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -type nodeMeta struct { - key string - value []byte -} - -func (m nodeMeta) GetKey() string { - return m.key -} - -func (m nodeMeta) GetValue() []byte { - return m.value -} - -type nodeResponse struct { - meta []nodeMeta - timestamp []uint64 -} - -func (n nodeResponse) GetTimestamp() []uint64 { - return n.timestamp -} - -func (n nodeResponse) GetMeta() []Meta { - res := make([]Meta, len(n.meta)) - for i, value := range n.meta { - res[i] = value - } - return res -} - -func (n nodeResponse) GetNodeID() []uint64 { - return nil -} -func (n nodeResponse) GetParentID() []uint64 { - return nil -} - -func TestGetLatestNode(t *testing.T) { - for _, tc := range []struct { - name string - nodes []NodeResponse - exceptedOID string - error bool - }{ - { - name: "empty", - nodes: []NodeResponse{}, - error: true, - }, - { - name: "one node of the object version", - nodes: []NodeResponse{ - nodeResponse{ - timestamp: []uint64{1}, - meta: []nodeMeta{ - { - key: oidKV, - value: []byte("oid1"), - }, - }, - }, - }, - exceptedOID: "oid1", - }, - { - name: "one node of the object version and one node of the secondary object", - nodes: []NodeResponse{ - nodeResponse{ - timestamp: []uint64{3}, - meta: []nodeMeta{}, - }, - nodeResponse{ - timestamp: []uint64{1}, - meta: []nodeMeta{ - { - key: oidKV, - value: []byte("oid1"), - }, - }, - }, - }, - exceptedOID: "oid1", - }, - { - name: "all nodes represent a secondary object", - nodes: []NodeResponse{ - nodeResponse{ - timestamp: []uint64{3}, - meta: []nodeMeta{}, - }, - nodeResponse{ - timestamp: []uint64{5}, - meta: []nodeMeta{}, - }, - }, - error: true, - }, - { - name: "several nodes of different types and with different timestamp", - nodes: []NodeResponse{ - nodeResponse{ - timestamp: []uint64{1}, - meta: []nodeMeta{ - { - key: oidKV, - value: []byte("oid1"), - }, - }, - }, - nodeResponse{ - timestamp: []uint64{3}, - meta: []nodeMeta{}, - }, - 
nodeResponse{ - timestamp: []uint64{4}, - meta: []nodeMeta{ - { - key: oidKV, - value: []byte("oid2"), - }, - }, - }, - nodeResponse{ - timestamp: []uint64{6}, - meta: []nodeMeta{}, - }, - }, - exceptedOID: "oid2", - }, - } { - t.Run(tc.name, func(t *testing.T) { - actualNode, err := getLatestVersionNode(tc.nodes) - if tc.error { - require.Error(t, err) - return - } - - require.NoError(t, err) - require.Equal(t, tc.exceptedOID, string(actualNode.GetMeta()[0].GetValue())) - }) - } -} diff --git a/utils/attributes.go b/utils/attributes.go deleted file mode 100644 index 55fadaa..0000000 --- a/utils/attributes.go +++ /dev/null @@ -1,269 +0,0 @@ -package utils - -import ( - "bytes" - "context" - "errors" - "fmt" - "math" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - - "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" -) - -type EpochDurations struct { - CurrentEpoch uint64 - MsPerBlock int64 - BlockPerEpoch uint64 -} - -type EpochInfoFetcher interface { - GetEpochDurations(context.Context) (*EpochDurations, error) -} - -const ( - UserAttributeHeaderPrefix = "X-Attribute-" -) - -const ( - systemAttributePrefix = "__SYSTEM__" - - // deprecated: use systemAttributePrefix - systemAttributePrefixNeoFS = "__NEOFS__" -) - -type systemTransformer struct { - prefix string - backwardPrefix string - xAttrPrefixes [][]byte -} - -var transformers = []systemTransformer{ - { - prefix: systemAttributePrefix, - backwardPrefix: "System-", - xAttrPrefixes: [][]byte{[]byte("System-"), []byte("SYSTEM-"), []byte("system-")}, - }, - { - prefix: systemAttributePrefixNeoFS, - backwardPrefix: "Neofs-", - xAttrPrefixes: [][]byte{[]byte("Neofs-"), []byte("NEOFS-"), []byte("neofs-")}, - }, -} - -func (t systemTransformer) existsExpirationAttributes(headers map[string]string) bool { - _, ok0 := headers[t.expirationEpochAttr()] - _, ok1 := headers[t.expirationDurationAttr()] - _, ok2 := headers[t.expirationTimestampAttr()] - _, ok3 := headers[t.expirationRFC3339Attr()] - return 
ok0 || ok1 || ok2 || ok3 -} - -func (t systemTransformer) expirationEpochAttr() string { - return t.prefix + "EXPIRATION_EPOCH" -} - -func (t systemTransformer) expirationDurationAttr() string { - return t.prefix + "EXPIRATION_DURATION" -} - -func (t systemTransformer) expirationTimestampAttr() string { - return t.prefix + "EXPIRATION_TIMESTAMP" -} - -func (t systemTransformer) expirationRFC3339Attr() string { - return t.prefix + "EXPIRATION_RFC3339" -} - -func (t systemTransformer) systemTranslator(key, prefix []byte) []byte { - // replace the specified prefix with system prefix - key = bytes.Replace(key, prefix, []byte(t.prefix), 1) - - // replace `-` with `_` - key = bytes.ReplaceAll(key, []byte("-"), []byte("_")) - - // replace with uppercase - return bytes.ToUpper(key) -} - -func (t systemTransformer) transformIfSystem(key []byte) ([]byte, bool) { - // checks that it's a system FrostFS header - for _, system := range t.xAttrPrefixes { - if bytes.HasPrefix(key, system) { - return t.systemTranslator(key, system), true - } - } - - return key, false -} - -// systemBackwardTranslator is used to convert headers looking like '__PREFIX__ATTR_NAME' to 'Prefix-Attr-Name'. 
-func (t systemTransformer) systemBackwardTranslator(key string) string { - // trim specified prefix '__PREFIX__' - key = strings.TrimPrefix(key, t.prefix) - - var res strings.Builder - res.WriteString(t.backwardPrefix) - - strs := strings.Split(key, "_") - for i, s := range strs { - s = title(strings.ToLower(s)) - res.WriteString(s) - if i != len(strs)-1 { - res.WriteString("-") - } - } - - return res.String() -} - -func (t systemTransformer) backwardTransformIfSystem(key string) (string, bool) { - if strings.HasPrefix(key, t.prefix) { - return t.systemBackwardTranslator(key), true - } - - return key, false -} - -func TransformIfSystem(key []byte) []byte { - for _, transformer := range transformers { - key, transformed := transformer.transformIfSystem(key) - if transformed { - return key - } - } - - return key -} - -func BackwardTransformIfSystem(key string) string { - for _, transformer := range transformers { - key, transformed := transformer.backwardTransformIfSystem(key) - if transformed { - return key - } - } - - return key -} - -func title(str string) string { - if str == "" { - return "" - } - - r, size := utf8.DecodeRuneInString(str) - r0 := unicode.ToTitle(r) - return string(r0) + str[size:] -} - -func PrepareExpirationHeader(ctx context.Context, epochFetcher EpochInfoFetcher, headers map[string]string, now time.Time) error { - formatsNum := 0 - index := -1 - for i, transformer := range transformers { - if transformer.existsExpirationAttributes(headers) { - formatsNum++ - index = i - } - } - - switch formatsNum { - case 0: - return nil - case 1: - epochDuration, err := epochFetcher.GetEpochDurations(ctx) - if err != nil { - return fmt.Errorf("couldn't get epoch durations from network info: %w", err) - } - return transformers[index].prepareExpirationHeader(headers, epochDuration, now) - default: - return errors.New("both deprecated and new system attributes formats are used, please use only one") - } -} - -func (t systemTransformer) 
prepareExpirationHeader(headers map[string]string, epochDurations *EpochDurations, now time.Time) error { - expirationInEpoch := headers[t.expirationEpochAttr()] - - if timeRFC3339, ok := headers[t.expirationRFC3339Attr()]; ok { - expTime, err := time.Parse(time.RFC3339, timeRFC3339) - if err != nil { - return fmt.Errorf("couldn't parse value %s of header %s", timeRFC3339, t.expirationRFC3339Attr()) - } - - if expTime.Before(now) { - return fmt.Errorf("value %s of header %s must be in the future", timeRFC3339, t.expirationRFC3339Attr()) - } - t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now)) - delete(headers, t.expirationRFC3339Attr()) - } - - if timestamp, ok := headers[t.expirationTimestampAttr()]; ok { - value, err := strconv.ParseInt(timestamp, 10, 64) - if err != nil { - return fmt.Errorf("couldn't parse value %s of header %s", timestamp, t.expirationTimestampAttr()) - } - expTime := time.Unix(value, 0) - - if expTime.Before(now) { - return fmt.Errorf("value %s of header %s must be in the future", timestamp, t.expirationTimestampAttr()) - } - t.updateExpirationHeader(headers, epochDurations, expTime.Sub(now)) - delete(headers, t.expirationTimestampAttr()) - } - - if duration, ok := headers[t.expirationDurationAttr()]; ok { - expDuration, err := time.ParseDuration(duration) - if err != nil { - return fmt.Errorf("couldn't parse value %s of header %s", duration, t.expirationDurationAttr()) - } - if expDuration <= 0 { - return fmt.Errorf("value %s of header %s must be positive", expDuration, t.expirationDurationAttr()) - } - t.updateExpirationHeader(headers, epochDurations, expDuration) - delete(headers, t.expirationDurationAttr()) - } - - if expirationInEpoch != "" { - expEpoch, err := strconv.ParseUint(expirationInEpoch, 10, 64) - if err != nil { - return fmt.Errorf("parse expiration epoch '%s': %w", expirationInEpoch, err) - } - if expEpoch < epochDurations.CurrentEpoch { - return fmt.Errorf("expiration epoch '%d' must be greater than current 
epoch '%d'", expEpoch, epochDurations.CurrentEpoch) - } - - headers[t.expirationEpochAttr()] = expirationInEpoch - } - - return nil -} - -func (t systemTransformer) updateExpirationHeader(headers map[string]string, durations *EpochDurations, expDuration time.Duration) { - epochDuration := uint64(durations.MsPerBlock) * durations.BlockPerEpoch - currentEpoch := durations.CurrentEpoch - numEpoch := uint64(expDuration.Milliseconds()) / epochDuration - - if uint64(expDuration.Milliseconds())%epochDuration != 0 { - numEpoch++ - } - - expirationEpoch := uint64(math.MaxUint64) - if numEpoch < math.MaxUint64-currentEpoch { - expirationEpoch = currentEpoch + numEpoch - } - - headers[t.expirationEpochAttr()] = strconv.FormatUint(expirationEpoch, 10) -} - -func GetAttributeValue(attrs []object.Attribute, key string) string { - for _, attr := range attrs { - if attr.Key() == key { - return attr.Value() - } - } - return "" -} diff --git a/utils/attributes_test.go b/utils/attributes_test.go deleted file mode 100644 index 251113b..0000000 --- a/utils/attributes_test.go +++ /dev/null @@ -1,189 +0,0 @@ -//go:build !integration - -package utils - -import ( - "math" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestPrepareExpirationHeader(t *testing.T) { - tomorrow := time.Now().Add(24 * time.Hour) - tomorrowUnix := tomorrow.Unix() - tomorrowUnixNano := tomorrow.UnixNano() - tomorrowUnixMilli := tomorrowUnixNano / 1e6 - - epoch := "100" - duration := "24h" - timestampSec := strconv.FormatInt(tomorrowUnix, 10) - timestampMilli := strconv.FormatInt(tomorrowUnixMilli, 10) - timestampNano := strconv.FormatInt(tomorrowUnixNano, 10) - - defaultDurations := &EpochDurations{ - CurrentEpoch: 10, - MsPerBlock: 1000, - BlockPerEpoch: 101, - } - - msPerBlock := defaultDurations.BlockPerEpoch * uint64(defaultDurations.MsPerBlock) - epochPerDay := uint64((24 * time.Hour).Milliseconds()) / msPerBlock - if uint64((24*time.Hour).Milliseconds())%msPerBlock != 0 
{ - epochPerDay++ - } - - defaultExpEpoch := strconv.FormatUint(defaultDurations.CurrentEpoch+epochPerDay, 10) - - for _, transformer := range transformers { - for _, tc := range []struct { - name string - headers map[string]string - durations *EpochDurations - err bool - expected map[string]string - }{ - { - name: "valid epoch", - headers: map[string]string{transformer.expirationEpochAttr(): epoch}, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - durations: defaultDurations, - }, - { - name: "valid epoch, valid duration", - headers: map[string]string{ - transformer.expirationEpochAttr(): epoch, - transformer.expirationDurationAttr(): duration, - }, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - }, - { - name: "valid epoch, valid rfc3339", - headers: map[string]string{ - transformer.expirationEpochAttr(): epoch, - transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339), - }, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - }, - { - name: "valid epoch, valid timestamp sec", - headers: map[string]string{ - transformer.expirationEpochAttr(): epoch, - transformer.expirationTimestampAttr(): timestampSec, - }, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - }, - { - name: "valid epoch, valid timestamp milli", - headers: map[string]string{ - transformer.expirationEpochAttr(): epoch, - transformer.expirationTimestampAttr(): timestampMilli, - }, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - }, - { - name: "valid epoch, valid timestamp nano", - headers: map[string]string{ - transformer.expirationEpochAttr(): epoch, - transformer.expirationTimestampAttr(): timestampNano, - }, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): epoch}, - }, - { - name: "valid 
timestamp sec", - headers: map[string]string{transformer.expirationTimestampAttr(): timestampSec}, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch}, - }, - { - name: "valid duration", - headers: map[string]string{transformer.expirationDurationAttr(): duration}, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch}, - }, - { - name: "valid rfc3339", - headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)}, - durations: defaultDurations, - expected: map[string]string{transformer.expirationEpochAttr(): defaultExpEpoch}, - }, - { - name: "valid max uint 64", - headers: map[string]string{transformer.expirationRFC3339Attr(): tomorrow.Format(time.RFC3339)}, - durations: &EpochDurations{ - CurrentEpoch: math.MaxUint64 - 1, - MsPerBlock: defaultDurations.MsPerBlock, - BlockPerEpoch: defaultDurations.BlockPerEpoch, - }, - expected: map[string]string{transformer.expirationEpochAttr(): strconv.FormatUint(uint64(math.MaxUint64), 10)}, - }, - { - name: "invalid timestamp sec", - headers: map[string]string{transformer.expirationTimestampAttr(): "abc"}, - err: true, - }, - { - name: "invalid timestamp sec zero", - headers: map[string]string{transformer.expirationTimestampAttr(): "0"}, - err: true, - }, - { - name: "invalid duration", - headers: map[string]string{transformer.expirationDurationAttr(): "1d"}, - err: true, - }, - { - name: "invalid duration negative", - headers: map[string]string{transformer.expirationDurationAttr(): "-5h"}, - err: true, - }, - { - name: "invalid rfc3339", - headers: map[string]string{transformer.expirationRFC3339Attr(): "abc"}, - err: true, - }, - { - name: "invalid rfc3339 zero", - headers: map[string]string{transformer.expirationRFC3339Attr(): time.RFC3339}, - err: true, - }, - } { - t.Run(tc.name, func(t *testing.T) { - err := transformer.prepareExpirationHeader(tc.headers, tc.durations, 
time.Now()) - if tc.err { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tc.expected, tc.headers) - } - }) - } - } -} - -func TestSystemBackwardTranslator(t *testing.T) { - input := []string{ - "__SYSTEM__EXPIRATION_EPOCH", - "__SYSTEM__RANDOM_ATTR", - "__NEOFS__EXPIRATION_EPOCH", - "__NEOFS__RANDOM_ATTR", - } - expected := []string{ - "System-Expiration-Epoch", - "System-Random-Attr", - "Neofs-Expiration-Epoch", - "Neofs-Random-Attr", - } - - for i, str := range input { - res := BackwardTransformIfSystem(str) - require.Equal(t, expected[i], res) - } -} diff --git a/utils/tracing.go b/utils/tracing.go deleted file mode 100644 index c8e467d..0000000 --- a/utils/tracing.go +++ /dev/null @@ -1,83 +0,0 @@ -package utils - -import ( - "context" - - "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing" - "github.com/valyala/fasthttp" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" - "go.opentelemetry.io/otel/trace" -) - -type httpCarrier struct { - r *fasthttp.RequestCtx -} - -func (c *httpCarrier) Get(key string) string { - bytes := c.r.Request.Header.Peek(key) - if len(bytes) == 0 { - return "" - } - return string(bytes) -} - -func (c *httpCarrier) Set(key string, value string) { - c.r.Response.Header.Set(key, value) -} - -func (c *httpCarrier) Keys() []string { - dict := make(map[string]interface{}) - c.r.Request.Header.VisitAll( - func(key, _ []byte) { - dict[string(key)] = true - }, - ) - c.r.Response.Header.VisitAll( - func(key, _ []byte) { - dict[string(key)] = true - }, - ) - result := make([]string, 0, len(dict)) - for key := range dict { - result = append(result, key) - } - return result -} - -func extractHTTPTraceInfo(ctx context.Context, req *fasthttp.RequestCtx) context.Context { - if req == nil { - return ctx - } - carrier := &httpCarrier{r: req} - return tracing.Propagator.Extract(ctx, carrier) -} - -// SetHTTPTraceInfo saves trace 
headers to response. -func SetHTTPTraceInfo(ctx context.Context, span trace.Span, req *fasthttp.RequestCtx) { - if req == nil { - return - } - if err := req.Err(); err != nil { - span.SetStatus(codes.Error, err.Error()) - } - span.SetAttributes( - semconv.HTTPStatusCode(req.Response.StatusCode()), - ) - carrier := &httpCarrier{r: req} - tracing.Propagator.Inject(ctx, carrier) -} - -// StartHTTPServerSpan starts root HTTP server span. -func StartHTTPServerSpan(ctx context.Context, req *fasthttp.RequestCtx, operationName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - ctx = extractHTTPTraceInfo(ctx, req) - opts = append(opts, trace.WithAttributes( - attribute.String("http.client_address", req.RemoteAddr().String()), - attribute.String("http.path", string(req.Path())), - semconv.HTTPMethod(string(req.Method())), - semconv.RPCService("frostfs-http-gw"), - attribute.String("http.query", req.QueryArgs().String()), - ), trace.WithSpanKind(trace.SpanKindServer)) - return tracing.StartSpanFromContext(ctx, operationName, opts...) -} diff --git a/utils/util.go b/utils/util.go deleted file mode 100644 index b7f5e39..0000000 --- a/utils/util.go +++ /dev/null @@ -1,49 +0,0 @@ -package utils - -import ( - "context" - - "github.com/valyala/fasthttp" - "go.uber.org/zap" -) - -// SetContextToRequest adds new context to fasthttp request. -func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) { - c.SetUserValue("context", ctx) -} - -// GetContextFromRequest returns main context from fasthttp request context. -func GetContextFromRequest(c *fasthttp.RequestCtx) context.Context { - return c.UserValue("context").(context.Context) -} - -type ctxReqLoggerKeyType struct{} - -// SetReqLog sets child zap.Logger in the context. -func SetReqLog(ctx context.Context, log *zap.Logger) context.Context { - if ctx == nil { - return nil - } - return context.WithValue(ctx, ctxReqLoggerKeyType{}, log) -} - -// GetReqLog returns log if set. 
-// If zap.Logger isn't set returns nil. -func GetReqLog(ctx context.Context) *zap.Logger { - if ctx == nil { - return nil - } else if r, ok := ctx.Value(ctxReqLoggerKeyType{}).(*zap.Logger); ok { - return r - } - return nil -} - -// GetReqLogOrDefault returns log from context, if it exists. -// If the log is missing from the context, the default logger is returned. -func GetReqLogOrDefault(ctx context.Context, defaultLog *zap.Logger) *zap.Logger { - log := GetReqLog(ctx) - if log == nil { - log = defaultLog - } - return log -}