Compare commits: master...feat/lint- (1 commit)

Commit: c6875030f6

71 changed files with 2061 additions and 5621 deletions
Dockerfile
@@ -1,9 +1,9 @@
-FROM golang:1.22-alpine AS basebuilder
+FROM golang:1.22-alpine as basebuilder
 RUN apk add --update make bash ca-certificates

-FROM basebuilder AS builder
-ENV GOGC=off
-ENV CGO_ENABLED=0
+FROM basebuilder as builder
+ENV GOGC off
+ENV CGO_ENABLED 0
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

Workflow: builds
@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   builds:
@@ -10,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.21', '1.22' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

Workflow: DCO check
@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.21'

       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

Workflow: OCI image (deleted)
@@ -1,27 +0,0 @@
-on:
-  pull_request:
-  push:
-  workflow_dispatch:
-
-jobs:
-  image:
-    name: OCI image
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-
-      - name: Build OCI image
-        run: make image
-
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make image-push
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

Workflow: lint and tests
@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   lint:
@@ -14,7 +10,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.22'
           cache: true

       - name: Install linters
@@ -28,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.21', '1.22' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -43,19 +39,3 @@ jobs:

       - name: Run tests
         run: make test
-
-  integration:
-    name: Integration tests
-    runs-on: oci-runner
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-
-      - name: Run integration tests
-        run: |-
-          podman-service.sh
-          make integration-test

Workflow: vulncheck
@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   vulncheck:
@@ -16,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22.11'
+          go-version: '1.22'

       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

.github/CODEOWNERS (new file, 1 line)
@@ -0,0 +1 @@
+* @alexvanin @dkirillov

.forgejo/logo.svg → .github/logo.svg (renamed; 5.5 KiB, unchanged)

CHANGELOG.md (81 changed lines)
@@ -4,78 +4,6 @@ This document outlines major changes between releases.

 ## [Unreleased]

-### Added
-- Add handling quota limit reached error (#187)
-
-## [0.32.2] - 2025-02-03
-
-### Fixed
-- Possible memory leak in gRPC client (#202)
-
-## [0.32.1] - 2025-01-27
-
-### Fixed
-- SIGHUP panic (#198)
-
-## [0.32.0] - Khumbu - 2024-12-20
-
-### Fixed
-- Getting S3 object with FrostFS Object ID-like key (#166)
-- Ignore delete marked objects in versioned bucket in index page (#181)
-
-### Added
-- Metric of dropped logs by log sampler (#150)
-- Fallback FileName attribute search during FilePath attribute search (#174)
-
-### Changed
-- Updated tree service pool without api-go dependency (#178)
-
-## [0.31.0] - Rongbuk - 2024-11-20
-
-### Fixed
-- Docker warnings during image build (#126)
-- `trace_id` parameter in logs (#148)
-- SIGHUP support for `tracing.enabled` config parameter (#157)
-
-### Added
-- Vulnerability report document (#123)
-- Root CA configuration for tracing (#139)
-- Log sampling policy configuration (#147)
-- Index page support for buckets and containers (#137, #151)
-- CORS support (#158)
-- Source IP binding configuration for FrostFS requests (#160)
-- Tracing attributes (#164)
-
-### Changed
-- Updated Go version to 1.22 (#132)
-
-### Removed
-- Duplicated NNS Resolver code (#129)
-
-## [0.30.3] - 2024-10-18
-
-### Fixed
-- Get response on S3 multipart object (#142)
-
-### Added
-- Support percent-encoding for GET queries (#134)
-
-### Changed
-- Split `FrostFS` interface into separate read methods (#127)
-
-## [0.30.2] - 2024-09-03
-
-### Added
-- Fuzzing tests (#135)
-
-## [0.30.1] - 2024-08-20
-
-### Fixed
-- Error counting in pool component before connection switch (#131)
-
-### Added
-- Log of endpoint address during tree pool errors (#131)
-
 ## [0.30.0] - Kangshung - 2024-07-22

 ### Fixed
@@ -192,11 +120,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
 [0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0
 [0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0
-[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...v0.30.1
-[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2
-[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3
-[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0
-[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0
-[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1
-[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...master

CODEOWNERS (deleted file)
@@ -1,3 +0,0 @@
-.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
-.forgejo/.* @potyarkin
-Makefile @potyarkin

Makefile (38 changed lines)
@@ -3,11 +3,11 @@
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.3
+LINT_VERSION ?= 1.60.1
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
 BUILD ?= $(shell date -u --iso=seconds)

-HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
+HUB_IMAGE ?= truecloudlab/frostfs-http-gw
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"

 METRICS_DUMP_OUT ?= ./metrics-dump.json
@@ -30,11 +30,6 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 			sed "s/-/~/")-${OS_RELEASE}
 .PHONY: debpackage debclean

-FUZZ_NGFUZZ_DIR ?= ""
-FUZZ_TIMEOUT ?= 30
-FUZZ_FUNCTIONS ?= "all"
-FUZZ_AUX ?= ""
-
 # Make all binaries
 all: $(BINS)
 $(BINS): $(DIRS) dep
@@ -83,35 +78,6 @@ cover:
 	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
 	@go tool cover -html=coverage.txt -o coverage.html

-# Run fuzzing
-CLANG := $(shell which clang-17 2>/dev/null)
-
-.PHONY: check-clang all
-check-clang:
-ifeq ($(CLANG),)
-	@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
-	@exit 1
-endif
-
-.PHONY: check-ngfuzz all
-check-ngfuzz:
-	@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
-		echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
-		exit 1; \
-	fi
-
-.PHONY: install-fuzzing-deps
-install-fuzzing-deps: check-clang check-ngfuzz
-
-.PHONY: fuzz
-fuzz: install-fuzzing-deps
-	@START_PATH=$$(pwd); \
-	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
-	cd $(FUZZ_NGFUZZ_DIR) && \
-		./ngfuzz -clean && \
-		./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
-		./ngfuzz -report
-
 # Reformat code
 fmt:
 	@echo "⇒ Processing gofmt check"
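The deleted targets wire the external ngfuzz driver to the repository's Go fuzz tests. For orientation only, here is a minimal sketch of the kind of native Go fuzz harness such a driver executes; the `splitAttribute` helper is illustrative, not gateway code:

```go
package fuzz

import (
	"strings"
	"testing"
)

// splitAttribute is a stand-in for the request-path parsing a gateway
// fuzz target might exercise; it is illustrative only.
func splitAttribute(s string) (key, val string) {
	key, val, _ = strings.Cut(s, "/")
	return key, val
}

// FuzzSplitAttribute shows the shape of a Go fuzz harness: seed inputs
// plus a cheap invariant checked on arbitrary generated data.
func FuzzSplitAttribute(f *testing.F) {
	f.Add("FileName/object-name") // seed corpus entry
	f.Fuzz(func(t *testing.T, s string) {
		k, v := splitAttribute(s)
		if len(k)+len(v) > len(s) {
			t.Fatalf("parsed parts longer than input: %q -> %q %q", s, k, v)
		}
	})
}
```

Running `go test -fuzz=FuzzSplitAttribute` would drive it with random inputs, hunting mainly for panics and hangs.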
README.md (166 changed lines)
@@ -1,5 +1,5 @@
 <p align="center">
-  <img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
+  <img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
@@ -38,7 +38,7 @@ version Show current version
 ```

 Or you can also use a [Docker
-image](https://git.frostfs.info/TrueCloudLab/-/packages/container/frostfs-http-gw) provided for the released
+image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for the released
 (and occasionally unreleased) versions of the gateway (`:latest` points to the
 latest stable release).

@@ -217,8 +217,41 @@ Also, in case of downloading, you need to have a file inside a container.
 ### NNS

 In all download/upload routes you can use container name instead of its id (`$CID`).
-Read more about it in [docs/nns.md](./docs/nns.md).
+
+Steps to start using name resolving:
+
+1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
+```yaml
+rpc_endpoint: http://morph-chain.frostfs.devenv:30333
+resolve_order:
+  - nns
+```
+
+2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
+you can check if your container (e.g. with `container-name` name) is registered in NNS:
+```shell
+$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
+  http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
+
+0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
+
+$ docker exec -it morph_chain neo-go \
+  contract testinvokefunction \
+  -r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
+  resolve string:container-name.container int:16 \
+  | jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
+  | base64 -d && echo
+
+7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
+```
+
+3. Use container name instead of its `$CID`. For example:
+```shell
+$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
+```

 #### Create a container
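The route in step 3 is plain HTTP, so any client can use NNS-resolved container names. As a minimal Go sketch against the same illustrative endpoint and names from the README (assuming a local gateway on port 8082):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// NNS lets the URL carry a container name instead of a CID.
	url := "http://localhost:8082/get_by_attribute/container-name/FileName/object-name"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%s, %d bytes\n", resp.Status, len(body))
}
```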
@@ -429,7 +462,109 @@ object ID, like this:

 #### Authentication

-Read more about request authentication in [docs/authentication.md](./docs/authemtnication.md)
+You can always upload files to public containers (open for anyone to put
+objects into), but for restricted containers you need to explicitly allow PUT
+operations for a request signed with your HTTP Gateway keys.
+
+If you don't want to manage gateway's secret keys and adjust policies when
+gateway configuration changes (new gate, key rotation, etc) or you plan to use
+public services, there is an option to let your application backend (or you) to
+issue Bearer Tokens and pass them from the client via gate down to FrostFS level
+to grant access.
+
+FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
+documentation for more details). There are two options to pass them to gateway:
+* "Authorization" header with "Bearer" type and base64-encoded token in
+  credentials field
+* "Bearer" cookie with base64-encoded token contents
+
+For example, you have a mobile application frontend with a backend part storing
+data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
+Bearer token and provides it to the frontend. Then, the mobile app may generate
+some data and upload it via any available FrostFS HTTP Gateway by adding
+the corresponding header to the upload request. Accessing policy protected data
+works the same way.
+
+##### Example
+In order to generate a bearer token, you need to have wallet (which will be used to sign the token)
+
+1. Suppose you have a container with private policy for wallet key
+
+```
+$ frostfs-cli container create -r <endpoint> --wallet <wallet> -policy <policy> --basic-acl 0 --await
+CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
+
+$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
+  --target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
+  --rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
+  --chain-id <chainID>
+```
+
+2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
+   HTTP Gateway request as wallet signed request and save it to **bearer.json**:
+```
+{
+    "body": {
+        "allowImpersonate": true,
+        "lifetime": {
+            "exp": "10000",
+            "nbf": "0",
+            "iat": "0"
+        }
+    },
+    "signature": null
+}
+```
+
+3. Sign it with the wallet:
+```
+$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
+```
+
+4. Encode to base64 to use in header:
+```
+$ base64 -w 0 signed.json
+# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
+```
+
+After that, the Bearer token can be used:
+
+```
+$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
+  http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
+# output:
+# {
+#	"object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
+#	"container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
+# }
+```
+
+##### Note: Bearer Token owner
+
+You can specify exact key who can use Bearer Token (gateway wallet address).
+To do this, encode wallet address in base64 format
+
+```
+$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
+# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
+```
+
+Then specify this value in Bearer Token Json
+```
+{
+    "body": {
+        "ownerID": {
+            "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
+        },
+...
+```
+
+##### Note: Policy override
+
+Instead of impersonation, you can define the set of policies that will be applied
+to the request sender. This allows to restrict access to specific operation and
+specific objects without giving full impersonation control to the token user.

 ### Metrics and Pprof
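Programmatically, using the token comes down to one request header. A minimal Go sketch of the upload call from the example above; the token string and container ID are placeholders, and the route is the gateway's `/upload/<CID>` shown earlier:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	// Placeholders: substitute a real base64-encoded signed token and CID.
	token := "<base64-encoded signed bearer token>"
	url := "http://localhost:8082/upload/<CID>"

	// Build the multipart body the /upload route expects.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, _ := w.CreateFormFile("file", "cat.jpeg")
	part.Write([]byte("...file contents..."))
	w.Close()

	req, _ := http.NewRequest(http.MethodPost, url, &buf)
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", "Bearer "+token) // a "Bearer" cookie also works

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```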
@@ -440,26 +575,3 @@ See [configuration](./docs/gate-configuration.md).
 ## Credits

 Please see [CREDITS](CREDITS.md) for details.
-
-## Fuzzing
-
-To run fuzzing tests use the following command:
-
-```shell
-$ make fuzz
-```
-
-This command will install dependencies for the fuzzing process and run existing fuzzing tests.
-
-You can also use the following arguments:
-
-```
-FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
-FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
-FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
-FUZZ_NGFUZZ_DIR - path to ngfuzz tool
-````
-
-## Credits
-
-Please see [CREDITS](CREDITS.md) for details.

SECURITY.md (deleted file, 26 lines)
@@ -1,26 +0,0 @@
-# Security Policy
-
-
-## How To Report a Vulnerability
-
-If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
-
-**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
-
-Instead, you can report it using one of the following ways:
-
-* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
-
-Please include as much of the information listed below as you can to help us better understand and resolve the issue:
-
-* The type of issue (e.g., buffer overflow, or cross-site scripting)
-* Affected version(s)
-* Impact of the issue, including how an attacker might exploit the issue
-* Step-by-step instructions to reproduce the issue
-* The location of the affected source code (tag/branch/commit or direct URL)
-* Full paths of source file(s) related to the manifestation of the issue
-* Any special configuration required to reproduce the issue
-* Any log files that are related to this issue (if possible)
-* Proof-of-concept or exploit code (if possible)
-
-This information will help us triage your report more quickly.

VERSION (2 changed lines)
@@ -1 +1 @@
-v0.32.2
+v0.30.0

Go application source (package main)
@@ -3,7 +3,6 @@ package main
 import (
 	"bytes"
 	"context"
-	"crypto/x509"
 	"errors"
 	"fmt"
 	"net/http"
@@ -16,20 +15,20 @@ import (
 	"syscall"
 	"time"

+	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -39,10 +38,8 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/util"
 	"github.com/nspcc-dev/neo-go/pkg/wallet"
-	"github.com/panjf2000/ants/v2"
 	"github.com/spf13/viper"
 	"github.com/valyala/fasthttp"
-	"go.opentelemetry.io/otel/trace"
 	"go.uber.org/zap"
 	"golang.org/x/exp/slices"
 )
@@ -56,32 +53,28 @@ type (
 		treePool  *treepool.Pool
 		key       *keys.PrivateKey
 		owner     *user.ID
-		cfg       *appCfg
+		cfg       *viper.Viper
 		webServer *fasthttp.Server
 		webDone   chan struct{}
 		resolver  *resolver.ContainerResolver
 		metrics   *gateMetrics
 		services  []*metrics.Service
 		settings  *appSettings
-		loggerSettings *loggerSettings
-		bucketCache    *cache.BucketCache

 		servers       []Server
 		unbindServers []ServerInfo
 		mu            sync.RWMutex
 	}

-	loggerSettings struct {
-		mu         sync.RWMutex
-		appMetrics *metrics.GateMetrics
-	}
-
 	// App is an interface for the main gateway function.
 	App interface {
 		Wait()
 		Serve()
 	}

+	// Option is an application option.
+	Option func(a *app)
+
 	gateMetrics struct {
 		logger   *zap.Logger
 		provider *metrics.GateMetrics
@@ -92,68 +85,64 @@ type (
 	// appSettings stores reloading parameters, so it has to provide getters and setters which use RWMutex.
 	appSettings struct {
 		reconnectInterval time.Duration
-		dialerSource      *internalnet.DialerSource
-		workerPoolSize    int

 		mu                  sync.RWMutex
 		defaultTimestamp    bool
-		archiveCompression  bool
+		zipCompression      bool
 		clientCut           bool
-		returnIndexPage     bool
-		indexPageTemplate   string
 		bufferMaxSizeForPut uint64
 		namespaceHeader     string
 		defaultNamespaces   []string
-		corsAllowOrigin        string
-		corsAllowMethods       []string
-		corsAllowHeaders       []string
-		corsExposeHeaders      []string
-		corsAllowCredentials   bool
-		corsMaxAge             int
-		enableFilepathFallback bool
-	}
-
-	CORS struct {
-		AllowOrigin      string
-		AllowMethods     []string
-		AllowHeaders     []string
-		ExposeHeaders    []string
-		AllowCredentials bool
-		MaxAge           int
 	}
 )

-func newApp(ctx context.Context, cfg *appCfg) App {
-	logSettings := &loggerSettings{}
-	log := pickLogger(cfg.config(), logSettings)
-
-	a := &app{
-		ctx:            ctx,
-		log:            log.logger,
-		logLevel:       log.lvl,
-		cfg:            cfg,
-		loggerSettings: logSettings,
-		webServer:      new(fasthttp.Server),
-		webDone:        make(chan struct{}),
-		bucketCache:    cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)),
-	}
-
-	a.initAppSettings()
+// WithLogger returns Option to set a specific logger.
+func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
+	return func(a *app) {
+		if l == nil {
+			return
+		}
+		a.log = l
+		a.logLevel = lvl
+	}
+}
+
+// WithConfig returns Option to use specific Viper configuration.
+func WithConfig(c *viper.Viper) Option {
+	return func(a *app) {
+		if c == nil {
+			return
+		}
+		a.cfg = c
+	}
+}
+
+func newApp(ctx context.Context, opt ...Option) App {
+	a := &app{
+		ctx:       ctx,
+		log:       zap.L(),
+		cfg:       viper.GetViper(),
+		webServer: new(fasthttp.Server),
+		webDone:   make(chan struct{}),
+	}
+	for i := range opt {
+		opt[i](a)
+	}

 	// -- setup FastHTTP server --
 	a.webServer.Name = "frost-http-gw"
-	a.webServer.ReadBufferSize = a.config().GetInt(cfgWebReadBufferSize)
-	a.webServer.WriteBufferSize = a.config().GetInt(cfgWebWriteBufferSize)
-	a.webServer.ReadTimeout = a.config().GetDuration(cfgWebReadTimeout)
-	a.webServer.WriteTimeout = a.config().GetDuration(cfgWebWriteTimeout)
+	a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
+	a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
+	a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
+	a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
 	a.webServer.DisableHeaderNamesNormalizing = true
 	a.webServer.NoDefaultServerHeader = true
 	a.webServer.NoDefaultContentType = true
-	a.webServer.MaxRequestBodySize = a.config().GetInt(cfgWebMaxRequestBodySize)
+	a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
 	a.webServer.DisablePreParseMultipartForm = true
-	a.webServer.StreamRequestBody = a.config().GetBool(cfgWebStreamRequestBody)
+	a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
 	// -- -- -- -- -- -- -- -- -- -- -- -- -- --
-	a.initPools(ctx)
+	a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg)

 	var owner user.ID
 	user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
@@ -161,6 +150,7 @@ func newApp(ctx context.Context, cfg *appCfg) App {

 	a.setRuntimeParameters()

+	a.initAppSettings()
 	a.initResolver()
 	a.initMetrics()
 	a.initTracing(ctx)
@@ -168,121 +158,28 @@ func newApp(ctx context.Context, cfg *appCfg) App {
 	return a
 }

-func (a *app) config() *viper.Viper {
-	return a.cfg.config()
-}
-
-func (a *app) initAppSettings() {
-	a.settings = &appSettings{
-		reconnectInterval: fetchReconnectInterval(a.config()),
-		dialerSource:      getDialerSource(a.log, a.config()),
-		workerPoolSize:    a.config().GetInt(cfgWorkerPoolSize),
-	}
-	a.settings.update(a.config(), a.log)
-}
-
-func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
-	defaultTimestamp := v.GetBool(cfgUploaderHeaderEnableDefaultTimestamp)
-	archiveCompression := fetchArchiveCompression(v)
-	returnIndexPage := v.GetBool(cfgIndexPageEnabled)
-	clientCut := v.GetBool(cfgClientCut)
-	bufferMaxSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
-	namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
-	defaultNamespaces := fetchDefaultNamespaces(v)
-	indexPage, indexEnabled := fetchIndexPageTemplate(v, l)
-	corsAllowOrigin := v.GetString(cfgCORSAllowOrigin)
-	corsAllowMethods := v.GetStringSlice(cfgCORSAllowMethods)
-	corsAllowHeaders := v.GetStringSlice(cfgCORSAllowHeaders)
-	corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
-	corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
-	corsMaxAge := fetchCORSMaxAge(v)
-	enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
-
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.defaultTimestamp = defaultTimestamp
-	s.archiveCompression = archiveCompression
-	s.returnIndexPage = returnIndexPage
-	s.clientCut = clientCut
-	s.bufferMaxSizeForPut = bufferMaxSizeForPut
-	s.namespaceHeader = namespaceHeader
-	s.defaultNamespaces = defaultNamespaces
-	s.returnIndexPage = indexEnabled
-	s.indexPageTemplate = indexPage
-	s.corsAllowOrigin = corsAllowOrigin
-	s.corsAllowMethods = corsAllowMethods
-	s.corsAllowHeaders = corsAllowHeaders
-	s.corsExposeHeaders = corsExposeHeaders
-	s.corsAllowCredentials = corsAllowCredentials
-	s.corsMaxAge = corsMaxAge
-	s.enableFilepathFallback = enableFilepathFallback
-}
-
-func (s *loggerSettings) DroppedLogsInc() {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-
-	if s.appMetrics != nil {
-		s.appMetrics.DroppedLogsInc()
-	}
-}
-
-func (s *loggerSettings) setMetrics(appMetrics *metrics.GateMetrics) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.appMetrics = appMetrics
-}
-
 func (s *appSettings) DefaultTimestamp() bool {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	return s.defaultTimestamp
 }

-func (s *appSettings) ArchiveCompression() bool {
+func (s *appSettings) setDefaultTimestamp(val bool) {
+	s.mu.Lock()
+	s.defaultTimestamp = val
+	s.mu.Unlock()
+}
+
+func (s *appSettings) ZipCompression() bool {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
-	return s.archiveCompression
+	return s.zipCompression
 }

-func (s *appSettings) IndexPageEnabled() bool {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.returnIndexPage
-}
-
-func (s *appSettings) IndexPageTemplate() string {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	if s.indexPageTemplate == "" {
-		return templates.DefaultIndexTemplate
-	}
-	return s.indexPageTemplate
-}
-
-func (s *appSettings) CORS() CORS {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-
-	allowMethods := make([]string, len(s.corsAllowMethods))
-	copy(allowMethods, s.corsAllowMethods)
-
-	allowHeaders := make([]string, len(s.corsAllowHeaders))
-	copy(allowHeaders, s.corsAllowHeaders)
-
-	exposeHeaders := make([]string, len(s.corsExposeHeaders))
-	copy(exposeHeaders, s.corsExposeHeaders)
-
-	return CORS{
-		AllowOrigin:      s.corsAllowOrigin,
-		AllowMethods:     allowMethods,
-		AllowHeaders:     allowHeaders,
-		ExposeHeaders:    exposeHeaders,
-		AllowCredentials: s.corsAllowCredentials,
-		MaxAge:           s.corsMaxAge,
-	}
+func (s *appSettings) setZipCompression(val bool) {
+	s.mu.Lock()
+	s.zipCompression = val
+	s.mu.Unlock()
 }

 func (s *appSettings) ClientCut() bool {
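Both sides of this hunk follow the concurrency pattern the `appSettings` comment describes: values that a SIGHUP reload can rewrite are read under `RLock` and written under `Lock`. A standalone sketch of the pattern (the `settings` type and its field are illustrative, not taken from this file):

```go
package main

import "sync"

// settings is an illustrative hot-reloadable configuration holder.
type settings struct {
	mu               sync.RWMutex
	defaultTimestamp bool
}

// DefaultTimestamp is the read path: many request goroutines may hold
// the read lock concurrently.
func (s *settings) DefaultTimestamp() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.defaultTimestamp
}

// setDefaultTimestamp is the write path, used by the config-reload
// handler; it briefly excludes all readers.
func (s *settings) setDefaultTimestamp(val bool) {
	s.mu.Lock()
	s.defaultTimestamp = val
	s.mu.Unlock()
}
```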
@@ -291,33 +188,29 @@ func (s *appSettings) ClientCut() bool {
 	return s.clientCut
 }

+func (s *appSettings) setClientCut(val bool) {
+	s.mu.Lock()
+	s.clientCut = val
+	s.mu.Unlock()
+}
+
 func (s *appSettings) BufferMaxSizeForPut() uint64 {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	return s.bufferMaxSizeForPut
 }

-func (s *appSettings) NamespaceHeader() string {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.namespaceHeader
+func (s *appSettings) setBufferMaxSizeForPut(val uint64) {
+	s.mu.Lock()
+	s.bufferMaxSizeForPut = val
+	s.mu.Unlock()
 }

-func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
-	s.mu.RLock()
-	namespaces := s.defaultNamespaces
-	s.mu.RUnlock()
-	if slices.Contains(namespaces, ns) {
-		return v2container.SysAttributeZoneDefault, true
+func (a *app) initAppSettings() {
+	a.settings = &appSettings{
+		reconnectInterval: fetchReconnectInterval(a.cfg),
 	}
-
-	return ns + ".ns", false
-}
-
-func (s *appSettings) EnableFilepathFallback() bool {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.enableFilepathFallback
+	a.updateSettings()
 }

 func (a *app) initResolver() {
@@ -331,11 +224,11 @@ func (a *app) initResolver() {
 func (a *app) getResolverConfig() ([]string, *resolver.Config) {
 	resolveCfg := &resolver.Config{
 		FrostFS:    frostfs.NewResolverFrostFS(a.pool),
-		RPCAddress: a.config().GetString(cfgRPCEndpoint),
+		RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
 		Settings:   a.settings,
 	}

-	order := a.config().GetStringSlice(cfgResolveOrder)
+	order := a.cfg.GetStringSlice(cfgResolveOrder)
 	if resolveCfg.RPCAddress == "" {
 		order = remove(order, resolver.NNSResolver)
 		a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
@@ -350,9 +243,8 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {

 func (a *app) initMetrics() {
 	gateMetricsProvider := metrics.NewGateMetrics(a.pool)
-	a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.config().GetBool(cfgPrometheusEnabled))
+	a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
 	a.metrics.SetHealth(metrics.HealthStatusStarting)
-	a.loggerSettings.setMetrics(a.metrics.provider)
 }

 func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
@@ -508,16 +400,10 @@ func (a *app) setHealthStatus() {
 }

 func (a *app) Serve() {
-	workerPool := a.initWorkerPool()
-	defer func() {
-		workerPool.Release()
-		close(a.webDone)
-	}()
-
-	handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
+	handler := handler.New(a.AppParams(), a.settings, tree.NewTree(services.NewPoolWrapper(a.treePool)))

 	// Configure router.
-	a.configureRouter(handle)
+	a.configureRouter(handler)

 	a.startServices()
 	a.initServers(a.ctx)
|
||||||
a.metrics.Shutdown()
|
a.metrics.Shutdown()
|
||||||
a.stopServices()
|
a.stopServices()
|
||||||
a.shutdownTracing()
|
a.shutdownTracing()
|
||||||
}
|
|
||||||
|
|
||||||
func (a *app) initWorkerPool() *ants.Pool {
|
close(a.webDone)
|
||||||
workerPool, err := ants.NewPool(a.settings.workerPoolSize)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
|
|
||||||
}
|
|
||||||
return workerPool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *app) shutdownTracing() {
|
func (a *app) shutdownTracing() {
|
||||||
|
@ -578,25 +458,21 @@ func (a *app) shutdownTracing() {
|
||||||
|
|
||||||
func (a *app) configReload(ctx context.Context) {
|
func (a *app) configReload(ctx context.Context) {
|
||||||
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
a.log.Info(logs.SIGHUPConfigReloadStarted)
|
||||||
if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
|
if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
|
||||||
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
|
a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := a.cfg.reload(); err != nil {
|
if err := readInConfig(a.cfg); err != nil {
|
||||||
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
|
a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if lvl, err := getLogLevel(a.config()); err != nil {
|
if lvl, err := getLogLevel(a.cfg); err != nil {
|
||||||
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
|
a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
|
||||||
} else {
|
} else {
|
||||||
a.logLevel.SetLevel(lvl)
|
a.logLevel.SetLevel(lvl)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
|
|
||||||
a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
|
||||||
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
|
a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
|
||||||
}
|
}
|
||||||
|
@@ -610,24 +486,31 @@ func (a *app) configReload(ctx context.Context) {
 	a.stopServices()
 	a.startServices()

-	a.settings.update(a.config(), a.log)
+	a.updateSettings()

-	a.metrics.SetEnabled(a.config().GetBool(cfgPrometheusEnabled))
+	a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
 	a.initTracing(ctx)
 	a.setHealthStatus()

 	a.log.Info(logs.SIGHUPConfigReloadCompleted)
 }

-func (a *app) startServices() {
-	a.services = a.services[:0]
+func (a *app) updateSettings() {
+	a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
+	a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
+	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
+	a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
+	a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
+	a.settings.setDefaultNamespaces(a.cfg.GetStringSlice(cfgResolveDefaultNamespaces))
+}

-	pprofConfig := metrics.Config{Enabled: a.config().GetBool(cfgPprofEnabled), Address: a.config().GetString(cfgPprofAddress)}
+func (a *app) startServices() {
+	pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
 	pprofService := metrics.NewPprofService(a.log, pprofConfig)
 	a.services = append(a.services, pprofService)
 	go pprofService.Start()

-	prometheusConfig := metrics.Config{Enabled: a.config().GetBool(cfgPrometheusEnabled), Address: a.config().GetString(cfgPrometheusAddress)}
+	prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
 	prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
 	a.services = append(a.services, prometheusService)
 	go prometheusService.Start()
@@ -642,128 +525,37 @@ func (a *app) stopServices() {
 	}
 }

-func (a *app) configureRouter(h *handler.Handler) {
+func (a *app) configureRouter(handler *handler.Handler) {
 	r := router.New()
 	r.RedirectTrailingSlash = true
 	r.NotFound = func(r *fasthttp.RequestCtx) {
-		handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
+		response.Error(r, "Not found", fasthttp.StatusNotFound)
 	}
 	r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
-		handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
+		response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
 	}

-	r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
-	r.OPTIONS("/upload/{cid}", a.addPreflight())
+	r.POST("/upload/{cid}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.Upload))))))
 	a.log.Info(logs.AddedPathUploadCid)
-	r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
-	r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
-	r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
+	r.GET("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAddressOrBucketName))))))
+	r.HEAD("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAddressOrBucketName))))))
 	a.log.Info(logs.AddedPathGetCidOid)
-	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
-	r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
-	r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
+	r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAttribute))))))
+	r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAttribute))))))
 	a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
-	r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
-	r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
-	r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
-	r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
+	r.GET("/zip/{cid}/{prefix:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadZipped))))))
 	a.log.Info(logs.AddedPathZipCidPrefix)

 	a.webServer.Handler = r.Handler
 }

-func (a *app) addMiddlewares(h fasthttp.RequestHandler) fasthttp.RequestHandler {
-	list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
-		a.tracer,
-		a.logger,
-		a.canonicalizer,
-		a.tokenizer,
-		a.reqNamespace,
-		a.cors,
-	}
-
-	for i := len(list) - 1; i >= 0; i-- {
-		h = list[i](h)
-	}
-
-	return h
-}
-
-func (a *app) addPreflight() fasthttp.RequestHandler {
-	list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
-		a.tracer,
-		a.logger,
-		a.reqNamespace,
-	}
-
-	h := a.preflightHandler
-	for i := len(list) - 1; i >= 0; i-- {
-		h = list[i](h)
-	}
-
-	return h
-}
-
-func (a *app) preflightHandler(c *fasthttp.RequestCtx) {
-	cors := a.settings.CORS()
-	setCORSHeaders(c, cors)
-}
-
-func (a *app) cors(h fasthttp.RequestHandler) fasthttp.RequestHandler {
-	return func(c *fasthttp.RequestCtx) {
-		h(c)
-		code := c.Response.StatusCode()
-		if code >= fasthttp.StatusOK && code < fasthttp.StatusMultipleChoices {
-			cors := a.settings.CORS()
-			setCORSHeaders(c, cors)
-		}
-	}
-}
-
-func setCORSHeaders(c *fasthttp.RequestCtx, cors CORS) {
-	c.Response.Header.Set(fasthttp.HeaderAccessControlMaxAge, strconv.Itoa(cors.MaxAge))
-
-	if len(cors.AllowOrigin) != 0 {
-		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowOrigin, cors.AllowOrigin)
-	}
-
-	if len(cors.AllowMethods) != 0 {
-		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowMethods, strings.Join(cors.AllowMethods, ","))
-	}
-
-	if len(cors.AllowHeaders) != 0 {
-		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowHeaders, strings.Join(cors.AllowHeaders, ","))
-	}
-
-	if len(cors.ExposeHeaders) != 0 {
-		c.Response.Header.Set(fasthttp.HeaderAccessControlExposeHeaders, strings.Join(cors.ExposeHeaders, ","))
-	}
-
-	if cors.AllowCredentials {
-		c.Response.Header.Set(fasthttp.HeaderAccessControlAllowCredentials, "true")
-	}
-}
-
 func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return func(req *fasthttp.RequestCtx) {
-		requiredFields := []zap.Field{zap.Uint64("id", req.ID())}
-		reqCtx := utils.GetContextFromRequest(req)
-		if traceID := trace.SpanFromContext(reqCtx).SpanContext().TraceID(); traceID.IsValid() {
-			requiredFields = append(requiredFields, zap.String("trace_id", traceID.String()))
-		}
-		log := a.log.With(requiredFields...)
-
-		reqCtx = utils.SetReqLog(reqCtx, log)
-		utils.SetContextToRequest(reqCtx, req)
-
-		fields := []zap.Field{
-			zap.String("remote", req.RemoteAddr().String()),
+		a.log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
 			zap.ByteString("method", req.Method()),
 			zap.ByteString("path", req.Path()),
 			zap.ByteString("query", req.QueryArgs().QueryString()),
-		}
-
-		log.Info(logs.Request, fields...)
+			zap.Uint64("id", req.ID()))
 		h(req)
 	}
 }
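Both sides of this hunk wrap a final handler in layers of middleware; the deleted `addMiddlewares` merely automates the nesting that the branch writes out by hand. A standalone sketch of that wrapping loop (the helper name `chain` is illustrative; the type mirrors `fasthttp.RequestHandler`):

```go
package main

import "github.com/valyala/fasthttp"

type middleware = func(fasthttp.RequestHandler) fasthttp.RequestHandler

// chain nests middlewares so that list[0] runs first at request time.
// Iterating in reverse makes chain(h, m1, m2) equal to m1(m2(h)).
func chain(h fasthttp.RequestHandler, list ...middleware) fasthttp.RequestHandler {
	for i := len(list) - 1; i >= 0; i-- {
		h = list[i](h)
	}
	return h
}
```

With such a helper, `chain(handler.Upload, a.logger, a.canonicalizer, a.tokenizer, a.tracer, a.reqNamespace)` reads the same as the hand-nested registration on the branch side.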
@@ -802,13 +594,10 @@ func (a *app) canonicalizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {

 func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return func(req *fasthttp.RequestCtx) {
-		reqCtx := utils.GetContextFromRequest(req)
-		appCtx, err := tokens.StoreBearerTokenAppCtx(reqCtx, req)
+		appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
 		if err != nil {
-			log := utils.GetReqLogOrDefault(reqCtx, a.log)
-			log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
-			handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
+			a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Uint64("id", req.ID()), zap.Error(err))
+			response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
 			return
 		}
 		utils.SetContextToRequest(appCtx, req)
@@ -818,7 +607,9 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {

 func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 	return func(req *fasthttp.RequestCtx) {
-		appCtx, span := utils.StartHTTPServerSpan(a.ctx, req, "REQUEST")
+		appCtx := utils.GetContextFromRequest(req)
+
+		appCtx, span := utils.StartHTTPServerSpan(appCtx, req, "REQUEST")
 		defer func() {
 			utils.SetHTTPTraceInfo(appCtx, span, req)
 			span.End()
|
||||||
FrostFS: frostfs.NewFrostFS(a.pool),
|
FrostFS: frostfs.NewFrostFS(a.pool),
|
||||||
Owner: a.owner,
|
Owner: a.owner,
|
||||||
Resolver: a.resolver,
|
Resolver: a.resolver,
|
||||||
Cache: a.bucketCache,
|
Cache: cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *app) initServers(ctx context.Context) {
|
func (a *app) initServers(ctx context.Context) {
|
||||||
serversInfo := fetchServers(a.config(), a.log)
|
serversInfo := fetchServers(a.cfg, a.log)
|
||||||
|
|
||||||
a.servers = make([]Server, 0, len(serversInfo))
|
a.servers = make([]Server, 0, len(serversInfo))
|
||||||
for _, serverInfo := range serversInfo {
|
for _, serverInfo := range serversInfo {
|
||||||
|
@ -881,7 +672,7 @@ func (a *app) initServers(ctx context.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *app) updateServers() error {
|
func (a *app) updateServers() error {
|
||||||
serversInfo := fetchServers(a.config(), a.log)
|
serversInfo := fetchServers(a.cfg, a.log)
|
||||||
|
|
||||||
a.mu.Lock()
|
a.mu.Lock()
|
||||||
defer a.mu.Unlock()
|
defer a.mu.Unlock()
|
||||||
|
@ -894,8 +685,8 @@ func (a *app) updateServers() error {
|
||||||
if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
|
||||||
return fmt.Errorf("failed to update tls certs: %w", err)
|
return fmt.Errorf("failed to update tls certs: %w", err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
found = true
|
found = true
|
||||||
|
}
|
||||||
} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
|
} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
|
||||||
found = true
|
found = true
|
||||||
}
|
}
|
||||||
|
@ -939,36 +730,13 @@ func (a *app) initTracing(ctx context.Context) {
|
||||||
instanceID = a.servers[0].Address()
|
instanceID = a.servers[0].Address()
|
||||||
}
|
}
|
||||||
cfg := tracing.Config{
|
cfg := tracing.Config{
|
||||||
Enabled: a.config().GetBool(cfgTracingEnabled),
|
Enabled: a.cfg.GetBool(cfgTracingEnabled),
|
||||||
Exporter: tracing.Exporter(a.config().GetString(cfgTracingExporter)),
|
Exporter: tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
|
||||||
Endpoint: a.config().GetString(cfgTracingEndpoint),
|
Endpoint: a.cfg.GetString(cfgTracingEndpoint),
|
||||||
Service: "frostfs-http-gw",
|
Service: "frostfs-http-gw",
|
||||||
InstanceID: instanceID,
|
InstanceID: instanceID,
|
||||||
Version: Version,
|
Version: Version,
|
||||||
}
|
}
|
||||||
|
|
||||||
if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
|
|
||||||
caBytes, err := os.ReadFile(trustedCa)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
certPool := x509.NewCertPool()
|
|
||||||
ok := certPool.AppendCertsFromPEM(caBytes)
|
|
||||||
if !ok {
|
|
||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cfg.ServerCaCertPool = certPool
|
|
||||||
}
|
|
||||||
|
|
||||||
attributes, err := fetchTracingAttributes(a.config())
|
|
||||||
if err != nil {
|
|
||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
cfg.Attributes = attributes
|
|
||||||
|
|
||||||
updated, err := tracing.Setup(ctx, cfg)
|
updated, err := tracing.Setup(ctx, cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
|
||||||
|
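The trusted-CA branch added above is plain crypto/x509 plumbing. Isolated from the app wiring, the same logic looks like this (a runnable sketch; the file path is hypothetical):

package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

// loadCertPool mirrors the initTracing steps: read a PEM bundle and
// build a cert pool for the exporter's TLS client.
// "/etc/frostfs/ca.pem" is a made-up path for illustration.
func loadCertPool(path string) (*x509.CertPool, error) {
	caBytes, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caBytes) {
		return nil, fmt.Errorf("can't fill cert pool by ca cert")
	}
	return pool, nil
}

func main() {
	if _, err := loadCertPool("/etc/frostfs/ca.pem"); err != nil {
		fmt.Println(err)
	}
}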
@@ -985,7 +753,7 @@ func (a *app) setRuntimeParameters() {
		return
		return
	}
	}

	softMemoryLimit := fetchSoftMemoryLimit(a.config())
	softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
	previous := debug.SetMemoryLimit(softMemoryLimit)
	previous := debug.SetMemoryLimit(softMemoryLimit)
	if softMemoryLimit != previous {
	if softMemoryLimit != previous {
		a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
		a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,

@@ -994,6 +762,39 @@ func (a *app) setRuntimeParameters() {
	}
	}
}
}

func (s *appSettings) NamespaceHeader() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.namespaceHeader
}

func (s *appSettings) setNamespaceHeader(nsHeader string) {
	s.mu.Lock()
	s.namespaceHeader = nsHeader
	s.mu.Unlock()
}

func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
	s.mu.RLock()
	namespaces := s.defaultNamespaces
	s.mu.RUnlock()
	if slices.Contains(namespaces, ns) {
		return v2container.SysAttributeZoneDefault, true
	}

	return ns + ".ns", false
}

func (s *appSettings) setDefaultNamespaces(namespaces []string) {
	for i := range namespaces { // allows setting the namespaces in an env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
		namespaces[i] = strings.Trim(namespaces[i], "\"")
	}

	s.mu.Lock()
	s.defaultNamespaces = namespaces
	s.mu.Unlock()
}

func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) {
func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) {
	go func() {
	go func() {
		t := time.NewTicker(a.settings.reconnectInterval)
		t := time.NewTicker(a.settings.reconnectInterval)
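To make the zone mapping introduced above concrete: namespaces listed in resolve_bucket.default_namespaces resolve to the default zone, everything else gets a ".ns" suffix. A runnable stand-in (the default-zone literal "container" is an assumption; the real code returns v2container.SysAttributeZoneDefault):

package main

import "fmt"

// formContainerZone is a minimal stand-in for the appSettings logic,
// assuming the default configuration default_namespaces = ["", "root"].
func formContainerZone(ns string, defaults []string) (string, bool) {
	for _, d := range defaults {
		if d == ns {
			return "container", true // v2container.SysAttributeZoneDefault in the real code
		}
	}
	return ns + ".ns", false
}

func main() {
	fmt.Println(formContainerZone("root", []string{"", "root"}))    // default zone, true
	fmt.Println(formContainerZone("tenant1", []string{"", "root"})) // tenant1.ns, false
}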
@@ -14,11 +14,10 @@ import (
	"net/http"
	"net/http"
	"os"
	"os"
	"sort"
	"sort"
	"strings"
	"testing"
	"testing"
	"time"
	"time"

	containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
	containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"

@@ -29,12 +28,13 @@ import (
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	docker "github.com/docker/docker/api/types/container"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/nspcc-dev/neo-go/pkg/wallet"
	"github.com/spf13/viper"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/require"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
	"github.com/testcontainers/testcontainers-go/wait"
	"go.uber.org/zap/zapcore"
)
)

type putResponse struct {
type putResponse struct {

@@ -50,12 +50,11 @@ const (

func TestIntegration(t *testing.T) {
func TestIntegration(t *testing.T) {
	rootCtx := context.Background()
	rootCtx := context.Background()
	aioImage := "git.frostfs.info/truecloudlab/frostfs-aio:"
	aioImage := "truecloudlab/frostfs-aio:"
	versions := []string{
	versions := []string{
		"1.2.7",
		"1.2.7",
		"1.3.0",
		"1.3.0",
		"1.5.0",
		"1.5.0",
		"1.6.5",
	}
	}
	key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
	key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
	require.NoError(t, err)
	require.NoError(t, err)

@@ -72,28 +71,21 @@ func TestIntegration(t *testing.T) {
		ctx, cancel2 := context.WithCancel(rootCtx)
		ctx, cancel2 := context.WithCancel(rootCtx)

		aioContainer := createDockerContainer(ctx, t, aioImage+version)
		aioContainer := createDockerContainer(ctx, t, aioImage+version)
		if strings.HasPrefix(version, "1.6") {
			registerUser(t, ctx, aioContainer, file.Name())
		}

		// See the logs from the command execution.
		server, cancel := runServer(file.Name())
		server, cancel := runServer(file.Name())
		clientPool := getPool(ctx, t, key)
		clientPool := getPool(ctx, t, key)
		CID, err := createContainer(ctx, t, clientPool, ownerID)
		CID, err := createContainer(ctx, t, clientPool, ownerID, version)
		require.NoError(t, err, version)
		require.NoError(t, err, version)

		jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
		token := makeBearerToken(t, key, ownerID, version)

		t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID) })
		t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
		t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) })
		t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
		t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) })
		t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
		t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) })
		t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) })
		t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
		t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
		t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID) })
		t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
		t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
		t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
		t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
		t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
		t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
		t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })

		cancel()
		cancel()
		server.Wait()
		server.Wait()
@@ -107,16 +99,17 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
	cancelCtx, cancel := context.WithCancel(context.Background())
	cancelCtx, cancel := context.WithCancel(context.Background())

	v := getDefaultConfig()
	v := getDefaultConfig()
	v.config().Set(cfgWalletPath, pathToWallet)
	v.Set(cfgWalletPath, pathToWallet)
	v.config().Set(cfgWalletPassphrase, "")
	v.Set(cfgWalletPassphrase, "")

	application := newApp(cancelCtx, v)
	l, lvl := newStdoutLogger(zapcore.DebugLevel)
	application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
	go application.Serve()
	go application.Serve()

	return application, cancel
	return application, cancel
}
}

func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID) {
func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, version string) {
	url := testHost + "/upload/" + CID.String()
	url := testHost + "/upload/" + CID.String()
	makePutRequestAndCheck(ctx, t, p, CID, url)
	makePutRequestAndCheck(ctx, t, p, CID, url)

@@ -264,7 +257,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
}
}

func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
	content := "content of file"
	content := "content of file"
	attributes := map[string]string{
	attributes := map[string]string{
		"some-attr": "some-get-value",
		"some-attr": "some-get-value",

@@ -311,7 +304,7 @@ func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, a
	}
	}
}
}

func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
	keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
	keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
	content := "content of file"
	content := "content of file"
	attributes := map[string]string{keyAttr: valAttr}
	attributes := map[string]string{keyAttr: valAttr}

@@ -333,7 +326,7 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
	checkGetByAttrResponse(t, resp, content, expectedAttr)
	checkGetByAttrResponse(t, resp, content, expectedAttr)
}
}

func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
	names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
	names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
	contents := []string{"content of file1", "content of file2"}
	contents := []string{"content of file1", "content of file2"}
	attributes1 := map[string]string{object.AttributeFilePath: names[0]}
	attributes1 := map[string]string{object.AttributeFilePath: names[0]}

@@ -398,7 +391,7 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
	}
	}
}
}

func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
	content := "content of file"
	content := "content of file"
	attributes := map[string]string{
	attributes := map[string]string{
		"some-attr": "some-get-value",
		"some-attr": "some-get-value",

@@ -435,12 +428,10 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
	req := testcontainers.ContainerRequest{
	req := testcontainers.ContainerRequest{
		Image: image,
		Image: image,
		WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(2 * time.Minute),
		WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(30 * time.Second),
		Name: "aio",
		Name: "aio",
		Hostname: "aio",
		Hostname: "aio",
		HostConfigModifier: func(hc *docker.HostConfig) {
		NetworkMode: "host",
			hc.NetworkMode = "host"
		},
	}
	}
	aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
	aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		ContainerRequest: req,

@@ -451,14 +442,14 @@ func createDockerContainer(ctx context.Context, t *testing.T, image string) test
	return aioC
	return aioC
}
}

func getDefaultConfig() *appCfg {
func getDefaultConfig() *viper.Viper {
	v := settings()
	v := settings()
	v.config().SetDefault(cfgPeers+".0.address", "localhost:8080")
	v.SetDefault(cfgPeers+".0.address", "localhost:8080")
	v.config().SetDefault(cfgPeers+".0.weight", 1)
	v.SetDefault(cfgPeers+".0.weight", 1)
	v.config().SetDefault(cfgPeers+".0.priority", 1)
	v.SetDefault(cfgPeers+".0.priority", 1)

	v.config().SetDefault(cfgRPCEndpoint, "http://localhost:30333")
	v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
	v.config().SetDefault("server.0.address", testListenAddress)
	v.SetDefault("server.0.address", testListenAddress)

	return v
	return v
}
}

@@ -477,7 +468,7 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
	return clientPool
	return clientPool
}
}

func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) (cid.ID, error) {
	var policy netmap.PlacementPolicy
	var policy netmap.PlacementPolicy
	err := policy.DecodeString("REP 1")
	err := policy.DecodeString("REP 1")
	require.NoError(t, err)
	require.NoError(t, err)

@@ -534,22 +525,10 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
	id, err := clientPool.PutObject(ctx, prm)
	id, err := clientPool.PutObject(ctx, prm)
	require.NoError(t, err)
	require.NoError(t, err)

	return id.ObjectID
	return id
}
}

func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers.Container, pathToWallet string) {
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
	err := aioContainer.CopyFileToContainer(ctx, pathToWallet, "/usr/wallet.json", 644)
	require.NoError(t, err)

	_, _, err = aioContainer.Exec(ctx, []string{
		"/usr/bin/frostfs-s3-authmate", "register-user",
		"--wallet", "/usr/wallet.json",
		"--rpc-endpoint", "http://localhost:30333",
		"--contract-wallet", "/config/s3-gw-wallet.json"})
	require.NoError(t, err)
}

func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
	tkn := new(bearer.Token)
	tkn := new(bearer.Token)
	tkn.ForUser(ownerID)
	tkn.ForUser(ownerID)
	tkn.SetExp(10000)
	tkn.SetExp(10000)

@@ -563,16 +542,10 @@ func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, versi
	err := tkn.Sign(key.PrivateKey)
	err := tkn.Sign(key.PrivateKey)
	require.NoError(t, err)
	require.NoError(t, err)

	jsonToken, err := tkn.MarshalJSON()
	t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
	require.NoError(t, err)
	require.NotEmpty(t, t64)

	jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken)
	return t64
	binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal())

	require.NotEmpty(t, jsonTokenBase64)
	require.NotEmpty(t, binaryTokenBase64)

	return
}
}
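The base64 strings produced here are what the putWithBearerTokenInHeader/putWithBearerTokenInCookie helpers send and what the gateway's tokenizer middleware decodes. How the token travels over HTTP is not part of this hunk; a hedged sketch of the usual transport (the header and cookie names are assumptions, not confirmed by this diff):

package main

import (
	"fmt"
	"net/http"
)

// withBearer attaches a base64-encoded bearer token to a request,
// either as "Authorization: Bearer <token>" or as a "Bearer" cookie —
// the two transports the tests above appear to exercise.
func withBearer(req *http.Request, token64 string, inCookie bool) {
	if inCookie {
		req.AddCookie(&http.Cookie{Name: "Bearer", Value: token64})
		return
	}
	req.Header.Set("Authorization", "Bearer "+token64)
}

func main() {
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/upload/CID", nil)
	withBearer(req, "base64-token-here", false)
	fmt.Println(req.Header.Get("Authorization"))
}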
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {

@@ -8,9 +8,10 @@ import (
func main() {
func main() {
	globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	cfg := settings()
	v := settings()
	logger, atomicLevel := pickLogger(v)

	application := newApp(globalContext, cfg)
	application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
	go application.Serve()
	go application.Serve()
	application.Wait()
	application.Wait()
}
}
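One side of this diff passes the config to newApp directly; the other uses functional options (WithLogger, WithConfig). The option definitions themselves are outside the diff, so the shape below is the conventional pattern such a call implies, with a stand-in struct whose field names are illustrative:

package main

import (
	"github.com/spf13/viper"
	"go.uber.org/zap"
)

// app is a stand-in for the gateway's app type; real fields differ.
type app struct {
	cfg      *viper.Viper
	log      *zap.Logger
	logLevel zap.AtomicLevel
}

// Option mutates the app during construction, enabling calls like
// newApp(ctx, WithLogger(l, lvl), WithConfig(v)).
type Option func(*app)

func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
	return func(a *app) { a.log = l; a.logLevel = lvl }
}

func WithConfig(v *viper.Viper) Option {
	return func(a *app) { a.cfg = v }
}

func main() {
	a := &app{}
	for _, opt := range []Option{WithConfig(viper.New()), WithLogger(zap.NewNop(), zap.NewAtomicLevel())} {
		opt(a)
	}
}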
@@ -4,7 +4,6 @@ import (
	"context"
	"context"
	"encoding/hex"
	"encoding/hex"
	"fmt"
	"fmt"
	"io"
	"math"
	"math"
	"os"
	"os"
	"path"
	"path"

@@ -12,18 +11,16 @@ import (
	"sort"
	"sort"
	"strconv"
	"strconv"
	"strings"
	"strings"
	"sync"
	"time"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
	grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
	"git.frostfs.info/TrueCloudLab/zapjournald"
	"git.frostfs.info/TrueCloudLab/zapjournald"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/spf13/pflag"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
	"github.com/spf13/viper"
	"github.com/ssgreg/journald"
	"github.com/ssgreg/journald"

@@ -44,8 +41,6 @@ const (
	defaultConnectTimeout = 10 * time.Second
	defaultConnectTimeout = 10 * time.Second
	defaultStreamTimeout = 10 * time.Second
	defaultStreamTimeout = 10 * time.Second

	defaultLoggerSamplerInterval = 1 * time.Second

	defaultShutdownTimeout = 15 * time.Second
	defaultShutdownTimeout = 15 * time.Second

	defaultPoolErrorThreshold uint32 = 100
	defaultPoolErrorThreshold uint32 = 100

@@ -58,10 +53,6 @@ const (

	defaultReconnectInterval = time.Minute
	defaultReconnectInterval = time.Minute

	defaultCORSMaxAge = 600 // seconds

	defaultMultinetFallbackDelay = 300 * time.Millisecond

	cfgServer = "server"
	cfgServer = "server"
	cfgTLSEnabled = "tls.enabled"
	cfgTLSEnabled = "tls.enabled"
	cfgTLSCertFile = "tls.cert_file"
	cfgTLSCertFile = "tls.cert_file"

@@ -69,11 +60,6 @@ const (

	cfgReconnectInterval = "reconnect_interval"
	cfgReconnectInterval = "reconnect_interval"

	cfgIndexPageEnabled = "index_page.enabled"
	cfgIndexPageTemplatePath = "index_page.template_path"

	cfgWorkerPoolSize = "worker_pool_size"

	// Web.
	// Web.
	cfgWebReadBufferSize = "web.read_buffer_size"
	cfgWebReadBufferSize = "web.read_buffer_size"
	cfgWebWriteBufferSize = "web.write_buffer_size"
	cfgWebWriteBufferSize = "web.write_buffer_size"

@@ -92,8 +78,6 @@ const (
	cfgTracingEnabled = "tracing.enabled"
	cfgTracingEnabled = "tracing.enabled"
	cfgTracingExporter = "tracing.exporter"
	cfgTracingExporter = "tracing.exporter"
	cfgTracingEndpoint = "tracing.endpoint"
	cfgTracingEndpoint = "tracing.endpoint"
	cfgTracingTrustedCa = "tracing.trusted_ca"
	cfgTracingAttributes = "tracing.attributes"

	// Pool config.
	// Pool config.
	cfgConTimeout = "connect_timeout"
	cfgConTimeout = "connect_timeout"

@@ -106,11 +90,6 @@ const (
	cfgLoggerLevel = "logger.level"
	cfgLoggerLevel = "logger.level"
	cfgLoggerDestination = "logger.destination"
	cfgLoggerDestination = "logger.destination"

	cfgLoggerSamplingEnabled = "logger.sampling.enabled"
	cfgLoggerSamplingInitial = "logger.sampling.initial"
	cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
	cfgLoggerSamplingInterval = "logger.sampling.interval"

	// Wallet.
	// Wallet.
	cfgWalletPassphrase = "wallet.passphrase"
	cfgWalletPassphrase = "wallet.passphrase"
	cfgWalletPath = "wallet.path"
	cfgWalletPath = "wallet.path"

@@ -129,13 +108,8 @@ const (
	cfgResolveOrder = "resolve_order"
	cfgResolveOrder = "resolve_order"

	// Zip compression.
	// Zip compression.
	//
	// Deprecated: Use cfgArchiveCompression instead.
	cfgZipCompression = "zip.compression"
	cfgZipCompression = "zip.compression"

	// Archive compression.
	cfgArchiveCompression = "archive.compression"

	// Runtime.
	// Runtime.
	cfgSoftMemoryLimit = "runtime.soft_memory_limit"
	cfgSoftMemoryLimit = "runtime.soft_memory_limit"

@@ -150,31 +124,11 @@ const (
	// Caching.
	// Caching.
	cfgBucketsCacheLifetime = "cache.buckets.lifetime"
	cfgBucketsCacheLifetime = "cache.buckets.lifetime"
	cfgBucketsCacheSize = "cache.buckets.size"
	cfgBucketsCacheSize = "cache.buckets.size"
	cfgNetmapCacheLifetime = "cache.netmap.lifetime"

	// Bucket resolving options.
	// Bucket resolving options.
	cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
	cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
	cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
	cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"

	// CORS.
	cfgCORSAllowOrigin = "cors.allow_origin"
	cfgCORSAllowMethods = "cors.allow_methods"
	cfgCORSAllowHeaders = "cors.allow_headers"
	cfgCORSExposeHeaders = "cors.expose_headers"
	cfgCORSAllowCredentials = "cors.allow_credentials"
	cfgCORSMaxAge = "cors.max_age"

	// Multinet.
	cfgMultinetEnabled = "multinet.enabled"
	cfgMultinetBalancer = "multinet.balancer"
	cfgMultinetRestrict = "multinet.restrict"
	cfgMultinetFallbackDelay = "multinet.fallback_delay"
	cfgMultinetSubnets = "multinet.subnets"

	// Feature.
	cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
	cfgFeaturesTreePoolNetmapSupport = "features.tree_pool_netmap_support"

	// Command line args.
	// Command line args.
	cmdHelp = "help"
	cmdHelp = "help"
	cmdVersion = "version"
	cmdVersion = "version"

@@ -193,77 +147,14 @@ var ignore = map[string]struct{}{
	cmdVersion: {},
	cmdVersion: {},
}
}
type Logger struct {
func settings() *viper.Viper {
	logger *zap.Logger
	lvl zap.AtomicLevel
}

type appCfg struct {
	flags *pflag.FlagSet

	mu sync.RWMutex
	settings *viper.Viper
}

func (a *appCfg) reload() error {
	old := a.config()

	v, err := newViper(a.flags)
	if err != nil {
		return err
	}

	if old.IsSet(cmdConfig) {
		v.Set(cmdConfig, old.Get(cmdConfig))
	}
	if old.IsSet(cmdConfigDir) {
		v.Set(cmdConfigDir, old.Get(cmdConfigDir))
	}

	if err = readInConfig(v); err != nil {
		return err
	}

	a.setConfig(v)
	return nil
}

func (a *appCfg) config() *viper.Viper {
	a.mu.RLock()
	defer a.mu.RUnlock()

	return a.settings
}

func (a *appCfg) setConfig(v *viper.Viper) {
	a.mu.Lock()
	a.settings = v
	a.mu.Unlock()
}
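The point of the appCfg wrapper is that a running process can rebuild its viper instance and swap it in atomically while readers keep calling the RWMutex-guarded config(). The gateway's actual reload trigger is not part of this hunk; a runnable sketch of typical SIGHUP wiring under that assumption:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// watch invokes reload on every SIGHUP; in the gateway the callback
// would be cfg.reload from the appCfg above (trigger assumed, not
// confirmed by this diff).
func watch(reload func() error) {
	sighup := make(chan os.Signal, 1)
	signal.Notify(sighup, syscall.SIGHUP)
	go func() {
		for range sighup {
			if err := reload(); err != nil {
				log.Printf("config reload failed: %v", err)
			}
		}
	}()
}

func main() {
	watch(func() error { return nil }) // stand-in reload callback
	select {}                          // block so the demo keeps running
}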
func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
	v := viper.New()
	v := viper.New()

	v.AutomaticEnv()
	v.AutomaticEnv()
	v.SetEnvPrefix(Prefix)
	v.SetEnvPrefix(Prefix)
	v.AllowEmptyEnv(true)
	v.AllowEmptyEnv(true)
	v.SetConfigType("yaml")
	v.SetConfigType("yaml")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

	if err := bindFlags(v, flags); err != nil {
		return nil, err
	}

	setDefaults(v, flags)

	if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
		v.Set(cfgServer+".0."+cfgTLSEnabled, true)
	}

	return v, nil
}

func settings() *appCfg {
	// flags setup:
	// flags setup:
	flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
	flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
	flags.SetOutput(os.Stdout)
	flags.SetOutput(os.Stdout)

@@ -287,17 +178,84 @@ func settings() *appCfg {
	flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
	flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
	flags.String(cfgTLSCertFile, "", "TLS certificate path")
	flags.String(cfgTLSCertFile, "", "TLS certificate path")
	flags.String(cfgTLSKeyFile, "", "TLS key path")
	flags.String(cfgTLSKeyFile, "", "TLS key path")
	flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
	peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")

	flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
	resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")

	// set defaults:

	// logger:
	v.SetDefault(cfgLoggerLevel, "debug")
	v.SetDefault(cfgLoggerDestination, "stdout")

	// pool:
	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

	// frostfs:
	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)

	// web-server:
	v.SetDefault(cfgWebReadBufferSize, 4096)
	v.SetDefault(cfgWebWriteBufferSize, 4096)
	v.SetDefault(cfgWebReadTimeout, time.Minute*10)
	v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
	v.SetDefault(cfgWebStreamRequestBody, true)
	v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)

	// upload header
	v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)

	// zip:
	v.SetDefault(cfgZipCompression, false)

	// metrics
	v.SetDefault(cfgPprofAddress, "localhost:8083")
	v.SetDefault(cfgPrometheusAddress, "localhost:8084")

	// resolve bucket
	v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
	v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})

	// Binding flags
	if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
		panic(err)
	}
	if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
		panic(err)
	}

	if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
		panic(err)
	}

	if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
		panic(err)
	}

	if err := v.BindPFlags(flags); err != nil {
		panic(err)
	}

	if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
		panic(err)
	}
	if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
		panic(err)
	}
	if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
		panic(err)
	}

	if err := flags.Parse(os.Args); err != nil {
	if err := flags.Parse(os.Args); err != nil {
		panic(err)
		panic(err)
	}
	}

	v, err := newViper(flags)
	if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
	if err != nil {
		v.Set(cfgServer+".0."+cfgTLSEnabled, true)
		panic(fmt.Errorf("bind flags: %w", err))
	}

	if resolveMethods != nil {
		v.SetDefault(cfgResolveOrder, *resolveMethods)
	}
	}

	switch {
	switch {

@@ -342,97 +300,15 @@ func settings() *appCfg {
		panic(err)
		panic(err)
	}
	}

	return &appCfg{
	if peers != nil && len(*peers) > 0 {
		flags: flags,
		for i := range *peers {
		settings: v,
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
	}
}

func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
	// set defaults:

	// logger:
	v.SetDefault(cfgLoggerLevel, "debug")
	v.SetDefault(cfgLoggerDestination, "stdout")
	v.SetDefault(cfgLoggerSamplingEnabled, false)
	v.SetDefault(cfgLoggerSamplingThereafter, 100)
	v.SetDefault(cfgLoggerSamplingInitial, 100)
	v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)

	// pool:
	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

	// frostfs:
	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)

	// web-server:
	v.SetDefault(cfgWebReadBufferSize, 4096)
	v.SetDefault(cfgWebWriteBufferSize, 4096)
	v.SetDefault(cfgWebReadTimeout, time.Minute*10)
	v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
	v.SetDefault(cfgWebStreamRequestBody, true)
	v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)

	v.SetDefault(cfgWorkerPoolSize, 1000)
	// upload header
	v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)

	// metrics
	v.SetDefault(cfgPprofAddress, "localhost:8083")
	v.SetDefault(cfgPrometheusAddress, "localhost:8084")

	// resolve bucket
	v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
	v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})

	// multinet
	v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)

	if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
		v.SetDefault(cfgResolveOrder, resolveMethods)
	}

	if peers, err := flags.GetStringArray(cfgPeers); err == nil {
		for i := range peers {
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
			v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
		}
		}
	}
	}
}

func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
	return v
	// Binding flags
	if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
		return err
	}
	if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
		return err
	}

	if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
		return err
	}

	if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
		return err
	}

	if err := v.BindPFlags(flags); err != nil {
		return err
	}

	if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
		return err
	}
	if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
		return err
	}
	if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
		return err
	}

	return nil
}
}

func readInConfig(v *viper.Viper) error {
func readInConfig(v *viper.Viper) error {

@@ -499,11 +375,7 @@ func mergeConfig(v *viper.Viper, fileName string) error {
	return v.MergeConfig(cfgFile)
	return v.MergeConfig(cfgFile)
}
}

type LoggerAppSettings interface {
func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
	DroppedLogsInc()
}

func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {
	lvl, err := getLogLevel(v)
	lvl, err := getLogLevel(v)
	if err != nil {
	if err != nil {
		panic(err)
		panic(err)
@@ -513,9 +385,9 @@ func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {

	switch dest {
	switch dest {
	case destinationStdout:
	case destinationStdout:
		return newStdoutLogger(v, lvl, settings)
		return newStdoutLogger(lvl)
	case destinationJournald:
	case destinationJournald:
		return newJournaldLogger(v, lvl, settings)
		return newJournaldLogger(lvl)
	default:
	default:
		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
	}
	}

@@ -532,60 +404,39 @@ func pickLogger(v *viper.Viper, settings LoggerAppSettings) *Logger {
// Logger records a stack trace for all messages at or above fatal level.
// Logger records a stack trace for all messages at or above fatal level.
//
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
func newStdoutLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	stdout := zapcore.AddSync(os.Stderr)
	c := zap.NewProductionConfig()
	level := zap.NewAtomicLevelAt(lvl)
	c.Level = zap.NewAtomicLevelAt(lvl)
	c.Encoding = "console"
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
	l, err := c.Build(
	consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, settings)
		zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
	)
	return &Logger{
	if err != nil {
		logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
		panic(fmt.Sprintf("build zap logger instance: %v", err))
		lvl: level,
	}
}

func newJournaldLogger(v *viper.Viper, lvl zapcore.Level, settings LoggerAppSettings) *Logger {
	return l, c.Level
	level := zap.NewAtomicLevelAt(lvl)
}

	encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
func newJournaldLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
	c := zap.NewProductionConfig()
	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	c.Level = zap.NewAtomicLevelAt(lvl)

	core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)

	core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
	coreWithContext := core.With([]zapcore.Field{
	coreWithContext := core.With([]zapcore.Field{
		zapjournald.SyslogFacility(zapjournald.LogDaemon),
		zapjournald.SyslogFacility(zapjournald.LogDaemon),
		zapjournald.SyslogIdentifier(),
		zapjournald.SyslogIdentifier(),
		zapjournald.SyslogPid(),
		zapjournald.SyslogPid(),
	})
	})

	coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, settings)
	l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))

	return &Logger{
	return l, c.Level
		logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
		lvl: level,
	}
}

func newLogEncoder() zapcore.Encoder {
	c := zap.NewProductionEncoderConfig()
	c.EncodeTime = zapcore.ISO8601TimeEncoder

	return zapcore.NewConsoleEncoder(c)
}

func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, settings LoggerAppSettings) zapcore.Core {
	if v.GetBool(cfgLoggerSamplingEnabled) {
		core = zapcore.NewSamplerWithOptions(core,
			v.GetDuration(cfgLoggerSamplingInterval),
			v.GetInt(cfgLoggerSamplingInitial),
			v.GetInt(cfgLoggerSamplingThereafter),
			zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
				if dec&zapcore.LogDropped > 0 {
					settings.DroppedLogsInc()
				}
			}))
	}

	return core
}
}

func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
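The sampler middleware introduced above keeps the first `initial` identical entries per `interval` and then every `thereafter`-th one, with the hook counting what gets dropped. A runnable standalone sketch of the same zapcore API (the numbers here are illustrative, not the gateway's defaults):

package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var dropped int64
	base := zap.NewExample().Core()
	// Keep the first 2 identical entries per second, then every 100th;
	// the hook counts dropped records, as DroppedLogsInc does above.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 2, 100,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			if dec&zapcore.LogDropped > 0 {
				dropped++
			}
		}))
	log := zap.New(sampled)
	for i := 0; i < 10; i++ {
		log.Info("ping") // only a few of these survive sampling
	}
	_ = log.Sync()
}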
@@ -616,46 +467,6 @@ func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
	return reconnect
	return reconnect
}
}

func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
	if !v.GetBool(cfgIndexPageEnabled) {
		return "", false
	}

	reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
	if err != nil {
		l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
		return "", true
	}

	tmpl, err := io.ReadAll(reader)
	if err != nil {
		l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
		return "", true
	}

	l.Info(logs.SetCustomIndexPageTemplate)
	return string(tmpl), true
}

func fetchDefaultNamespaces(v *viper.Viper) []string {
	namespaces := v.GetStringSlice(cfgResolveDefaultNamespaces)

	for i := range namespaces { // allows setting the namespaces in an env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
		namespaces[i] = strings.Trim(namespaces[i], "\"")
	}

	return namespaces
}

func fetchCORSMaxAge(v *viper.Viper) int {
	maxAge := v.GetInt(cfgCORSMaxAge)
	if maxAge <= 0 {
		maxAge = defaultCORSMaxAge
	}

	return maxAge
}

func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
	var servers []ServerInfo
	var servers []ServerInfo
	seen := make(map[string]struct{})
	seen := make(map[string]struct{})

@@ -684,10 +495,10 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
	return servers
	return servers
}
}
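The quote-trimming in fetchDefaultNamespaces above exists because viper splits the env value `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"` into tokens that still carry their quote characters. A runnable sketch of exactly that cleanup step:

package main

import (
	"fmt"
	"strings"
)

// The raw tokens viper yields for `"" "root"` still include quotes;
// trimming them recovers the intended values "" and "root".
func main() {
	raw := []string{`""`, `"root"`}
	for i := range raw {
		raw[i] = strings.Trim(raw[i], `"`)
	}
	fmt.Printf("%q\n", raw) // ["" "root"]
}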
func (a *app) initPools(ctx context.Context) {
func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
	key, err := getFrostFSKey(a.config(), a.log)
	key, err := getFrostFSKey(cfg, logger)
	if err != nil {
	if err != nil {
		a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
		logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
	}
	}

	var prm pool.InitParameters
	var prm pool.InitParameters

@@ -695,83 +506,82 @@ func (a *app) initPools(ctx context.Context) {

	prm.SetKey(&key.PrivateKey)
	prm.SetKey(&key.PrivateKey)
	prmTree.SetKey(key)
	prmTree.SetKey(key)
	a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))
	logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))

	for _, peer := range fetchPeers(a.log, a.config()) {
	for _, peer := range fetchPeers(logger, cfg) {
		prm.AddNode(peer)
		prm.AddNode(peer)
		prmTree.AddNode(peer)
		prmTree.AddNode(peer)
	}
	}

	connTimeout := a.config().GetDuration(cfgConTimeout)
	connTimeout := cfg.GetDuration(cfgConTimeout)
	if connTimeout <= 0 {
	if connTimeout <= 0 {
		connTimeout = defaultConnectTimeout
		connTimeout = defaultConnectTimeout
	}
	}
	prm.SetNodeDialTimeout(connTimeout)
	prm.SetNodeDialTimeout(connTimeout)
	prmTree.SetNodeDialTimeout(connTimeout)
	prmTree.SetNodeDialTimeout(connTimeout)

	streamTimeout := a.config().GetDuration(cfgStreamTimeout)
	streamTimeout := cfg.GetDuration(cfgStreamTimeout)
	if streamTimeout <= 0 {
	if streamTimeout <= 0 {
		streamTimeout = defaultStreamTimeout
		streamTimeout = defaultStreamTimeout
	}
	}
	prm.SetNodeStreamTimeout(streamTimeout)
	prm.SetNodeStreamTimeout(streamTimeout)
	prmTree.SetNodeStreamTimeout(streamTimeout)
	prmTree.SetNodeStreamTimeout(streamTimeout)

	healthCheckTimeout := a.config().GetDuration(cfgReqTimeout)
	healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
	if healthCheckTimeout <= 0 {
	if healthCheckTimeout <= 0 {
		healthCheckTimeout = defaultRequestTimeout
		healthCheckTimeout = defaultRequestTimeout
	}
	}
	prm.SetHealthcheckTimeout(healthCheckTimeout)
	prm.SetHealthcheckTimeout(healthCheckTimeout)
	prmTree.SetHealthcheckTimeout(healthCheckTimeout)
	prmTree.SetHealthcheckTimeout(healthCheckTimeout)

	rebalanceInterval := a.config().GetDuration(cfgRebalance)
	rebalanceInterval := cfg.GetDuration(cfgRebalance)
	if rebalanceInterval <= 0 {
	if rebalanceInterval <= 0 {
		rebalanceInterval = defaultRebalanceTimer
		rebalanceInterval = defaultRebalanceTimer
	}
	}
	prm.SetClientRebalanceInterval(rebalanceInterval)
	prm.SetClientRebalanceInterval(rebalanceInterval)
	prmTree.SetClientRebalanceInterval(rebalanceInterval)
	prmTree.SetClientRebalanceInterval(rebalanceInterval)

	errorThreshold := a.config().GetUint32(cfgPoolErrorThreshold)
	errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
	if errorThreshold <= 0 {
	if errorThreshold <= 0 {
		errorThreshold = defaultPoolErrorThreshold
		errorThreshold = defaultPoolErrorThreshold
	}
	}
	prm.SetErrorThreshold(errorThreshold)
	prm.SetErrorThreshold(errorThreshold)
	prm.SetLogger(a.log)
	prm.SetLogger(logger)
	prmTree.SetLogger(a.log)
	prmTree.SetLogger(logger)

	prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))
	prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))

	var apiGRPCDialOpts []grpc.DialOption
	var treeGRPCDialOpts []grpc.DialOption
	if cfg.GetBool(cfgTracingEnabled) {
	interceptors := []grpc.DialOption{
		interceptors := []grpc.DialOption{
		grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
			grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
		grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
			grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
		grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
	}
		}
	prm.SetGRPCDialOptions(interceptors...)
		treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
	prmTree.SetGRPCDialOptions(interceptors...)
		apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
	}
	prm.SetGRPCDialOptions(apiGRPCDialOpts...)
	prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)

	p, err := pool.NewPool(prm)
	p, err := pool.NewPool(prm)
	if err != nil {
	if err != nil {
		a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
		logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
	}
	}

	if err = p.Dial(ctx); err != nil {
	if err = p.Dial(ctx); err != nil {
		a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
		logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
	}

	if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
		prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p), cache.NewNetmapCache(getNetmapCacheOptions(a.config(), a.log)), a.bucketCache, a.log))
	}
	}

	treePool, err := treepool.NewPool(prmTree)
	treePool, err := treepool.NewPool(prmTree)
	if err != nil {
	if err != nil {
		a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
		logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
	}
	}
	if err = treePool.Dial(ctx); err != nil {
	if err = treePool.Dial(ctx); err != nil {
		a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err))
		logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
	}
	}

	a.pool = p
	return p, treePool, key
	a.treePool = treePool
	a.key = key
}
}

func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
@@ -812,7 +622,7 @@ func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
	return int64(softMemoryLimit)
	return int64(softMemoryLimit)
}
}

func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
	cacheCfg := cache.DefaultBucketConfig(l)
	cacheCfg := cache.DefaultBucketConfig(l)

	cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
	cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)

@@ -821,14 +631,6 @@ func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
	return cacheCfg
	return cacheCfg
}
}

func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConfig {
	cacheCfg := cache.DefaultNetmapConfig(l)

	cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgNetmapCacheLifetime, cacheCfg.Lifetime)

	return cacheCfg
}

func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
	if v.IsSet(cfgEntry) {
	if v.IsSet(cfgEntry) {
		lifetime := v.GetDuration(cfgEntry)
		lifetime := v.GetDuration(cfgEntry)

@@ -860,65 +662,3 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue

	return defaultValue
	return defaultValue
}
}

func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
	source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
	if err != nil {
		logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
	}
	return source
}

func fetchMultinetConfig(v *viper.Viper, l *zap.Logger) (cfg internalnet.Config) {
	cfg.Enabled = v.GetBool(cfgMultinetEnabled)
	cfg.Balancer = v.GetString(cfgMultinetBalancer)
	cfg.Restrict = v.GetBool(cfgMultinetRestrict)
	cfg.FallbackDelay = v.GetDuration(cfgMultinetFallbackDelay)
	cfg.Subnets = make([]internalnet.Subnet, 0, 5)
	cfg.EventHandler = internalnet.NewLogEventHandler(l)

	for i := 0; ; i++ {
		key := cfgMultinetSubnets + "." + strconv.Itoa(i) + "."
		subnet := internalnet.Subnet{}

		subnet.Prefix = v.GetString(key + "mask")
		if subnet.Prefix == "" {
			break
		}
		subnet.SourceIPs = v.GetStringSlice(key + "source_ips")
		cfg.Subnets = append(cfg.Subnets, subnet)
	}

	return
}

func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {
	attributes := make(map[string]string)
	for i := 0; ; i++ {
		key := cfgTracingAttributes + "." + strconv.Itoa(i) + "."
		attrKey := v.GetString(key + "key")
		attrValue := v.GetString(key + "value")
		if attrKey == "" {
			break
		}

		if _, ok := attributes[attrKey]; ok {
			return nil, fmt.Errorf("tracing attribute key %s defined more than once", attrKey)
		}

		if attrValue == "" {
			return nil, fmt.Errorf("empty tracing attribute value for key %s", attrKey)
		}

		attributes[attrKey] = attrValue
	}

	return attributes, nil
}

func fetchArchiveCompression(v *viper.Viper) bool {
	if v.IsSet(cfgZipCompression) {
		return v.GetBool(cfgZipCompression)
	}
	return v.GetBool(cfgArchiveCompression)
}
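Both fetchMultinetConfig and fetchTracingAttributes read list-like config through numbered keys, probing index after index until a required field comes back empty. A condensed, runnable sketch of that pattern (the sample values are made up):

package main

import (
	"fmt"
	"strconv"

	"github.com/spf13/viper"
)

// Indexed-key pattern: multinet.subnets.0.mask, multinet.subnets.1.mask,
// ... are read until the probe field is empty, which ends the list.
func main() {
	v := viper.New()
	v.Set("multinet.subnets.0.mask", "192.168.0.0/24")
	v.Set("multinet.subnets.1.mask", "10.0.0.0/8")

	for i := 0; ; i++ {
		key := "multinet.subnets." + strconv.Itoa(i) + "."
		mask := v.GetString(key + "mask")
		if mask == "" {
			break
		}
		fmt.Println(i, mask)
	}
}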
@ -1,60 +0,0 @@
package main

import (
	"os"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"github.com/stretchr/testify/require"
)

func TestConfigReload(t *testing.T) {
	f, err := os.CreateTemp("", "conf")
	require.NoError(t, err)
	defer func() {
		require.NoError(t, os.Remove(f.Name()))
	}()

	confData := `
pprof:
  enabled: true

resolve_bucket:
  default_namespaces: [""]

resolve_order:
  - nns
`

	_, err = f.WriteString(confData)
	require.NoError(t, err)
	require.NoError(t, f.Close())

	cfg := settings()

	require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--connect_timeout", "15s"}))
	require.NoError(t, cfg.reload())

	require.True(t, cfg.config().GetBool(cfgPprofEnabled))
	require.Equal(t, []string{""}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
	require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
	require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))

	require.NoError(t, os.Truncate(f.Name(), 0))
	require.NoError(t, cfg.reload())

	require.False(t, cfg.config().GetBool(cfgPprofEnabled))
	require.Equal(t, []string{"", "root"}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
	require.Equal(t, []string{resolver.NNSResolver, resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
	require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
}

func TestSetTLSEnabled(t *testing.T) {
	cfg := settings()

	require.NoError(t, cfg.flags.Parse([]string{"--" + cfgTLSCertFile, "tls.crt", "--" + cfgTLSKeyFile, "tls.key"}))
	require.NoError(t, cfg.reload())

	require.True(t, cfg.config().GetBool(cfgServer+".0."+cfgTLSEnabled))
}
@ -14,12 +14,8 @@ HTTP_GW_PPROF_ADDRESS=localhost:8083
HTTP_GW_PROMETHEUS_ENABLED=true
HTTP_GW_PROMETHEUS_ADDRESS=localhost:8084

# Logger.
# Log level.
HTTP_GW_LOGGER_LEVEL=debug
HTTP_GW_LOGGER_SAMPLING_ENABLED=false
HTTP_GW_LOGGER_SAMPLING_INITIAL=100
HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s

HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
HTTP_GW_SERVER_0_TLS_ENABLED=false

@ -97,21 +93,12 @@ HTTP_GW_REBALANCE_TIMER=30s
# The number of errors on connection after which node is considered as unhealthy
HTTP_GW_POOL_ERROR_THRESHOLD=100

# Enable archive compression to download files by common prefix.
# Enable zip compression to download files by common prefix.
# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead.
HTTP_GW_ZIP_COMPRESSION=false

# Enable archive compression to download files by common prefix.
HTTP_GW_ARCHIVE_COMPRESSION=false

HTTP_GW_TRACING_ENABLED=true
HTTP_GW_TRACING_ENDPOINT="localhost:4317"
HTTP_GW_TRACING_EXPORTER="otlp_grpc"
HTTP_GW_TRACING_TRUSTED_CA=""
HTTP_GW_TRACING_ATTRIBUTES_0_KEY=key0
HTTP_GW_TRACING_ATTRIBUTES_0_VALUE=value
HTTP_GW_TRACING_ATTRIBUTES_1_KEY=key1
HTTP_GW_TRACING_ATTRIBUTES_1_VALUE=value

HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824

@ -125,8 +112,6 @@ HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
# Cache which contains mapping of bucket name to bucket info
HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
HTTP_GW_CACHE_BUCKETS_SIZE=1000
# Cache which stores netmap
HTTP_GW_CACHE_NETMAP_LIFETIME=1m

# Header to determine zone to resolve bucket name
HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace

@ -136,37 +121,3 @@ HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
# Max attempt to make successful tree request.
# default value is 0 that means the number of attempts equals to number of nodes in pool.
HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0

HTTP_GW_CORS_ALLOW_ORIGIN="*"
HTTP_GW_CORS_ALLOW_METHODS="GET" "POST"
HTTP_GW_CORS_ALLOW_HEADERS="*"
HTTP_GW_CORS_EXPOSE_HEADERS="*"
HTTP_GW_CORS_ALLOW_CREDENTIALS=false
HTTP_GW_CORS_MAX_AGE=600

# Multinet properties
# Enable multinet support
HTTP_GW_MULTINET_ENABLED=false
# Strategy to pick source IP address
HTTP_GW_MULTINET_BALANCER=roundrobin
# Restrict requests with unknown destination subnet
HTTP_GW_MULTINET_RESTRICT=false
# Delay between ipv6 to ipv4 fallback switch
HTTP_GW_MULTINET_FALLBACK_DELAY=300ms
# List of subnets and IP addresses to use as source for those subnets
HTTP_GW_MULTINET_SUBNETS_1_MASK=1.2.3.4/24
HTTP_GW_MULTINET_SUBNETS_1_SOURCE_IPS=1.2.3.4 1.2.3.5

# Number of workers in handler's worker pool
HTTP_GW_WORKER_POOL_SIZE=1000

# Index page
# Enable index page support
HTTP_GW_INDEX_PAGE_ENABLED=false
# Index page template path
HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl

# Enable using fallback path to search for a object by attribute
HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
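These variables follow the usual viper prefix-plus-underscore mapping (e.g. `HTTP_GW_CACHE_BUCKETS_LIFETIME` corresponds to the `cache.buckets.lifetime` config key). A minimal sketch of how such a binding is commonly wired; the gateway's exact setup may differ:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	os.Setenv("HTTP_GW_CACHE_BUCKETS_LIFETIME", "1m")

	v := viper.New()
	v.SetEnvPrefix("HTTP_GW")
	v.AutomaticEnv()
	// Dots in config keys become underscores in environment variables.
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

	fmt.Println(v.GetDuration("cache.buckets.lifetime")) // 1m0s
}
```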
@ -9,26 +9,14 @@ pprof:
prometheus:
  enabled: false # Enable metrics.
  address: localhost:8084

tracing:
  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost:4317"
  trusted_ca: ""
  attributes:
    - key: key0
      value: value
    - key: key1
      value: value

logger:
  level: debug # Log level.
  destination: stdout
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s

server:
  - address: 0.0.0.0:8080

@ -113,22 +101,8 @@ request_timeout: 5s # Timeout to check node health during rebalance.
rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.

# Number of workers in handler's worker pool
worker_pool_size: 1000

# Enables index page to see objects list for specified container and prefix
index_page:
  enabled: false
  template_path: internal/handler/templates/index.gotmpl

# Deprecated: Use archive.compression instead
zip:
  # Enables zip compression to download files by common prefix.
  compression: false # Enable zip compression to download files by common prefix.
  compression: false

archive:
  # Enables archive compression to download files by common prefix.
  compression: false

runtime:
  soft_memory_limit: 1gb

@ -149,41 +123,7 @@ cache:
  buckets:
    lifetime: 1m
    size: 1000
  # Cache which stores netmap
  netmap:
    lifetime: 1m

resolve_bucket:
  namespace_header: X-Frostfs-Namespace
  default_namespaces: [ "", "root" ]

cors:
  allow_origin: ""
  allow_methods: []
  allow_headers: []
  expose_headers: []
  allow_credentials: false
  max_age: 600

# Multinet properties
multinet:
  # Enable multinet support
  enabled: false
  # Strategy to pick source IP address
  balancer: roundrobin
  # Restrict requests with unknown destination subnet
  restrict: false
  # Delay between ipv6 to ipv4 fallback switch
  fallback_delay: 300ms
  # List of subnets and IP addresses to use as source for those subnets
  subnets:
    - mask: 1.2.3.4/24
      source_ips:
        - 1.2.3.4
        - 1.2.3.5

features:
  # Enable using fallback path to search for a object by attribute
  enable_filepath_fallback: false
  # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
  tree_pool_netmap_support: true
36 docs/api.md

@ -1,14 +1,14 @@
# HTTP Gateway Specification

| Route | Description |
|-------------------------------------------------|--------------------------------------------------|
| `/upload/{cid}` | [Put object](#put-object) |
| `/get/{cid}/{oid}` | [Get object](#get-object) |
| `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object) |
| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}` | [Download objects in archive](#download-archive) |
| `/zip/{cid}/{prefix}` | [Download objects in archive](#download-zip) |

**Note:** `cid` parameter can be base58 encoded container ID or container name
(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)).
(the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).

Route parameters can be:

@ -18,7 +18,7 @@ Route parameters can be:

### Bearer token

All routes can accept [bearer token](./authentication.md) from:
All routes can accept [bearer token](../README.md#authentication) from:

* `Authorization` header with `Bearer` type and base64-encoded token in
  credentials field

@ -57,12 +57,10 @@ Upload file as object with attributes to FrostFS.
###### Headers

| Header | Description |
|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
| Common headers | See [bearer token](#bearer-token). |
| `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
| `X-Attribute-*` | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
| `X-Explode-Archive` | If set, gate tries to read files from uploading `tar` archive and creates an object for each file in it. Uploading `tar` could be compressed via Gzip by setting a `Content-Encoding` header. Sets a `FilePath` attribute as a relative path from archive root and a `FileName` as the last path element of the `FilePath`. |
| `Content-Encoding` | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. |
| `Date` | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
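For illustration, a minimal Go client uploading a gzip-compressed tar with `X-Explode-Archive` set. The gateway address and container ID are hypothetical, and the sketch assumes the archive is sent as the raw request body (the table above does not spell out whether a multipart form is also accepted in this mode):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Hypothetical gateway address and container ID, for illustration only.
	const endpoint = "http://localhost:8082"
	const cid = "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"

	f, err := os.Open("files.tar.gz") // gzip-compressed tar to explode
	if err != nil {
		panic(err)
	}
	defer f.Close()

	req, err := http.NewRequest(http.MethodPost, endpoint+"/upload/"+cid, f)
	if err != nil {
		panic(err)
	}
	// Ask the gate to create one object per archived file.
	req.Header.Set("X-Explode-Archive", "true")
	// Tell the gate the tar stream is gzip-compressed.
	req.Header.Set("Content-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```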
There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):

@ -97,12 +95,12 @@ The `filename` field from the multipart form will be set as `FileName` attribute

## Get object

Route: `/get/{cid}/{oid}?[download=false]`
Route: `/get/{cid}/{oid}?[download=true]`

| Route parameter | Type | Description |
|-----------------|--------|------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `cid` | Single | Base58 encoded `container ID` or `container name` from NNS or `bucket name`. |
| `cid` | Single | Base58 encoded container ID or container name from NNS. |
| `oid` | Single | Base58 encoded `object ID`. Also could be `S3 object name` if `cid` is specified as bucket name. |
| `oid` | Single | Base58 encoded object ID. |
| `download` | Query | Set the `Content-Disposition` header as `attachment` in response.<br/> This make the browser to download object as file instead of showing it on the page. |

### Methods

@ -143,13 +141,6 @@ Get an object (payload and attributes) by an address.
| 400 | Some error occurred during object downloading. |
| 404 | Container or object not found. |

###### Body

Returns object data. If request performed from browser, either displays raw data or downloads it as
attachment if `download` query parameter is set to `true`.
If `index_page.enabled` is set to `true`, returns HTML with index-page if no object with specified
S3-name was found.

#### HEAD

Get an object attributes by an address.

@ -271,9 +262,9 @@ If more than one object is found, an arbitrary one will be used to get attributes
| 400 | Some error occurred during operation. |
| 404 | Container or object not found. |

## Download archive
## Download zip

Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`
Route: `/zip/{cid}/{prefix}`

| Route parameter | Type | Description |
|-----------------|-----------|---------------------------------------------------------|

@ -284,13 +275,12 @@ Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`

#### GET

Find objects by prefix for `FilePath` attributes. Return found objects in zip or tar archive.
Find objects by prefix for `FilePath` attributes. Return found objects in zip archive.
Name of files in archive sets to `FilePath` attribute of objects.
Time of files sets to time when object has started downloading.
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` or
`/tar/{cid}/` route.
You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` route.

Archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)).
Archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).

##### Request
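A minimal sketch of fetching such an archive from Go; the gateway address, container ID, and prefix are hypothetical:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical gateway address and container ID, for illustration only.
	const endpoint = "http://localhost:8082"
	const cid = "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"

	// Fetch every object whose FilePath starts with "photos/" as one tar stream.
	resp, err := http.Get(endpoint + "/tar/" + cid + "/photos/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		panic(fmt.Sprintf("unexpected status: %s", resp.Status))
	}

	out, err := os.Create("photos.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```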
@ -1,108 +0,0 @@
# Request authentication

HTTP Gateway does not authorize requests. Gateway converts HTTP request to a
FrostFS request and signs it with its own private key.

You can always upload files to public containers (open for anyone to put
objects into), but for restricted containers you need to explicitly allow PUT
operations for a request signed with your HTTP Gateway keys.

If you don't want to manage gateway's secret keys and adjust policies when
gateway configuration changes (new gate, key rotation, etc) or you plan to use
public services, there is an option to let your application backend (or you) to
issue Bearer Tokens and pass them from the client via gate down to FrostFS level
to grant access.

FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
documentation for more details). There are two options to pass them to gateway:
* "Authorization" header with "Bearer" type and base64-encoded token in
  credentials field
* "Bearer" cookie with base64-encoded token contents

For example, you have a mobile application frontend with a backend part storing
data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
Bearer token and provides it to the frontend. Then, the mobile app may generate
some data and upload it via any available FrostFS HTTP Gateway by adding
the corresponding header to the upload request. Accessing policy protected data
works the same way.

##### Example
In order to generate a bearer token, you need to have wallet (which will be used to sign the token)

1. Suppose you have a container with private policy for wallet key

```
$ frostfs-cli container create -r <endpoint> --wallet <wallet> -policy <policy> --basic-acl 0 --await
CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z

$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
  --target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
  --rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
  --chain-id <chainID>
```

2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
   HTTP Gateway request as wallet signed request and save it to **bearer.json**:
```
{
    "body": {
        "allowImpersonate": true,
        "lifetime": {
            "exp": "10000",
            "nbf": "0",
            "iat": "0"
        }
    },
    "signature": null
}
```

3. Sign it with the wallet:
```
$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
```

4. Encode to base64 to use in header:
```
$ base64 -w 0 signed.json
# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
```

After that, the Bearer token can be used:

```
$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
  http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
# output:
# {
#       "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
#       "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
# }
```

##### Note: Bearer Token owner

You can specify exact key who can use Bearer Token (gateway wallet address).
To do this, encode wallet address in base64 format

```
$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
```

Then specify this value in Bearer Token Json
```
{
    "body": {
        "ownerID": {
            "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
        },
        ...
```

##### Note: Policy override

Instead of impersonation, you can define the set of policies that will be applied
to the request sender. This allows to restrict access to specific operation and
specific objects without giving full impersonation control to the token user.
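A minimal Go client attaching the base64-encoded token produced in step 4 above; the gateway address and object address are hypothetical, and the token is read from an environment variable for the sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Hypothetical gateway address, for illustration only.
	const endpoint = "http://localhost:8082"
	token := os.Getenv("FROSTFS_BEARER") // contents of `base64 -w 0 signed.json`

	req, err := http.NewRequest(http.MethodGet,
		endpoint+"/get/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K/DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X", nil)
	if err != nil {
		panic(err)
	}
	// Either form works; the gateway accepts the header or the cookie.
	req.Header.Set("Authorization", "Bearer "+token)
	// Alternatively: req.AddCookie(&http.Cookie{Name: "Bearer", Value: token})

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```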
@ -57,9 +57,7 @@ $ cat http.log
| `frostfs` | [Frostfs configuration](#frostfs-section) |
| `cache` | [Cache configuration](#cache-section) |
| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
| `index_page` | [Index page configuration](#index_page-section) |
| `multinet` | [Multinet configuration](#multinet-section) |
| `features` | [Features configuration](#features-section) |

# General section

@ -75,12 +73,10 @@ request_timeout: 5s
rebalance_timer: 30s
pool_error_threshold: 100
reconnect_interval: 1m
worker_pool_size: 1000

```

| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------------|------------|---------------|---------------|------------------------------------------------------------------------------------|
| `rpc_endpoint` | `string` | yes | | The address of the RPC host to which the gateway connects to resolve bucket names. |
| `resolve_order` | `[]string` | yes | `[nns, dns]` | Order of bucket name resolvers to use. |
| `connect_timeout` | `duration` | | `10s` | Timeout to connect to a node. |

@ -89,7 +85,6 @@ worker_pool_size: 1000
| `rebalance_timer` | `duration` | | `60s` | Interval to check node health. |
| `pool_error_threshold` | `uint32` | | `100` | The number of errors on connection after which node is considered as unhealthy. |
| `reconnect_interval` | `duration` | no | `1m` | Listeners reconnection interval. |
| `worker_pool_size` | `int` | no | `1000` | Maximum worker count in handler's worker pool. |

# `wallet` section

@ -169,21 +164,12 @@ server:
logger:
  level: debug
  destination: stdout
  sampling:
    enabled: false
    initial: 100
    thereafter: 100
    interval: 1s
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------------|------------|---------------|---------------|----------------------------------------------------------------------------------------------------|
| `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
| `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |
| `sampling.enabled` | `bool` | no | false | Sampling enabling flag. |
| `sampling.initial` | `int` | no | '100' | Sampling count of first log entries. |
| `sampling.thereafter` | `int` | no | '100' | Sampling count of entries after an `interval`. |
| `sampling.interval` | `duration` | no | '1s' | Sampling interval of messaging similar entries. |

# `web` section

@ -218,9 +204,8 @@ upload_header:
|-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
| `use_default_timestamp` | `bool` | yes | `false` | Create timestamp for object if it isn't provided by header. |

# `zip` section

> **_DEPRECATED:_** Use archive section instead

```yaml
zip:

@ -231,17 +216,6 @@ zip:
|---------------|--------|---------------|---------------|--------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable zip compression when download files by common prefix. |

# `archive` section

```yaml
archive:
  compression: false
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------|--------|---------------|---------------|------------------------------------------------------------------|
| `compression` | `bool` | yes | `false` | Enable archive compression when download files by common prefix. |

# `pprof` section

@ -282,37 +256,13 @@ tracing:
  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost:4317"
  trusted_ca: "/etc/ssl/telemetry-trusted-ca.pem"
  attributes:
    - key: key0
      value: value
    - key: key1
      value: value
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------|----------------------------------------|---------------|---------------|----------------------------------------------------------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable the tracing. |
| `exporter` | `string` | yes | | Trace collector type (`stdout` or `otlp_grpc` are supported). |
| `endpoint` | `string` | yes | | Address of collector endpoint for OTLP exporters. |
| `trusted_ca` | `string` | yes | | Path to certificate of a certification authority in pem format, that issued the TLS certificate of the telemetry remote server. |
| `attributes` | [[]Attributes](#attributes-subsection) | yes | | An array of configurable attributes in key-value format. |

#### `attributes` subsection

```yaml
attributes:
  - key: key0
    value: value
  - key: key1
    value: value
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------|----------|---------------|---------------|------------------|
| `key` | `string` | yes | | Attribute key. |
| `value` | `string` | yes | | Attribute value. |

# `runtime` section
Contains runtime parameters.
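The `soft_memory_limit` value (`1gb` in the sample YAML, `1073741824` in the env file) presumably maps onto Go's runtime soft memory limit. A sketch of that mapping; the use of `units.RAMInBytes` (from `github.com/docker/go-units`, which is in the module's dependency list) as the size parser is an assumption about how the string form is decoded:

```go
package main

import (
	"fmt"
	"runtime/debug"

	"github.com/docker/go-units"
)

func main() {
	// "1gb" as in the sample YAML; RAMInBytes understands such suffixes.
	limit, err := units.RAMInBytes("1gb")
	if err != nil {
		panic(err)
	}

	// Go's runtime soft memory limit (the GOMEMLIMIT equivalent, Go 1.19+).
	previous := debug.SetMemoryLimit(limit)
	fmt.Printf("soft memory limit set to %d bytes (was %d)\n", limit, previous)
}
```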
@ -351,14 +301,12 @@ cache:
  buckets:
    lifetime: 1m
    size: 1000
  netmap:
    lifetime: 1m
```

| Parameter | Type | Default value | Description |
|-----------|-----------------------------------|---------------------------------|----------------------------------------------------------------------------|
| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
| `netmap` | [Cache config](#cache-subsection) | `lifetime: 1m` | Cache which stores netmap. `netmap.size` isn't applicable for this cache. |

#### `cache` subsection

@ -388,101 +336,3 @@ resolve_bucket:
|----------------------|------------|---------------|-----------------------|---------------------------------------------------|
| `namespace_header` | `string` | yes | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
| `default_namespaces` | `[]string` | yes | ["","root"] | Namespaces that should be handled as default. |

# `index_page` section

Parameters for index HTML-page output. Activates if `GetObject` request returns `not found`. Two
index page modes available:

* `s3` mode uses tree service for listing objects,
* `native` sends requests to nodes via native protocol.
If request pass S3-bucket name instead of CID, `s3` mode will be used, otherwise `native`.

```yaml
index_page:
  enabled: false
  template_path: ""
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-----------------|----------|---------------|---------------|----------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Flag to enable index_page return if no object with specified S3-name was found. |
| `template_path` | `string` | yes | `""` | Path to .gotmpl file with html template for index_page. |

# `cors` section

Parameters for CORS (used in OPTIONS requests and responses in all handlers).
If values are not set, headers will not be included to response.

```yaml
cors:
  allow_origin: "*"
  allow_methods: ["GET", "HEAD"]
  allow_headers: ["Authorization"]
  expose_headers: ["*"]
  allow_credentials: false
  max_age: 600
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------------|------------|---------------|---------------|---------------------------------------------------------|
| `allow_origin` | `string` | yes | | Values for `Access-Control-Allow-Origin` headers. |
| `allow_methods` | `[]string` | yes | | Values for `Access-Control-Allow-Methods` headers. |
| `allow_headers` | `[]string` | yes | | Values for `Access-Control-Allow-Headers` headers. |
| `expose_headers` | `[]string` | yes | | Values for `Access-Control-Expose-Headers` headers. |
| `allow_credentials` | `bool` | yes | `false` | Values for `Access-Control-Allow-Credentials` headers. |
| `max_age` | `int` | yes | `600` | Values for `Access-Control-Max-Age ` headers. |

# `multinet` section

Configuration of multinet support.

```yaml
multinet:
  enabled: false
  balancer: roundrobin
  restrict: false
  fallback_delay: 300ms
  subnets:
    - mask: 1.2.3.4/24
      source_ips:
        - 1.2.3.4
        - 1.2.3.5
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|--------------------------------|---------------|---------------|----------------------------------------------------------------------------------------------|
| `enabled` | `bool` | yes | `false` | Enables multinet setting to manage source ip of outcoming requests. |
| `balancer` | `string` | yes | `""` | Strategy to pick source IP. By default picks first address. Supports `roundrobin` setting. |
| `restrict` | `bool` | yes | `false` | Restricts requests to an undefined subnets. |
| `fallback_delay` | `duration` | yes | `300ms` | Delay between IPv6 and IPv4 fallback stack switch. |
| `subnets` | [[]Subnet](#subnet-subsection) | yes | | Set of subnets to apply multinet dial settings. |

#### `subnet` subsection

```yaml
- mask: 1.2.3.4/24
  source_ips:
    - 1.2.3.4
    - 1.2.3.5
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|--------------|------------|---------------|---------------|------------------------------------------------------------------------|
| `mask` | `string` | yes | | Destination subnet. |
| `source_ips` | `[]string` | yes | | Array of source IP addresses to use when dialing destination subnet. |

# `features` section

Contains parameters for enabling features.

```yaml
features:
  enable_filepath_fallback: true
  tree_pool_netmap_support: true
```

| Parameter | Type | SIGHUP reload | Default value | Description |
|-------------------------------------|--------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `features.enable_filepath_fallback` | `bool` | yes | `false` | Enable using fallback path to search for a object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
| `features.tree_pool_netmap_support` | `bool` | no | `false` | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |
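For the `index_page` section above, a hypothetical sketch of rendering a `.gotmpl` template such as the one referenced by `template_path`; the view model (`indexData` with an `Objects` list) is our invention for illustration, not the gateway's actual template context:

```go
package main

import (
	"html/template"
	"os"
)

// Hypothetical view model; the gateway's real template data may differ.
type indexData struct {
	Container string
	Objects   []struct{ FileName string }
}

func main() {
	// Inline stand-in for a file like internal/handler/templates/index.gotmpl.
	const tmpl = `<h1>Index of {{.Container}}</h1>
<ul>{{range .Objects}}<li>{{.FileName}}</li>{{end}}</ul>`

	t := template.Must(template.New("index").Parse(tmpl))
	data := indexData{
		Container: "container-name",
		Objects: []struct{ FileName string }{
			{FileName: "a.txt"},
			{FileName: "b.txt"},
		},
	}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```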
36 docs/nns.md

@ -1,36 +0,0 @@
# Nicename Resolving with NNS

Steps to start using name resolving:

1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):

```yaml
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
resolve_order:
  - nns
```

2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
   you can check if your container (e.g. with `container-name` name) is registered in NNS:

```shell
$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
  http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'

0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667

$ docker exec -it morph_chain neo-go \
  contract testinvokefunction \
  -r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
  resolve string:container-name.container int:16 \
  | jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
  | base64 -d && echo

7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
```

3. Use container name instead of its `$CID`. For example:

```shell
$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
```
123 go.mod

@ -1,138 +1,117 @@
module git.frostfs.info/TrueCloudLab/frostfs-http-gw

go 1.22
go 1.21

require (
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240718141740-ce8270568d36
	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
	github.com/bluele/gcache v0.0.2
	github.com/docker/docker v27.1.1+incompatible
	github.com/docker/go-units v0.5.0
	github.com/fasthttp/router v1.4.1
	github.com/nspcc-dev/neo-go v0.106.2
	github.com/panjf2000/ants/v2 v2.5.0
	github.com/prometheus/client_golang v1.19.0
	github.com/prometheus/client_model v0.5.0
	github.com/spf13/pflag v1.0.5
	github.com/spf13/viper v1.15.0
	github.com/ssgreg/journald v1.0.0
	github.com/stretchr/testify v1.9.0
	github.com/testcontainers/testcontainers-go v0.35.0
	github.com/testcontainers/testcontainers-go v0.13.0
	github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
	github.com/valyala/fasthttp v1.34.0
	go.opentelemetry.io/otel v1.31.0
	go.opentelemetry.io/otel v1.16.0
	go.opentelemetry.io/otel/trace v1.31.0
	go.opentelemetry.io/otel/trace v1.16.0
	go.uber.org/zap v1.27.0
	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
	golang.org/x/net v0.30.0
	golang.org/x/net v0.23.0
	golang.org/x/sys v0.28.0
	google.golang.org/grpc v1.62.0
	google.golang.org/grpc v1.69.2
)

require (
	dario.cat/mergo v1.0.0 // indirect
	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
	git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
	github.com/Microsoft/hcsshim v0.9.2 // indirect
	github.com/andybalholm/brotli v1.0.4 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/containerd/containerd v1.7.18 // indirect
	github.com/containerd/cgroups v1.0.3 // indirect
	github.com/containerd/log v0.1.0 // indirect
	github.com/containerd/containerd v1.6.2 // indirect
	github.com/containerd/platforms v0.2.1 // indirect
	github.com/cpuguy83/dockercfg v0.3.2 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
	github.com/distribution/reference v0.6.0 // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/docker v20.10.14+incompatible // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.4.0 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/gorilla/mux v1.8.0 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/ipfs/go-cid v0.0.7 // indirect
	github.com/klauspost/compress v1.16.4 // indirect
	github.com/klauspost/compress v1.17.4 // indirect
	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/minio/sha256-simd v1.0.1 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/docker-image-spec v1.3.1 // indirect
	github.com/moby/sys/mount v0.3.2 // indirect
	github.com/moby/patternmatcher v0.6.0 // indirect
	github.com/moby/sys/mountinfo v0.6.1 // indirect
	github.com/moby/sys/sequential v0.5.0 // indirect
	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
	github.com/moby/sys/user v0.1.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/mr-tron/base58 v1.2.0 // indirect
	github.com/multiformats/go-base32 v0.1.0 // indirect
	github.com/multiformats/go-base36 v0.2.0 // indirect
	github.com/multiformats/go-multiaddr v0.14.0 // indirect
	github.com/multiformats/go-multibase v0.2.0 // indirect
	github.com/multiformats/go-multihash v0.2.3 // indirect
	github.com/multiformats/go-varint v0.0.7 // indirect
	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.0 // indirect
	github.com/opencontainers/image-spec v1.0.2 // indirect
	github.com/opencontainers/runc v1.1.1 // indirect
	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
	github.com/prometheus/common v0.48.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
	github.com/sirupsen/logrus v1.8.1 // indirect
	github.com/shoenig/go-m1cpu v0.1.6 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/spf13/afero v1.9.3 // indirect
	github.com/spf13/cast v1.5.0 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/subosito/gotenv v1.4.2 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.12 // indirect
	github.com/tklauser/numcpus v0.6.1 // indirect
	github.com/twmb/murmur3 v1.1.8 // indirect
	github.com/urfave/cli v1.22.12 // indirect
	github.com/urfave/cli v1.22.5 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/yusufpapurcu/wmi v1.2.3 // indirect
	go.etcd.io/bbolt v1.3.9 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect
	go.opentelemetry.io/otel/metric v1.31.0 // indirect
	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0 // indirect
	go.opentelemetry.io/otel/sdk v1.31.0 // indirect
	go.opentelemetry.io/otel/metric v1.16.0 // indirect
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.opentelemetry.io/otel/sdk v1.16.0 // indirect
	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/crypto v0.21.0 // indirect
	golang.org/x/sync v0.10.0 // indirect
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/term v0.27.0 // indirect
	golang.org/x/sys v0.18.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	golang.org/x/term v0.18.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
	google.golang.org/protobuf v1.36.1 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
	google.golang.org/protobuf v1.33.0 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	lukechampine.com/blake3 v1.2.1 // indirect
)
@@ -4,15 +4,13 @@ import (
 	"context"
 	"errors"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 )
 
 // TreeService provide interface to interact with tree service using s3 data models.
 type TreeService interface {
-	GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
-	GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
-	CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
+	GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
 }
 
 var (
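For context, a minimal sketch of how a caller consumes the narrowed interface; the helper name and error text are hypothetical, not part of this diff (the usual context, fmt, cid and oid imports are assumed):

// resolveLatest asks the tree service for the newest version of an
// S3-named object and hands back its object ID.
func resolveLatest(ctx context.Context, svc TreeService, cnrID *cid.ID, objectName string) (oid.ID, error) {
	version, err := svc.GetLatestVersion(ctx, cnrID, objectName)
	if err != nil {
		return oid.ID{}, fmt.Errorf("get latest version of %q: %w", objectName, err)
	}
	return version.OID, nil
}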
@@ -1,4 +1,4 @@
-package data
+package api
 
 import (
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -7,21 +7,11 @@ import (
 // NodeVersion represent node from tree service.
 type NodeVersion struct {
 	BaseNodeVersion
+	DeleteMarker bool
 }
 
 // BaseNodeVersion is minimal node info from tree service.
 // Basically used for "system" object.
 type BaseNodeVersion struct {
-	ID  uint64
 	OID oid.ID
-	IsDeleteMarker bool
 }
-
-type NodeInfo struct {
-	Meta []NodeMeta
-}
-
-type NodeMeta interface {
-	GetKey() string
-	GetValue() []byte
-}
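The right side moves the delete-marker flag up from BaseNodeVersion to NodeVersion. A hedged sketch of constructing and checking a version under the renamed api package (someOID is a placeholder for any oid.ID obtained elsewhere):

v := api.NodeVersion{
	BaseNodeVersion: api.BaseNodeVersion{OID: someOID},
	DeleteMarker:    true,
}
if v.DeleteMarker {
	// download handlers treat such a version as deleted and answer 404
}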
47	internal/cache/buckets.go	vendored
@@ -6,7 +6,6 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -14,7 +13,6 @@ import (
 // BucketCache contains cache with objects and the lifetime of cache entries.
 type BucketCache struct {
 	cache    gcache.Cache
-	cidCache gcache.Cache
 	logger   *zap.Logger
 }
 
@@ -42,45 +40,14 @@ func DefaultBucketConfig(logger *zap.Logger) *Config {
 }
 
 // NewBucketCache creates an object of BucketCache.
-func NewBucketCache(config *Config, cidCache bool) *BucketCache {
-	cache := &BucketCache{
-		cache:  gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
-		logger: config.Logger,
-	}
-
-	if cidCache {
-		cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
-	}
-	return cache
+func NewBucketCache(config *Config) *BucketCache {
+	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
+	return &BucketCache{cache: gc, logger: config.Logger}
 }
 
 // Get returns a cached object.
 func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
-	return o.get(formKey(ns, bktName))
-}
-
-func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
-	if o.cidCache == nil {
-		return nil
-	}
-
-	entry, err := o.cidCache.Get(cnrID)
-	if err != nil {
-		return nil
-	}
-
-	key, ok := entry.(string)
-	if !ok {
-		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", key)))
-		return nil
-	}
-
-	return o.get(key)
-}
-
-func (o *BucketCache) get(key string) *data.BucketInfo {
-	entry, err := o.cache.Get(key)
+	entry, err := o.cache.Get(formKey(ns, bktName))
 	if err != nil {
 		return nil
 	}
@@ -97,12 +64,6 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
 
 // Put puts an object to cache.
 func (o *BucketCache) Put(bkt *data.BucketInfo) error {
-	if o.cidCache != nil {
-		if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
-			return err
-		}
-	}
-
 	return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
 }
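A minimal usage sketch for the cache as it exists on the left (master) side, where the constructor takes a flag enabling the extra CID index; the logger wiring and bucket values here are assumptions for illustration:

logger, _ := zap.NewDevelopment()
bktCache := NewBucketCache(DefaultBucketConfig(logger), true) // true: keep the cidCache index

bkt := &data.BucketInfo{Name: "my-bucket", Zone: "default"}
if err := bktCache.Put(bkt); err != nil {
	logger.Warn("cache put failed", zap.Error(err))
}

// Lookup by (namespace, name) works on both sides of the diff;
// lookup by container ID only while GetByCID and cidCache remain.
_ = bktCache.Get("default", "my-bucket")
_ = bktCache.GetByCID(bkt.CID)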
65	internal/cache/netmap.go	vendored
@@ -1,65 +0,0 @@
-package cache
-
-import (
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/bluele/gcache"
-	"go.uber.org/zap"
-)
-
-type (
-	// NetmapCache provides cache for netmap.
-	NetmapCache struct {
-		cache  gcache.Cache
-		logger *zap.Logger
-	}
-
-	// NetmapCacheConfig stores expiration params for cache.
-	NetmapCacheConfig struct {
-		Lifetime time.Duration
-		Logger   *zap.Logger
-	}
-)
-
-const (
-	DefaultNetmapCacheLifetime = time.Minute
-	netmapCacheSize            = 1
-	netmapKey                  = "netmap"
-)
-
-// DefaultNetmapConfig returns new default cache expiration values.
-func DefaultNetmapConfig(logger *zap.Logger) *NetmapCacheConfig {
-	return &NetmapCacheConfig{
-		Lifetime: DefaultNetmapCacheLifetime,
-		Logger:   logger,
-	}
-}
-
-// NewNetmapCache creates an object of NetmapCache.
-func NewNetmapCache(config *NetmapCacheConfig) *NetmapCache {
-	gc := gcache.New(netmapCacheSize).LRU().Expiration(config.Lifetime).Build()
-	return &NetmapCache{cache: gc, logger: config.Logger}
-}
-
-func (c *NetmapCache) Get() *netmap.NetMap {
-	entry, err := c.cache.Get(netmapKey)
-	if err != nil {
-		return nil
-	}
-
-	result, ok := entry.(netmap.NetMap)
-	if !ok {
-		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)))
-		return nil
-	}
-
-	return &result
-}
-
-func (c *NetmapCache) Put(nm netmap.NetMap) error {
-	return c.cache.Set(netmapKey, nm)
-}
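The deleted file held a single-entry snapshot cache: one fixed key, LRU capacity 1, a one-minute default lifetime. A usage sketch against the removed API (where the netmap value comes from is assumed, e.g. a pool snapshot call):

logger, _ := zap.NewDevelopment()
nmCache := NewNetmapCache(DefaultNetmapConfig(logger))

var nm netmap.NetMap // e.g. obtained from FrostFS.NetmapSnapshot
if err := nmCache.Put(nm); err != nil {
	logger.Warn("netmap cache put failed", zap.Error(err))
}

// Get returns nil on a miss or once the lifetime expires, so callers
// fall back to fetching a fresh snapshot.
if cached := nmCache.Get(); cached != nil {
	logger.Info("served netmap from cache")
}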
@@ -2,7 +2,6 @@ package data
 
 import (
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 )
 
 type BucketInfo struct {
@@ -10,5 +9,4 @@ type BucketInfo struct {
 	Zone                    string // container zone from system attribute
 	CID                     cid.ID
 	HomomorphicHashDisabled bool
-	PlacementPolicy         netmap.PlacementPolicy
 }
@@ -11,7 +11,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
@@ -34,9 +33,9 @@ func NewFrostFS(p *pool.Pool) *FrostFS {
 }
 
 // Container implements frostfs.FrostFS interface method.
-func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
+func (x *FrostFS) Container(ctx context.Context, layerPrm handler.PrmContainer) (*container.Container, error) {
 	prm := pool.PrmContainerGet{
-		ContainerID: containerPrm.ContainerID,
+		ContainerID: layerPrm.ContainerID,
 	}
 
 	res, err := x.pool.GetContainer(ctx, prm)
@@ -61,10 +60,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate)
 	}
 
 	idObj, err := x.pool.PutObject(ctx, prmPut)
-	if err != nil {
-		return oid.ID{}, handleObjectError("save object via connection pool", err)
-	}
-	return idObj.ObjectID, nil
+	return idObj, handleObjectError("save object via connection pool", err)
 }
 
 // wraps io.ReadCloser and transforms Read errors related to access violation
@@ -174,15 +170,6 @@ func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations,
 	return res, nil
 }
 
-func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
-	netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
-	if err != nil {
-		return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
-	}
-
-	return netmapSnapshot, nil
-}
-
 // ResolverFrostFS represents virtual connection to the FrostFS network.
 // It implements resolver.FrostFS.
 type ResolverFrostFS struct {
@@ -215,10 +202,6 @@ func handleObjectError(msg string, err error) error {
 	}
 
 	if reason, ok := IsErrObjectAccessDenied(err); ok {
-		if strings.Contains(reason, "limit reached") {
-			return fmt.Errorf("%s: %w: %s", msg, handler.ErrQuotaLimitReached, reason)
-		}
-
 		return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
 	}
 
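The deleted quota branch matters to callers because both sentinels are wrapped with %w, so they survive fmt.Errorf and can be matched with errors.Is. A sketch of the consuming side (the HTTP status mapping is illustrative, not taken from this diff):

_, err := frostFS.CreateObject(ctx, prm)
switch {
case errors.Is(err, handler.ErrQuotaLimitReached): // only produced on the left side of this hunk
	// e.g. respond 507 Insufficient Storage
case errors.Is(err, handler.ErrAccessDenied):
	// e.g. respond 403 Forbidden
case err != nil:
	// generic failure path
}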
91	internal/frostfs/services/pool_wrapper.go	Normal file
@@ -0,0 +1,91 @@
+package services
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
+	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
+	grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
+)
+
+type GetNodeByPathResponseInfoWrapper struct {
+	response *grpcService.GetNodeByPathResponse_Info
+}
+
+func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
+	return n.response.GetNodeId()
+}
+
+func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
+	return n.response.GetParentId()
+}
+
+func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
+	return n.response.GetTimestamp()
+}
+
+func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
+	res := make([]tree.Meta, len(n.response.Meta))
+	for i, value := range n.response.Meta {
+		res[i] = value
+	}
+	return res
+}
+
+type PoolWrapper struct {
+	p *treepool.Pool
+}
+
+func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
+	return &PoolWrapper{p: p}
+}
+
+func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
+	poolPrm := treepool.GetNodesParams{
+		CID:           prm.CnrID,
+		TreeID:        prm.TreeID,
+		Path:          prm.Path,
+		Meta:          prm.Meta,
+		PathAttribute: tree.FileNameKey,
+		LatestOnly:    prm.LatestOnly,
+		AllAttrs:      prm.AllAttrs,
+		BearerToken:   getBearer(ctx),
+	}
+
+	nodes, err := w.p.GetNodes(ctx, poolPrm)
+	if err != nil {
+		return nil, handleError(err)
+	}
+
+	res := make([]tree.NodeResponse, len(nodes))
+	for i, info := range nodes {
+		res[i] = GetNodeByPathResponseInfoWrapper{info}
+	}
+
+	return res, nil
+}
+
+func getBearer(ctx context.Context) []byte {
+	token, err := tokens.LoadBearerToken(ctx)
+	if err != nil {
+		return nil
+	}
+	return token.Marshal()
+}
+
+func handleError(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, treepool.ErrNodeNotFound) {
+		return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
+	}
+	if errors.Is(err, treepool.ErrNodeAccessDenied) {
+		return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
+	}
+
+	return err
+}
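A sketch of how the new wrapper is meant to be called: handleError has already translated the pool sentinels into the gateway's own tree errors, so callers match on those. Parameter values below are assumptions for illustration:

prm := &tree.GetNodesParams{TreeID: "system", LatestOnly: true}
nodes, err := poolWrapper.GetNodes(ctx, prm)
switch {
case errors.Is(err, tree.ErrNodeNotFound):
	// not an outage: the node simply does not exist
case errors.Is(err, tree.ErrNodeAccessDenied):
	// the bearer token taken from the context lacked rights
case err != nil:
	// transport or protocol failure
default:
	for _, n := range nodes {
		_ = n.GetMeta() // each element is a GetNodeByPathResponseInfoWrapper
	}
}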
@@ -1,382 +0,0 @@
-package handler
-
-import (
-	"context"
-	"html/template"
-	"net/url"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/docker/go-units"
-	"github.com/valyala/fasthttp"
-	"go.uber.org/zap"
-)
-
-const (
-	dateFormat       = "02-01-2006 15:04"
-	attrOID          = "OID"
-	attrCreated      = "Created"
-	attrFileName     = "FileName"
-	attrFilePath     = "FilePath"
-	attrSize         = "Size"
-	attrDeleteMarker = "IsDeleteMarker"
-)
-
-type (
-	BrowsePageData struct {
-		HasErrors bool
-		Container string
-		Prefix    string
-		Protocol  string
-		Objects   []ResponseObject
-	}
-	ResponseObject struct {
-		OID            string
-		Created        string
-		FileName       string
-		FilePath       string
-		Size           string
-		IsDir          bool
-		GetURL         string
-		IsDeleteMarker bool
-	}
-)
-
-func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
-	return ResponseObject{
-		Created:        formatTimestamp(attrs[attrCreated]),
-		OID:            attrs[attrOID],
-		FileName:       attrs[attrFileName],
-		Size:           attrs[attrSize],
-		IsDir:          attrs[attrOID] == "",
-		IsDeleteMarker: attrs[attrDeleteMarker] == "true",
-	}
-}
-
-func newListObjectsResponseNative(attrs map[string]string) ResponseObject {
-	filename := lastPathElement(attrs[object.AttributeFilePath])
-	if filename == "" {
-		filename = attrs[attrFileName]
-	}
-	return ResponseObject{
-		OID:      attrs[attrOID],
-		Created:  formatTimestamp(attrs[object.AttributeTimestamp] + "000"),
-		FileName: filename,
-		FilePath: attrs[object.AttributeFilePath],
-		Size:     attrs[attrSize],
-		IsDir:    false,
-	}
-}
-
-func getNextDir(filepath, prefix string) string {
-	restPath := strings.Replace(filepath, prefix, "", 1)
-	index := strings.Index(restPath, "/")
-	if index == -1 {
-		return ""
-	}
-	return restPath[:index]
-}
-
-func lastPathElement(path string) string {
-	if path == "" {
-		return path
-	}
-	index := strings.LastIndex(path, "/")
-	if index == len(path)-1 {
-		index = strings.LastIndex(path[:index], "/")
-	}
-	return path[index+1:]
-}
-
-func parseTimestamp(tstamp string) (time.Time, error) {
-	millis, err := strconv.ParseInt(tstamp, 10, 64)
-	if err != nil {
-		return time.Time{}, err
-	}
-
-	return time.UnixMilli(millis), nil
-}
-
-func formatTimestamp(strdate string) string {
-	date, err := parseTimestamp(strdate)
-	if err != nil || date.IsZero() {
-		return ""
-	}
-
-	return date.Format(dateFormat)
-}
-
-func formatSize(strsize string) string {
-	size, err := strconv.ParseFloat(strsize, 64)
-	if err != nil {
-		return "0B"
-	}
-	return units.HumanSize(size)
-}
-
-func parentDir(prefix string) string {
-	index := strings.LastIndex(prefix, "/")
-	if index == -1 {
-		return prefix
-	}
-	return prefix[index:]
-}
-
-func trimPrefix(encPrefix string) string {
-	prefix, err := url.PathUnescape(encPrefix)
-	if err != nil {
-		return ""
-	}
-	slashIndex := strings.LastIndex(prefix, "/")
-	if slashIndex == -1 {
-		return ""
-	}
-	return prefix[:slashIndex]
-}
-
-func urlencode(path string) string {
-	var res strings.Builder
-
-	prefixParts := strings.Split(path, "/")
-	for _, prefixPart := range prefixParts {
-		prefixPart = "/" + url.PathEscape(prefixPart)
-		if prefixPart == "/." || prefixPart == "/.." {
-			prefixPart = url.PathEscape(prefixPart)
-		}
-		res.WriteString(prefixPart)
-	}
-
-	return res.String()
-}
-
-type GetObjectsResponse struct {
-	objects   []ResponseObject
-	hasErrors bool
-}
-
-func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
-	nodes, _, err := h.tree.GetSubTreeByPrefix(ctx, bucketInfo, prefix, true)
-	if err != nil {
-		return nil, err
-	}
-
-	result := &GetObjectsResponse{
-		objects: make([]ResponseObject, 0, len(nodes)),
-	}
-	for _, node := range nodes {
-		meta := node.Meta
-		if meta == nil {
-			continue
-		}
-		var attrs = make(map[string]string, len(meta))
-		for _, m := range meta {
-			attrs[m.GetKey()] = string(m.GetValue())
-		}
-		obj := newListObjectsResponseS3(attrs)
-		if obj.IsDeleteMarker {
-			continue
-		}
-		obj.FilePath = prefix + obj.FileName
-		obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
-		result.objects = append(result.objects, obj)
-	}
-
-	return result, nil
-}
-
-func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.BucketInfo, prefix string) (*GetObjectsResponse, error) {
-	var basePath string
-	if ind := strings.LastIndex(prefix, "/"); ind != -1 {
-		basePath = prefix[:ind+1]
-	}
-
-	filters := object.NewSearchFilters()
-	filters.AddRootFilter()
-	if prefix != "" {
-		filters.AddFilter(object.AttributeFilePath, prefix, object.MatchCommonPrefix)
-	}
-
-	prm := PrmObjectSearch{
-		PrmAuth: PrmAuth{
-			BearerToken: bearerToken(ctx),
-		},
-		Container: bucketInfo.CID,
-		Filters:   filters,
-	}
-	objectIDs, err := h.frostfs.SearchObjects(ctx, prm)
-	if err != nil {
-		return nil, err
-	}
-	defer objectIDs.Close()
-
-	resp, err := h.headDirObjects(ctx, bucketInfo.CID, objectIDs, basePath)
-	if err != nil {
-		return nil, err
-	}
-
-	log := utils.GetReqLogOrDefault(ctx, h.log)
-	dirs := make(map[string]struct{})
-	result := &GetObjectsResponse{
-		objects: make([]ResponseObject, 0, 100),
-	}
-	for objExt := range resp {
-		if objExt.Error != nil {
-			log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
-			result.hasErrors = true
-			continue
-		}
-		if objExt.Object.IsDir {
-			if _, ok := dirs[objExt.Object.FileName]; ok {
-				continue
-			}
-			objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + urlencode(objExt.Object.FilePath)
-			dirs[objExt.Object.FileName] = struct{}{}
-		} else {
-			objExt.Object.GetURL = "/get/" + bucketInfo.CID.EncodeToString() + "/" + objExt.Object.OID
-		}
-		result.objects = append(result.objects, objExt.Object)
-	}
-	return result, nil
-}
-
-type ResponseObjectExtended struct {
-	Object ResponseObject
-	Error  error
-}
-
-func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs ResObjectSearch, basePath string) (<-chan ResponseObjectExtended, error) {
-	res := make(chan ResponseObjectExtended)
-
-	go func() {
-		defer close(res)
-		log := utils.GetReqLogOrDefault(ctx, h.log).With(
-			zap.String("cid", cnrID.EncodeToString()),
-			zap.String("path", basePath),
-		)
-		var wg sync.WaitGroup
-		err := objectIDs.Iterate(func(id oid.ID) bool {
-			wg.Add(1)
-			err := h.workerPool.Submit(func() {
-				defer wg.Done()
-				var obj ResponseObjectExtended
-				obj.Object, obj.Error = h.headDirObject(ctx, cnrID, id, basePath)
-				res <- obj
-			})
-			if err != nil {
-				wg.Done()
-				log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
-			}
-			select {
-			case <-ctx.Done():
-				return true
-			default:
-				return false
-			}
-		})
-		if err != nil {
-			log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
-		}
-		wg.Wait()
-	}()
-
-	return res, nil
-}
-
-func (h *Handler) headDirObject(ctx context.Context, cnrID cid.ID, objID oid.ID, basePath string) (ResponseObject, error) {
-	addr := newAddress(cnrID, objID)
-	obj, err := h.frostfs.HeadObject(ctx, PrmObjectHead{
-		PrmAuth: PrmAuth{BearerToken: bearerToken(ctx)},
-		Address: addr,
-	})
-	if err != nil {
-		return ResponseObject{}, err
-	}
-
-	attrs := loadAttributes(obj.Attributes())
-	attrs[attrOID] = objID.EncodeToString()
-	if multipartSize, ok := attrs[attributeMultipartObjectSize]; ok {
-		attrs[attrSize] = multipartSize
-	} else {
-		attrs[attrSize] = strconv.FormatUint(obj.PayloadSize(), 10)
-	}
-
-	dirname := getNextDir(attrs[object.AttributeFilePath], basePath)
-	if dirname == "" {
-		return newListObjectsResponseNative(attrs), nil
-	}
-
-	return ResponseObject{
-		FileName: dirname,
-		FilePath: basePath + dirname,
-		IsDir:    true,
-	}, nil
-}
-
-type browseParams struct {
-	bucketInfo  *data.BucketInfo
-	prefix      string
-	isNative    bool
-	listObjects func(ctx context.Context, bucketName *data.BucketInfo, prefix string) (*GetObjectsResponse, error)
-}
-
-func (h *Handler) browseObjects(c *fasthttp.RequestCtx, p browseParams) {
-	const S3Protocol = "s3"
-	const FrostfsProtocol = "frostfs"
-
-	ctx := utils.GetContextFromRequest(c)
-	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
-	log := reqLog.With(
-		zap.String("bucket", p.bucketInfo.Name),
-		zap.String("container", p.bucketInfo.CID.EncodeToString()),
-		zap.String("prefix", p.prefix),
-	)
-	resp, err := p.listObjects(ctx, p.bucketInfo, p.prefix)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-
-	objects := resp.objects
-	sort.Slice(objects, func(i, j int) bool {
-		if objects[i].IsDir == objects[j].IsDir {
-			return objects[i].FileName < objects[j].FileName
-		}
-		return objects[i].IsDir
-	})
-
-	tmpl, err := template.New("index").Funcs(template.FuncMap{
-		"formatSize": formatSize,
-		"trimPrefix": trimPrefix,
-		"urlencode":  urlencode,
-		"parentDir":  parentDir,
-	}).Parse(h.config.IndexPageTemplate())
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	bucketName := p.bucketInfo.Name
-	protocol := S3Protocol
-	if p.isNative {
-		bucketName = p.bucketInfo.CID.EncodeToString()
-		protocol = FrostfsProtocol
-	}
-	if err = tmpl.Execute(c, &BrowsePageData{
-		Container: bucketName,
-		Prefix:    p.prefix,
-		Objects:   objects,
-		Protocol:  protocol,
-		HasErrors: resp.hasErrors,
-	}); err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-}
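Two of the deleted helpers have easy-to-miss trailing-slash semantics; a self-contained restatement (copied logic, hypothetical inputs) that can be run to see the behavior:

package main

import (
	"fmt"
	"strings"
)

// lastPathElement mirrors the deleted helper: it returns the final path
// element, stepping back over at most one trailing slash.
func lastPathElement(path string) string {
	if path == "" {
		return path
	}
	index := strings.LastIndex(path, "/")
	if index == len(path)-1 {
		index = strings.LastIndex(path[:index], "/")
	}
	return path[index+1:]
}

func main() {
	fmt.Println(lastPathElement("a/b/c.txt")) // c.txt
	fmt.Println(lastPathElement("a/b/"))      // b/  (one trailing slash is kept)
	fmt.Println(lastPathElement("c.txt"))     // c.txt (no slash: the whole input)
}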
@@ -1,21 +1,19 @@
 package handler
 
 import (
-	"archive/tar"
 	"archive/zip"
 	"bufio"
-	"compress/gzip"
 	"context"
-	"errors"
 	"fmt"
 	"io"
+	"net/http"
 	"net/url"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -25,42 +23,21 @@ import (
 
 // DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
 func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
-	cidParam := c.UserValue("cid").(string)
-	oidParam := c.UserValue("oid").(string)
-	downloadParam := c.QueryArgs().GetBool("download")
-
-	ctx := utils.GetContextFromRequest(c)
-	log := utils.GetReqLogOrDefault(ctx, h.log).With(
-		zap.String("cid", cidParam),
-		zap.String("oid", oidParam),
-	)
-
-	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
+	test, _ := c.UserValue("oid").(string)
+	var id oid.ID
+	err := id.DecodeString(test)
 	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-
-	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
-	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
-		logAndSendBucketError(c, log, checkS3Err)
-		return
-	}
-
-	req := newRequest(c, log)
-
-	var objID oid.ID
-	if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
-		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
-	} else if err = objID.DecodeString(oidParam); err == nil {
-		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
+		h.byObjectName(c, h.receiveFile)
 	} else {
-		h.browseIndex(c, checkS3Err != nil)
+		h.byAddress(c, h.receiveFile)
 	}
 }
 
-func shouldDownload(oidParam string, downloadParam bool) bool {
-	return !isDir(oidParam) || downloadParam
+func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
+	return &request{
+		RequestCtx: ctx,
+		log:        log,
+	}
 }
 
 // DownloadByAttribute handles attribute-based download requests.
@@ -68,7 +45,7 @@ func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
 	h.byAttribute(c, h.receiveFile)
 }
 
-func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
+func (h *Handler) search(ctx context.Context, cnrID *cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
 	filters := object.NewSearchFilters()
 	filters.AddRootFilter()
 	filters.AddFilter(key, val, op)
@@ -77,68 +54,20 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
 		PrmAuth: PrmAuth{
 			BearerToken: bearerToken(ctx),
 		},
-		Container: cnrID,
+		Container: *cnrID,
 		Filters:   filters,
 	}
 
 	return h.frostfs.SearchObjects(ctx, prm)
 }
 
-// DownloadZip handles zip by prefix requests.
-func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
-	scid, _ := c.UserValue("cid").(string)
-
-	ctx := utils.GetContextFromRequest(c)
-	log := utils.GetReqLogOrDefault(ctx, h.log)
-	bktInfo, err := h.getBucketInfo(ctx, scid, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
-	if err != nil {
-		return
-	}
-
-	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
-	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
-
-	c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
-}
-
-func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
-	return func(w *bufio.Writer) {
-		defer resSearch.Close()
-
-		buf := make([]byte, 3<<20)
-		zipWriter := zip.NewWriter(w)
-		var objectsWritten int
-
-		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
-			func(obj *object.Object) (io.Writer, error) {
-				objectsWritten++
-				return h.createZipFile(zipWriter, obj)
-			}),
-		)
-		if errIter != nil {
-			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
-			return
-		} else if objectsWritten == 0 {
-			log.Warn(logs.ObjectsNotFound)
-		}
-		if err := zipWriter.Close(); err != nil {
-			log.Error(logs.CloseZipWriter, zap.Error(err))
-		}
-	}
-}
-
-func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
+func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
 	method := zip.Store
-	if h.config.ArchiveCompression() {
+	if h.config.ZipCompression() {
 		method = zip.Deflate
 	}
 
-	filePath := getFilePath(obj)
+	filePath := getZipFilePath(obj)
 	if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
 		return nil, fmt.Errorf("invalid filepath '%s'", filePath)
 	}
@@ -150,139 +79,98 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
 	})
 }
 
-// DownloadTar forms tar.gz from objects by prefix.
-func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
+// DownloadZipped handles zip by prefix requests.
+func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
 	scid, _ := c.UserValue("cid").(string)
+	prefix, _ := c.UserValue("prefix").(string)
+
+	prefix, err := url.QueryUnescape(prefix)
+	if err != nil {
+		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()), zap.Error(err))
+		response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()))
 
 	ctx := utils.GetContextFromRequest(c)
-	log := utils.GetReqLogOrDefault(ctx, h.log)
 	bktInfo, err := h.getBucketInfo(ctx, scid, log)
 	if err != nil {
 		logAndSendBucketError(c, log, err)
 		return
 	}
-	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
+
+	resSearch, err := h.search(ctx, &bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
 	if err != nil {
+		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
+		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
 
-	c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
-	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
+	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
+	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
+	c.Response.SetStatusCode(http.StatusOK)
 
-	c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
-}
-
-func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
-	return func(w *bufio.Writer) {
+	c.SetBodyStreamWriter(func(w *bufio.Writer) {
 		defer resSearch.Close()
 
-		compressionLevel := gzip.NoCompression
-		if h.config.ArchiveCompression() {
-			compressionLevel = gzip.DefaultCompression
-		}
+		zipWriter := zip.NewWriter(w)
 
-		// ignore error because it's not nil only if compressionLevel argument is invalid
-		gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel)
-		tarWriter := tar.NewWriter(gzipWriter)
+		var bufZip []byte
+		var addr oid.Address
 
-		defer func() {
-			if err := tarWriter.Close(); err != nil {
-				log.Error(logs.CloseTarWriter, zap.Error(err))
-			}
-			if err := gzipWriter.Close(); err != nil {
-				log.Error(logs.CloseGzipWriter, zap.Error(err))
-			}
-		}()
+		empty := true
+		called := false
+		btoken := bearerToken(ctx)
+		addr.SetContainer(bktInfo.CID)
 
-		var objectsWritten int
-		buf := make([]byte, 3<<20) // the same as for upload
+		errIter := resSearch.Iterate(func(id oid.ID) bool {
+			called = true
 
-		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
-			func(obj *object.Object) (io.Writer, error) {
-				objectsWritten++
-				return h.createTarFile(tarWriter, obj)
-			}),
-		)
+			if empty {
+				bufZip = make([]byte, 3<<20) // the same as for upload
+			}
+			empty = false
+
+			addr.SetObject(id)
+			if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
+				log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
+			}
+
+			return false
+		})
 		if errIter != nil {
 			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
-		} else if objectsWritten == 0 {
-			log.Warn(logs.ObjectsNotFound)
+		} else if !called {
+			log.Error(logs.ObjectsNotFound)
 		}
-	}
-}
 
-func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) {
-	filePath := getFilePath(obj)
-	if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
-		return nil, fmt.Errorf("invalid filepath '%s'", filePath)
-	}
-
-	return tw, tw.WriteHeader(&tar.Header{
-		Name: filePath,
-		Mode: 0655,
-		Size: int64(obj.PayloadSize()),
+		if err = zipWriter.Close(); err != nil {
+			log.Error(logs.CloseZipWriter, zap.Error(err))
+		}
 	})
 }
 
-func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
-	return func(id oid.ID) bool {
-		log = log.With(zap.String("oid", id.EncodeToString()))
-
-		prm := PrmObjectGet{
-			PrmAuth: PrmAuth{
-				BearerToken: bearerToken(ctx),
-			},
-			Address: newAddress(cnrID, id),
-		}
-
-		resGet, err := h.frostfs.GetObject(ctx, prm)
-		if err != nil {
-			log.Error(logs.FailedToGetObject, zap.Error(err))
-			return false
-		}
-
-		fileWriter, err := createArchiveHeader(&resGet.Header)
-		if err != nil {
-			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
-			return false
-		}
-
-		if err = writeToArchive(resGet, fileWriter, buf); err != nil {
-			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err))
-			return false
-		}
-
-		return false
-	}
-}
-
-func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
-	scid, _ := c.UserValue("cid").(string)
-	prefix, _ := c.UserValue("prefix").(string)
-
-	ctx := utils.GetContextFromRequest(c)
-
-	prefix, err := url.QueryUnescape(prefix)
-	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
-		ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
-		return nil, err
-	}
-
-	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
-
-	resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
-	if err != nil {
-		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
-		ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
-		return nil, err
-	}
-	return resSearch, nil
-}
-
-func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
-	var err error
-	if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil {
+func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
+	prm := PrmObjectGet{
+		PrmAuth: PrmAuth{
+			BearerToken: btoken,
+		},
+		Address: addr,
+	}
+
+	resGet, err := h.frostfs.GetObject(ctx, prm)
+	if err != nil {
+		return fmt.Errorf("get FrostFS object: %v", err)
+	}
+
+	objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
+	if err != nil {
+		return fmt.Errorf("zip create header: %v", err)
+	}
+
+	if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
 		return fmt.Errorf("copy object payload to zip file: %v", err)
 	}
 
@@ -290,10 +178,14 @@ func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
 		return fmt.Errorf("object body close error: %w", err)
 	}
 
+	if err = zipWriter.Flush(); err != nil {
+		return fmt.Errorf("flush zip writer: %v", err)
+	}
+
 	return nil
 }
 
-func getFilePath(obj *object.Object) string {
+func getZipFilePath(obj *object.Object) string {
 	for _, attr := range obj.Attributes() {
 		if attr.Key() == object.AttributeFilePath {
 			return attr.Value()
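Both sides of this file stream the archive inside fasthttp's body writer, so object payloads flow to the client without buffering the whole archive in memory. A standalone sketch of that pattern (the package name, names slice, open callback and error handling are placeholders, not the gateway's API):

package archiveexample

import (
	"archive/zip"
	"bufio"
	"io"

	"github.com/valyala/fasthttp"
)

func serveZip(c *fasthttp.RequestCtx, names []string, open func(name string) (io.ReadCloser, error)) {
	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
	c.SetBodyStreamWriter(func(w *bufio.Writer) {
		zw := zip.NewWriter(w)
		defer zw.Close()
		for _, name := range names {
			rc, err := open(name)
			if err != nil {
				continue // the real handlers log and move on
			}
			// Store (no compression) mirrors the handlers' default method.
			if fw, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zip.Store}); err == nil {
				_, _ = io.Copy(fw, rc)
			}
			rc.Close()
		}
	})
}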
@@ -229,10 +229,6 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (Res
 	return &resObjectSearchMock{res: res}, nil
 }
 
-func (t *TestFrostFS) InitMultiObjectReader(context.Context, PrmInitMultiObjectReader) (io.Reader, error) {
-	return nil, nil
-}
-
 func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
 	for _, attr := range attributes {
 		if attr.Key() == filter.Header() {
@@ -273,3 +269,10 @@ func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID o
 	}
 	return false
 }
+
+func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
+	var addr oid.Address
+	addr.SetContainer(cnr)
+	addr.SetObject(obj)
+	return addr
+}
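The added helper just packs the two identifiers into one address value; a brief usage note (the Container and Object accessor names on oid.Address are taken on trust from frostfs-sdk-go, treat them as an assumption):

addr := newAddress(cnrID, objID)
// addr carries both halves of the composite key and hands them back:
_ = addr.Container().EncodeToString() == cnrID.EncodeToString() // true
_ = addr.Object().EncodeToString() == objID.EncodeToString()    // true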
@@ -11,8 +11,9 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -21,20 +22,16 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"github.com/panjf2000/ants/v2"
 	"github.com/valyala/fasthttp"
 	"go.uber.org/zap"
 )
 
 type Config interface {
 	DefaultTimestamp() bool
-	ArchiveCompression() bool
+	ZipCompression() bool
 	ClientCut() bool
-	IndexPageEnabled() bool
-	IndexPageTemplate() string
 	BufferMaxSizeForPut() uint64
 	NamespaceHeader() string
-	EnableFilepathFallback() bool
 }
 
 // PrmContainer groups parameters of FrostFS.Container operation.
@@ -120,14 +117,6 @@ type PrmObjectSearch struct {
 	Filters object.SearchFilters
 }
 
-type PrmInitMultiObjectReader struct {
-	// payload range
-	Off, Ln uint64
-
-	Addr   oid.Address
-	Bearer *bearer.Token
-}
-
 type ResObjectSearch interface {
 	Read(buf []oid.ID) (int, error)
 	Iterate(f func(oid.ID) bool) error
@@ -139,8 +128,6 @@ var (
 	ErrAccessDenied = errors.New("access denied")
 	// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
 	ErrGatewayTimeout = errors.New("gateway timeout")
-	// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
-	ErrQuotaLimitReached = errors.New("quota limit reached")
 )
 
 // FrostFS represents virtual connection to FrostFS network.
@@ -151,8 +138,6 @@ type FrostFS interface {
 	RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
 	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
 	SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
-	InitMultiObjectReader(ctx context.Context, p PrmInitMultiObjectReader) (io.Reader, error)
 
 	utils.EpochInfoFetcher
 }
@@ -166,9 +151,8 @@ type Handler struct {
 	ownerID           *user.ID
 	config            Config
 	containerResolver ContainerResolver
-	tree              layer.TreeService
+	tree              *tree.Tree
 	cache             *cache.BucketCache
-	workerPool        *ants.Pool
 }
 
 type AppParams struct {
@@ -179,7 +163,7 @@ type AppParams struct {
 	Cache    *cache.BucketCache
 }
 
-func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
+func New(params *AppParams, config Config, tree *tree.Tree) *Handler {
 	return &Handler{
 		log:     params.Logger,
 		frostfs: params.FrostFS,
@@ -188,120 +172,140 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a
 		containerResolver: params.Resolver,
 		tree:              tree,
 		cache:             params.Cache,
-		workerPool:        workerPool,
 	}
 }
 
-// byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
-// prepares request and object address to it.
-func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
-	addr := newAddress(cnrID, objID)
-	handler(ctx, req, addr)
-}
-
-// byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
-// resolves object address from S3-like path <bucket name>/<object key>.
-func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
-	c, log := req.RequestCtx, req.log
-
-	foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	if foundOID.IsDeleteMarker {
-		log.Error(logs.ObjectWasDeleted)
-		ResponseError(c, "object deleted", fasthttp.StatusNotFound)
-		return
-	}
-
-	addr := newAddress(cnrID, foundOID.OID)
-	handler(ctx, newRequest(c, log), addr)
-}
-
-// byAttribute is a wrapper similar to byNativeAddress.
-func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
-	cidParam, _ := c.UserValue("cid").(string)
-	key, _ := c.UserValue("attr_key").(string)
-	val, _ := c.UserValue("attr_val").(string)
-
-	ctx := utils.GetContextFromRequest(c)
-	log := utils.GetReqLogOrDefault(ctx, h.log)
-
-	key, err := url.QueryUnescape(key)
-	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key), zap.Error(err))
-		ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-
-	val, err = url.QueryUnescape(val)
-	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val), zap.Error(err))
-		ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-
-	log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))
-
-	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-
-	objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
-	if err != nil {
-		if errors.Is(err, io.EOF) {
-			ResponseError(c, err.Error(), fasthttp.StatusNotFound)
-			return
-		}
-
-		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-
-	var addr oid.Address
-	addr.SetContainer(bktInfo.CID)
-	addr.SetObject(objID)
-
-	handler(ctx, newRequest(c, log), addr)
-}
-
-func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
-	res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
-	if err != nil {
-		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
-		return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
-	}
+// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
+// prepares request and object address to it.
+func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	var (
+		idCnr, _ = c.UserValue("cid").(string)
+		idObj, _ = c.UserValue("oid").(string)
+		log      = h.log.With(zap.String("cid", idCnr), zap.String("oid", idObj))
+	)
+
+	ctx := utils.GetContextFromRequest(c)
+
+	bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	objID := new(oid.ID)
+	if err = objID.DecodeString(idObj); err != nil {
+		log.Error(logs.WrongObjectID, zap.Error(err))
+		response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
+		return
+	}
+
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(*objID)
+
+	f(ctx, *h.newRequest(c, log), addr)
+}
+
+// byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
+// prepares request and object address to it.
+func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	var (
+		bucketname = req.UserValue("cid").(string)
+		key        = req.UserValue("oid").(string)
+		log        = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
+	)
+
+	ctx := utils.GetContextFromRequest(req)
+
+	bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
+	if err != nil {
+		logAndSendBucketError(req, log, err)
+		return
+	}
+
+	foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, key)
+	if err != nil {
+		if errors.Is(err, tree.ErrNodeAccessDenied) {
+			response.Error(req, "Access Denied", fasthttp.StatusForbidden)
+			return
+		}
+		log.Error(logs.GetLatestObjectVersion, zap.Error(err))
+
+		response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
+		return
+	}
+	if foundOid.DeleteMarker {
+		log.Error(logs.ObjectWasDeleted)
+		response.Error(req, "object deleted", fasthttp.StatusNotFound)
+		return
+	}
+
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(foundOid.OID)
+
+	f(ctx, *h.newRequest(req, log), addr)
+}
+
+// byAttribute is a wrapper similar to byAddress.
+func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	scid, _ := c.UserValue("cid").(string)
+	key, _ := c.UserValue("attr_key").(string)
+	val, _ := c.UserValue("attr_val").(string)
+
+	key, err := url.QueryUnescape(key)
+	if err != nil {
+		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Uint64("id", c.ID()), zap.Error(err))
+		response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	val, err = url.QueryUnescape(val)
+	if err != nil {
+		h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Uint64("id", c.ID()), zap.Error(err))
+		response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	log := h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
+
+	ctx := utils.GetContextFromRequest(c)
+
+	bktInfo, err := h.getBucketInfo(ctx, scid, log)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	res, err := h.search(ctx, &bktInfo.CID, key, val, object.MatchStringEqual)
+	if err != nil {
+		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
+		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
 	defer res.Close()
 
 	buf := make([]oid.ID, 1)
 
 	n, err := res.Read(buf)
 	if n == 0 {
-		switch {
-		case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
+		if errors.Is(err, io.EOF) {
|
|
||||||
log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName)
|
|
||||||
return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, attrVal)
|
|
||||||
case errors.Is(err, io.EOF):
|
|
||||||
log.Error(logs.ObjectNotFound, zap.Error(err))
|
log.Error(logs.ObjectNotFound, zap.Error(err))
|
||||||
return oid.ID{}, fmt.Errorf("object not found: %w", err)
|
response.Error(c, "object not found", fasthttp.StatusNotFound)
|
||||||
default:
|
return
|
||||||
|
}
|
||||||
|
|
||||||
log.Error(logs.ReadObjectListFailed, zap.Error(err))
|
log.Error(logs.ReadObjectListFailed, zap.Error(err))
|
||||||
return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
|
response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
}
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf[0], nil
|
var addrObj oid.Address
|
||||||
}
|
addrObj.SetContainer(bktInfo.CID)
|
||||||
|
addrObj.SetObject(buf[0])
|
||||||
|
|
||||||
func (h *Handler) needSearchByFileName(key, val string) bool {
|
f(ctx, *h.newRequest(c, log), addrObj)
|
||||||
if key != attrFilePath || !h.config.EnableFilepathFallback() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveContainer decode container id, if it's not a valid container id
|
// resolveContainer decode container id, if it's not a valid container id
|
||||||
|
@ -366,46 +370,6 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
|
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
|
||||||
bktInfo.PlacementPolicy = res.PlacementPolicy()
|
|
||||||
|
|
||||||
return bktInfo, err
|
return bktInfo, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
|
|
||||||
if !h.config.IndexPageEnabled() {
|
|
||||||
c.SetStatusCode(fasthttp.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
cidURLParam := c.UserValue("cid").(string)
|
|
||||||
oidURLParam := c.UserValue("oid").(string)
|
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
|
||||||
reqLog := utils.GetReqLogOrDefault(ctx, h.log)
|
|
||||||
log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
|
|
||||||
|
|
||||||
unescapedKey, err := url.QueryUnescape(oidURLParam)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, cidURLParam, log)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(c, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
listFunc := h.getDirObjectsS3
|
|
||||||
if isNativeList {
|
|
||||||
// tree probe failed, trying to use native
|
|
||||||
listFunc = h.getDirObjectsNative
|
|
||||||
}
|
|
||||||
|
|
||||||
h.browseObjects(c, browseParams{
|
|
||||||
bucketInfo: bktInfo,
|
|
||||||
prefix: unescapedKey,
|
|
||||||
listObjects: listFunc,
|
|
||||||
isNative: isNativeList,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
|
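Aside for this hunk: the one-line predicate removed above decides when a failed FilePath search is retried as a FileName search, and its operator precedence is easy to misread. A minimal standalone sketch of just that expression (the attrFilePath key check and EnableFilepathFallback config gate are folded out here), runnable as-is:

package main

import (
    "fmt"
    "strings"
)

// Re-statement of the deleted predicate: fall back only for values that look
// like bare file names, i.e. "cat.png" or "/cat.png", never nested paths.
func needSearchByFileName(val string) bool {
    return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
}

func main() {
    for _, v := range []string{"cat.png", "/cat.png", "cats/cat.png", "/cats/cat.png"} {
        fmt.Printf("%-15s %v\n", v, needSearchByFileName(v)) // true, true, false, false
    }
}

These outcomes match the expectations encoded in TestNeedSearchByFileName further down in this comparison.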
@@ -1,580 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-package handler
-
-import (
-    "bytes"
-    "context"
-    "encoding/json"
-    "errors"
-    "io"
-    "mime/multipart"
-    "net/http"
-    "testing"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
-    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-    go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
-    "github.com/valyala/fasthttp"
-)
-
-const (
-    fuzzSuccessExitCode = 0
-    fuzzFailExitCode    = -1
-)
-
-func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) {
-    array := make([]string, count)
-    var err error
-
-    for i := 0; i < count; i++ {
-        err = tp.Reset()
-        if err != nil {
-            return nil, err
-        }
-
-        array[i], err = tp.GetString()
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    return array, nil
-}
-
-func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) {
-    array := make([]bool, count)
-    var err error
-
-    for i := 0; i < count; i++ {
-        err = tp.Reset()
-        if err != nil {
-            return nil, err
-        }
-
-        array[i], err = tp.GetBool()
-        if err != nil {
-            return nil, err
-        }
-    }
-
-    return array, nil
-}
-
-func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) {
-    count, err := tp.GetInt()
-    if err != nil {
-        return -1, err
-    }
-    count = count % max
-    if count < 0 {
-        count += max
-    }
-    return count, nil
-}
-
-func generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error {
-    count, err := tp.GetInt()
-    if err != nil {
-        return err
-    }
-    count = count % len(params)
-    if count < 0 {
-        count += len(params)
-    }
-
-    for i := 0; i < count; i++ {
-        position, err := tp.GetInt()
-        if err != nil {
-            return err
-        }
-        position = position % len(params)
-        if position < 0 {
-            position += len(params)
-        }
-
-        v, err := tp.GetString()
-        if err != nil {
-            return err
-        }
-
-        r.Header.Set(params[position], v)
-    }
-
-    return nil
-}
-
-func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) {
-    rnd, err := tp.GetBool()
-    if err != nil {
-        return "", err
-    }
-    if rnd == true {
-        initValue, err = tp.GetString()
-        if err != nil {
-            return "", err
-        }
-    }
-    return initValue, nil
-}
-
-func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
-    hc, err := prepareHandlerContext()
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    aclList := []acl.Basic{
-        acl.Private,
-        acl.PrivateExtended,
-        acl.PublicRO,
-        acl.PublicROExtended,
-        acl.PublicRW,
-        acl.PublicRWExtended,
-        acl.PublicAppend,
-        acl.PublicAppendExtended,
-    }
-
-    pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList))
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-    acl := aclList[pos]
-
-    strings, err := prepareStrings(tp, 6)
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-    bktName := strings[0]
-    objFileName := strings[1]
-    valAttr := strings[2]
-    keyAttr := strings[3]
-
-    if len(bktName) == 0 {
-        return nil, nil, cid.ID{}, nil, "", "", "", errors.New("not enought buckets")
-    }
-
-    cnrID, cnr, err := hc.prepareContainer(bktName, acl)
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    hc.frostfs.SetContainer(cnrID, cnr)
-
-    ctx := context.Background()
-    ctx = middleware.SetNamespace(ctx, "")
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-    r.SetUserValue("cid", cnrID.EncodeToString())
-
-    attributes := map[string]string{
-        object.AttributeFileName: objFileName,
-        keyAttr:                  valAttr,
-    }
-
-    var buff bytes.Buffer
-    w := multipart.NewWriter(&buff)
-    fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    content, err := tp.GetBytes()
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    if err = w.Close(); err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    r.Request.SetBodyStream(&buff, buff.Len())
-    r.Request.Header.Set("Content-Type", w.FormDataContentType())
-    r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)
-
-    err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]})
-    if err != nil {
-        return nil, nil, cid.ID{}, nil, "", "", "", err
-    }
-
-    hc.Handler().Upload(r)
-
-    if r.Response.StatusCode() != http.StatusOK {
-        return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload")
-    }
-
-    return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil
-}
-
-func InitFuzzUpload() {
-
-}
-
-func DoFuzzUpload(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    _, _, _, _, _, _, _, err = upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzUpload(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzUpload(data)
-    })
-}
-
-func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) {
-
-    var putRes putResponse
-
-    defer func() {
-        if r := recover(); r != nil {
-            panic(resp)
-        }
-    }()
-
-    data := resp.Response.Body()
-    err := json.Unmarshal(data, &putRes)
-
-    if err != nil {
-        return nil, err
-    }
-
-    obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
-    attr := object.NewAttribute()
-    attr.SetKey(object.AttributeFilePath)
-
-    filename, err = maybeFillRandom(tp, filename)
-    if err != nil {
-        return nil, err
-    }
-
-    attr.SetValue(filename)
-    obj.SetAttributes(append(obj.Attributes(), *attr)...)
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-
-    cid := cnrID.EncodeToString()
-    cid, err = maybeFillRandom(tp, cid)
-    if err != nil {
-        return nil, err
-    }
-    oid := putRes.ObjectID
-    oid, err = maybeFillRandom(tp, oid)
-    if err != nil {
-        return nil, err
-    }
-    r.SetUserValue("cid", cid)
-    r.SetUserValue("oid", oid)
-
-    rnd, err := tp.GetBool()
-    if err != nil {
-        return nil, err
-    }
-    if rnd == true {
-        r.SetUserValue("download", "true")
-    }
-
-    return r, nil
-}
-
-func InitFuzzGet() {
-
-}
-
-func DoFuzzGet(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    hc.Handler().DownloadByAddressOrBucketName(r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzGet(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzUpload(data)
-    })
-}
-
-func InitFuzzHead() {
-
-}
-
-func DoFuzzHead(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    hc.Handler().HeadByAddressOrBucketName(r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzHead(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzHead(data)
-    })
-}
-
-func InitFuzzDownloadByAttribute() {
-
-}
-
-func DoFuzzDownloadByAttribute(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    cid := cnrID.EncodeToString()
-    cid, err = maybeFillRandom(tp, cid)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    attrKey, err = maybeFillRandom(tp, attrKey)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    attrVal, err = maybeFillRandom(tp, attrVal)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-    r.SetUserValue("cid", cid)
-    r.SetUserValue("attr_key", attrKey)
-    r.SetUserValue("attr_val", attrVal)
-
-    hc.Handler().DownloadByAttribute(r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzDownloadByAttribute(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzDownloadByAttribute(data)
-    })
-}
-
-func InitFuzzHeadByAttribute() {
-
-}
-
-func DoFuzzHeadByAttribute(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    cid := cnrID.EncodeToString()
-    cid, err = maybeFillRandom(tp, cid)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    attrKey, err = maybeFillRandom(tp, attrKey)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    attrVal, err = maybeFillRandom(tp, attrVal)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-    r.SetUserValue("cid", cid)
-    r.SetUserValue("attr_key", attrKey)
-    r.SetUserValue("attr_val", attrVal)
-
-    hc.Handler().HeadByAttribute(r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzHeadByAttribute(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzHeadByAttribute(data)
-    })
-}
-
-func InitFuzzDownloadZipped() {
-
-}
-
-func DoFuzzDownloadZipped(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx, hc, cnrID, _, _, _, _, err := upload(tp)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    cid := cnrID.EncodeToString()
-    cid, err = maybeFillRandom(tp, cid)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    prefix := ""
-    prefix, err = maybeFillRandom(tp, prefix)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-    r.SetUserValue("cid", cid)
-    r.SetUserValue("prefix", prefix)
-
-    hc.Handler().DownloadZip(r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzDownloadZipped(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzDownloadZipped(data)
-    })
-}
-
-func InitFuzzStoreBearerTokenAppCtx() {
-
-}
-
-func DoFuzzStoreBearerTokenAppCtx(input []byte) int {
-    // FUZZER INIT
-    if len(input) < 100 {
-        return fuzzFailExitCode
-    }
-
-    tp, err := go_fuzz_utils.NewTypeProvider(input)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    prefix := ""
-    prefix, err = maybeFillRandom(tp, prefix)
-    if err != nil {
-        return fuzzFailExitCode
-    }
-
-    ctx := context.Background()
-    ctx = middleware.SetNamespace(ctx, "")
-
-    r := new(fasthttp.RequestCtx)
-    utils.SetContextToRequest(ctx, r)
-
-    strings, err := prepareStrings(tp, 3)
-
-    rand, err := prepareBools(tp, 2)
-
-    if rand[0] == true {
-        r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
-    } else if rand[1] == true {
-        r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
-    } else {
-        r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
-        r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
-    }
-
-    tokens.StoreBearerTokenAppCtx(ctx, r)
-
-    return fuzzSuccessExitCode
-}
-
-func FuzzStoreBearerTokenAppCtx(f *testing.F) {
-    f.Fuzz(func(t *testing.T, data []byte) {
-        DoFuzzStoreBearerTokenAppCtx(data)
-    })
-}
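Note on the file deleted above: the DoFuzz* wrappers suit external drivers such as go-fuzz, while the Fuzz*(f *testing.F) functions are Go-native fuzz targets gated behind the gofuzz build tag. If the harness were restored, a single target could be exercised with something like the following (package path assumed, not stated in this diff):

    go test -tags gofuzz -fuzz=FuzzUpload -run='^$' ./internal/handler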
@@ -12,10 +12,9 @@ import (
     "time"
 
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
+    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
@@ -26,62 +25,29 @@ import (
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-    "github.com/panjf2000/ants/v2"
     "github.com/stretchr/testify/require"
     "github.com/valyala/fasthttp"
     "go.uber.org/zap"
 )
 
-type treeServiceMock struct {
-    system map[string]map[string]*data.BaseNodeVersion
+type treeClientMock struct {
 }
 
-func newTreeService() *treeServiceMock {
-    return &treeServiceMock{
-        system: make(map[string]map[string]*data.BaseNodeVersion),
-    }
-}
-
-func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
-    _, ok := t.system["bucket-settings"]
-    if !ok {
-        return layer.ErrNodeNotFound
-    }
-    return nil
-}
-
-func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
-    return nil, "", nil
-}
-
-func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
+func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
     return nil, nil
 }
 
 type configMock struct {
-    additionalSearch bool
 }
 
 func (c *configMock) DefaultTimestamp() bool {
     return false
 }
 
-func (c *configMock) ArchiveCompression() bool {
+func (c *configMock) ZipCompression() bool {
     return false
 }
 
-func (c *configMock) IndexPageEnabled() bool {
-    return false
-}
-
-func (c *configMock) IndexPageTemplate() string {
-    return ""
-}
-
-func (c *configMock) IndexPageNativeTemplate() string {
-    return ""
-}
-
 func (c *configMock) ClientCut() bool {
     return false
 }
@@ -94,17 +60,13 @@ func (c *configMock) NamespaceHeader() string {
     return ""
 }
 
-func (c *configMock) EnableFilepathFallback() bool {
-    return c.additionalSearch
-}
-
 type handlerContext struct {
     key   *keys.PrivateKey
     owner user.ID
 
     h       *Handler
     frostfs *TestFrostFS
-    tree    *treeServiceMock
+    tree    *treeClientMock
     cfg     *configMock
 }
 
@@ -142,17 +104,13 @@ func prepareHandlerContext() (*handlerContext, error) {
             Size:     1,
             Lifetime: 1,
             Logger:   logger,
-        }, false),
+        }),
     }
 
-    treeMock := newTreeService()
+    treeMock := &treeClientMock{}
     cfgMock := &configMock{}
 
-    workerPool, err := ants.NewPool(1)
-    if err != nil {
-        return nil, err
-    }
-    handler := New(params, cfgMock, treeMock, workerPool)
+    handler := New(params, cfgMock, tree.NewTree(treeMock))
 
     return &handlerContext{
         key: key,
@@ -219,8 +177,10 @@ func TestBasic(t *testing.T) {
     require.NoError(t, err)
 
     obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
-    attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
-    obj.SetAttributes(append(obj.Attributes(), attr)...)
+    attr := object.NewAttribute()
+    attr.SetKey(object.AttributeFilePath)
+    attr.SetValue(objFileName)
+    obj.SetAttributes(append(obj.Attributes(), *attr)...)
 
     t.Run("get", func(t *testing.T) {
         r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
@@ -250,7 +210,7 @@ func TestBasic
 
     t.Run("zip", func(t *testing.T) {
         r = prepareGetZipped(ctx, bktName, "")
-        hc.Handler().DownloadZip(r)
+        hc.Handler().DownloadZipped(r)
 
         readerAt := bytes.NewReader(r.Response.Body())
         zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
@@ -269,159 +229,6 @@ func TestBasic(t *testing.T) {
     })
 }
 
-func TestFindObjectByAttribute(t *testing.T) {
-    hc, err := prepareHandlerContext()
-    require.NoError(t, err)
-    hc.cfg.additionalSearch = true
-
-    bktName := "bucket"
-    cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
-    require.NoError(t, err)
-    hc.frostfs.SetContainer(cnrID, cnr)
-
-    ctx := context.Background()
-    ctx = middleware.SetNamespace(ctx, "")
-
-    content := "hello"
-    r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
-    require.NoError(t, err)
-
-    hc.Handler().Upload(r)
-    require.Equal(t, r.Response.StatusCode(), http.StatusOK)
-
-    var putRes putResponse
-    err = json.Unmarshal(r.Response.Body(), &putRes)
-    require.NoError(t, err)
-
-    testAttrVal1 := "test-attr-val1"
-    testAttrVal2 := "test-attr-val2"
-    testAttrVal3 := "test-attr-val3"
-
-    for _, tc := range []struct {
-        name             string
-        firstAttr        object.Attribute
-        secondAttr       object.Attribute
-        reqAttrKey       string
-        reqAttrValue     string
-        err              string
-        additionalSearch bool
-    }{
-        {
-            name:             "success search by FileName",
-            firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-            secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-            reqAttrKey:       attrFileName,
-            reqAttrValue:     testAttrVal2,
-            additionalSearch: false,
-        },
-        {
-            name:             "failed search by FileName",
-            firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-            secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-            reqAttrKey:       attrFileName,
-            reqAttrValue:     testAttrVal3,
-            err:              "not found",
-            additionalSearch: false,
-        },
-        {
-            name:             "success search by FilePath (with additional search)",
-            firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-            secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-            reqAttrKey:       attrFilePath,
-            reqAttrValue:     testAttrVal2,
-            additionalSearch: true,
-        },
-        {
-            name:             "failed by FilePath (with additional search)",
-            firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-            secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-            reqAttrKey:       attrFilePath,
-            reqAttrValue:     testAttrVal3,
-            err:              "not found",
-            additionalSearch: true,
-        },
-    } {
-        t.Run(tc.name, func(t *testing.T) {
-            obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
-            obj.SetAttributes(tc.firstAttr, tc.secondAttr)
-            hc.cfg.additionalSearch = tc.additionalSearch
-
-            objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
-            if tc.err != "" {
-                require.Error(t, err)
-                require.Contains(t, err.Error(), tc.err)
-                return
-            }
-
-            require.NoError(t, err)
-            require.Equal(t, putRes.ObjectID, objID.EncodeToString())
-        })
-    }
-}
-
-func TestNeedSearchByFileName(t *testing.T) {
-    hc, err := prepareHandlerContext()
-    require.NoError(t, err)
-
-    for _, tc := range []struct {
-        name             string
-        attrKey          string
-        attrVal          string
-        additionalSearch bool
-        expected         bool
-    }{
-        {
-            name:             "need search - not contains slash",
-            attrKey:          attrFilePath,
-            attrVal:          "cat.png",
-            additionalSearch: true,
-            expected:         true,
-        },
-        {
-            name:             "need search - single lead slash",
-            attrKey:          attrFilePath,
-            attrVal:          "/cat.png",
-            additionalSearch: true,
-            expected:         true,
-        },
-        {
-            name:             "don't need search - single slash but not lead",
-            attrKey:          attrFilePath,
-            attrVal:          "cats/cat.png",
-            additionalSearch: true,
-            expected:         false,
-        },
-        {
-            name:             "don't need search - more one slash",
-            attrKey:          attrFilePath,
-            attrVal:          "/cats/cat.png",
-            additionalSearch: true,
-            expected:         false,
-        },
-        {
-            name:             "don't need search - incorrect attribute key",
-            attrKey:          attrFileName,
-            attrVal:          "cat.png",
-            additionalSearch: true,
-            expected:         false,
-        },
-        {
-            name:             "don't need search - additional search disabled",
-            attrKey:          attrFilePath,
-            attrVal:          "cat.png",
-            additionalSearch: false,
-            expected:         false,
-        },
-    } {
-        t.Run(tc.name, func(t *testing.T) {
-            hc.cfg.additionalSearch = tc.additionalSearch
-
-            res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
-            require.Equal(t, tc.expected, res)
-        })
-    }
-}
-
 func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
     r := new(fasthttp.RequestCtx)
     utils.SetContextToRequest(ctx, r)
@@ -454,13 +261,6 @@ func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.Requ
     return r
 }
 
-func prepareObjectAttributes(attrKey, attrValue string) object.Attribute {
-    attr := object.NewAttribute()
-    attr.SetKey(attrKey)
-    attr.SetValue(attrValue)
-    return *attr
-}
-
 const (
     keyAttr = "User-Attribute"
     valAttr = "user value"
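The prepareObjectAttributes helper removed in the last hunk above exists to collapse the three-call attribute setup that the feature branch re-inlines in TestBasic. A small self-contained sketch of the same helper against the SDK types used throughout this diff (the main wrapper and the "docs/readme.txt" value are illustrative only):

package main

import (
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
)

// Same shape as the deleted test helper: build a single object attribute.
func prepareObjectAttributes(attrKey, attrValue string) object.Attribute {
    attr := object.NewAttribute()
    attr.SetKey(attrKey)
    attr.SetValue(attrValue)
    return *attr
}

func main() {
    attr := prepareObjectAttributes(object.AttributeFilePath, "docs/readme.txt")
    fmt.Println(attr.Key(), "=", attr.Value())
}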
@@ -2,13 +2,11 @@ package handler
 
 import (
     "context"
-    "errors"
     "io"
     "net/http"
     "strconv"
     "time"
 
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -45,11 +43,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
     }
 
     req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
-    var (
-        contentType string
-        filename    string
-        filepath    string
-    )
+    var contentType string
     for _, attr := range obj.Attributes() {
         key := attr.Key()
         val := attr.Value()
@@ -73,15 +67,8 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
             req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
         case object.AttributeContentType:
             contentType = val
-        case object.AttributeFilePath:
-            filepath = val
-        case object.AttributeFileName:
-            filename = val
         }
     }
-    if filename == "" {
-        filename = filepath
-    }
 
     idsToResponse(&req.Response, obj)
 
@@ -96,7 +83,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
     }
 
     return h.frostfs.RangeObject(ctx, prmRange)
-    }, filename)
+    })
     if err != nil && err != io.EOF {
         req.handleFrostFSErr(err, start)
         return
@@ -115,36 +102,14 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {
 
 // HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
 func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
-    cidParam, _ := c.UserValue("cid").(string)
-    oidParam, _ := c.UserValue("oid").(string)
-
-    ctx := utils.GetContextFromRequest(c)
-    log := utils.GetReqLogOrDefault(ctx, h.log).With(
-        zap.String("cid", cidParam),
-        zap.String("oid", oidParam),
-    )
-
-    bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
-    if err != nil {
-        logAndSendBucketError(c, log, err)
-        return
-    }
-    checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
-    if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
-        logAndSendBucketError(c, log, checkS3Err)
-        return
-    }
-
-    req := newRequest(c, log)
-
-    var objID oid.ID
-    if checkS3Err == nil {
-        h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
-    } else if err = objID.DecodeString(oidParam); err == nil {
-        h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
-    } else {
-        logAndSendBucketError(c, log, checkS3Err)
-        return
+    test, _ := c.UserValue("oid").(string)
+    var id oid.ID
+
+    err := id.DecodeString(test)
+    if err != nil {
+        h.byObjectName(c, h.headObject)
+    } else {
+        h.byAddress(c, h.headObject)
     }
 }
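To summarize the master-side dispatch deleted above: HEAD requests first consult the tree service for a bucket-settings node (an S3-style bucket), and only then try to parse the oid segment as a native object ID. A hypothetical, simplified restatement of just the decision order (stub booleans stand in for the real tree probe and DecodeString call):

package main

import "fmt"

// Decision order from the removed HeadByAddressOrBucketName body:
// settings node present -> byS3Path; else oid parses -> byNativeAddress;
// else report the bucket error.
func dispatch(settingsNodeExists, oidParses bool) string {
    switch {
    case settingsNodeExists:
        return "byS3Path"
    case oidParses:
        return "byNativeAddress"
    default:
        return "bucket error"
    }
}

func main() {
    fmt.Println(dispatch(true, false))  // byS3Path
    fmt.Println(dispatch(false, true))  // byNativeAddress
    fmt.Println(dispatch(false, false)) // bucket error
}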
@@ -1,17 +1,13 @@
 package handler
 
 import (
-    "errors"
     "io"
-    "strconv"
 
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/multipart"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
     "go.uber.org/zap"
 )
 
-const attributeMultipartObjectSize = "S3-Multipart-Object-Size"
-
 // MultipartFile provides standard ReadCloser interface and also allows one to
 // get file name, it's used for multipart uploads.
 type MultipartFile interface {
@@ -42,39 +38,10 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF
         // ignore multipart/form-data values
         if filename == "" {
             l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
-            if err = part.Close(); err != nil {
-                l.Warn(logs.FailedToCloseReader, zap.Error(err))
-            }
             continue
         }
 
         return part, nil
     }
 }
-
-// getPayload returns initial payload if object is not multipart else composes new reader with parts data.
-func (h *Handler) getPayload(p getMultiobjectBodyParams) (io.ReadCloser, uint64, error) {
-    cid, ok := p.obj.Header.ContainerID()
-    if !ok {
-        return nil, 0, errors.New("no container id set")
-    }
-    oid, ok := p.obj.Header.ID()
-    if !ok {
-        return nil, 0, errors.New("no object id set")
-    }
-    size, err := strconv.ParseUint(p.strSize, 10, 64)
-    if err != nil {
-        return nil, 0, err
-    }
-    ctx := p.req.RequestCtx
-    params := PrmInitMultiObjectReader{
-        Addr:   newAddress(cid, oid),
-        Bearer: bearerToken(ctx),
-    }
-    payload, err := h.frostfs.InitMultiObjectReader(ctx, params)
-    if err != nil {
-        return nil, 0, err
-    }
-
-    return io.NopCloser(payload), size, nil
-}
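Conceptually, the deleted getPayload swaps a multipart object's stored payload for one stream over all of its parts, with InitMultiObjectReader doing the part discovery. As an analogy only (this is not the SDK mechanism, just the stream-stitching idea), the composition reduces to io.MultiReader plus io.NopCloser:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    // Hypothetical per-part payloads standing in for the stored part objects.
    parts := []io.Reader{
        strings.NewReader("part-1|"),
        strings.NewReader("part-2|"),
        strings.NewReader("part-3"),
    }
    payload := io.NopCloser(io.MultiReader(parts...))
    data, _ := io.ReadAll(payload)
    fmt.Println(string(data)) // part-1|part-2|part-3
}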
@@ -4,14 +4,13 @@ import (
     "bytes"
     "context"
     "io"
-    "mime"
     "net/http"
     "path"
     "strconv"
-    "strings"
     "time"
 
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -26,7 +25,7 @@ type readCloser struct {
 
 // initializes io.Reader with the limited size and detects Content-Type from it.
 // Returns r's error directly. Also returns the processed data.
-func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), filename string) (string, []byte, error) {
+func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
     if maxSize > sizeToDetectType {
         maxSize = sizeToDetectType
     }
@@ -45,42 +44,22 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file
 
     buf = buf[:n]
 
-    contentType := http.DetectContentType(buf)
-
-    // Since the detector detects the "text/plain" content type for various types of text files,
-    // including CSS, JavaScript, and CSV files,
-    // we'll determine the final content type based on the file's extension.
-    if strings.HasPrefix(contentType, "text/plain") {
-        ext := path.Ext(filename)
-        // If the file doesn't have a file extension, we'll keep the content type as is.
-        if len(ext) > 0 {
-            contentType = mime.TypeByExtension(ext)
-        }
-    }
-
-    return contentType, buf, err // to not lose io.EOF
-}
-
-type getMultiobjectBodyParams struct {
-    obj     *Object
-    req     request
-    strSize string
-}
-
-func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.Address) {
+    return http.DetectContentType(buf), buf, err // to not lose io.EOF
+}
+
+func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oid.Address) {
     var (
-        shouldDownload = req.QueryArgs().GetBool("download")
+        err      error
+        dis      = "inline"
         start    = time.Now()
         filename string
-        filepath    string
-        contentType string
     )
 
     prm := PrmObjectGet{
         PrmAuth: PrmAuth{
             BearerToken: bearerToken(ctx),
         },
-        Address: objAddress,
+        Address: objectAddress,
     }
 
     rObj, err := h.frostfs.GetObject(ctx, prm)
@@ -90,9 +69,15 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
     }
 
     // we can't close reader in this function, so how to do it?
-    req.setIDs(rObj.Header)
-    payload := rObj.Payload
+    if req.Request.URI().QueryArgs().GetBool("download") {
+        dis = "attachment"
+    }
+
     payloadSize := rObj.Header.PayloadSize()
 
+    req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
+    var contentType string
     for _, attr := range rObj.Header.Attributes() {
         key := attr.Key()
         val := attr.Value()
@@ -107,45 +92,33 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
         case object.AttributeFileName:
             filename = val
         case object.AttributeTimestamp:
-            if err = req.setTimestamp(val); err != nil {
-                req.log.Error(logs.CouldntParseCreationDate,
+            value, err := strconv.ParseInt(val, 10, 64)
+            if err != nil {
+                req.log.Info(logs.CouldntParseCreationDate,
+                    zap.String("key", key),
                     zap.String("val", val),
                     zap.Error(err))
+                continue
             }
+            req.Response.Header.Set(fasthttp.HeaderLastModified,
+                time.Unix(value, 0).UTC().Format(http.TimeFormat))
         case object.AttributeContentType:
             contentType = val
-        case object.AttributeFilePath:
-            filepath = val
-        case attributeMultipartObjectSize:
-            payload, payloadSize, err = h.getPayload(getMultiobjectBodyParams{
-                obj:     rObj,
-                req:     req,
-                strSize: val,
-            })
-            if err != nil {
-                req.handleFrostFSErr(err, start)
-                return
-            }
         }
     }
-    if filename == "" {
-        filename = filepath
-    }
 
-    req.setDisposition(shouldDownload, filename)
+    idsToResponse(&req.Response, &rObj.Header)
 
-    req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(payloadSize, 10))
-
     if len(contentType) == 0 {
         // determine the Content-Type from the payload head
         var payloadHead []byte
 
         contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
-            return payload, nil
-        }, filename)
+            return rObj.Payload, nil
+        })
         if err != nil && err != io.EOF {
             req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
-            ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
+            response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
             return
         }
 
@@ -153,46 +126,16 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
         var headReader io.Reader = bytes.NewReader(payloadHead)
 
         if err != io.EOF { // otherwise, we've already read full payload
-            headReader = io.MultiReader(headReader, payload)
+            headReader = io.MultiReader(headReader, rObj.Payload)
         }
 
         // note: we could do with io.Reader, but SetBodyStream below closes body stream
         // if it implements io.Closer and that's useful for us.
-        payload = readCloser{headReader, payload}
+        rObj.Payload = readCloser{headReader, rObj.Payload}
     }
     req.SetContentType(contentType)
-    req.Response.SetBodyStream(payload, int(payloadSize))
-}
-
-func (r *request) setIDs(obj object.Object) {
-    objID, _ := obj.ID()
-    cnrID, _ := obj.ContainerID()
-    r.Response.Header.Set(hdrObjectID, objID.String())
-    r.Response.Header.Set(hdrOwnerID, obj.OwnerID().String())
-    r.Response.Header.Set(hdrContainerID, cnrID.String())
-}
-
-func (r *request) setDisposition(shouldDownload bool, filename string) {
-    const (
-        inlineDisposition     = "inline"
-        attachmentDisposition = "attachment"
-    )
-
-    dis := inlineDisposition
-    if shouldDownload {
-        dis = attachmentDisposition
-    }
-
-    r.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
-}
-
-func (r *request) setTimestamp(timestamp string) error {
-    value, err := strconv.ParseInt(timestamp, 10, 64)
-    if err != nil {
-        return err
-    }
-    r.Response.Header.Set(fasthttp.HeaderLastModified,
-        time.Unix(value, 0).UTC().Format(http.TimeFormat))
-
-    return nil
+
+    req.Response.Header.Set(fasthttp.HeaderContentDisposition, dis+"; filename="+path.Base(filename))
+
+    req.Response.SetBodyStream(rObj.Payload, int(payloadSize))
 }
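The extension fallback removed in this hunk exists because http.DetectContentType labels CSS, JavaScript, and CSV bodies as plain text. A standalone sketch of the same refinement, mirroring the deleted logic including keeping the sniffed type when there is no extension:

package main

import (
    "fmt"
    "mime"
    "net/http"
    "path"
    "strings"
)

// Sniff the body, then trust the file extension for "text/plain" results.
func detect(buf []byte, filename string) string {
    contentType := http.DetectContentType(buf)
    if strings.HasPrefix(contentType, "text/plain") {
        if ext := path.Ext(filename); ext != "" {
            contentType = mime.TypeByExtension(ext)
        }
    }
    return contentType
}

func main() {
    body := []byte("body { color: red }")
    fmt.Println(detect(body, "site.css")) // text/css; charset=utf-8
    fmt.Println(detect(body, ""))         // text/plain; charset=utf-8
}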
@ -10,16 +10,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-const (
-	txtContentType        = "text/plain; charset=utf-8"
-	cssContentType        = "text/css; charset=utf-8"
-	htmlContentType       = "text/html; charset=utf-8"
-	javascriptContentType = "text/javascript; charset=utf-8"
-
-	htmlBody = "<!DOCTYPE html><html ><head><meta charset=\"utf-8\"><title>Test Html</title>"
-)
-
 func TestDetector(t *testing.T) {
+	txtContentType := "text/plain; charset=utf-8"
 	sb := strings.Builder{}
 	for i := 0; i < 10; i++ {
 		sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")

@ -27,63 +19,30 @@ func TestDetector(t *testing.T) {
 
 	for _, tc := range []struct {
 		Name                string
-		ExpectedContentType string
+		ContentType string
-		Content             string
+		Expected    string
-		FileName            string
 	}{
 		{
 			Name:                "less than 512b",
-			ExpectedContentType: txtContentType,
+			ContentType: txtContentType,
-			Content:             sb.String()[:256],
+			Expected:    sb.String()[:256],
-			FileName:            "test.txt",
 		},
 		{
 			Name:                "more than 512b",
-			ExpectedContentType: txtContentType,
+			ContentType: txtContentType,
-			Content:             sb.String(),
+			Expected:    sb.String(),
-			FileName:            "test.txt",
-		},
-		{
-			Name:                "css content type",
-			ExpectedContentType: cssContentType,
-			Content:             sb.String(),
-			FileName:            "test.css",
-		},
-		{
-			Name:                "javascript content type",
-			ExpectedContentType: javascriptContentType,
-			Content:             sb.String(),
-			FileName:            "test.js",
-		},
-		{
-			Name:                "html content type by file content",
-			ExpectedContentType: htmlContentType,
-			Content:             htmlBody,
-			FileName:            "test.detect-by-content",
-		},
-		{
-			Name:                "html content type by file extension",
-			ExpectedContentType: htmlContentType,
-			Content:             sb.String(),
-			FileName:            "test.html",
-		},
-		{
-			Name:                "empty file extension",
-			ExpectedContentType: txtContentType,
-			Content:             sb.String(),
-			FileName:            "test",
 		},
 	} {
 		t.Run(tc.Name, func(t *testing.T) {
-			contentType, data, err := readContentType(uint64(len(tc.Content)),
+			contentType, data, err := readContentType(uint64(len(tc.Expected)),
 				func(uint64) (io.Reader, error) {
-					return strings.NewReader(tc.Content), nil
+					return strings.NewReader(tc.Expected), nil
-				}, tc.FileName,
+				},
 			)
 
 			require.NoError(t, err)
-			require.Equal(t, tc.ExpectedContentType, contentType)
+			require.Equal(t, tc.ContentType, contentType)
-			require.True(t, strings.HasPrefix(tc.Content, string(data)))
+			require.True(t, strings.HasPrefix(tc.Expected, string(data)))
 		})
 	}
 }
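The test above exercises readContentType with payloads both shorter and longer than 512 bytes because content sniffing only ever looks at the first 512 bytes. A minimal sketch of the same idea using the standard library (not this repo's detector):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("<!DOCTYPE html><html><head><meta charset=\"utf-8\"></head></html>")
	// http.DetectContentType considers at most the first 512 bytes of data.
	fmt.Println(http.DetectContentType(payload))
	// Output: text/html; charset=utf-8
}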
@ -1,20 +1,15 @@
 package handler
 
 import (
-	"archive/tar"
-	"bytes"
-	"compress/gzip"
 	"context"
 	"encoding/json"
-	"errors"
 	"io"
 	"net/http"
-	"path/filepath"
 	"strconv"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"

@ -27,7 +22,6 @@ import (
 const (
 	jsonHeader   = "application/json; charset=UTF-8"
 	drainBufSize = 4096
-	explodeArchiveHeader = "X-Explode-Archive"
 )
 
 type putResponse struct {

@ -49,95 +43,93 @@ func (pr *putResponse) encode(w io.Writer) error {
 }
 
 // Upload handles multipart upload request.
-func (h *Handler) Upload(c *fasthttp.RequestCtx) {
+func (h *Handler) Upload(req *fasthttp.RequestCtx) {
-	var file MultipartFile
+	var (
+		file       MultipartFile
+		idObj      oid.ID
+		addr       oid.Address
+		scid, _    = req.UserValue("cid").(string)
+		log        = h.log.With(zap.String("cid", scid))
+		bodyStream = req.RequestBodyStream()
+		drainBuf   = make([]byte, drainBufSize)
+	)
 
-	scid, _ := c.UserValue("cid").(string)
+	ctx := utils.GetContextFromRequest(req)
-	bodyStream := c.RequestBodyStream()
-	drainBuf := make([]byte, drainBufSize)
-
-	ctx := utils.GetContextFromRequest(c)
-	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
-	log := reqLog.With(zap.String("cid", scid))
-
 	bktInfo, err := h.getBucketInfo(ctx, scid, log)
 	if err != nil {
-		logAndSendBucketError(c, log, err)
+		logAndSendBucketError(req, log, err)
 		return
 	}
 
-	boundary := string(c.Request.Header.MultipartFormBoundary())
+	defer func() {
-	if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
+		// If the temporary reader can be closed - let's close it.
-		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
+		if file == nil {
-		ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
 			return
 		}
+		err := file.Close()
-	filtered, err := filterHeaders(log, &c.Request.Header)
+		log.Debug(
-	if err != nil {
+			logs.CloseTemporaryMultipartFormFile,
-		log.Error(logs.FailedToFilterHeaders, zap.Error(err))
+			zap.Stringer("address", addr),
-		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
+			zap.String("filename", file.FileName()),
-		return
+			zap.Error(err),
-	}
-
-	if c.Request.Header.Peek(explodeArchiveHeader) != nil {
-		h.explodeArchive(request{c, log}, bktInfo, file, filtered)
-	} else {
-		h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
-	}
-
-	// Multipart is multipart and thus can contain more than one part which
-	// we ignore at the moment. Also, when dealing with chunked encoding
-	// the last zero-length chunk might be left unread (because multipart
-	// reader only cares about its boundary and doesn't look further) and
-	// it will be (erroneously) interpreted as the start of the next
-	// pipelined header. Thus, we need to drain the body buffer.
-	for {
-		_, err = bodyStream.Read(drainBuf)
-		if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
-			break
-		}
-	}
-}
-
-func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
-	c, log := req.RequestCtx, req.log
-	setIfNotExist(filtered, object.AttributeFileName, file.FileName())
-
-	attributes, err := h.extractAttributes(c, log, filtered)
-	if err != nil {
-		log.Error(logs.FailedToGetAttributes, zap.Error(err))
-		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-
-	idObj, err := h.uploadObject(c, bkt, attributes, file)
-	if err != nil {
-		h.handlePutFrostFSErr(c, err, log)
-		return
-	}
-	log.Debug(logs.ObjectUploaded,
-		zap.String("oid", idObj.EncodeToString()),
-		zap.String("FileName", file.FileName()),
 		)
+	}()
-	addr := newAddress(bkt.CID, idObj)
+	boundary := string(req.Request.Header.MultipartFormBoundary())
-	c.Response.Header.SetContentType(jsonHeader)
+	if file, err = fetchMultipartFile(h.log, bodyStream, boundary); err != nil {
-	// Try to return the response, otherwise, if something went wrong, throw an error.
+		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
-	if err = newPutResponse(addr).encode(c); err != nil {
+		response.Error(req, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
-		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
-		ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
 		return
 	}
+	filtered, err := filterHeaders(h.log, &req.Request.Header)
+	if err != nil {
+		log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
+		response.Error(req, err.Error(), fasthttp.StatusBadRequest)
+		return
 	}
 
-func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
+	now := time.Now()
-	ctx := utils.GetContextFromRequest(c)
+	if rawHeader := req.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
+		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
+			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
+		} else {
+			now = parsed
+		}
+	}
+
+	if err = utils.PrepareExpirationHeader(req, h.frostfs, filtered, now); err != nil {
+		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
+		response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	attributes := make([]object.Attribute, 0, len(filtered))
+	// prepares attributes from filtered headers
+	for key, val := range filtered {
+		attribute := object.NewAttribute()
+		attribute.SetKey(key)
+		attribute.SetValue(val)
+		attributes = append(attributes, *attribute)
+	}
+	// sets FileName attribute if it wasn't set from header
+	if _, ok := filtered[object.AttributeFileName]; !ok {
+		filename := object.NewAttribute()
+		filename.SetKey(object.AttributeFileName)
+		filename.SetValue(file.FileName())
+		attributes = append(attributes, *filename)
+	}
+	// sets Timestamp attribute if it wasn't set from header and enabled by settings
+	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
+		timestamp := object.NewAttribute()
+		timestamp.SetKey(object.AttributeTimestamp)
+		timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
+		attributes = append(attributes, *timestamp)
+	}
+
 	obj := object.New()
-	obj.SetContainerID(bkt.CID)
+	obj.SetContainerID(bktInfo.CID)
 	obj.SetOwnerID(*h.ownerID)
-	obj.SetAttributes(attrs...)
+	obj.SetAttributes(attributes...)
 
 	prm := PrmObjectCreate{
 		PrmAuth: PrmAuth{

@ -146,128 +138,48 @@ func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, att
 		Object:                 obj,
 		Payload:                file,
 		ClientCut:              h.config.ClientCut(),
-		WithoutHomomorphicHash: bkt.HomomorphicHashDisabled,
+		WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
 		BufferMaxSize:          h.config.BufferMaxSizeForPut(),
 	}
 
-	idObj, err := h.frostfs.CreateObject(ctx, prm)
+	if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
-	if err != nil {
+		h.handlePutFrostFSErr(req, err)
-		return oid.ID{}, err
-	}
-
-	return idObj, nil
-}
-
-func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
-	now := time.Now()
-	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
-		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
-			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
-		} else {
-			now = parsed
-		}
-	}
-	if err := utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
-		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
-		return nil, err
-	}
-	attributes := make([]object.Attribute, 0, len(filtered))
-	// prepares attributes from filtered headers
-	for key, val := range filtered {
-		attribute := newAttribute(key, val)
-		attributes = append(attributes, attribute)
-	}
-	// sets Timestamp attribute if it wasn't set from header and enabled by settings
-	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
-		timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10))
-		attributes = append(attributes, timestamp)
-	}
-
-	return attributes, nil
-}
-
-func newAttribute(key string, val string) object.Attribute {
-	attr := object.NewAttribute()
-	attr.SetKey(key)
-	attr.SetValue(val)
-	return *attr
-}
-
-// explodeArchive read files from archive and creates objects for each of them.
-// Sets FilePath attribute with name from tar.Header.
-func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
-	c, log := req.RequestCtx, req.log
-
-	// remove user attributes which vary for each file in archive
-	// to guarantee that they won't appear twice
-	delete(filtered, object.AttributeFileName)
-	delete(filtered, object.AttributeFilePath)
-
-	commonAttributes, err := h.extractAttributes(c, log, filtered)
-	if err != nil {
-		log.Error(logs.FailedToGetAttributes, zap.Error(err))
-		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
-	attributes := commonAttributes
-
-	reader := file
+	addr.SetObject(idObj)
-	if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
+	addr.SetContainer(bktInfo.CID)
-		log.Debug(logs.GzipReaderSelected)
-		gzipReader, err := gzip.NewReader(file)
+	// Try to return the response, otherwise, if something went wrong, throw an error.
-		if err != nil {
+	if err = newPutResponse(addr).encode(req); err != nil {
-			log.Error(logs.FailedToCreateGzipReader, zap.Error(err))
+		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
-			ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
+		response.Error(req, "could not encode response", fasthttp.StatusBadRequest)
 
 			return
 		}
-		defer func() {
+	// Multipart is multipart and thus can contain more than one part which
-			if err := gzipReader.Close(); err != nil {
+	// we ignore at the moment. Also, when dealing with chunked encoding
-				log.Warn(logs.FailedToCloseReader, zap.Error(err))
+	// the last zero-length chunk might be left unread (because multipart
-			}
+	// reader only cares about its boundary and doesn't look further) and
-		}()
+	// it will be (erroneously) interpreted as the start of the next
-		reader = gzipReader
+	// pipelined header. Thus we need to drain the body buffer.
-	}
-
-	tarReader := tar.NewReader(reader)
 	for {
-		obj, err := tarReader.Next()
+		_, err = bodyStream.Read(drainBuf)
-		if errors.Is(err, io.EOF) {
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
 			break
-		} else if err != nil {
-			log.Error(logs.FailedToReadFileFromTar, zap.Error(err))
-			ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
-			return
-		}
-
-		if isDir(obj.Name) {
-			continue
-		}
-
-		// set varying attributes
-		attributes = attributes[:len(commonAttributes)]
-		fileName := filepath.Base(obj.Name)
-		attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
-		attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
-
-		idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
-		if err != nil {
-			h.handlePutFrostFSErr(c, err, log)
-			return
-		}
-
-		log.Debug(logs.ObjectUploaded,
-			zap.String("oid", idObj.EncodeToString()),
-			zap.String("FileName", fileName),
-		)
 		}
 	}
+	// Report status code and content type.
+	req.Response.SetStatusCode(fasthttp.StatusOK)
+	req.Response.Header.SetContentType(jsonHeader)
+}
 
-func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
+func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error) {
-	statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
+	statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
 	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
 
-	log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
+	h.log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
-	ResponseError(r, msg, statusCode)
+	response.Error(r, msg, statusCode)
 }
 
 func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
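For orientation, a hedged client-side sketch of calling this handler. The /upload/{cid} route itself appears in the AddedPathUploadCid log constant further down in this diff, but the endpoint address and container ID below are invented:

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

func main() {
	// Hypothetical values: adjust to a real gateway endpoint and container ID.
	const endpoint = "http://localhost:8082"
	const cnrID = "HypotheticalContainerID111111111"

	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("file", "hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err = part.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err = w.Close(); err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost, endpoint+"/upload/"+cnrID, &body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	// An optional Date header becomes the base time for expiration attributes
	// (see the CouldNotParseClientTime branch in the handler above).
	req.Header.Set("Date", "Tue, 14 Nov 2023 22:13:20 GMT")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // the handler replies with a JSON body describing the stored object's address
}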
@ -2,19 +2,14 @@ package handler
 
 import (
 	"context"
-	"errors"
-	"fmt"
 	"strings"
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/valyala/fasthttp"
 	"go.uber.org/zap"
 )

@ -24,23 +19,16 @@ type request struct {
 	log *zap.Logger
 }
 
-func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
-	return request{
-		RequestCtx: ctx,
-		log:        log,
-	}
-}
-
 func (r *request) handleFrostFSErr(err error, start time.Time) {
 	logFields := []zap.Field{
 		zap.Stringer("elapsed", time.Since(start)),
 		zap.Error(err),
 	}
-	statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
+	statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
 	logFields = append(logFields, additionalFields...)
 
 	r.log.Error(logs.CouldNotReceiveObject, logFields...)
-	ResponseError(r.RequestCtx, msg, statusCode)
+	response.Error(r.RequestCtx, msg, statusCode)
 }
 
 func bearerToken(ctx context.Context) *bearer.Token {

@ -50,18 +38,6 @@ func bearerToken(ctx context.Context) *bearer.Token {
 	return nil
 }
 
-func isDir(name string) bool {
-	return name == "" || strings.HasSuffix(name, "/")
-}
-
-func loadAttributes(attrs []object.Attribute) map[string]string {
-	result := make(map[string]string)
-	for _, attr := range attrs {
-		result[attr.Key()] = attr.Value()
-	}
-	return result
-}
-
 func isValidToken(s string) bool {
 	for _, c := range s {
 		if c <= ' ' || c > 127 {

@ -88,55 +64,8 @@ func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
 	log.Error(logs.CouldntGetBucket, zap.Error(err))
 
 	if client.IsErrContainerNotFound(err) {
-		ResponseError(c, "Not Found", fasthttp.StatusNotFound)
+		response.Error(c, "Not Found", fasthttp.StatusNotFound)
 		return
 	}
-	ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
+	response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
-}
-
-func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
-	var addr oid.Address
-	addr.SetContainer(cnr)
-	addr.SetObject(obj)
-	return addr
-}
-
-// setIfNotExist sets key value to map if key is not present yet.
-func setIfNotExist(m map[string]string, key, value string) {
-	if _, ok := m[key]; !ok {
-		m[key] = value
-	}
-}
-
-func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
-	r.Error(msg+"\n", code)
-}
-
-func formErrorResponse(message string, err error) (int, string, []zap.Field) {
-	var (
-		msg        string
-		statusCode int
-		logFields  []zap.Field
-	)
-
-	st := new(sdkstatus.ObjectAccessDenied)
-
-	switch {
-	case errors.As(err, &st):
-		statusCode = fasthttp.StatusForbidden
-		reason := st.Reason()
-		msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
-		logFields = append(logFields, zap.String("error_detail", reason))
-	case errors.Is(err, ErrQuotaLimitReached):
-		statusCode = fasthttp.StatusConflict
-		msg = fmt.Sprintf("%s: %v", message, err)
-	case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
-		statusCode = fasthttp.StatusNotFound
-		msg = "Not Found"
-	default:
-		statusCode = fasthttp.StatusBadRequest
-		msg = fmt.Sprintf("%s: %v", message, err)
-	}
-
-	return statusCode, msg, logFields
 }
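To see what the removed formErrorResponse above produces, a hedged fragment (it assumes the package handler context of that file; ObjectAccessDenied.WriteReason is exercised the same way in the SDK error test near the end of this diff):

// hypothetical call site inside the old package handler
st := new(sdkstatus.ObjectAccessDenied)
st.WriteReason("operation is forbidden by policy")

statusCode, msg, logFields := formErrorResponse("could not receive object", st)
// statusCode == fasthttp.StatusForbidden (the errors.As branch matches)
// msg contains the base message, the error text, and the reason string
// logFields carries zap.String("error_detail", "operation is forbidden by policy")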
@ -1,98 +1,80 @@
 package logs
 
 const (
-	CouldntParseCreationDate = "couldn't parse creation date"
+	CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
-	CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
+	CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
-	CouldNotReceiveObject = "could not receive object"
+	CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
-	ObjectWasDeleted = "object was deleted"
+	WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
-	CouldNotSearchForObjects = "could not search for objects"
+	GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
-	ObjectNotFound = "object not found"
+	ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
-	ReadObjectListFailed = "read object list failed"
+	CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
-	FailedToAddObjectToArchive = "failed to add object to archive"
+	ObjectNotFound = "object not found" // Error in ../../downloader/download.go
-	FailedToGetObject = "failed to get object"
+	ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
-	IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
+	FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
-	ObjectsNotFound = "objects not found"
+	IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
-	CloseZipWriter = "close zip writer"
+	ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
-	ServiceIsRunning = "service is running"
+	CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
-	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
+	ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
-	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
+	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
-	ShuttingDownService = "shutting down service"
+	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
-	CantShutDownService = "can't shut down service"
+	ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
-	CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
+	CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
-	IgnorePartEmptyFormName = "ignore part, empty form name"
+	CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
-	IgnorePartEmptyFilename = "ignore part, empty filename"
+	IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
-	CouldNotReceiveMultipartForm = "could not receive multipart/form"
+	IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
-	CouldNotParseClientTime = "could not parse client time"
+	CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
-	CouldNotPrepareExpirationHeader = "could not prepare expiration header"
+	CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
-	CouldNotEncodeResponse = "could not encode response"
+	CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
-	CouldNotStoreFileInFrostfs = "could not store file in frostfs"
+	CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
-	AddAttributeToResultObject = "add attribute to result object"
+	CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
-	FailedToCreateResolver = "failed to create resolver"
+	CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
-	FailedToCreateWorkerPool = "failed to create worker pool"
+	CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
-	FailedToReadIndexPageTemplate = "failed to read index page template"
+	AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
-	SetCustomIndexPageTemplate = "set custom index page template"
+	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
-	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
+	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
-	MetricsAreDisabled = "metrics are disabled"
+	MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
-	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
+	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
-	StartingApplication = "starting application"
+	StartingApplication = "starting application" // Info in ../../app.go
-	StartingServer = "starting server"
+	StartingServer = "starting server" // Info in ../../app.go
-	ListenAndServe = "listen and serve"
+	ListenAndServe = "listen and serve" // Fatal in ../../app.go
-	ShuttingDownWebServer = "shutting down web server"
+	ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
-	FailedToShutdownTracing = "failed to shutdown tracing"
+	FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
-	SIGHUPConfigReloadStarted = "SIGHUP config reload started"
+	SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
-	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
+	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
-	FailedToReloadConfig = "failed to reload config"
+	FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
-	LogLevelWontBeUpdated = "log level won't be updated"
+	LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
-	FailedToUpdateResolvers = "failed to update resolvers"
+	FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
-	FailedToReloadServerParameters = "failed to reload server parameters"
+	FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
-	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
+	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
-	AddedPathUploadCid = "added path /upload/{cid}"
+	AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
-	AddedPathGetCidOid = "added path /get/{cid}/{oid}"
+	AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
-	AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
+	AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
-	AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
+	AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
-	Request = "request"
+	Request = "request" // Info in ../../app.go
-	CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
+	CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
-	FailedToAddServer = "failed to add server"
+	FailedToAddServer = "failed to add server" // Warn in ../../app.go
-	AddServer = "add server"
+	AddServer = "add server" // Info in ../../app.go
-	NoHealthyServers = "no healthy servers"
+	NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
-	FailedToInitializeTracing = "failed to initialize tracing"
+	FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
-	TracingConfigUpdated = "tracing config updated"
+	TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
-	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
+	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
-	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
+	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
-	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
+	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
-	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
+	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
-	UsingCredentials = "using credentials"
+	UsingCredentials = "using credentials" // Info in ../../settings.go
-	FailedToCreateConnectionPool = "failed to create connection pool"
+	FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
-	FailedToDialConnectionPool = "failed to dial connection pool"
+	FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
-	FailedToCreateTreePool = "failed to create tree pool"
+	FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
-	FailedToDialTreePool = "failed to dial tree pool"
+	FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
-	AddedStoragePeer = "added storage peer"
+	AddedStoragePeer = "added storage peer" // Info in ../../settings.go
-	CouldntGetBucket = "could not get bucket"
+	CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
-	CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
+	CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
-	FailedToSumbitTaskToPool = "failed to submit task to pool"
+	InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
-	FailedToHeadObject = "failed to head object"
+	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
-	FailedToIterateOverResponse = "failed to iterate over search response"
+	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
-	InvalidCacheEntryType = "invalid cache entry type"
-	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
-	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
 	FailedToUnescapeQuery = "failed to unescape query"
 	ServerReconnecting = "reconnecting server..."
 	ServerReconnectedSuccessfully = "server reconnected successfully"
 	ServerReconnectFailed = "failed to reconnect server"
 	WarnDuplicateAddress = "duplicate address"
-	MultinetDialSuccess = "multinet dial successful"
-	MultinetDialFail = "multinet dial failed"
-	FailedToLoadMultinetConfig = "failed to load multinet config"
-	MultinetConfigWontBeUpdated = "multinet config won't be updated"
-	ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
-	CouldntCacheNetmap = "couldn't cache netmap"
-	FailedToFilterHeaders = "failed to filter headers"
-	FailedToReadFileFromTar = "failed to read file from tar"
-	FailedToGetAttributes = "failed to get attributes"
-	ObjectUploaded = "object uploaded"
-	CloseGzipWriter = "close gzip writer"
-	CloseTarWriter = "close tar writer"
-	FailedToCloseReader = "failed to close reader"
-	FailedToCreateGzipReader = "failed to create gzip reader"
-	GzipReaderSelected = "gzip reader selected"
 )
@ -1,68 +0,0 @@
-package net
-
-import (
-	"errors"
-	"fmt"
-	"net/netip"
-	"slices"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/multinet"
-)
-
-var errEmptySourceIPList = errors.New("empty source IP list")
-
-type Subnet struct {
-	Prefix    string
-	SourceIPs []string
-}
-
-type Config struct {
-	Enabled       bool
-	Subnets       []Subnet
-	Balancer      string
-	Restrict      bool
-	FallbackDelay time.Duration
-	EventHandler  multinet.EventHandler
-}
-
-func (c Config) toMultinetConfig() (multinet.Config, error) {
-	var subnets []multinet.Subnet
-	for _, s := range c.Subnets {
-		var ms multinet.Subnet
-		p, err := netip.ParsePrefix(s.Prefix)
-		if err != nil {
-			return multinet.Config{}, fmt.Errorf("parse IP prefix '%s': %w", s.Prefix, err)
-		}
-		ms.Prefix = p
-		for _, ip := range s.SourceIPs {
-			addr, err := netip.ParseAddr(ip)
-			if err != nil {
-				return multinet.Config{}, fmt.Errorf("parse IP address '%s': %w", ip, err)
-			}
-			ms.SourceIPs = append(ms.SourceIPs, addr)
-		}
-		if len(ms.SourceIPs) == 0 {
-			return multinet.Config{}, errEmptySourceIPList
-		}
-		subnets = append(subnets, ms)
-	}
-	return multinet.Config{
-		Subnets:       subnets,
-		Balancer:      multinet.BalancerType(c.Balancer),
-		Restrict:      c.Restrict,
-		FallbackDelay: c.FallbackDelay,
-		Dialer:        newDefaultDialer(),
-		EventHandler:  c.EventHandler,
-	}, nil
-}
-
-func (c Config) equals(other Config) bool {
-	return c.Enabled == other.Enabled &&
-		slices.EqualFunc(c.Subnets, other.Subnets, func(lhs, rhs Subnet) bool {
-			return lhs.Prefix == rhs.Prefix && slices.Equal(lhs.SourceIPs, rhs.SourceIPs)
-		}) &&
-		c.Balancer == other.Balancer &&
-		c.Restrict == other.Restrict &&
-		c.FallbackDelay == other.FallbackDelay
-}
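A hedged sketch of building and validating the Config that the deleted toMultinetConfig consumed; all values below are invented, and "roundrobin" is only a guess at a valid multinet balancer name:

// hypothetical caller inside the deleted package net
cfg := Config{
	Enabled: true,
	Subnets: []Subnet{{
		Prefix:    "10.11.0.0/16",
		SourceIPs: []string{"10.11.0.5"},
	}},
	Balancer:      "roundrobin", // assumption: one of multinet's balancer type names
	FallbackDelay: 350 * time.Millisecond,
}

mc, err := cfg.toMultinetConfig() // parses the prefix and source IPs
if err != nil {
	panic(err) // e.g. errEmptySourceIPList when a subnet has no SourceIPs
}
_ = mc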
@ -1,54 +0,0 @@
-// NOTE: code is taken from https://github.com/grpc/grpc-go/blob/v1.68.x/internal/transport/http_util.go
-
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package net
-
-import (
-	"net/url"
-	"strings"
-)
-
-// parseDialTarget returns the network and address to pass to dialer.
-func parseDialTarget(target string) (string, string) {
-	net := "tcp"
-	m1 := strings.Index(target, ":")
-	m2 := strings.Index(target, ":/")
-	// handle unix:addr which will fail with url.Parse
-	if m1 >= 0 && m2 < 0 {
-		if n := target[0:m1]; n == "unix" {
-			return n, target[m1+1:]
-		}
-	}
-	if m2 >= 0 {
-		t, err := url.Parse(target)
-		if err != nil {
-			return net, target
-		}
-		scheme := t.Scheme
-		addr := t.Path
-		if scheme == "unix" {
-			if addr == "" {
-				addr = t.Host
-			}
-			return scheme, addr
-		}
-	}
-	return net, target
-}
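For reference, the observable behavior of the deleted parseDialTarget, traced from the code above (the addresses are invented):

// network, addr := parseDialTarget(target)
// "unix:relative.sock"       → ("unix", "relative.sock")    via the url.Parse bypass
// "unix:///var/run/app.sock" → ("unix", "/var/run/app.sock") scheme and path split by url.Parse
// "localhost:8080"           → ("tcp",  "localhost:8080")
// "dns:///example.com:443"   → ("tcp",  "dns:///example.com:443") non-unix schemes fall through unchanged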
@ -1,36 +0,0 @@
-package net
-
-import (
-	"net"
-	"syscall"
-	"time"
-
-	"golang.org/x/sys/unix"
-)
-
-func newDefaultDialer() net.Dialer {
-	// From `grpc.WithContextDialer` comment:
-	//
-	// Note: All supported releases of Go (as of December 2023) override the OS
-	// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
-	// with OS defaults for keepalive time and interval, use a net.Dialer that sets
-	// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
-	// option to true from the Control field. For a concrete example of how to do
-	// this, see internal.NetDialerWithTCPKeepalive().
-	//
-	// https://github.com/grpc/grpc-go/blob/830135e6c5a351abf75f0c9cfdf978e5df8daeba/dialoptions.go#L432
-	//
-	// From `internal.NetDialerWithTCPKeepalive` comment:
-	//
-	// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
-	// appropriate Go version becomes less than our least supported Go version, we
-	// should look into using the new API to make things more straightforward.
-	return net.Dialer{
-		KeepAlive: time.Duration(-1),
-		Control: func(_, _ string, c syscall.RawConn) error {
-			return c.Control(func(fd uintptr) {
-				_ = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
-			})
-		},
-	}
-}
@ -1,69 +0,0 @@
-package net
-
-import (
-	"context"
-	"net"
-	"sync"
-
-	"git.frostfs.info/TrueCloudLab/multinet"
-)
-
-type DialerSource struct {
-	guard sync.RWMutex
-
-	c Config
-
-	md multinet.Dialer
-}
-
-func NewDialerSource(c Config) (*DialerSource, error) {
-	result := &DialerSource{}
-	if err := result.build(c); err != nil {
-		return nil, err
-	}
-	return result, nil
-}
-
-func (s *DialerSource) build(c Config) error {
-	if c.Enabled {
-		mc, err := c.toMultinetConfig()
-		if err != nil {
-			return err
-		}
-		md, err := multinet.NewDialer(mc)
-		if err != nil {
-			return err
-		}
-		s.md = md
-		s.c = c
-		return nil
-	}
-	s.md = nil
-	s.c = c
-	return nil
-}
-
-// GrpcContextDialer returns grpc.WithContextDialer func.
-// Returns nil if multinet disabled.
-func (s *DialerSource) GrpcContextDialer() func(context.Context, string) (net.Conn, error) {
-	s.guard.RLock()
-	defer s.guard.RUnlock()
-
-	if s.c.Enabled {
-		return func(ctx context.Context, address string) (net.Conn, error) {
-			network, address := parseDialTarget(address)
-			return s.md.DialContext(ctx, network, address)
-		}
-	}
-	return nil
-}
-
-func (s *DialerSource) Update(c Config) error {
-	s.guard.Lock()
-	defer s.guard.Unlock()
-
-	if s.c.equals(c) {
-		return nil
-	}
-	return s.build(c)
-}
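A hedged sketch of wiring the deleted DialerSource into a gRPC client; grpc.WithContextDialer is the real gRPC option, but the import path for this internal package and the endpoint are assumptions:

package main

import (
	"log"

	internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net" // assumed path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	source, err := internalnet.NewDialerSource(internalnet.Config{Enabled: false})
	if err != nil {
		log.Fatal(err)
	}

	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// GrpcContextDialer returns nil when multinet is disabled; only add the
	// option when a custom dialer is actually configured.
	if dialer := source.GrpcContextDialer(); dialer != nil {
		opts = append(opts, grpc.WithContextDialer(dialer))
	}

	conn, err := grpc.Dial("frostfs-storage:8080", opts...) // endpoint invented
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}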
@ -1,28 +0,0 @@
-package net
-
-import (
-	"net"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"go.uber.org/zap"
-)
-
-type LogEventHandler struct {
-	logger *zap.Logger
-}
-
-func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err error) {
-	sourceIPString := "undefined"
-	if sourceIP != nil {
-		sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
-	}
-	if err == nil {
-		l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
-	} else {
-		l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
-	}
-}
-
-func NewLogEventHandler(logger *zap.Logger) LogEventHandler {
-	return LogEventHandler{logger: logger}
-}
@ -1,83 +0,0 @@
-package frostfs
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	"github.com/stretchr/testify/require"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-func TestHandleObjectError(t *testing.T) {
-	msg := "some msg"
-
-	t.Run("nil error", func(t *testing.T) {
-		err := handleObjectError(msg, nil)
-		require.Nil(t, err)
-	})
-
-	t.Run("simple access denied", func(t *testing.T) {
-		reason := "some reason"
-		inputErr := new(apistatus.ObjectAccessDenied)
-		inputErr.WriteReason(reason)
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrAccessDenied)
-		require.Contains(t, err.Error(), reason)
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("access denied - quota reached", func(t *testing.T) {
-		reason := "Quota limit reached"
-		inputErr := new(apistatus.ObjectAccessDenied)
-		inputErr.WriteReason(reason)
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
-		require.Contains(t, err.Error(), reason)
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("simple timeout", func(t *testing.T) {
-		inputErr := errors.New("timeout")
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), inputErr.Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("deadline exceeded", func(t *testing.T) {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		defer cancel()
-		<-ctx.Done()
-
-		err := handleObjectError(msg, ctx.Err())
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), ctx.Err().Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("grpc deadline exceeded", func(t *testing.T) {
-		inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), inputErr.Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("unknown error", func(t *testing.T) {
-		inputErr := errors.New("unknown error")
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, inputErr)
-		require.Contains(t, err.Error(), msg)
-	})
-}
@ -1,241 +0,0 @@
-package frostfs
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-)
-
-// PartInfo is upload information about part.
-type PartInfo struct {
-	Key      string    `json:"key"`
-	UploadID string    `json:"uploadId"`
-	Number   int       `json:"number"`
-	OID      oid.ID    `json:"oid"`
-	Size     uint64    `json:"size"`
-	ETag     string    `json:"etag"`
-	MD5      string    `json:"md5"`
-	Created  time.Time `json:"created"`
-}
-
-type GetFrostFSParams struct {
-	// payload range
-	Off, Ln uint64
-	Addr    oid.Address
-}
-
-type PartObj struct {
-	OID  oid.ID
-	Size uint64
-}
-
-type readerInitiator interface {
-	InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error)
-}
-
-// MultiObjectReader implements io.Reader of payloads of the object list stored in the FrostFS network.
-type MultiObjectReader struct {
-	ctx context.Context
-
-	layer readerInitiator
-
-	startPartOffset uint64
-	endPartLength   uint64
-
-	prm GetFrostFSParams
-
-	curIndex  int
-	curReader io.ReadCloser
-
-	parts []PartObj
-}
-
-type MultiObjectReaderConfig struct {
-	Initiator readerInitiator
-
-	// the offset of complete object and total size to read
-	Off, Ln uint64
-
-	Addr  oid.Address
-	Parts []PartObj
-}
-
-var (
-	errOffsetIsOutOfRange = errors.New("offset is out of payload range")
-	errLengthIsOutOfRange = errors.New("length is out of payload range")
-	errEmptyPartsList     = errors.New("empty parts list")
-	errorZeroRangeLength  = errors.New("zero range length")
-)
-
-func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
-	combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
-		PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
-		Address: p.Addr,
-	})
-	if err != nil {
-		return nil, fmt.Errorf("get combined object '%s': %w", p.Addr.Object().EncodeToString(), err)
-	}
-
-	var parts []*PartInfo
-	if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
-		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
-	}
-
-	objParts := make([]PartObj, len(parts))
-	for i, part := range parts {
-		objParts[i] = PartObj{
-			OID:  part.OID,
-			Size: part.Size,
-		}
-	}
-
-	return NewMultiObjectReader(ctx, MultiObjectReaderConfig{
-		Initiator: x,
-		Off:       p.Off,
-		Ln:        p.Ln,
-		Parts:     objParts,
-		Addr:      p.Addr,
-	})
-}
-
-func NewMultiObjectReader(ctx context.Context, cfg MultiObjectReaderConfig) (*MultiObjectReader, error) {
-	if len(cfg.Parts) == 0 {
-		return nil, errEmptyPartsList
-	}
-
-	r := &MultiObjectReader{
-		ctx:   ctx,
-		layer: cfg.Initiator,
-		prm: GetFrostFSParams{
-			Addr: cfg.Addr,
-		},
-		parts: cfg.Parts,
-	}
-
-	if cfg.Off+cfg.Ln == 0 {
-		return r, nil
-	}
-
-	if cfg.Off > 0 && cfg.Ln == 0 {
-		return nil, errorZeroRangeLength
-	}
-
-	startPartIndex, startPartOffset := findStartPart(cfg)
-	if startPartIndex == -1 {
-		return nil, errOffsetIsOutOfRange
-	}
-	r.startPartOffset = startPartOffset
-
-	endPartIndex, endPartLength := findEndPart(cfg)
-	if endPartIndex == -1 {
-		return nil, errLengthIsOutOfRange
-	}
-	r.endPartLength = endPartLength
-
-	r.parts = cfg.Parts[startPartIndex : endPartIndex+1]
-
-	return r, nil
-}
-
-func findStartPart(cfg MultiObjectReaderConfig) (index int, offset uint64) {
-	position := cfg.Off
-	for i, part := range cfg.Parts {
-		// Strict inequality when searching for start position to avoid reading zero length part.
-		if position < part.Size {
-			return i, position
-		}
-		position -= part.Size
-	}
-
-	return -1, 0
-}
-
-func findEndPart(cfg MultiObjectReaderConfig) (index int, length uint64) {
-	position := cfg.Off + cfg.Ln
-	for i, part := range cfg.Parts {
-		// Non-strict inequality when searching for end position to avoid out of payload range error.
-		if position <= part.Size {
-			return i, position
-		}
-		position -= part.Size
-	}
-
-	return -1, 0
-}
-
-func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
-	if x.curReader != nil {
-		n, err = x.curReader.Read(p)
-		if err != nil {
-			if closeErr := x.curReader.Close(); closeErr != nil {
-				return n, fmt.Errorf("%w (close err: %v)", err, closeErr)
-			}
-		}
-		if !errors.Is(err, io.EOF) {
-			return n, err
-		}
-
-		x.curIndex++
-	}
-
-	if x.curIndex == len(x.parts) {
-		return n, io.EOF
-	}
-
-	x.prm.Addr.SetObject(x.parts[x.curIndex].OID)
-
-	if x.curIndex == 0 {
-		x.prm.Off = x.startPartOffset
-		x.prm.Ln = x.parts[x.curIndex].Size - x.startPartOffset
-	}
-
-	if x.curIndex == len(x.parts)-1 {
-		x.prm.Ln = x.endPartLength - x.prm.Off
-	}
-
-	x.curReader, err = x.layer.InitFrostFSObjectPayloadReader(x.ctx, x.prm)
-	if err != nil {
-		return n, fmt.Errorf("init payload reader for the next part: %w", err)
-	}
-
-	x.prm.Off = 0
-	x.prm.Ln = 0
-
-	next, err := x.Read(p[n:])
-
-	return n + next, err
-}
-
-// InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
-// Zero range corresponds to full payload (panics if only offset is set).
-func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
-	var prmAuth handler.PrmAuth
-
-	if p.Off+p.Ln != 0 {
-		prm := handler.PrmObjectRange{
-			PrmAuth:      prmAuth,
-			PayloadRange: [2]uint64{p.Off, p.Ln},
-			Address:      p.Addr,
-		}
-
-		return x.RangeObject(ctx, prm)
-	}
-
-	prm := handler.PrmObjectGet{
-		PrmAuth: prmAuth,
-		Address: p.Addr,
-	}
-
-	res, err := x.GetObject(ctx, prm)
-	if err != nil {
-		return nil, err
-	}
-
-	return res.Payload, nil
-}
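To make the start/end search in the deleted file concrete, a worked example that re-states the two loops with invented part sizes:

package main

import "fmt"

// findStart mirrors findStartPart: strict '<' so we never pick a part that
// would contribute zero bytes.
func findStart(sizes []uint64, off uint64) (int, uint64) {
	for i, s := range sizes {
		if off < s {
			return i, off
		}
		off -= s
	}
	return -1, 0
}

// findEnd mirrors findEndPart: non-strict '<=' so a range may finish exactly
// on a part boundary without falling out of the payload range.
func findEnd(sizes []uint64, end uint64) (int, uint64) {
	for i, s := range sizes {
		if end <= s {
			return i, end
		}
		end -= s
	}
	return -1, 0
}

func main() {
	sizes := []uint64{12, 13, 12} // three parts, 37 bytes of combined payload
	off, ln := uint64(10), uint64(8)

	startIdx, startOff := findStart(sizes, off)
	endIdx, endLen := findEnd(sizes, off+ln)
	fmt.Println(startIdx, startOff) // 0 10 → start in part 0 at offset 10
	fmt.Println(endIdx, endLen)     // 1 6  → finish in part 1 after 6 bytes
	// The reader therefore serves part0[10:12] followed by part1[0:6].
}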
@ -1,137 +0,0 @@
package frostfs

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"testing"

	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"github.com/stretchr/testify/require"
)

type readerInitiatorMock struct {
	parts map[oid.ID][]byte
}

func (r *readerInitiatorMock) InitFrostFSObjectPayloadReader(_ context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
	partPayload, ok := r.parts[p.Addr.Object()]
	if !ok {
		return nil, errors.New("part not found")
	}

	if p.Off+p.Ln == 0 {
		return io.NopCloser(bytes.NewReader(partPayload)), nil
	}

	if p.Off > uint64(len(partPayload)-1) {
		return nil, fmt.Errorf("invalid offset: %d/%d", p.Off, len(partPayload))
	}

	if p.Off+p.Ln > uint64(len(partPayload)) {
		return nil, fmt.Errorf("invalid range: %d-%d/%d", p.Off, p.Off+p.Ln, len(partPayload))
	}

	return io.NopCloser(bytes.NewReader(partPayload[p.Off : p.Off+p.Ln])), nil
}

func prepareDataReader() ([]byte, []PartObj, *readerInitiatorMock) {
	mockInitReader := &readerInitiatorMock{
		parts: map[oid.ID][]byte{
			oidtest.ID(): []byte("first part 1"),
			oidtest.ID(): []byte("second part 2"),
			oidtest.ID(): []byte("third part 3"),
		},
	}

	var fullPayload []byte
	parts := make([]PartObj, 0, len(mockInitReader.parts))
	for id, payload := range mockInitReader.parts {
		parts = append(parts, PartObj{OID: id, Size: uint64(len(payload))})
		fullPayload = append(fullPayload, payload...)
	}

	return fullPayload, parts, mockInitReader
}

func TestMultiReader(t *testing.T) {
	ctx := context.Background()

	fullPayload, parts, mockInitReader := prepareDataReader()

	for _, tc := range []struct {
		name string
		off  uint64
		ln   uint64
		err  error
	}{
		{
			name: "simple read all",
		},
		{
			name: "simple read with length",
			ln:   uint64(len(fullPayload)),
		},
		{
			name: "middle of parts",
			off:  parts[0].Size + 2,
			ln:   4,
		},
		{
			name: "first and second",
			off:  parts[0].Size - 4,
			ln:   8,
		},
		{
			name: "first and third",
			off:  parts[0].Size - 4,
			ln:   parts[1].Size + 8,
		},
		{
			name: "second part",
			off:  parts[0].Size,
			ln:   parts[1].Size,
		},
		{
			name: "second and third",
			off:  parts[0].Size,
			ln:   parts[1].Size + parts[2].Size,
		},
		{
			name: "offset out of range",
			off:  uint64(len(fullPayload) + 1),
			ln:   1,
			err:  errOffsetIsOutOfRange,
		},
		{
			name: "zero length",
			off:  parts[1].Size + 1,
			ln:   0,
			err:  errorZeroRangeLength,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multiReader, err := NewMultiObjectReader(ctx, MultiObjectReaderConfig{
				Initiator: mockInitReader,
				Parts:     parts,
				Off:       tc.off,
				Ln:        tc.ln,
			})
			require.ErrorIs(t, err, tc.err)

			if tc.err == nil {
				off := tc.off
				ln := tc.ln
				if off+ln == 0 {
					ln = uint64(len(fullPayload))
				}
				data, err := io.ReadAll(multiReader)
				require.NoError(t, err)
				require.Equal(t, fullPayload[off:off+ln], data)
			}
		})
	}
}
@ -1,69 +0,0 @@
package frostfs

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"go.uber.org/zap"
)

type Source struct {
	frostFS     *FrostFS
	netmapCache *cache.NetmapCache
	bucketCache *cache.BucketCache
	log         *zap.Logger
}

func NewSource(frostFS *FrostFS, netmapCache *cache.NetmapCache, bucketCache *cache.BucketCache, log *zap.Logger) *Source {
	return &Source{
		frostFS:     frostFS,
		netmapCache: netmapCache,
		bucketCache: bucketCache,
		log:         log,
	}
}

func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
	cachedNetmap := s.netmapCache.Get()
	if cachedNetmap != nil {
		return *cachedNetmap, nil
	}

	netmapSnapshot, err := s.frostFS.NetmapSnapshot(ctx)
	if err != nil {
		return netmap.NetMap{}, fmt.Errorf("get netmap: %w", err)
	}

	if err = s.netmapCache.Put(netmapSnapshot); err != nil {
		s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err))
	}

	return netmapSnapshot, nil
}

func (s *Source) PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error) {
	info := s.bucketCache.GetByCID(cnrID)
	if info != nil {
		return info.PlacementPolicy, nil
	}

	prm := handler.PrmContainer{
		ContainerID: cnrID,
	}
	res, err := s.frostFS.Container(ctx, prm)
	if err != nil {
		return netmap.PlacementPolicy{}, fmt.Errorf("get container: %w", err)
	}

	// We don't put container back to the cache to keep cache
	// coherent to the requests made by users. FrostFS Source
	// is being used by SDK Tree Pool and it should not fill cache
	// with possibly irrelevant container values.

	return res.PlacementPolicy(), nil
}
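A usage sketch of the read-through behavior above; dependency construction is omitted and the variable names are illustrative, not part of the diff:

	src := NewSource(frostFS, netmapCache, bucketCache, logger)

	nm, err := src.NetMapSnapshot(ctx) // cache miss: fetched from FrostFS, then stored
	if err == nil {
		nm, _ = src.NetMapSnapshot(ctx) // cache hit: served from netmapCache
	}
	_ = nm

Note the asymmetry with PlacementPolicy, which deliberately reads from bucketCache but never writes back to it, for the reason given in the comment above.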
@ -1,163 +0,0 @@
package frostfs

import (
	"context"
	"errors"
	"fmt"
	"io"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
	apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
)

type GetNodeByPathResponseInfoWrapper struct {
	response *apitree.GetNodeByPathResponseInfo
}

func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
	return []uint64{n.response.GetNodeID()}
}

func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
	return []uint64{n.response.GetParentID()}
}

func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
	return []uint64{n.response.GetTimestamp()}
}

func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
	res := make([]tree.Meta, len(n.response.GetMeta()))
	for i, value := range n.response.GetMeta() {
		res[i] = value
	}
	return res
}

type PoolWrapper struct {
	p *treepool.Pool
}

func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
	return &PoolWrapper{p: p}
}

func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	poolPrm := treepool.GetNodesParams{
		CID:           prm.CnrID,
		TreeID:        prm.TreeID,
		Path:          prm.Path,
		Meta:          prm.Meta,
		PathAttribute: tree.FileNameKey,
		LatestOnly:    prm.LatestOnly,
		AllAttrs:      prm.AllAttrs,
		BearerToken:   getBearer(ctx),
	}

	nodes, err := w.p.GetNodes(ctx, poolPrm)
	if err != nil {
		return nil, handleError(err)
	}

	res := make([]tree.NodeResponse, len(nodes))
	for i, info := range nodes {
		res[i] = GetNodeByPathResponseInfoWrapper{info}
	}

	return res, nil
}

func getBearer(ctx context.Context) []byte {
	token, err := tokens.LoadBearerToken(ctx)
	if err != nil {
		return nil
	}
	return token.Marshal()
}

func handleError(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, treepool.ErrNodeNotFound) {
		return fmt.Errorf("%w: %s", tree.ErrNodeNotFound, err.Error())
	}
	if errors.Is(err, treepool.ErrNodeAccessDenied) {
		return fmt.Errorf("%w: %s", tree.ErrNodeAccessDenied, err.Error())
	}

	return err
}

func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
	order := treepool.NoneOrder
	if sort {
		order = treepool.AscendingOrder
	}
	poolPrm := treepool.GetSubTreeParams{
		CID:         bktInfo.CID,
		TreeID:      treeID,
		RootID:      rootID,
		Depth:       depth,
		BearerToken: getBearer(ctx),
		Order:       order,
	}
	if len(rootID) == 1 && rootID[0] == 0 {
		// storage node interprets 'nil' value as []uint64{0}
		// gate wants to send 'nil' value instead of []uint64{0}, because
		// it provides compatibility with previous tree service api where
		// single uint64(0) value is dropped from signature
		poolPrm.RootID = nil
	}

	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
	if err != nil {
		return nil, handleError(err)
	}

	var subtree []tree.NodeResponse

	node, err := subTreeReader.Next()
	for err == nil {
		subtree = append(subtree, GetSubTreeResponseBodyWrapper{node})
		node, err = subTreeReader.Next()
	}
	if err != io.EOF {
		return nil, handleError(err)
	}

	return subtree, nil
}

type GetSubTreeResponseBodyWrapper struct {
	response *apitree.GetSubTreeResponseBody
}

func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
	return n.response.GetNodeID()
}

func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
	resp := n.response.GetParentID()
	if resp == nil {
		// storage sends nil that should be interpreted as []uint64{0}
		// due to protobuf compatibility, see 'GetSubTree' function
		return []uint64{0}
	}
	return resp
}

func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
	return n.response.GetTimestamp()
}

func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
	res := make([]tree.Meta, len(n.response.GetMeta()))
	for i, value := range n.response.GetMeta() {
		res[i] = value
	}
	return res
}
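The root-ID compatibility rule in GetSubTree above can be summarized in isolation (a sketch, not part of the diff):

	rootID := []uint64{0}
	var wire []uint64 // value actually sent to the tree service
	if !(len(rootID) == 1 && rootID[0] == 0) {
		wire = rootID
	}
	// wire is nil here; the storage node interprets nil as []uint64{0},
	// matching the older tree service API where a single zero ID was
	// dropped from the signature.

GetSubTreeResponseBodyWrapper.GetParentID applies the inverse mapping on the way back, turning a nil parent ID into []uint64{0}.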
@ -1,112 +0,0 @@
{{$container := .Container}}
{{ $prefix := trimPrefix .Prefix }}
<!DOCTYPE html>
<html lang="en">
<head>
	<meta charset="UTF-8"/>
	<title>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}/{{$prefix}}/{{end}}</title>
	<style>
		.alert {
			width: 80%;
			box-sizing: border-box;
			padding: 20px;
			background-color: #f44336;
			color: white;
			margin-bottom: 15px;
		}
		table {
			width: 80%;
			border-collapse: collapse;
		}
		body {
			background: #f2f2f2;
		}
		table, th, td {
			border: 0 solid transparent;
		}
		th, td {
			padding: 10px;
			text-align: left;
		}
		th {
			background-color: #c3bcbc;
		}

		h1 {
			font-size: 1.5em;
		}
		tr:nth-child(even) {background-color: #ebe7e7;}
	</style>
</head>
<body>
<h1>Index of {{.Protocol}}://{{$container}}/{{if $prefix}}{{$prefix}}/{{end}}</h1>
{{ if .HasErrors }}
<div class="alert">
	Errors occurred while processing the request. Perhaps some objects are missing
</div>
{{ end }}
<table>
	<thead>
	<tr>
		<th>Filename</th>
		<th>OID</th>
		<th>Size</th>
		<th>Created</th>
		<th>Download</th>
	</tr>
	</thead>
	<tbody>
	{{ $trimmedPrefix := trimPrefix $prefix }}
	{{if $trimmedPrefix }}
	<tr>
		<td>
			⮐<a href="/get/{{$container}}{{ urlencode $trimmedPrefix }}/">..</a>
		</td>
		<td></td>
		<td></td>
		<td></td>
		<td></td>
	</tr>
	{{else}}
	<tr>
		<td>
			⮐<a href="/get/{{$container}}/">..</a>
		</td>
		<td></td>
		<td></td>
		<td></td>
		<td></td>
	</tr>
	{{end}}
	{{range .Objects}}
	<tr>
		<td>
			{{if .IsDir}}
			🗀
			<a href="{{.GetURL}}/">
				{{.FileName}}/
			</a>
			{{else}}
			🗎
			<a href="{{ .GetURL }}">
				{{.FileName}}
			</a>
			{{end}}
		</td>
		<td>{{.OID}}</td>
		<td>{{if not .IsDir}}{{ formatSize .Size }}{{end}}</td>
		<td>{{ .Created }}</td>
		<td>
			{{ if .OID }}
			<a href="{{ .GetURL }}?download=true">
				Link
			</a>
			{{ end }}
		</td>
	</tr>
	{{end}}
	</tbody>
</table>
</body>
</html>
@ -1,6 +0,0 @@
package templates

import _ "embed"

//go:embed index.gotmpl
var DefaultIndexTemplate string
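The template above relies on three helper functions (trimPrefix, urlencode, formatSize) that must be registered before parsing. A minimal sketch of that wiring; the helper implementations here are illustrative assumptions, not the gateway's actual functions:

	import (
		"fmt"
		"html/template"
		"net/url"
		"strings"
	)

	var indexTemplate = template.Must(template.New("index").Funcs(template.FuncMap{
		"trimPrefix": func(s string) string { return strings.Trim(s, "/") },            // assumed semantics
		"urlencode":  url.PathEscape,                                                   // assumed encoder
		"formatSize": func(size uint64) string { return fmt.Sprintf("%d B", size) },    // assumed formatting
	}).Parse(DefaultIndexTemplate))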
@ -76,15 +76,6 @@ var appMetricsDesc = map[string]map[string]Description{
 			VariableLabels: []string{"endpoint"},
 		},
 	},
-	statisticSubsystem: {
-		droppedLogs: Description{
-			Type:      dto.MetricType_COUNTER,
-			Namespace: namespace,
-			Subsystem: statisticSubsystem,
-			Name:      droppedLogs,
-			Help:      "Dropped logs (by sampling) count",
-		},
-	},
 }

 type Description struct {
@ -157,12 +148,3 @@ func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
 		description.VariableLabels,
 	)
 }
-
-func mustNewCounter(description Description) prometheus.Counter {
-	if description.Type != dto.MetricType_COUNTER {
-		panic("invalid metric type")
-	}
-	return prometheus.NewCounter(
-		prometheus.CounterOpts(newOpts(description)),
-	)
-}
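Taken together, the two removed pieces formed a small pipeline: a Description entry drives mustNewCounter, whose result is then registered. A condensed sketch of that flow as it existed on the master side:

	desc := appMetricsDesc[statisticSubsystem][droppedLogs]
	counter := mustNewCounter(desc) // panics unless desc.Type is dto.MetricType_COUNTER
	prometheus.MustRegister(counter)
	counter.Inc()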
@ -14,13 +14,11 @@ const (
 	stateSubsystem  = "state"
 	poolSubsystem   = "pool"
 	serverSubsystem = "server"
-	statisticSubsystem = "statistic"
 )

 const (
 	healthMetric      = "health"
 	versionInfoMetric = "version_info"
-	droppedLogs       = "dropped_logs"
 )

 const (
@ -37,6 +35,8 @@ const (
 	methodGetContainer    = "get_container"
 	methodListContainer   = "list_container"
 	methodDeleteContainer = "delete_container"
+	methodGetContainerEacl = "get_container_eacl"
+	methodSetContainerEacl = "set_container_eacl"
 	methodEndpointInfo    = "endpoint_info"
 	methodNetworkInfo     = "network_info"
 	methodPutObject       = "put_object"
@ -69,7 +69,6 @@ type GateMetrics struct {
 	stateMetrics
 	poolMetricsCollector
 	serverMetrics
-	statisticMetrics
 }

 type stateMetrics struct {
@ -77,10 +76,6 @@ type stateMetrics struct {
 	versionInfo *prometheus.GaugeVec
 }

-type statisticMetrics struct {
-	droppedLogs prometheus.Counter
-}
-
 type poolMetricsCollector struct {
 	scraper       StatisticScraper
 	overallErrors prometheus.Gauge
@ -101,14 +96,10 @@ func NewGateMetrics(p StatisticScraper) *GateMetrics {
 	serverMetric := newServerMetrics()
 	serverMetric.register()

-	statsMetric := newStatisticMetrics()
-	statsMetric.register()
-
 	return &GateMetrics{
 		stateMetrics:         *stateMetric,
 		poolMetricsCollector: *poolMetric,
 		serverMetrics:        *serverMetric,
-		statisticMetrics:     *statsMetric,
 	}
 }
@ -116,7 +107,6 @@ func (g *GateMetrics) Unregister() {
 	g.stateMetrics.unregister()
 	prometheus.Unregister(&g.poolMetricsCollector)
 	g.serverMetrics.unregister()
-	g.statisticMetrics.unregister()
 }

 func newStateMetrics() *stateMetrics {
@ -126,20 +116,6 @@ func newStateMetrics() *stateMetrics {
 	}
 }

-func newStatisticMetrics() *statisticMetrics {
-	return &statisticMetrics{
-		droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]),
-	}
-}
-
-func (s *statisticMetrics) register() {
-	prometheus.MustRegister(s.droppedLogs)
-}
-
-func (s *statisticMetrics) unregister() {
-	prometheus.Unregister(s.droppedLogs)
-}
-
 func (m stateMetrics) register() {
 	prometheus.MustRegister(m.healthCheck)
 	prometheus.MustRegister(m.versionInfo)
@ -158,13 +134,6 @@ func (m stateMetrics) SetVersion(ver string) {
 	m.versionInfo.WithLabelValues(ver).Set(1)
 }

-func (s *statisticMetrics) DroppedLogsInc() {
-	if s == nil {
-		return
-	}
-	s.droppedLogs.Inc()
-}
-
 func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
 	return &poolMetricsCollector{
 		scraper: p,
@ -222,6 +191,8 @@ func (m *poolMetricsCollector) updateRequestsDuration(node pool.NodeStatistic) {
 	m.requestDuration.WithLabelValues(node.Address(), methodGetContainer).Set(float64(node.AverageGetContainer().Milliseconds()))
 	m.requestDuration.WithLabelValues(node.Address(), methodListContainer).Set(float64(node.AverageListContainer().Milliseconds()))
 	m.requestDuration.WithLabelValues(node.Address(), methodDeleteContainer).Set(float64(node.AverageDeleteContainer().Milliseconds()))
+	m.requestDuration.WithLabelValues(node.Address(), methodGetContainerEacl).Set(float64(node.AverageGetContainerEACL().Milliseconds()))
+	m.requestDuration.WithLabelValues(node.Address(), methodSetContainerEacl).Set(float64(node.AverageSetContainerEACL().Milliseconds()))
 	m.requestDuration.WithLabelValues(node.Address(), methodEndpointInfo).Set(float64(node.AverageEndpointInfo().Milliseconds()))
 	m.requestDuration.WithLabelValues(node.Address(), methodNetworkInfo).Set(float64(node.AverageNetworkInfo().Milliseconds()))
 	m.requestDuration.WithLabelValues(node.Address(), methodPutObject).Set(float64(node.AveragePutObject().Milliseconds()))
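The nil-receiver check in the removed DroppedLogsInc made the metric optional for callers; holding a nil *statisticMetrics was safe:

	var s *statisticMetrics
	s.DroppedLogsInc() // no-op instead of a panic when metrics are disabled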
41 response/utils.go Normal file
@ -0,0 +1,41 @@
package response

import (
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

func Error(r *fasthttp.RequestCtx, msg string, code int) {
	r.Error(msg+"\n", code)
}

func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
	var (
		msg        string
		statusCode int
		logFields  []zap.Field
	)

	st := new(sdkstatus.ObjectAccessDenied)

	switch {
	case errors.As(err, &st):
		statusCode = fasthttp.StatusForbidden
		reason := st.Reason()
		msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
		logFields = append(logFields, zap.String("error_detail", reason))
	case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
		statusCode = fasthttp.StatusNotFound
		msg = "Not Found"
	default:
		statusCode = fasthttp.StatusBadRequest
		msg = fmt.Sprintf("%s: %v", message, err)
	}

	return statusCode, msg, logFields
}
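A usage sketch for the new helpers; the handler function and logger are illustrative, not part of this file:

	func respondWithError(ctx *fasthttp.RequestCtx, log *zap.Logger, err error) {
		code, msg, fields := response.FormErrorResponse("could not get object", err)
		log.Error("request failed", append(fields, zap.Error(err))...)
		response.Error(ctx, msg, code)
	}

This keeps the status-code mapping (403 for access denied, 404 for a missing object or container, 400 otherwise) in one place.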
@ -52,8 +52,8 @@ func BearerTokenFromCookie(h *fasthttp.RequestHeader) []byte {

 // StoreBearerTokenAppCtx extracts a bearer token from the header or cookie and stores
 // it in the application context.
-func StoreBearerTokenAppCtx(ctx context.Context, c *fasthttp.RequestCtx) (context.Context, error) {
-	tkn, err := fetchBearerToken(c)
+func StoreBearerTokenAppCtx(ctx context.Context, req *fasthttp.RequestCtx) (context.Context, error) {
+	tkn, err := fetchBearerToken(req)
 	if err != nil {
 		return nil, err
 	}
@ -82,23 +82,15 @@ func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
 		tkn = new(bearer.Token)
 	)
 	for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
-		buf = parse(&ctx.Request.Header)
-		if buf == nil {
+		if buf = parse(&ctx.Request.Header); buf == nil {
 			continue
-		}
-
-		data, err := base64.StdEncoding.DecodeString(string(buf))
-		if err != nil {
+		} else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
 			lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
 			continue
-		}
-
-		if err = tkn.Unmarshal(data); err != nil {
-			if err = tkn.UnmarshalJSON(data); err != nil {
+		} else if err = tkn.Unmarshal(data); err != nil {
 			lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
 			continue
 		}
-		}
 	}

 	return tkn, nil
 }
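On the master side, the loop above attempts the binary encoding first and only then the JSON form. The decode order in isolation (raw is an illustrative base64 string, not from the diff):

	data, err := base64.StdEncoding.DecodeString(raw)
	if err == nil {
		tkn := new(bearer.Token)
		if err = tkn.Unmarshal(data); err != nil {
			err = tkn.UnmarshalJSON(data) // fall back to JSON-encoded tokens
		}
	}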
@ -98,14 +98,8 @@ func TestFetchBearerToken(t *testing.T) {
 	tkn := new(bearer.Token)
 	tkn.ForUser(uid)

-	jsonToken, err := tkn.MarshalJSON()
-	require.NoError(t, err)
-
-	jsonTokenBase64 := base64.StdEncoding.EncodeToString(jsonToken)
-	binaryTokenBase64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
-
-	require.NotEmpty(t, jsonTokenBase64)
-	require.NotEmpty(t, binaryTokenBase64)
+	t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
+	require.NotEmpty(t, t64)

 	cases := []struct {
 		name string
@ -149,47 +143,25 @@ func TestFetchBearerToken(t *testing.T) {
 			error: "can't unmarshal bearer token",
 		},
 		{
-			name:   "bad header, but good cookie with binary token",
+			name:   "bad header, but good cookie",
 			header: "dGVzdAo=",
-			cookie: binaryTokenBase64,
+			cookie: t64,
 			expect: tkn,
 		},
 		{
-			name:   "bad cookie, but good header with binary token",
-			header: binaryTokenBase64,
+			name:   "bad cookie, but good header",
+			header: t64,
 			cookie: "dGVzdAo=",
 			expect: tkn,
 		},
 		{
-			name:   "bad header, but good cookie with json token",
-			header: "dGVzdAo=",
-			cookie: jsonTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "bad cookie, but good header with json token",
-			header: jsonTokenBase64,
-			cookie: "dGVzdAo=",
-			expect: tkn,
-		},
-		{
-			name:   "ok for header with binary token",
-			header: binaryTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "ok for cookie with binary token",
-			cookie: binaryTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "ok for header with json token",
-			header: jsonTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "ok for cookie with json token",
-			cookie: jsonTokenBase64,
+			name:   "ok for header",
+			header: t64,
+			expect: tkn,
+		},
+		{
+			name:   "ok for cookie",
+			cookie: t64,
 			expect: tkn,
 		},
 	}
324 tree/tree.go
@ -2,12 +2,11 @@ package tree

 import (
 	"context"
-	"errors"
 	"fmt"
 	"strings"

-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )
@ -21,7 +20,6 @@ type (
 	// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
 	ServiceClient interface {
 		GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
-		GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
 	}

 	treeNode struct {
@ -29,14 +27,8 @@ type (
 		Meta map[string]string
 	}

-	multiSystemNode struct {
-		// the first element is latest
-		nodes []*treeNode
-	}
-
 	GetNodesParams struct {
 		CnrID   cid.ID
-		BktInfo *data.BucketInfo
 		TreeID  string
 		Path    []string
 		Meta    []string
@ -55,18 +47,16 @@ var (

 const (
 	FileNameKey = "FileName"
-	settingsFileName = "bucket-settings"
 )

 const (
 	oidKV = "OID"
-	uploadIDKV = "UploadId"
-	sizeKV = "Size"

 	// keys for delete marker nodes.
 	isDeleteMarkerKV = "IsDeleteMarker"

 	// versionTree -- ID of a tree with object versions.
 	versionTree = "version"
-	systemTree = "system"

 	separator = "/"
 )
@ -83,28 +73,26 @@ type Meta interface {

 type NodeResponse interface {
 	GetMeta() []Meta
-	GetTimestamp() []uint64
-	GetNodeID() []uint64
-	GetParentID() []uint64
+	GetTimestamp() uint64
 }

 func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
-	tNode := &treeNode{
+	treeNode := &treeNode{
 		Meta: make(map[string]string, len(nodeInfo.GetMeta())),
 	}

 	for _, kv := range nodeInfo.GetMeta() {
 		switch kv.GetKey() {
 		case oidKV:
-			if err := tNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
+			if err := treeNode.ObjID.DecodeString(string(kv.GetValue())); err != nil {
 				return nil, err
 			}
 		default:
-			tNode.Meta[kv.GetKey()] = string(kv.GetValue())
+			treeNode.Meta[kv.GetKey()] = string(kv.GetValue())
 		}
 	}

-	return tNode, nil
+	return treeNode, nil
 }

 func (n *treeNode) Get(key string) (string, bool) {
@ -117,94 +105,30 @@ func (n *treeNode) FileName() (string, bool) {
 	return value, ok
 }

-func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) {
-	tNode, err := newTreeNode(node)
+func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
+	treeNode, err := newTreeNode(node)
 	if err != nil {
 		return nil, fmt.Errorf("invalid tree node: %w", err)
 	}

-	return newNodeVersionFromTreeNode(tNode), nil
+	return newNodeVersionFromTreeNode(treeNode), nil
 }

-func newNodeVersionFromTreeNode(treeNode *treeNode) *data.NodeVersion {
+func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
 	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
-	version := &data.NodeVersion{
-		BaseNodeVersion: data.BaseNodeVersion{
-			OID:            treeNode.ObjID,
-			IsDeleteMarker: isDeleteMarker,
+	version := &api.NodeVersion{
+		BaseNodeVersion: api.BaseNodeVersion{
+			OID: treeNode.ObjID,
 		},
+		DeleteMarker: isDeleteMarker,
 	}

 	return version
 }

-func newNodeInfo(node NodeResponse) data.NodeInfo {
-	nodeMeta := node.GetMeta()
-	nodeInfo := data.NodeInfo{
-		Meta: make([]data.NodeMeta, 0, len(nodeMeta)),
-	}
-	for _, meta := range nodeMeta {
-		nodeInfo.Meta = append(nodeInfo.Meta, meta)
-	}
-
-	return nodeInfo
-}
-
-func newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) {
-	var (
-		err          error
-		index        int
-		maxTimestamp uint64
-	)
-
-	if len(nodes) == 0 {
-		return nil, errors.New("multi node must have at least one node")
-	}
-
-	treeNodes := make([]*treeNode, len(nodes))
-
-	for i, node := range nodes {
-		if treeNodes[i], err = newTreeNode(node); err != nil {
-			return nil, fmt.Errorf("parse system node response: %w", err)
-		}
-
-		if timestamp := getMaxTimestamp(node); timestamp > maxTimestamp {
-			index = i
-			maxTimestamp = timestamp
-		}
-	}
-
-	treeNodes[0], treeNodes[index] = treeNodes[index], treeNodes[0]
-
-	return &multiSystemNode{
-		nodes: treeNodes,
-	}, nil
-}
-
-func (m *multiSystemNode) Latest() *treeNode {
-	return m.nodes[0]
-}
-
-func (m *multiSystemNode) Old() []*treeNode {
-	return m.nodes[1:]
-}
-
-func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) {
-	nodes, err := c.GetVersions(ctx, cnrID, objectName)
-	if err != nil {
-		return nil, err
-	}
-
-	latestNode, err := getLatestVersionNode(nodes)
-	if err != nil {
-		return nil, err
-	}
-
-	return newNodeVersion(latestNode)
-}
-
-func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
-	meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
+func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
+	meta := []string{oidKV, isDeleteMarkerKV}
 	path := pathFromName(objectName)

 	p := &GetNodesParams{
@ -215,73 +139,30 @@ func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string
 		LatestOnly: false,
 		AllAttrs:   false,
 	}
-
-	return c.service.GetNodes(ctx, p)
-}
-
-func (c *Tree) CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error {
-	_, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name string) (*multiSystemNode, error) {
-	p := &GetNodesParams{
-		CnrID:      bktInfo.CID,
-		BktInfo:    bktInfo,
-		TreeID:     systemTree,
-		Path:       []string{name},
-		LatestOnly: false,
-		AllAttrs:   true,
-	}
 	nodes, err := c.service.GetNodes(ctx, p)
 	if err != nil {
 		return nil, err
 	}

-	nodes = filterMultipartNodes(nodes)
-
-	if len(nodes) == 0 {
-		return nil, layer.ErrNodeNotFound
-	}
-
-	return newMultiNode(nodes)
-}
-
-func filterMultipartNodes(nodes []NodeResponse) []NodeResponse {
-	res := make([]NodeResponse, 0, len(nodes))
-
-LOOP:
-	for _, node := range nodes {
-		for _, meta := range node.GetMeta() {
-			if meta.GetKey() == uploadIDKV {
-				continue LOOP
-			}
-		}
-
-		res = append(res, node)
-	}
-
-	return res
-}
-
-func getLatestVersionNode(nodes []NodeResponse) (NodeResponse, error) {
+	latestNode, err := getLatestNode(nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	return newNodeVersion(latestNode)
+}
+
+func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
 	var (
 		maxCreationTime uint64
 		targetIndexNode = -1
 	)

 	for i, node := range nodes {
-		if !checkExistOID(node.GetMeta()) {
-			continue
-		}
-
-		if currentCreationTime := getMaxTimestamp(node); currentCreationTime > maxCreationTime {
-			targetIndexNode = i
+		currentCreationTime := node.GetTimestamp()
+		if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
 			maxCreationTime = currentCreationTime
+			targetIndexNode = i
 		}
 	}

@ -306,154 +187,3 @@ func checkExistOID(meta []Meta) bool {
 func pathFromName(objectName string) []string {
 	return strings.Split(objectName, separator)
 }
-
-func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
-	rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
-	if err != nil {
-		return nil, "", err
-	}
-	subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
-	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, "", nil
-		}
-		return nil, "", err
-	}
-
-	nodesMap := make(map[string][]NodeResponse, len(subTree))
-	for _, node := range subTree {
-		if MultiID(rootID).Equal(node.GetNodeID()) {
-			continue
-		}
-
-		fileName := GetFilename(node)
-		if !strings.HasPrefix(fileName, tailPrefix) {
-			continue
-		}
-
-		nodes := nodesMap[fileName]
-
-		// Add all nodes if flag latestOnly is false.
-		// Add all intermediate nodes
-		// and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0]
-		if len(nodes) == 0 {
-			nodes = []NodeResponse{node}
-		} else if !latestOnly || isIntermediate(node) {
-			nodes = append(nodes, node)
-		} else if isIntermediate(nodes[0]) {
-			nodes = append([]NodeResponse{node}, nodes...)
-		} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
-			nodes[0] = node
-		}
-
-		nodesMap[fileName] = nodes
-	}
-
-	result := make([]data.NodeInfo, 0, len(subTree))
-	for _, nodes := range nodesMap {
-		result = append(result, nodeResponseToNodeInfo(nodes)...)
-	}
-
-	return result, strings.TrimSuffix(prefix, tailPrefix), nil
-}
-
-func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
-	nodesInfo := make([]data.NodeInfo, 0, len(nodes))
-	for _, node := range nodes {
-		nodesInfo = append(nodesInfo, newNodeInfo(node))
-	}
-
-	return nodesInfo
-}
-
-func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
-	rootID := []uint64{0}
-	path := strings.Split(prefix, separator)
-	tailPrefix := path[len(path)-1]
-
-	if len(path) > 1 {
-		var err error
-		rootID, err = c.getPrefixNodeID(ctx, bktInfo, treeID, path[:len(path)-1])
-		if err != nil {
-			return nil, "", err
-		}
-	}
-
-	return rootID, tailPrefix, nil
-}
-
-func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, treeID string, prefixPath []string) ([]uint64, error) {
-	p := &GetNodesParams{
-		CnrID:      bktInfo.CID,
-		BktInfo:    bktInfo,
-		TreeID:     treeID,
-		Path:       prefixPath,
-		LatestOnly: false,
-		AllAttrs:   true,
-	}
-	nodes, err := c.service.GetNodes(ctx, p)
-	if err != nil {
-		return nil, err
-	}
-
-	var intermediateNodes []uint64
-	for _, node := range nodes {
-		if isIntermediate(node) {
-			intermediateNodes = append(intermediateNodes, node.GetNodeID()...)
-		}
-	}
-
-	if len(intermediateNodes) == 0 {
-		return nil, layer.ErrNodeNotFound
-	}
-
-	return intermediateNodes, nil
-}
-
-func GetFilename(node NodeResponse) string {
-	for _, kv := range node.GetMeta() {
-		if kv.GetKey() == FileNameKey {
-			return string(kv.GetValue())
-		}
-	}
-
-	return ""
-}
-
-func isIntermediate(node NodeResponse) bool {
-	if len(node.GetMeta()) != 1 {
-		return false
-	}
-
-	return node.GetMeta()[0].GetKey() == FileNameKey
-}
-
-func getMaxTimestamp(node NodeResponse) uint64 {
-	var maxTimestamp uint64
-
-	for _, timestamp := range node.GetTimestamp() {
-		if timestamp > maxTimestamp {
-			maxTimestamp = timestamp
-		}
-	}
-
-	return maxTimestamp
-}
-
-type MultiID []uint64
-
-func (m MultiID) Equal(id MultiID) bool {
-	seen := make(map[uint64]struct{}, len(m))
-
-	for i := range m {
-		seen[m[i]] = struct{}{}
-	}
-
-	for i := range id {
-		if _, ok := seen[id[i]]; !ok {
-			return false
-		}
-	}
-
-	return true
-}
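A worked example of the set-style comparison implemented by the removed MultiID.Equal (note it checks that every element of the argument occurs in the receiver, ignoring order and duplicates):

	ids := MultiID{1, 2, 3}
	ids.Equal(MultiID{3, 1}) // true
	ids.Equal(MultiID{4})    // false
	ids.Equal(MultiID{})     // true: an empty argument is trivially contained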
@ -21,10 +21,10 @@ func (m nodeMeta) GetValue() []byte {

 type nodeResponse struct {
 	meta      []nodeMeta
-	timestamp []uint64
+	timestamp uint64
 }

-func (n nodeResponse) GetTimestamp() []uint64 {
+func (n nodeResponse) GetTimestamp() uint64 {
 	return n.timestamp
 }

@ -36,13 +36,6 @@ func (n nodeResponse) GetMeta() []Meta {
 	return res
 }

-func (n nodeResponse) GetNodeID() []uint64 {
-	return nil
-}
-func (n nodeResponse) GetParentID() []uint64 {
-	return nil
-}
-
 func TestGetLatestNode(t *testing.T) {
 	for _, tc := range []struct {
 		name string
@ -59,7 +52,7 @@ func TestGetLatestNode(t *testing.T) {
 			name: "one node of the object version",
 			nodes: []NodeResponse{
 				nodeResponse{
-					timestamp: []uint64{1},
+					timestamp: 1,
 					meta: []nodeMeta{
 						{
 							key: oidKV,
@ -74,11 +67,11 @@ func TestGetLatestNode(t *testing.T) {
 			name: "one node of the object version and one node of the secondary object",
 			nodes: []NodeResponse{
 				nodeResponse{
-					timestamp: []uint64{3},
+					timestamp: 3,
 					meta:      []nodeMeta{},
 				},
 				nodeResponse{
-					timestamp: []uint64{1},
+					timestamp: 1,
 					meta: []nodeMeta{
 						{
 							key: oidKV,
@ -93,11 +86,11 @@ func TestGetLatestNode(t *testing.T) {
 			name: "all nodes represent a secondary object",
 			nodes: []NodeResponse{
 				nodeResponse{
-					timestamp: []uint64{3},
+					timestamp: 3,
 					meta:      []nodeMeta{},
 				},
 				nodeResponse{
-					timestamp: []uint64{5},
+					timestamp: 5,
 					meta:      []nodeMeta{},
 				},
 			},
@ -107,7 +100,7 @@ func TestGetLatestNode(t *testing.T) {
 			name: "several nodes of different types and with different timestamp",
 			nodes: []NodeResponse{
 				nodeResponse{
-					timestamp: []uint64{1},
+					timestamp: 1,
 					meta: []nodeMeta{
 						{
 							key: oidKV,
@ -116,11 +109,11 @@ func TestGetLatestNode(t *testing.T) {
 					},
 				},
 				nodeResponse{
-					timestamp: []uint64{3},
+					timestamp: 3,
 					meta:      []nodeMeta{},
 				},
 				nodeResponse{
-					timestamp: []uint64{4},
+					timestamp: 4,
 					meta: []nodeMeta{
 						{
 							key: oidKV,
@ -129,7 +122,7 @@ func TestGetLatestNode(t *testing.T) {
 					},
 				},
 				nodeResponse{
-					timestamp: []uint64{6},
+					timestamp: 6,
 					meta:      []nodeMeta{},
 				},
 			},
@ -137,7 +130,7 @@ func TestGetLatestNode(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			actualNode, err := getLatestVersionNode(tc.nodes)
+			actualNode, err := getLatestNode(tc.nodes)
 			if tc.error {
 				require.Error(t, err)
 				return
@ -4,7 +4,6 @@ import (
 	"context"

 	"github.com/valyala/fasthttp"
-	"go.uber.org/zap"
 )

 // SetContextToRequest adds new context to fasthttp request.
@ -16,34 +15,3 @@ func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) {
 func GetContextFromRequest(c *fasthttp.RequestCtx) context.Context {
 	return c.UserValue("context").(context.Context)
 }
-
-type ctxReqLoggerKeyType struct{}
-
-// SetReqLog sets child zap.Logger in the context.
-func SetReqLog(ctx context.Context, log *zap.Logger) context.Context {
-	if ctx == nil {
-		return nil
-	}
-	return context.WithValue(ctx, ctxReqLoggerKeyType{}, log)
-}
-
-// GetReqLog returns log if set.
-// If zap.Logger isn't set returns nil.
-func GetReqLog(ctx context.Context) *zap.Logger {
-	if ctx == nil {
-		return nil
-	} else if r, ok := ctx.Value(ctxReqLoggerKeyType{}).(*zap.Logger); ok {
-		return r
-	}
-	return nil
-}
-
-// GetReqLogOrDefault returns log from context, if it exists.
-// If the log is missing from the context, the default logger is returned.
-func GetReqLogOrDefault(ctx context.Context, defaultLog *zap.Logger) *zap.Logger {
-	log := GetReqLog(ctx)
-	if log == nil {
-		log = defaultLog
-	}
-	return log
-}
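For reference, the removed logger helpers were used like this (a sketch with illustrative values):

	ctx := SetReqLog(context.Background(), zap.NewExample())
	log := GetReqLogOrDefault(ctx, zap.NewNop())
	log.Info("handling request") // request-scoped logger, falling back to the default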