forked from TrueCloudLab/frostfs-http-gw
Compare commits
3 commits
master...poc/frostf

| Author | SHA1 | Date |
|---|---|---|
| | ca46dc5ec1 | |
| | eccc3f8077 | |
| | e3b6f534cc | |

50 changed files with 1514 additions and 3361 deletions
@@ -1,9 +1,9 @@
-FROM golang:1.22-alpine AS basebuilder
+FROM golang:1.21-alpine as basebuilder
 RUN apk add --update make bash ca-certificates

-FROM basebuilder AS builder
+FROM basebuilder as builder
-ENV GOGC=off
+ENV GOGC off
-ENV CGO_ENABLED=0
+ENV CGO_ENABLED 0
 ARG BUILD=now
 ARG VERSION=dev
 ARG REPO=repository

@@ -6,7 +6,7 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-go_versions: [ '1.22', '1.23' ]
+go_versions: [ '1.20', '1.21' ]
 fail-fast: false
 steps:
 - uses: actions/checkout@v3
@@ -18,6 +18,3 @@ jobs:

 - name: Build binary
 run: make

-- name: Check dirty suffix
-run: if [[ $(make version) == *"dirty"* ]]; then echo "Version has dirty suffix" && exit 1; fi
@@ -12,9 +12,9 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.23'
+go-version: '1.21'

 - name: Run commit format checker
-uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
+uses: https://git.frostfs.info/TrueCloudLab/dco-go@v1
 with:
-from: 'origin/${{ github.event.pull_request.base.ref }}'
+from: adb95642d
@@ -10,7 +10,7 @@ jobs:
 - name: Set up Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.23'
+go-version: '1.21'
 cache: true

 - name: Install linters
@@ -24,7 +24,7 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-go_versions: [ '1.22', '1.23' ]
+go_versions: [ '1.20', '1.21' ]
 fail-fast: false
 steps:
 - uses: actions/checkout@v3
@@ -38,4 +38,4 @@ jobs:
 run: make dep

 - name: Run tests
 run: make test
@@ -12,7 +12,7 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@v3
 with:
-go-version: '1.22'
+go-version: '1.21'

 - name: Install govulncheck
 run: go install golang.org/x/vuln/cmd/govulncheck@latest
@@ -12,8 +12,7 @@ run:
 # output configuration options
 output:
 # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
-formats:
-  - format: tab
+format: tab

 # all available settings of specific linters
 linters-settings:
@@ -30,6 +30,11 @@ repos:
 hooks:
 - id: shellcheck

+- repo: https://github.com/golangci/golangci-lint
+  rev: v1.51.2
+  hooks:
+  - id: golangci-lint

 - repo: local
 hooks:
 - id: make-lint-install
CHANGELOG.md (65 changed lines)
@@ -3,80 +3,21 @@
 This document outlines major changes between releases.

 ## [Unreleased]
-- Support percent-encoding for GET queries (#134)
-
-### Changed
-- Update go version to 1.22 (#132)
-
-## [0.30.0] - Kangshung - 2024-07-22
-
-### Fixed
-- Handle query unescape and invalid bearer token errors (#107)
-- Fix HTTP/2 requests (#110)
-
-### Added
-- Add new `reconnect_interval` config param (#100)
-- Erasure coding support in placement policy (#114)
-- HTTP Header canonicalizer for well-known headers (#121)
-
-### Changed
-- Improve test coverage (#112, #117)
-- Bumped vulnerable dependencies (#115)
-- Replace extended ACL examples with policies in README (#118)
-
-### Removed
-
-## [0.29.0] - Zemu - 2024-05-27
-
-### Fixed
-- Fix possibility of panic during SIGHUP (#99)
-- Handle query unescape and invalid bearer token errors (#108)
-- Fix log-level change on SIGHUP (#105)
-
-### Added
-- Support client side object cut (#70)
-- Add `frostfs.client_cut` config param
-- Add `frostfs.buffer_max_size_for_put` config param
-- Add bucket/container caching
-- Disable homomorphic hash for PUT if it's disabled in container itself
-- Add new `logger.destination` config param with journald support (#89, #104)
-- Add support namespaces (#91)
-
-### Changed
-- Replace atomics with mutex for reloadable params (#74)
-
-## [0.28.1] - 2024-01-24
-
-### Added
-- Tree pool traversal limit (#92)
-
-### Update from 0.28.0
-See new `frostfs.tree_pool_max_attempts` config parameter.
-
-## [0.28.0] - Academy of Sciences - 2023-12-07
-
 ### Fixed
 - `grpc` schemas in tree configuration (#62)
-- `GetSubTree` failures (#67)
-- Debian packaging (#69, #90)
-- Get latest version of tree node (#85)

 ### Added
 - Support dump metrics descriptions (#29)
 - Support impersonate bearer token (#40, #45)
 - Tracing support (#20, #44, #60)
 - Object name resolving with tree service (#30)
-- Metrics for current endpoint status (#77)
-- Soft memory limit with `runtime.soft_memory_limit` (#72)
-- Add selection of the node of the latest version of the object (#85)

 ### Changed
 - Update prometheus to v1.15.0 (#35)
 - Update go version to 1.19 (#50)
 - Finish rebranding (#2)
 - Use gate key to form object owner (#66)
-- Move log messages to constants (#36)
-- Uploader and downloader refactor (#73)

 ### Removed
 - Drop `tree.service` param (now endpoints from `peers` section are used) (#59)

@@ -120,8 +61,4 @@ This project is a fork of [NeoFS HTTP Gateway](https://github.com/nspcc-dev/neof
 To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs-http-gw/blob/master/CHANGELOG.md.

 [0.27.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/72734ab4...v0.27.0
-[0.28.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...v0.28.0
-[0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.0...v0.28.1
-[0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.28.1...v0.29.0
-[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.29.0...v0.30.0
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.0...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.27.0...master

Makefile (42 changed lines)
@@ -2,9 +2,9 @@
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --match "v*" --dirty --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.22
+GO_VERSION ?= 1.20
-LINT_VERSION ?= 1.60.3
+LINT_VERSION ?= 1.54.0
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.2
 BUILD ?= $(shell date -u --iso=seconds)

 HUB_IMAGE ?= truecloudlab/frostfs-http-gw
@@ -30,11 +30,6 @@ PKG_VERSION ?= $(shell echo $(VERSION) | sed "s/^v//" | \
 sed "s/-/~/")-${OS_RELEASE}
 .PHONY: debpackage debclean

-FUZZ_NGFUZZ_DIR ?= ""
-FUZZ_TIMEOUT ?= 30
-FUZZ_FUNCTIONS ?= "all"
-FUZZ_AUX ?= ""
-
 # Make all binaries
 all: $(BINS)
 $(BINS): $(DIRS) dep
@@ -83,35 +78,6 @@ cover:
 @go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
 @go tool cover -html=coverage.txt -o coverage.html

-# Run fuzzing
-CLANG := $(shell which clang-17 2>/dev/null)
-.PHONY: check-clang all
-check-clang:
-ifeq ($(CLANG),)
-@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
-@exit 1
-endif
-
-.PHONY: check-ngfuzz all
-check-ngfuzz:
-@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
-echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
-exit 1; \
-fi
-
-.PHONY: install-fuzzing-deps
-install-fuzzing-deps: check-clang check-ngfuzz
-
-.PHONY: fuzz
-fuzz: install-fuzzing-deps
-@START_PATH=$$(pwd); \
-ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
-cd $(FUZZ_NGFUZZ_DIR) && \
-./ngfuzz -clean && \
-./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
-./ngfuzz -report
-
 # Reformat code
 fmt:
 @echo "⇒ Processing gofmt check"
@@ -183,7 +149,7 @@ version:
 # Clean up
 clean:
 rm -rf vendor
 rm -rf $(BINDIR)

 # Package for Debian
 debpackage:
README.md (110 changed lines)
@@ -466,13 +466,13 @@ You can always upload files to public containers (open for anyone to put
 objects into), but for restricted containers you need to explicitly allow PUT
 operations for a request signed with your HTTP Gateway keys.

-If you don't want to manage gateway's secret keys and adjust policies when
+If your don't want to manage gateway's secret keys and adjust eACL rules when
 gateway configuration changes (new gate, key rotation, etc) or you plan to use
 public services, there is an option to let your application backend (or you) to
-issue Bearer Tokens and pass them from the client via gate down to FrostFS level
+issue Bearer Tokens ans pass them from the client via gate down to FrostFS level
 to grant access.

-FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
+FrostFS Bearer Token basically is a container owner-signed ACL data (refer to FrostFS
 documentation for more details). There are two options to pass them to gateway:
 * "Authorization" header with "Bearer" type and base64-encoded token in
 credentials field
@@ -482,31 +482,33 @@ For example, you have a mobile application frontend with a backend part storing
 data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
 Bearer token and provides it to the frontend. Then, the mobile app may generate
 some data and upload it via any available FrostFS HTTP Gateway by adding
-the corresponding header to the upload request. Accessing policy protected data
+the corresponding header to the upload request. Accessing the ACL protected data
 works the same way.

 ##### Example
-In order to generate a bearer token, you need to have wallet (which will be used to sign the token)
+In order to generate a bearer token, you need to have a wallet (which will be used to sign the token) and
+the address of the sender who will do the request to FrostFS (in our case, it's a gateway wallet address).

-1. Suppose you have a container with private policy for wallet key
+Suppose we have:
+* **NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3** (token owner (gateway address))
+
+Firstly, we need to encode the container id and the sender address to base64 (now it's base58).
+So use **base58** and **base64** utils.
+
+1. Encoding token owner id:
 ```
-$ frostfs-cli container create -r <endpoint> --wallet <wallet> -policy <policy> --basic-acl 0 --await
-CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
-
-$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
---target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
---rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
---chain-id <chainID>
+$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
+# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
 ```

-2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
-HTTP Gateway request as wallet signed request and save it to **bearer.json**:
+2. Form a Bearer token (10000 is lifetime expiration in epoch) and save it to **bearer.json**:
 ```
 {
 "body": {
 "allowImpersonate": true,
+"ownerID": {
+"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
+},
 "lifetime": {
 "exp": "10000",
 "nbf": "0",
@@ -519,7 +521,7 @@ $ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \

 3. Sign it with the wallet:
 ```
-$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
+$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w ./wallet.json
 ```

 4. Encode to base64 to use in header:
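The base64 step from point 4 can also be scripted in a few lines of Go. The sketch below is illustrative only and is not part of this diff; it assumes, as one plausible reading of the step above, that the signed token file itself is base64-encoded to produce the header value, and the file name is a placeholder.

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// Read the token signed by `frostfs-cli util sign bearer-token` (step 3).
	raw, err := os.ReadFile("signed.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Print the value to place after "Authorization: Bearer ".
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```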
@@ -540,32 +542,47 @@ $ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoEC
 # }
 ```

-##### Note: Bearer Token owner
-
-You can specify exact key who can use Bearer Token (gateway wallet address).
-To do this, encode wallet address in base64 format
+##### Note
+For the token to work correctly, you need to create a container with a basic ACL that:
+1. Allow PUT operation to others
+2. Doesn't set "final" bit

+For example:
 ```
-$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
-# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
+$ frostfs-cli -w ./wallet.json --basic-acl 0x0FFFCFFF -r 192.168.130.72:8080 container create --policy "REP 3" --await
 ```

-Then specify this value in Bearer Token Json
+To deny access to a container without a token, set the eACL rules:
+```
+$ frostfs-cli -w ./wallet.json -r 192.168.130.72:8080 container set-eacl --table eacl.json --await --cid BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
+```
+
+File **eacl.json**:
 ```
 {
-"body": {
-"ownerID": {
-"value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
+"version": {
+"major": 0,
+"minor": 0
 },
-...
+"containerID": {
+"value": "mRnZWzewzxjzIPa7Fqlfqdl3TM1KpJ0YnsXsEhafJJg="
+},
+"records": [
+{
+"operation": "PUT",
+"action": "DENY",
+"filters": [],
+"targets": [
+{
+"role": "OTHERS",
+"keys": []
+}
+]
+}
+]
+}
 ```

-##### Note: Policy override
-
-Instead of impersonation, you can define the set of policies that will be applied
-to the request sender. This allows to restrict access to specific operation and
-specific objects without giving full impersonation control to the token user.
-
 ### Metrics and Pprof

 If enabled, Prometheus metrics are available at `localhost:8084` endpoint
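For comparison with the curl call shown earlier, here is a minimal Go client sketch of the same upload: a multipart body posted to /upload/{cid} with the bearer token in the Authorization header. It is an illustration only; the gateway URL, container ID and token are placeholders, and nothing in it is taken from the gateway's own code.

```go
package gatewayclient

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/http"
)

// Upload sends one in-memory file to the HTTP gateway's /upload/{cid} endpoint.
func Upload(gateway, containerID, bearerToken, fileName string, payload []byte) error {
	body := &bytes.Buffer{}
	w := multipart.NewWriter(body)

	part, err := w.CreateFormFile("file", fileName)
	if err != nil {
		return err
	}
	if _, err = part.Write(payload); err != nil {
		return err
	}
	if err = w.Close(); err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPost, gateway+"/upload/"+containerID, body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	// bearerToken is the base64-encoded signed bearer token from the steps above.
	req.Header.Set("Authorization", "Bearer "+bearerToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}
```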
@@ -575,26 +592,3 @@ See [configuration](./docs/gate-configuration.md).
 ## Credits

 Please see [CREDITS](CREDITS.md) for details.
-
-## Fuzzing
-
-To run fuzzing tests use the following command:
-
-```shell
-$ make fuzz
-```
-
-This command will install dependencies for the fuzzing process and run existing fuzzing tests.
-
-You can also use the following arguments:
-
-```
-FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
-FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
-FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
-FUZZ_NGFUZZ_DIR - path to ngfuzz tool
-````
-
-## Credits
-
-Please see [CREDITS](CREDITS.md) for details.

SECURITY.md (26 changed lines)
@@ -1,26 +0,0 @@
-# Security Policy
-
-
-## How To Report a Vulnerability
-
-If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
-
-**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
-
-Instead, you can report it using one of the following ways:
-
-* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
-
-Please include as much of the information listed below as you can to help us better understand and resolve the issue:
-
-* The type of issue (e.g., buffer overflow, or cross-site scripting)
-* Affected version(s)
-* Impact of the issue, including how an attacker might exploit the issue
-* Step-by-step instructions to reproduce the issue
-* The location of the affected source code (tag/branch/commit or direct URL)
-* Full paths of source file(s) related to the manifestation of the issue
-* Any special configuration required to reproduce the issue
-* Any log files that are related to this issue (if possible)
-* Proof-of-concept or exploit code (if possible)
-
-This information will help us triage your report more quickly.
VERSION (2 changed lines)

@@ -1 +1 @@
-v0.30.0
+v0.27.0

@@ -1,26 +1,21 @@
 package main

 import (
-"bytes"
 "context"
-"errors"
+"crypto/elliptic"
 "fmt"
 "net/http"
 "os"
 "os/signal"
 "runtime/debug"
-"strconv"
-"strings"
 "sync"
 "syscall"
 "time"

-v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
+"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
-"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
+"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/frostfsid"
-"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/frostfs/services"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
@@ -29,6 +24,7 @@ import (
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
+"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@@ -41,7 +37,6 @@ import (
 "github.com/spf13/viper"
 "github.com/valyala/fasthttp"
 "go.uber.org/zap"
-"golang.org/x/exp/slices"
 )

 type (

@@ -60,10 +55,8 @@ type (
 metrics *gateMetrics
 services []*metrics.Service
 settings *appSettings
 servers []Server
-unbindServers []ServerInfo
-mu sync.RWMutex
+frostfsid *frostfsid.FrostFSID
 }

 // App is an interface for the main gateway function.

@@ -84,15 +77,9 @@ type (

 // appSettings stores reloading parameters, so it has to provide getters and setters which use RWMutex.
 appSettings struct {
-reconnectInterval time.Duration
 mu sync.RWMutex
 defaultTimestamp bool
 zipCompression bool
-clientCut bool
-bufferMaxSizeForPut uint64
-namespaceHeader string
-defaultNamespaces []string
 }
 )

@@ -153,6 +140,7 @@ func newApp(ctx context.Context, opt ...Option) App {
 a.initAppSettings()
 a.initResolver()
 a.initMetrics()
+a.initIAM(ctx)
 a.initTracing(ctx)

 return a

@@ -182,54 +170,30 @@ func (s *appSettings) setZipCompression(val bool) {
 s.mu.Unlock()
 }

-func (s *appSettings) ClientCut() bool {
-s.mu.RLock()
-defer s.mu.RUnlock()
-return s.clientCut
-}
-
-func (s *appSettings) setClientCut(val bool) {
-s.mu.Lock()
-s.clientCut = val
-s.mu.Unlock()
-}
-
-func (s *appSettings) BufferMaxSizeForPut() uint64 {
-s.mu.RLock()
-defer s.mu.RUnlock()
-return s.bufferMaxSizeForPut
-}
-
-func (s *appSettings) setBufferMaxSizeForPut(val uint64) {
-s.mu.Lock()
-s.bufferMaxSizeForPut = val
-s.mu.Unlock()
-}
-
 func (a *app) initAppSettings() {
-a.settings = &appSettings{
-reconnectInterval: fetchReconnectInterval(a.cfg),
-}
+a.settings = &appSettings{}
 a.updateSettings()
 }

 func (a *app) initResolver() {
 var err error
-a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
+a.resolver, err = resolver.NewContainerResolver(a.getResolverOrder(), a.getResolverConfig())
 if err != nil {
 a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
 }
 }

-func (a *app) getResolverConfig() ([]string, *resolver.Config) {
-resolveCfg := &resolver.Config{
-FrostFS: frostfs.NewResolverFrostFS(a.pool),
+func (a *app) getResolverConfig() *resolver.Config {
+return &resolver.Config{
+FrostFS: resolver.NewFrostFSResolver(a.pool),
 RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
-Settings: a.settings,
 }
+}
+
+func (a *app) getResolverOrder() []string {
 order := a.cfg.GetStringSlice(cfgResolveOrder)
-if resolveCfg.RPCAddress == "" {
+if a.cfg.GetString(cfgRPCEndpoint) == "" {
 order = remove(order, resolver.NNSResolver)
 a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
 }

@@ -238,7 +202,7 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
 a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
 }

-return order, resolveCfg
+return order
 }

 func (a *app) initMetrics() {

@@ -247,6 +211,22 @@ func (a *app) initMetrics() {
 a.metrics.SetHealth(metrics.HealthStatusStarting)
 }

+func (a *app) initIAM(ctx context.Context) {
+if !a.cfg.GetBool(cfgFrostfsIDEnabled) {
+return
+}
+
+var err error
+a.frostfsid, err = frostfsid.New(ctx, frostfsid.Config{
+RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
+Contract: a.cfg.GetString(cfgFrostfsIDContract),
+Key: a.key,
+})
+if err != nil {
+a.log.Fatal("init frostfsid contract", zap.Error(err))
+}
+}
+
 func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
 if !enabled {
 logger.Warn(logs.MetricsAreDisabled)

@@ -408,22 +388,16 @@ func (a *app) Serve() {
 a.startServices()
 a.initServers(a.ctx)

-servs := a.getServers()
-
-for i := range servs {
+for i := range a.servers {
 go func(i int) {
-a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
-if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
-a.metrics.MarkUnhealthy(servs[i].Address())
+a.log.Info(logs.StartingServer, zap.String("address", a.servers[i].Address()))
+if err := a.webServer.Serve(a.servers[i].Listener()); err != nil && err != http.ErrServerClosed {
+a.metrics.MarkUnhealthy(a.servers[i].Address())
 a.log.Fatal(logs.ListenAndServe, zap.Error(err))
 }
 }(i)
 }

-if len(a.unbindServers) != 0 {
-a.scheduleReconnect(a.ctx, a.webServer)
-}
-
 sigs := make(chan os.Signal, 1)
 signal.Notify(sigs, syscall.SIGHUP)

@@ -473,7 +447,7 @@ func (a *app) configReload(ctx context.Context) {
 a.logLevel.SetLevel(lvl)
 }

-if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
+if err := a.resolver.UpdateResolvers(a.getResolverOrder()); err != nil {
 a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
 }

@@ -498,10 +472,6 @@ func (a *app) configReload(ctx context.Context) {
 func (a *app) updateSettings() {
 a.settings.setDefaultTimestamp(a.cfg.GetBool(cfgUploaderHeaderEnableDefaultTimestamp))
 a.settings.setZipCompression(a.cfg.GetBool(cfgZipCompression))
-a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
-a.settings.setBufferMaxSizeForPut(a.cfg.GetUint64(cfgBufferMaxSizeForPut))
-a.settings.setNamespaceHeader(a.cfg.GetString(cfgResolveNamespaceHeader))
-a.settings.setDefaultNamespaces(a.cfg.GetStringSlice(cfgResolveDefaultNamespaces))
 }

 func (a *app) startServices() {

@@ -535,20 +505,75 @@ func (a *app) configureRouter(handler *handler.Handler) {
 response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
 }

-r.POST("/upload/{cid}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.Upload))))))
+r.POST("/upload/{cid}", a.addMiddlewares(handler.Upload))
 a.log.Info(logs.AddedPathUploadCid)
-r.GET("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAddressOrBucketName))))))
-r.HEAD("/get/{cid}/{oid:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAddressOrBucketName))))))
+r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(handler.DownloadByAddressOrBucketName))
+r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(handler.HeadByAddressOrBucketName))
 a.log.Info(logs.AddedPathGetCidOid)
-r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadByAttribute))))))
-r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.HeadByAttribute))))))
+r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.DownloadByAttribute))
+r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.HeadByAttribute))
 a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
-r.GET("/zip/{cid}/{prefix:*}", a.logger(a.canonicalizer(a.tokenizer(a.tracer(a.reqNamespace(handler.DownloadZipped))))))
+r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(handler.DownloadZipped))
 a.log.Info(logs.AddedPathZipCidPrefix)

 a.webServer.Handler = r.Handler
 }

+func (a *app) addMiddlewares(handler fasthttp.RequestHandler) fasthttp.RequestHandler {
+list := []func(fasthttp.RequestHandler) fasthttp.RequestHandler{
+a.logger,
+a.tokenizer,
+a.tracer,
+}
+
+if a.frostfsid != nil {
+list = append(list, a.iam)
+}
+
+res := handler
+for i := len(list) - 1; i >= 0; i-- {
+res = list[i](res)
+}
+
+return res
+}
+
+func (a *app) iam(h fasthttp.RequestHandler) fasthttp.RequestHandler {
+return func(req *fasthttp.RequestCtx) {
+ctx := utils.GetContextFromRequest(req)
+tkn, err := tokens.LoadBearerToken(ctx)
+if err != nil || tkn == nil {
+a.log.Debug(logs.AnonRequestSkipIAMValidation, zap.Uint64("id", req.ID()))
+h(req)
+return
+}
+
+if err = validateBearerToken(a.frostfsid, tkn); err != nil {
+a.log.Error(logs.IAMValidationFailed, zap.Uint64("id", req.ID()), zap.Error(err))
+response.Error(req, "iam validation failed: "+err.Error(), fasthttp.StatusForbidden)
+return
+}
+
+h(req)
+}
+}
+
+func validateBearerToken(frostfsID *frostfsid.FrostFSID, bt *bearer.Token) error {
+m := new(acl.BearerToken)
+bt.WriteToV2(m)
+
+pk, err := keys.NewPublicKeyFromBytes(m.GetSignature().GetKey(), elliptic.P256())
+if err != nil {
+return fmt.Errorf("invalid bearer token public key: %w", err)
+}
+
+if err = frostfsID.ValidatePublicKey(pk); err != nil {
+return fmt.Errorf("validation data user key failed: %w", err)
+}
+
+return nil
+}
+
 func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 return func(req *fasthttp.RequestCtx) {
 a.log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),

@@ -560,45 +585,12 @@ func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 }
 }

-func (a *app) canonicalizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
-return func(req *fasthttp.RequestCtx) {
-// regardless of DisableHeaderNamesNormalizing setting, some headers
-// MUST be normalized in order to process execution. They are normalized
-// here.
-
-toAddKeys := make([][]byte, 0, 10)
-toAddValues := make([][]byte, 0, 10)
-prefix := []byte(utils.UserAttributeHeaderPrefix)
-
-req.Request.Header.VisitAll(func(k, v []byte) {
-if bytes.HasPrefix(k, prefix) {
-return
-}
-toAddKeys = append(toAddKeys, k)
-toAddValues = append(toAddValues, v)
-})
-
-// this is safe to do after all headers were read into header structure
-req.Request.Header.EnableNormalizing()
-
-for i := range toAddKeys {
-req.Request.Header.SetBytesKV(toAddKeys[i], toAddValues[i])
-}
-
-// return normalization setting back
-req.Request.Header.DisableNormalizing()
-
-h(req)
-}
-}
-
 func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 return func(req *fasthttp.RequestCtx) {
 appCtx, err := tokens.StoreBearerTokenAppCtx(a.ctx, req)
 if err != nil {
-a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Uint64("id", req.ID()), zap.Error(err))
+a.log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
 response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
-return
 }
 utils.SetContextToRequest(appCtx, req)
 h(req)

@@ -615,37 +607,22 @@ func (a *app) tracer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
 span.End()
 }()

-appCtx = treepool.SetRequestID(appCtx, strconv.FormatUint(req.ID(), 10))
-
 utils.SetContextToRequest(appCtx, req)
 h(req)
 }
 }

-func (a *app) reqNamespace(h fasthttp.RequestHandler) fasthttp.RequestHandler {
-return func(req *fasthttp.RequestCtx) {
-appCtx := utils.GetContextFromRequest(req)
-
-nsBytes := req.Request.Header.Peek(a.settings.NamespaceHeader())
-appCtx = middleware.SetNamespace(appCtx, string(nsBytes))
-
-utils.SetContextToRequest(appCtx, req)
-h(req)
-}
-}
-
-func (a *app) AppParams() *handler.AppParams {
-return &handler.AppParams{
+func (a *app) AppParams() *utils.AppParams {
+return &utils.AppParams{
 Logger: a.log,
-FrostFS: frostfs.NewFrostFS(a.pool),
+Pool: a.pool,
 Owner: a.owner,
 Resolver: a.resolver,
-Cache: cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
 }
 }

 func (a *app) initServers(ctx context.Context) {
-serversInfo := fetchServers(a.cfg, a.log)
+serversInfo := fetchServers(a.cfg)

 a.servers = make([]Server, 0, len(serversInfo))
 for _, serverInfo := range serversInfo {

@@ -655,7 +632,6 @@ func (a *app) initServers(ctx context.Context) {
 }
 srv, err := newServer(ctx, serverInfo)
 if err != nil {
-a.unbindServers = append(a.unbindServers, serverInfo)
 a.metrics.MarkUnhealthy(serverInfo.Address)
 a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
 continue

@@ -672,24 +648,21 @@ func (a *app) initServers(ctx context.Context) {
 }

 func (a *app) updateServers() error {
-serversInfo := fetchServers(a.cfg, a.log)
+serversInfo := fetchServers(a.cfg)

-a.mu.Lock()
-defer a.mu.Unlock()
-
 var found bool
 for _, serverInfo := range serversInfo {
-ser := a.getServer(serverInfo.Address)
-if ser != nil {
-if serverInfo.TLS.Enabled {
-if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
-return fmt.Errorf("failed to update tls certs: %w", err)
-}
-found = true
-}
-} else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
-found = true
-}
+index := a.serverIndex(serverInfo.Address)
+if index == -1 {
+continue
+}
+
+if serverInfo.TLS.Enabled {
+if err := a.servers[index].UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
+return fmt.Errorf("failed to update tls certs: %w", err)
+}
+}
+found = true
 }

 if !found {

@@ -699,29 +672,13 @@ func (a *app) updateServers() error {
 return nil
 }

-func (a *app) getServers() []Server {
-a.mu.RLock()
-defer a.mu.RUnlock()
-return a.servers
-}
-
-func (a *app) getServer(address string) Server {
+func (a *app) serverIndex(address string) int {
 for i := range a.servers {
 if a.servers[i].Address() == address {
-return a.servers[i]
+return i
 }
 }
-return nil
-}
-
-func (a *app) updateUnbindServerInfo(info ServerInfo) bool {
-for i := range a.unbindServers {
-if a.unbindServers[i].Address == info.Address {
-a.unbindServers[i] = info
-return true
-}
-}
-return false
+return -1
 }

 func (a *app) initTracing(ctx context.Context) {

@@ -761,93 +718,3 @@ func (a *app) setRuntimeParameters() {
 zap.Int64("old_value", previous))
 }
 }
-
-func (s *appSettings) NamespaceHeader() string {
-s.mu.RLock()
-defer s.mu.RUnlock()
-return s.namespaceHeader
-}
-
-func (s *appSettings) setNamespaceHeader(nsHeader string) {
-s.mu.Lock()
-s.namespaceHeader = nsHeader
-s.mu.Unlock()
-}
-
-func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
-s.mu.RLock()
-namespaces := s.defaultNamespaces
-s.mu.RUnlock()
-if slices.Contains(namespaces, ns) {
-return v2container.SysAttributeZoneDefault, true
-}
-
-return ns + ".ns", false
-}
-
-func (s *appSettings) setDefaultNamespaces(namespaces []string) {
-for i := range namespaces { // to be set namespaces in env variable as `HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"`
-namespaces[i] = strings.Trim(namespaces[i], "\"")
-}
-
-s.mu.Lock()
-s.defaultNamespaces = namespaces
-s.mu.Unlock()
-}
-
-func (a *app) scheduleReconnect(ctx context.Context, srv *fasthttp.Server) {
-go func() {
-t := time.NewTicker(a.settings.reconnectInterval)
-defer t.Stop()
-for {
-select {
-case <-t.C:
-if a.tryReconnect(ctx, srv) {
-return
-}
-t.Reset(a.settings.reconnectInterval)
-case <-ctx.Done():
-return
-}
-}
-}()
-}
-
-func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
-a.mu.Lock()
-defer a.mu.Unlock()
-
-a.log.Info(logs.ServerReconnecting)
-var failedServers []ServerInfo
-
-for _, serverInfo := range a.unbindServers {
-fields := []zap.Field{
-zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
-zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
-}
-
-srv, err := newServer(ctx, serverInfo)
-if err != nil {
-a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
-failedServers = append(failedServers, serverInfo)
-a.metrics.MarkUnhealthy(serverInfo.Address)
-continue
-}
-
-go func() {
-a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
-a.metrics.MarkHealthy(serverInfo.Address)
-if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
-a.log.Warn(logs.ListenAndServe, zap.Error(err))
-a.metrics.MarkUnhealthy(serverInfo.Address)
-}
-}()
-
-a.servers = append(a.servers, srv)
-a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
-}
-
-a.unbindServers = failedServers
-
-return len(a.unbindServers) == 0
-}

@@ -6,35 +6,29 @@ import (
 "archive/zip"
 "bytes"
 "context"
-"encoding/base64"
 "encoding/json"
 "fmt"
 "io"
 "mime/multipart"
 "net/http"
-"os"
 "sort"
 "testing"
 "time"

 containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
-"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/eacl"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
-"github.com/nspcc-dev/neo-go/pkg/wallet"
 "github.com/spf13/viper"
 "github.com/stretchr/testify/require"
 "github.com/testcontainers/testcontainers-go"
 "github.com/testcontainers/testcontainers-go/wait"
-"go.uber.org/zap/zapcore"
 )

 type putResponse struct {

@@ -52,18 +46,11 @@ func TestIntegration(t *testing.T) {
 rootCtx := context.Background()
 aioImage := "truecloudlab/frostfs-aio:"
 versions := []string{
-"1.2.7",
-"1.3.0",
-"1.5.0",
+"1.2.7", // frostfs-storage v0.36.0 RC
 }
 key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
 require.NoError(t, err)

-file, err := os.CreateTemp("", "wallet")
-require.NoError(t, err)
-defer os.Remove(file.Name())
-makeTempWallet(t, key, file.Name())
-
 var ownerID user.ID
 user.IDFromKey(&ownerID, key.PrivateKey.PublicKey)

@@ -71,21 +58,16 @@ func TestIntegration(t *testing.T) {
 ctx, cancel2 := context.WithCancel(rootCtx)

 aioContainer := createDockerContainer(ctx, t, aioImage+version)
-server, cancel := runServer(file.Name())
+server, cancel := runServer()
 clientPool := getPool(ctx, t, key)
 CID, err := createContainer(ctx, t, clientPool, ownerID, version)
 require.NoError(t, err, version)

-token := makeBearerToken(t, key, ownerID, version)
-
 t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
-t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
-t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
 t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
 t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
 t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
 t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
-t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })

 cancel()
 server.Wait()

@@ -95,14 +77,11 @@ func TestIntegration(t *testing.T) {
 }
 }

-func runServer(pathToWallet string) (App, context.CancelFunc) {
+func runServer() (App, context.CancelFunc) {
 cancelCtx, cancel := context.WithCancel(context.Background())

 v := getDefaultConfig()
-v.Set(cfgWalletPath, pathToWallet)
-v.Set(cfgWalletPassphrase, "")
-
-l, lvl := newStdoutLogger(zapcore.DebugLevel)
+l, lvl := newLogger(v)
 application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
 go application.Serve()

@@ -117,38 +96,7 @@ func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, vers
 makePutRequestAndCheck(ctx, t, p, CID, url)
 }

-func putWithBearerTokenInHeader(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
-url := testHost + "/upload/" + CID.String()
-
-request, content, attributes := makePutRequest(t, url)
-request.Header.Set("Authorization", "Bearer "+token)
-resp, err := http.DefaultClient.Do(request)
-require.NoError(t, err)
-
-checkPutResponse(ctx, t, p, CID, resp, content, attributes)
-}
-
-func putWithBearerTokenInCookie(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, token string) {
-url := testHost + "/upload/" + CID.String()
-
-request, content, attributes := makePutRequest(t, url)
-request.AddCookie(&http.Cookie{Name: "Bearer", Value: token})
-resp, err := http.DefaultClient.Do(request)
-require.NoError(t, err)
-
-checkPutResponse(ctx, t, p, CID, resp, content, attributes)
-}
-
 func makePutRequestAndCheck(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, url string) {
-request, content, attributes := makePutRequest(t, url)
-
-resp, err := http.DefaultClient.Do(request)
-require.NoError(t, err)
-
-checkPutResponse(ctx, t, p, cnrID, resp, content, attributes)
-}
-
-func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string]string) {
 content := "content of file"
 keyAttr, valAttr := "User-Attribute", "user value"
 attributes := map[string]string{

@@ -170,10 +118,9 @@ func makePutRequest(t *testing.T, url string) (*http.Request, string, map[string
 request.Header.Set("Content-Type", w.FormDataContentType())
 request.Header.Set("X-Attribute-"+keyAttr, valAttr)

-return request, content, attributes
-}
+resp, err := http.DefaultClient.Do(request)
+require.NoError(t, err)

-func checkPutResponse(ctx context.Context, t *testing.T, p *pool.Pool, cnrID cid.ID, resp *http.Response, content string, attributes map[string]string) {
 defer func() {
 err := resp.Body.Close()
 require.NoError(t, err)

@@ -391,40 +338,6 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
 }
 }

-func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
-content := "content of file"
-attributes := map[string]string{
-"some-attr": "some-get-value",
-}
-
-id := putObject(ctx, t, clientPool, ownerID, CID, content, attributes)
-
-req, err := http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
-require.NoError(t, err)
-req.Header.Set(defaultNamespaceHeader, "")
-
-resp, err := http.DefaultClient.Do(req)
-require.NoError(t, err)
-checkGetResponse(t, resp, content, attributes)
-
-req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
-require.NoError(t, err)
-req.Header.Set(defaultNamespaceHeader, "root")
-
-resp, err = http.DefaultClient.Do(req)
-require.NoError(t, err)
-checkGetResponse(t, resp, content, attributes)
-
-req, err = http.NewRequest(http.MethodGet, testHost+"/get/"+testContainerName+"/"+id.String(), nil)
-require.NoError(t, err)
-req.Header.Set(defaultNamespaceHeader, "root2")
-
-resp, err = http.DefaultClient.Do(req)
-require.NoError(t, err)
-require.Equal(t, http.StatusNotFound, resp.StatusCode)
-}
-
 func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
 req := testcontainers.ContainerRequest{
 Image: image,

@@ -507,7 +420,7 @@ func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
 func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, content string, attributes map[string]string) oid.ID {
 obj := object.New()
 obj.SetContainerID(CID)
 obj.SetOwnerID(ownerID)
|
obj.SetOwnerID(&ownerID)
|
||||||
|
|
||||||
var attrs []object.Attribute
|
var attrs []object.Attribute
|
||||||
for key, val := range attributes {
|
for key, val := range attributes {
|
||||||
|
@ -527,37 +440,3 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
|
||||||
|
|
||||||
return id
|
return id
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
|
|
||||||
tkn := new(bearer.Token)
|
|
||||||
tkn.ForUser(ownerID)
|
|
||||||
tkn.SetExp(10000)
|
|
||||||
|
|
||||||
if version == "1.2.7" {
|
|
||||||
tkn.SetEACLTable(*eacl.NewTable())
|
|
||||||
} else {
|
|
||||||
tkn.SetImpersonate(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := tkn.Sign(key.PrivateKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
|
||||||
require.NotEmpty(t, t64)
|
|
||||||
|
|
||||||
return t64
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
|
|
||||||
w, err := wallet.NewWallet(path)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
acc := wallet.NewAccountFromPrivateKey(key)
|
|
||||||
err = acc.Encrypt("", w.Scrypt)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
w.AddAccount(acc)
|
|
||||||
|
|
||||||
err = w.Save()
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
|
@@ -9,7 +9,7 @@ import (
 func main() {
 	globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
 	v := settings()
-	logger, atomicLevel := pickLogger(v)
+	logger, atomicLevel := newLogger(v)

 	application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
 	go application.Serve()
@@ -68,13 +68,11 @@ func newServer(ctx context.Context, serverInfo ServerInfo) (*server, error) {

 	if serverInfo.TLS.Enabled {
 		if err = tlsProvider.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
-			lnErr := ln.Close()
-			return nil, fmt.Errorf("failed to update cert (listener close: %v): %w", lnErr, err)
+			return nil, fmt.Errorf("failed to update cert: %w", err)
 		}

 		ln = tls.NewListener(ln, &tls.Config{
 			GetCertificate: tlsProvider.GetCertificate,
-			NextProtos:     []string{"h2"}, // required to enable HTTP/2 requests in `http.Serve`
 		})
 	}
@ -1,119 +0,0 @@
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"crypto/x509/pkix"
|
|
||||||
"encoding/pem"
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"golang.org/x/net/http2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
expHeaderKey = "Foo"
|
|
||||||
expHeaderValue = "Bar"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestHTTP2TLS(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
certPath, keyPath := prepareTestCerts(t)
|
|
||||||
|
|
||||||
srv := &http.Server{
|
|
||||||
Handler: http.HandlerFunc(testHandler),
|
|
||||||
}
|
|
||||||
|
|
||||||
tlsListener, err := newServer(ctx, ServerInfo{
|
|
||||||
Address: ":0",
|
|
||||||
TLS: ServerTLSInfo{
|
|
||||||
Enabled: true,
|
|
||||||
CertFile: certPath,
|
|
||||||
KeyFile: keyPath,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
port := tlsListener.Listener().Addr().(*net.TCPAddr).Port
|
|
||||||
addr := fmt.Sprintf("https://localhost:%d", port)
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
_ = srv.Serve(tlsListener.Listener())
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Server is running, now send HTTP/2 request
|
|
||||||
|
|
||||||
tlsClientConfig := &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
cliHTTP1 := http.Client{Transport: &http.Transport{TLSClientConfig: tlsClientConfig}}
|
|
||||||
cliHTTP2 := http.Client{Transport: &http2.Transport{TLSClientConfig: tlsClientConfig}}
|
|
||||||
|
|
||||||
req, err := http.NewRequest("GET", addr, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
req.Header[expHeaderKey] = []string{expHeaderValue}
|
|
||||||
|
|
||||||
resp, err := cliHTTP1.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
||||||
|
|
||||||
resp, err = cliHTTP2.Do(req)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, http.StatusOK, resp.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testHandler(resp http.ResponseWriter, req *http.Request) {
|
|
||||||
hdr, ok := req.Header[expHeaderKey]
|
|
||||||
if !ok || len(hdr) != 1 || hdr[0] != expHeaderValue {
|
|
||||||
resp.WriteHeader(http.StatusBadRequest)
|
|
||||||
} else {
|
|
||||||
resp.WriteHeader(http.StatusOK)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareTestCerts(t *testing.T) (certPath, keyPath string) {
|
|
||||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
template := x509.Certificate{
|
|
||||||
SerialNumber: big.NewInt(1),
|
|
||||||
Subject: pkix.Name{CommonName: "localhost"},
|
|
||||||
NotBefore: time.Now(),
|
|
||||||
NotAfter: time.Now().Add(time.Hour * 24 * 365),
|
|
||||||
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
|
|
||||||
BasicConstraintsValid: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
|
||||||
certPath = path.Join(dir, "cert.pem")
|
|
||||||
keyPath = path.Join(dir, "key.pem")
|
|
||||||
|
|
||||||
certFile, err := os.Create(certPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer certFile.Close()
|
|
||||||
|
|
||||||
keyFile, err := os.Create(keyPath)
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer keyFile.Close()
|
|
||||||
|
|
||||||
err = pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
err = pem.Encode(keyFile, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
return certPath, keyPath
|
|
||||||
}
|
|
|
@@ -13,28 +13,20 @@ import (
 	"strings"
 	"time"

-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
 	grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
-	"git.frostfs.info/TrueCloudLab/zapjournald"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/spf13/pflag"
 	"github.com/spf13/viper"
-	"github.com/ssgreg/journald"
 	"github.com/valyala/fasthttp"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 	"google.golang.org/grpc"
 )

-const (
-	destinationStdout   = "stdout"
-	destinationJournald = "journald"
-)
-
 const (
 	defaultRebalanceTimer  = 60 * time.Second
 	defaultRequestTimeout  = 15 * time.Second
@@ -47,19 +39,11 @@ const (

 	defaultSoftMemoryLimit = math.MaxInt64

-	defaultBufferMaxSizeForPut = 1024 * 1024 // 1mb
-
-	defaultNamespaceHeader = "X-Frostfs-Namespace"
-
-	defaultReconnectInterval = time.Minute
-
 	cfgServer      = "server"
 	cfgTLSEnabled  = "tls.enabled"
 	cfgTLSCertFile = "tls.cert_file"
 	cfgTLSKeyFile  = "tls.key_file"

-	cfgReconnectInterval = "reconnect_interval"
-
 	// Web.
 	cfgWebReadBufferSize  = "web.read_buffer_size"
 	cfgWebWriteBufferSize = "web.write_buffer_size"
@@ -87,8 +71,7 @@ const (
 	cfgPoolErrorThreshold = "pool_error_threshold"

 	// Logger.
 	cfgLoggerLevel = "logger.level"
-	cfgLoggerDestination = "logger.destination"

 	// Wallet.
 	cfgWalletPassphrase = "wallet.passphrase"
@@ -113,21 +96,9 @@ const (
 	// Runtime.
 	cfgSoftMemoryLimit = "runtime.soft_memory_limit"

-	// Enabling client side object preparing for PUT operations.
-	cfgClientCut = "frostfs.client_cut"
-	// Sets max buffer size for read payload in put operations.
-	cfgBufferMaxSizeForPut = "frostfs.buffer_max_size_for_put"
-	// Configuration of parameters of requests to FrostFS.
-	// Sets max attempt to make successful tree request.
-	cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
-
-	// Caching.
-	cfgBucketsCacheLifetime = "cache.buckets.lifetime"
-	cfgBucketsCacheSize     = "cache.buckets.size"
-
-	// Bucket resolving options.
-	cfgResolveNamespaceHeader   = "resolve_bucket.namespace_header"
-	cfgResolveDefaultNamespaces = "resolve_bucket.default_namespaces"
+	// FrostfsID.
+	cfgFrostfsIDEnabled  = "frostfsid.enabled"
+	cfgFrostfsIDContract = "frostfsid.contract"

 	// Command line args.
 	cmdHelp = "help"
@@ -186,14 +157,10 @@ func settings() *viper.Viper {

 	// logger:
 	v.SetDefault(cfgLoggerLevel, "debug")
-	v.SetDefault(cfgLoggerDestination, "stdout")

 	// pool:
 	v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)

-	// frostfs:
-	v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
-
 	// web-server:
 	v.SetDefault(cfgWebReadBufferSize, 4096)
 	v.SetDefault(cfgWebWriteBufferSize, 4096)
@@ -212,9 +179,8 @@ func settings() *viper.Viper {
 	v.SetDefault(cfgPprofAddress, "localhost:8083")
 	v.SetDefault(cfgPrometheusAddress, "localhost:8084")

-	// resolve bucket
-	v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
-	v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
+	// frostfsid
+	v.SetDefault(cfgFrostfsIDContract, "frostfsid.frostfs")

 	// Binding flags
 	if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
@@ -375,25 +341,7 @@ func mergeConfig(v *viper.Viper, fileName string) error {
 	return v.MergeConfig(cfgFile)
 }

-func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
-	lvl, err := getLogLevel(v)
-	if err != nil {
-		panic(err)
-	}
-
-	dest := v.GetString(cfgLoggerDestination)
-
-	switch dest {
-	case destinationStdout:
-		return newStdoutLogger(lvl)
-	case destinationJournald:
-		return newJournaldLogger(lvl)
-	default:
-		panic(fmt.Sprintf("wrong destination for logger: %s", dest))
-	}
-}
-
-// newStdoutLogger constructs a zap.Logger instance for current application.
+// newLogger constructs a zap.Logger instance for current application.
 //
 // Panics on failure.
 //
 // Logger is built from zap's production logging configuration with:
@@ -404,7 +352,12 @@ func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
 // Logger records a stack trace for all messages at or above fatal level.
 //
 // See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
-func newStdoutLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
+func newLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
+	lvl, err := getLogLevel(v)
+	if err != nil {
+		panic(err)
+	}
+
 	c := zap.NewProductionConfig()
 	c.Level = zap.NewAtomicLevelAt(lvl)
 	c.Encoding = "console"
@@ -420,25 +373,6 @@ func newStdoutLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
 	return l, c.Level
 }

-func newJournaldLogger(lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
-	c := zap.NewProductionConfig()
-	c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
-	c.Level = zap.NewAtomicLevelAt(lvl)
-
-	encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
-
-	core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
-	coreWithContext := core.With([]zapcore.Field{
-		zapjournald.SyslogFacility(zapjournald.LogDaemon),
-		zapjournald.SyslogIdentifier(),
-		zapjournald.SyslogPid(),
-	})
-
-	l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
-
-	return l, c.Level
-}
-
 func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
 	var lvl zapcore.Level
 	lvlStr := v.GetString(cfgLoggerLevel)
@@ -458,18 +392,8 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
 	return lvl, nil
 }

-func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
-	reconnect := cfg.GetDuration(cfgReconnectInterval)
-	if reconnect <= 0 {
-		reconnect = defaultReconnectInterval
-	}
-
-	return reconnect
-}
-
-func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
+func fetchServers(v *viper.Viper) []ServerInfo {
 	var servers []ServerInfo
-	seen := make(map[string]struct{})

 	for i := 0; ; i++ {
 		key := cfgServer + "." + strconv.Itoa(i) + "."
@@ -484,11 +408,6 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
 			break
 		}

-		if _, ok := seen[serverInfo.Address]; ok {
-			log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
-			continue
-		}
-		seen[serverInfo.Address] = struct{}{}
 		servers = append(servers, serverInfo)
 	}

@@ -549,8 +468,6 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
 	prm.SetLogger(logger)
 	prmTree.SetLogger(logger)

-	prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
-
 	var apiGRPCDialOpts []grpc.DialOption
 	var treeGRPCDialOpts []grpc.DialOption
 	if cfg.GetBool(cfgTracingEnabled) {
@@ -621,44 +538,3 @@ func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {

 	return int64(softMemoryLimit)
 }

-func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
-	cacheCfg := cache.DefaultBucketConfig(l)
-
-	cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
-	cacheCfg.Size = fetchCacheSize(v, l, cfgBucketsCacheSize, cacheCfg.Size)
-
-	return cacheCfg
-}
-
-func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
-	if v.IsSet(cfgEntry) {
-		lifetime := v.GetDuration(cfgEntry)
-		if lifetime <= 0 {
-			l.Error(logs.InvalidLifetimeUsingDefaultValue,
-				zap.String("parameter", cfgEntry),
-				zap.Duration("value in config", lifetime),
-				zap.Duration("default", defaultValue))
-		} else {
-			return lifetime
-		}
-	}
-
-	return defaultValue
-}
-
-func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue int) int {
-	if v.IsSet(cfgEntry) {
-		size := v.GetInt(cfgEntry)
-		if size <= 0 {
-			l.Error(logs.InvalidCacheSizeUsingDefaultValue,
-				zap.String("parameter", cfgEntry),
-				zap.Int("value in config", size),
-				zap.Int("default", defaultValue))
-		} else {
-			return size
-		}
-	}
-
-	return defaultValue
-}
@@ -26,9 +26,6 @@ HTTP_GW_SERVER_1_TLS_ENABLED=true
 HTTP_GW_SERVER_1_TLS_CERT_FILE=/path/to/tls/cert
 HTTP_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key

-# How often to reconnect to the servers
-HTTP_GW_RECONNECT_INTERVAL: 1m
-
 # Nodes configuration.
 # This configuration make the gateway use the first node (grpc://s01.frostfs.devenv:8080)
 # while it's healthy. Otherwise, the gateway use the second node (grpc://s01.frostfs.devenv:8080)
@@ -102,22 +99,8 @@ HTTP_GW_TRACING_EXPORTER="otlp_grpc"

 HTTP_GW_RUNTIME_SOFT_MEMORY_LIMIT=1073741824

-# Parameters of requests to FrostFS
-# This flag enables client side object preparing.
-HTTP_GW_FROSTFS_CLIENT_CUT=false
-# Sets max buffer size for read payload in put operations.
-HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
-
-# Caching
-# Cache which contains mapping of bucket name to bucket info
-HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
-HTTP_GW_CACHE_BUCKETS_SIZE=1000
-
-# Header to determine zone to resolve bucket name
-HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
-# Namespaces that should be handled as default
-HTTP_GW_RESOLVE_BUCKET_DEFAULT_NAMESPACES="" "root"
-
-# Max attempt to make successful tree request.
-# default value is 0 that means the number of attempts equals to number of nodes in pool.
-HTTP_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
+# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.
+# Enables check that allow requests only users that is registered in FrostfsID contract.
+HTTP_GW_FROSTFSID_ENABLED=false
+# FrostfsID contract hash (LE) or name in NNS.
+HTTP_GW_FROSTFSID_CONTRACT=frostfsid.frostfs
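Aside: the `HTTP_GW_*` variables above override the same keys that appear in the YAML config (for example `HTTP_GW_LOGGER_LEVEL` corresponds to `logger.level`). The sketch below shows one common way such overrides are wired with spf13/viper, which is already a dependency of this gateway; the `HTTP_GW` prefix and the two keys are taken from this page, but the wiring itself is an assumption for illustration, not a quote of the gateway's configuration code.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

// newEnvConfig builds a viper instance where HTTP_GW_LOGGER_LEVEL maps to
// "logger.level", HTTP_GW_FROSTFSID_ENABLED to "frostfsid.enabled", and so on.
func newEnvConfig() *viper.Viper {
	v := viper.New()
	v.SetEnvPrefix("HTTP_GW")                          // every variable starts with HTTP_GW_
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) // dots in config keys become underscores in env names
	v.AutomaticEnv()                                   // consult the environment on every Get*
	return v
}

func main() {
	v := newEnvConfig()
	fmt.Println("logger.level =", v.GetString("logger.level"))
	fmt.Println("frostfsid.enabled =", v.GetBool("frostfsid.enabled"))
}
```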
@@ -16,7 +0,6 @@ tracing:

 logger:
   level: debug # Log level.
-  destination: stdout

 server:
   - address: 0.0.0.0:8080
@@ -55,7 +54,6 @@ peers:
     priority: 2
     weight: 9

-reconnect_interval: 1m

 web:
   # Per-connection buffer size for requests' reading.
@@ -107,23 +105,10 @@ zip:
 runtime:
   soft_memory_limit: 1gb

-# Parameters of requests to FrostFS
-frostfs:
-  # This flag enables client side object preparing.
-  client_cut: false
-  # Sets max buffer size for read payload in put operations.
-  buffer_max_size_for_put: 1048576
-  # Max attempt to make successful tree request.
-  # default value is 0 that means the number of attempts equals to number of nodes in pool.
-  tree_pool_max_attempts: 0
-
-# Caching
-cache:
-  # Cache which contains mapping of bucket name to bucket info
-  buckets:
-    lifetime: 1m
-    size: 1000
-
-resolve_bucket:
-  namespace_header: X-Frostfs-Namespace
-  default_namespaces: [ "", "root" ]
+# FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.
+frostfsid:
+  # Enables check that allow requests only users that is registered in FrostfsID contract.
+  enabled: false
+  # FrostfsID contract hash (LE) or name in NNS.
+  contract: frostfsid.frostfs
@@ -40,23 +40,21 @@ $ cat http.log

 # Structure

 | Section          | Description                                            |
-|------------------|-----------------------------------------------------------------|
+|-----------------|--------------------------------------------------------|
 | no section       | [General parameters](#general-section)                 |
 | `wallet`         | [Wallet configuration](#wallet-section)                |
 | `peers`          | [Nodes configuration](#peers-section)                  |
 | `logger`         | [Logger configuration](#logger-section)                |
 | `web`            | [Web configuration](#web-section)                      |
 | `server`         | [Server configuration](#server-section)                |
 | `upload-header`  | [Upload header configuration](#upload-header-section)  |
 | `zip`            | [ZIP configuration](#zip-section)                      |
 | `pprof`          | [Pprof configuration](#pprof-section)                  |
 | `prometheus`     | [Prometheus configuration](#prometheus-section)        |
 | `tracing`        | [Tracing configuration](#tracing-section)              |
 | `runtime`        | [Runtime configuration](#runtime-section)              |
-| `frostfs`        | [Frostfs configuration](#frostfs-section)              |
-| `cache`          | [Cache configuration](#cache-section)                  |
-| `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
+| `frostfsid`      | [FrostfsID configuration](#frostfsid-section)          |

 # General section
@@ -72,19 +70,17 @@ stream_timeout: 10s
 request_timeout: 5s
 rebalance_timer: 30s
 pool_error_threshold: 100
-reconnect_interval: 1m
 ```

 | Parameter              | Type       | SIGHUP reload | Default value | Description |
-|------------------------|------------|---------------|----------------|------------------------------------------------------------------------------------|
-| `rpc_endpoint`         | `string`   | yes           |               | The address of the RPC host to which the gateway connects to resolve bucket names. |
-| `resolve_order`        | `[]string` | yes           | `[nns, dns]`  | Order of bucket name resolvers to use. |
-| `connect_timeout`      | `duration` |               | `10s`         | Timeout to connect to a node. |
-| `stream_timeout`       | `duration` |               | `10s`         | Timeout for individual operations in streaming RPC. |
-| `request_timeout`      | `duration` |               | `15s`         | Timeout to check node health during rebalance. |
-| `rebalance_timer`      | `duration` |               | `60s`         | Interval to check node health. |
-| `pool_error_threshold` | `uint32`   |               | `100`         | The number of errors on connection after which node is considered as unhealthy. |
-| `reconnect_interval`   | `duration` | no            | `1m`          | Listeners reconnection interval. |
+|------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `rpc_endpoint`         | `string`   | no            |               | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
+| `resolve_order`        | `[]string` | yes           | `[nns, dns]`  | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
+| `connect_timeout`      | `duration` | no            | `10s`         | Timeout to connect to a node. |
+| `stream_timeout`       | `duration` | no            | `10s`         | Timeout for individual operations in streaming RPC. |
+| `request_timeout`      | `duration` | no            | `15s`         | Timeout to check node health during rebalance. |
+| `rebalance_timer`      | `duration` | no            | `60s`         | Interval to check node health. |
+| `pool_error_threshold` | `uint32`   | no            | `100`         | The number of errors on connection after which node is considered as unhealthy. |

 # `wallet` section

@@ -163,13 +159,12 @@ server:
 ```yaml
 logger:
   level: debug
-  destination: stdout
 ```

 | Parameter     | Type     | SIGHUP reload | Default value | Description                                                                                        |
-|---------------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
-| `level`       | `string` | yes           | `debug`       | Logging level.<br/>Possible values:  `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
-| `destination` | `string` | no            | `stdout`      | Destination for logger: `stdout` or `journald` |
+|-----------|----------|---------------|---------------|----------------------------------------------------------------------------------------------------|
+| `level`   | `string` | yes           | `debug`       | Logging level.<br/>Possible values:  `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |

 # `web` section

@@ -276,63 +271,17 @@ runtime:
 |---------------------|--------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | `soft_memory_limit` | `size` | yes           | maxint64      | Soft memory limit for the runtime. Zero or no value stands for no limit. If `GOMEMLIMIT` environment variable is set, the value from the configuration file will be ignored. |

-# `frostfs` section
+# `frostfsid` section

-Contains parameters of requests to FrostFS.
+FrostfsID contract configuration. To enable this functionality the `rpc_endpoint` param must be also set.

 ```yaml
-frostfs:
-  client_cut: false
-  buffer_max_size_for_put: 1048576 # 1mb
-  tree_pool_max_attempts: 0
+frostfsid:
+  enabled: false
+  contract: frostfsid.frostfs
 ```

 | Parameter                  | Type     | SIGHUP reload | Default value     | Description                                                                                                                 |
-|----------------------------|----------|---------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------|
-| `client_cut`               | `bool`   | yes           | `false`           | This flag enables client side object preparing. |
-| `buffer_max_size_for_put`  | `uint64` | yes           | `1048576`         | Sets max buffer size for read payload in put operations. |
-| `tree_pool_max_attempts`   | `uint32` | no            | `0`               | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
+|------------|----------|---------------|-------------------|----------------------------------------------------------------------------------------|
+| `enabled`  | `bool`   | no            | false             | Enables check that allow requests only users that is registered in FrostfsID contract. |
+| `contract` | `string` | no            | frostfsid.frostfs | FrostfsID contract hash (LE) or name in NNS. |

-### `cache` section
-
-```yaml
-cache:
-  buckets:
-    lifetime: 1m
-    size: 1000
-```
-
-| Parameter | Type                              | Default value                   | Description                                                  |
-|-----------|-----------------------------------|---------------------------------|--------------------------------------------------------------|
-| `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
-
-#### `cache` subsection
-
-```yaml
-lifetime: 1m
-size: 1000
-```
-
-| Parameter  | Type       | Default value    | Description                   |
-|------------|------------|------------------|-------------------------------|
-| `lifetime` | `duration` | depends on cache | Lifetime of entries in cache. |
-| `size`     | `int`      | depends on cache | LRU cache size.               |
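Aside: the removed `cache.buckets` options above describe an LRU cache with a per-entry lifetime. The sketch below illustrates that behaviour with the same bluele/gcache primitives used by the removed `internal/cache/buckets.go` further down this page; the key and value strings are placeholders, and this is a minimal illustration rather than the gateway's own cache code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/bluele/gcache"
)

func main() {
	// LRU cache with at most 1000 entries, each expiring one minute after Set,
	// mirroring the documented defaults for cache.buckets (size: 1000, lifetime: 1m).
	c := gcache.New(1000).LRU().Expiration(time.Minute).Build()

	// The real cache keys bucket info by "<name>.<namespace>"; the value here is a placeholder.
	if err := c.Set("my-bucket.root", "resolved-container-id-placeholder"); err != nil {
		panic(err)
	}

	if v, err := c.Get("my-bucket.root"); err == nil {
		fmt.Println("cache hit:", v)
	} else {
		fmt.Println("cache miss:", err) // returned after eviction or expiration
	}
}
```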

-# `resolve_bucket` section
-
-Bucket name resolving parameters from and to container ID.
-
-```yaml
-resolve_bucket:
-  namespace_header: X-Frostfs-Namespace
-  default_namespaces: [ "", "root" ]
-```
-
-| Parameter            | Type       | SIGHUP reload | Default value         | Description                                      |
-|----------------------|------------|---------------|-----------------------|--------------------------------------------------|
-| `namespace_header`   | `string`   | yes           | `X-Frostfs-Namespace` | Header to determine zone to resolve bucket name. |
-| `default_namespaces` | `[]string` | yes           | ["","root"]           | Namespaces that should be handled as default.    |
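For context on the removed `resolve_bucket` options: a client selects the namespace used for bucket-name resolution by sending the configured header on a request. The sketch below is illustrative only; the gateway address, bucket name and object ID are placeholders, while the `/get/...` path, the `X-Frostfs-Namespace` header and the `root` namespace come from the docs and tests on this page.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder values; adjust to your deployment.
	const gateway = "http://localhost:8080"
	const bucket = "my-bucket"
	const objectID = "8N3o7Dtr6T1xteCt6eRwhpmJ7JhME58Hyu1dvaswuTDd"

	req, err := http.NewRequest(http.MethodGet, gateway+"/get/"+bucket+"/"+objectID, nil)
	if err != nil {
		panic(err)
	}
	// Resolve "my-bucket" inside the "root" namespace.
	req.Header.Set("X-Frostfs-Namespace", "root")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```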
go.mod (65 lines changed)
@@ -1,34 +1,28 @@
 module git.frostfs.info/TrueCloudLab/frostfs-http-gw

-go 1.22
+go 1.20

 require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.15.1-0.20230802075510-964c3edb3f44
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.18.1-0.20231004065251-4194633db7bb
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240718141740-ce8270568d36
-	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
-	github.com/bluele/gcache v0.0.2
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230802103237-363f153eafa6
 	github.com/fasthttp/router v1.4.1
-	github.com/nspcc-dev/neo-go v0.106.2
-	github.com/prometheus/client_golang v1.19.0
-	github.com/prometheus/client_model v0.5.0
+	github.com/nspcc-dev/neo-go v0.101.5-0.20230808195420-5fc61be5f6c5
+	github.com/prometheus/client_golang v1.15.1
+	github.com/prometheus/client_model v0.3.0
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.15.0
-	github.com/ssgreg/journald v1.0.0
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.8.3
 	github.com/testcontainers/testcontainers-go v0.13.0
-	github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
 	github.com/valyala/fasthttp v1.34.0
 	go.opentelemetry.io/otel v1.16.0
 	go.opentelemetry.io/otel/trace v1.16.0
-	go.uber.org/zap v1.27.0
-	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
-	golang.org/x/net v0.23.0
-	google.golang.org/grpc v1.62.0
+	go.uber.org/zap v1.26.0
+	google.golang.org/grpc v1.55.0
 )

 require (
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
@@ -56,32 +50,33 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/uuid v1.6.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
-	github.com/gorilla/websocket v1.5.1 // indirect
+	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+	github.com/hashicorp/golang-lru v0.6.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/klauspost/compress v1.16.4 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/sys/mount v0.3.2 // indirect
 	github.com/moby/sys/mountinfo v0.6.1 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
-	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
-	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
+	github.com/nspcc-dev/go-ordered-json v0.0.0-20220111165707-25110be27d22 // indirect
+	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20230808195420-5fc61be5f6c5 // indirect
+	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opencontainers/image-spec v1.0.2 // indirect
 	github.com/opencontainers/runc v1.1.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/common v0.48.0 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/common v0.42.0 // indirect
+	github.com/prometheus/procfs v0.9.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
 	github.com/sirupsen/logrus v1.8.1 // indirect
@@ -93,7 +88,6 @@ require (
 	github.com/twmb/murmur3 v1.1.8 // indirect
 	github.com/urfave/cli v1.22.5 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
-	go.etcd.io/bbolt v1.3.9 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
@@ -102,17 +96,18 @@ require (
 	go.opentelemetry.io/otel/metric v1.16.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.16.0 // indirect
 	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.21.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
-	golang.org/x/sys v0.18.0 // indirect
-	golang.org/x/term v0.18.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/crypto v0.9.0 // indirect
+	golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect
+	golang.org/x/net v0.10.0 // indirect
+	golang.org/x/sync v0.2.0 // indirect
+	golang.org/x/sys v0.8.0 // indirect
+	golang.org/x/term v0.8.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
internal/cache/buckets.go (72 lines removed)
@@ -1,72 +0,0 @@
-package cache
-
-import (
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"github.com/bluele/gcache"
-	"go.uber.org/zap"
-)
-
-// BucketCache contains cache with objects and the lifetime of cache entries.
-type BucketCache struct {
-	cache  gcache.Cache
-	logger *zap.Logger
-}
-
-// Config stores expiration params for cache.
-type Config struct {
-	Size     int
-	Lifetime time.Duration
-	Logger   *zap.Logger
-}
-
-const (
-	// DefaultBucketCacheSize is a default maximum number of entries in cache.
-	DefaultBucketCacheSize = 1e3
-	// DefaultBucketCacheLifetime is a default lifetime of entries in cache.
-	DefaultBucketCacheLifetime = time.Minute
-)
-
-// DefaultBucketConfig returns new default cache expiration values.
-func DefaultBucketConfig(logger *zap.Logger) *Config {
-	return &Config{
-		Size:     DefaultBucketCacheSize,
-		Lifetime: DefaultBucketCacheLifetime,
-		Logger:   logger,
-	}
-}
-
-// NewBucketCache creates an object of BucketCache.
-func NewBucketCache(config *Config) *BucketCache {
-	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
-	return &BucketCache{cache: gc, logger: config.Logger}
-}
-
-// Get returns a cached object.
-func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
-	entry, err := o.cache.Get(formKey(ns, bktName))
-	if err != nil {
-		return nil
-	}
-
-	result, ok := entry.(*data.BucketInfo)
-	if !ok {
-		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)))
-		return nil
-	}
-
-	return result
-}
-
-// Put puts an object to cache.
-func (o *BucketCache) Put(bkt *data.BucketInfo) error {
-	return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
-}
-
-func formKey(ns, name string) string {
-	return name + "." + ns
-}
@@ -1,12 +0,0 @@
-package data
-
-import (
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-)
-
-type BucketInfo struct {
-	Name                    string // container name from system attribute
-	Zone                    string // container zone from system attribute
-	CID                     cid.ID
-	HomomorphicHashDisabled bool
-}
|
@ -1,242 +0,0 @@
|
||||||
package frostfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FrostFS represents virtual connection to the FrostFS network.
|
|
||||||
// It is used to provide an interface to dependent packages
|
|
||||||
// which work with FrostFS.
|
|
||||||
type FrostFS struct {
|
|
||||||
pool *pool.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewFrostFS creates new FrostFS using provided pool.Pool.
|
|
||||||
func NewFrostFS(p *pool.Pool) *FrostFS {
|
|
||||||
return &FrostFS{
|
|
||||||
pool: p,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Container implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) Container(ctx context.Context, layerPrm handler.PrmContainer) (*container.Container, error) {
|
|
||||||
prm := pool.PrmContainerGet{
|
|
||||||
ContainerID: layerPrm.ContainerID,
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := x.pool.GetContainer(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleObjectError("read container via connection pool", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateObject implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
|
|
||||||
var prmPut pool.PrmObjectPut
|
|
||||||
prmPut.SetHeader(*prm.Object)
|
|
||||||
prmPut.SetPayload(prm.Payload)
|
|
||||||
prmPut.SetClientCut(prm.ClientCut)
|
|
||||||
prmPut.WithoutHomomorphicHash(prm.WithoutHomomorphicHash)
|
|
||||||
prmPut.SetBufferMaxSize(prm.BufferMaxSize)
|
|
||||||
|
|
||||||
if prm.BearerToken != nil {
|
|
||||||
prmPut.UseBearer(*prm.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
idObj, err := x.pool.PutObject(ctx, prmPut)
|
|
||||||
return idObj, handleObjectError("save object via connection pool", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// wraps io.ReadCloser and transforms Read errors related to access violation
|
|
||||||
// to frostfs.ErrAccessDenied.
|
|
||||||
type payloadReader struct {
|
|
||||||
io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x payloadReader) Read(p []byte) (int, error) {
|
|
||||||
n, err := x.ReadCloser.Read(p)
|
|
||||||
if err != nil && errors.Is(err, io.EOF) {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
return n, handleObjectError("read payload", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadObject implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
|
|
||||||
var prmHead pool.PrmObjectHead
|
|
||||||
prmHead.SetAddress(prm.Address)
|
|
||||||
|
|
||||||
if prm.BearerToken != nil {
|
|
||||||
prmHead.UseBearer(*prm.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := x.pool.HeadObject(ctx, prmHead)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleObjectError("read object header via connection pool", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetObject implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
|
|
||||||
var prmGet pool.PrmObjectGet
|
|
||||||
prmGet.SetAddress(prm.Address)
|
|
||||||
|
|
||||||
if prm.BearerToken != nil {
|
|
||||||
prmGet.UseBearer(*prm.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := x.pool.GetObject(ctx, prmGet)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleObjectError("init full object reading via connection pool", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &handler.Object{
|
|
||||||
Header: res.Header,
|
|
||||||
Payload: res.Payload,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RangeObject implements frostfs.FrostFS interface method.
|
|
||||||
func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
|
|
||||||
var prmRange pool.PrmObjectRange
|
|
||||||
prmRange.SetAddress(prm.Address)
|
|
||||||
prmRange.SetOffset(prm.PayloadRange[0])
|
|
||||||
prmRange.SetLength(prm.PayloadRange[1])
|
|
||||||
|
|
||||||
if prm.BearerToken != nil {
|
|
||||||
prmRange.UseBearer(*prm.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := x.pool.ObjectRange(ctx, prmRange)
|
|
||||||
if err != nil {
|
|
||||||
return nil, handleObjectError("init payload range reading via connection pool", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return payloadReader{&res}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SearchObjects implements frostfs.FrostFS interface method.
func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
    var prmSearch pool.PrmObjectSearch
    prmSearch.SetContainerID(prm.Container)
    prmSearch.SetFilters(prm.Filters)

    if prm.BearerToken != nil {
        prmSearch.UseBearer(*prm.BearerToken)
    }

    res, err := x.pool.SearchObjects(ctx, prmSearch)
    if err != nil {
        return nil, handleObjectError("init object search via connection pool", err)
    }

    return &res, nil
}

// GetEpochDurations implements frostfs.FrostFS interface method.
func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
    networkInfo, err := x.pool.NetworkInfo(ctx)
    if err != nil {
        return nil, err
    }

    res := &utils.EpochDurations{
        CurrentEpoch:  networkInfo.CurrentEpoch(),
        MsPerBlock:    networkInfo.MsPerBlock(),
        BlockPerEpoch: networkInfo.EpochDuration(),
    }

    if res.BlockPerEpoch == 0 {
        return nil, fmt.Errorf("EpochDuration is empty")
    }
    return res, nil
}

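MsPerBlock and BlockPerEpoch together give the wall-clock length of an epoch, which the gateway needs when translating expiration durations into epoch numbers. A rough sketch of that arithmetic, assuming MsPerBlock is int64 and BlockPerEpoch is uint64 as the SDK getters suggest (the helper below is illustrative, not part of this change):

// durationToEpochs converts a wall-clock duration into a number of epochs,
// rounding up so that an object never expires earlier than requested.
func durationToEpochs(d time.Duration, ed *utils.EpochDurations) uint64 {
    msPerEpoch := ed.MsPerBlock * int64(ed.BlockPerEpoch) // epoch length in milliseconds
    if msPerEpoch <= 0 {
        return 0
    }
    epochs := uint64(d.Milliseconds() / msPerEpoch)
    if d.Milliseconds()%msPerEpoch != 0 {
        epochs++
    }
    return epochs
}
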
// ResolverFrostFS represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type ResolverFrostFS struct {
    pool *pool.Pool
}

// NewResolverFrostFS creates new ResolverFrostFS using provided pool.Pool.
func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
    return &ResolverFrostFS{pool: p}
}

// SystemDNS implements resolver.FrostFS interface method.
func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
    networkInfo, err := x.pool.NetworkInfo(ctx)
    if err != nil {
        return "", handleObjectError("read network info via client", err)
    }

    domain := networkInfo.RawNetworkParameter("SystemDNS")
    if domain == nil {
        return "", errors.New("system DNS parameter not found or empty")
    }

    return string(domain), nil
}

func handleObjectError(msg string, err error) error {
    if err == nil {
        return nil
    }

    if reason, ok := IsErrObjectAccessDenied(err); ok {
        return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
    }

    if IsTimeoutError(err) {
        return fmt.Errorf("%s: %w: %s", msg, handler.ErrGatewayTimeout, err.Error())
    }

    return fmt.Errorf("%s: %w", msg, err)
}

func UnwrapErr(err error) error {
    unwrappedErr := errors.Unwrap(err)
    for unwrappedErr != nil {
        err = unwrappedErr
        unwrappedErr = errors.Unwrap(err)
    }

    return err
}

func IsErrObjectAccessDenied(err error) (string, bool) {
    err = UnwrapErr(err)
    switch err := err.(type) {
    default:
        return "", false
    case *apistatus.ObjectAccessDenied:
        return err.Reason(), true
    }
}

func IsTimeoutError(err error) bool {
    if strings.Contains(err.Error(), "timeout") ||
        errors.Is(err, context.DeadlineExceeded) {
        return true
    }

    return status.Code(UnwrapErr(err)) == codes.DeadlineExceeded
}
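The three helpers above funnel SDK and gRPC failures into the handler package's sentinel errors, so callers can branch with errors.Is instead of matching strings. A hypothetical mapping on the HTTP side (status constants are the standard fasthttp ones; the function itself is a sketch, not part of this change):

// statusFromError shows how a handler could translate errors produced by
// handleObjectError into HTTP status codes.
func statusFromError(err error) int {
    switch {
    case err == nil:
        return fasthttp.StatusOK
    case errors.Is(err, handler.ErrAccessDenied):
        return fasthttp.StatusForbidden
    case errors.Is(err, handler.ErrGatewayTimeout):
        return fasthttp.StatusGatewayTimeout
    default:
        return fasthttp.StatusBadRequest
    }
}
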
internal/frostfs/frostfsid/frostfsid.go (new file, 87 lines)

@ -0,0 +1,87 @@
package frostfsid

import (
    "context"
    "fmt"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/nspcc-dev/neo-go/pkg/rpcclient"
    "github.com/nspcc-dev/neo-go/pkg/util"
    "github.com/nspcc-dev/neo-go/pkg/wallet"
)

type FrostFSID struct {
    cli *client.Client
}

type Config struct {
    // RPCAddress is an endpoint to connect to neo rpc.
    RPCAddress string

    // Contract is hash of contract or its name in NNS.
    Contract string

    // Key is used to interact with frostfsid contract.
    // If this is nil then a random key will be generated.
    Key *keys.PrivateKey
}

// New creates a new FrostfsID contract wrapper that implements auth.FrostFSID interface.
func New(ctx context.Context, cfg Config) (*FrostFSID, error) {
    contractHash, err := fetchContractHash(cfg)
    if err != nil {
        return nil, fmt.Errorf("resolve frostfs contract hash: %w", err)
    }

    key := cfg.Key
    if key == nil {
        if key, err = keys.NewPrivateKey(); err != nil {
            return nil, fmt.Errorf("generate anon private key for frostfsid: %w", err)
        }
    }

    rpcCli, err := rpcclient.New(ctx, cfg.RPCAddress, rpcclient.Options{})
    if err != nil {
        return nil, fmt.Errorf("init rpc client: %w", err)
    }

    cli, err := client.New(rpcCli, wallet.NewAccountFromPrivateKey(key), contractHash, nil)
    if err != nil {
        return nil, fmt.Errorf("init frostfsid client: %w", err)
    }

    return &FrostFSID{
        cli: cli,
    }, nil
}

func (f *FrostFSID) ValidatePublicKey(key *keys.PublicKey) error {
    _, err := f.cli.GetSubjectByKey(key)
    return err
}

func fetchContractHash(cfg Config) (util.Uint160, error) {
    if hash, err := util.Uint160DecodeStringLE(cfg.Contract); err == nil {
        return hash, nil
    }

    splitName := strings.Split(cfg.Contract, ".")
    if len(splitName) != 2 {
        return util.Uint160{}, fmt.Errorf("invalid contract name: '%s'", cfg.Contract)
    }

    var domain container.Domain
    domain.SetName(splitName[0])
    domain.SetZone(splitName[1])

    var nns ns.NNS
    if err := nns.Dial(cfg.RPCAddress); err != nil {
        return util.Uint160{}, fmt.Errorf("dial nns %s: %w", cfg.RPCAddress, err)
    }

    return nns.ResolveContractHash(domain)
}
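For context, a hypothetical wiring of the wrapper above. The RPC endpoint, contract name and key variable are placeholders, not values taken from this change; Contract may be either a LE-encoded script hash or a two-part NNS name, as fetchContractHash shows:

// newFrostFSIDFromConfig is an illustrative helper, not part of this change.
func newFrostFSIDFromConfig(ctx context.Context, pubKey *keys.PublicKey) (*frostfsid.FrostFSID, error) {
    cfg := frostfsid.Config{
        RPCAddress: "http://morph-chain.example:30333", // placeholder neo RPC endpoint
        Contract:   "frostfsid.frostfs",                // NNS name, resolved via fetchContractHash
        // Key is left nil, so New generates a random (anonymous) private key.
    }

    ffsid, err := frostfsid.New(ctx, cfg)
    if err != nil {
        return nil, fmt.Errorf("init frostfsid: %w", err)
    }

    // ValidatePublicKey succeeds only if the key is registered as a frostfsid subject.
    if err := ffsid.ValidatePublicKey(pubKey); err != nil {
        return nil, fmt.Errorf("public key is not a frostfsid subject: %w", err)
    }

    return ffsid, nil
}
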
@ -35,6 +35,30 @@ func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
    return res
}

type GetSubTreeResponseBodyWrapper struct {
    response *grpcService.GetSubTreeResponse_Body
}

func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
    return n.response.GetNodeId()
}

func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
    return n.response.GetParentId()
}

func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
    return n.response.GetTimestamp()
}

func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
    res := make([]tree.Meta, len(n.response.Meta))
    for i, value := range n.response.Meta {
        res[i] = value
    }
    return res
}

type PoolWrapper struct {
    p *treepool.Pool
}

@ -14,9 +14,12 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -27,7 +30,7 @@ func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
||||||
var id oid.ID
|
var id oid.ID
|
||||||
err := id.DecodeString(test)
|
err := id.DecodeString(test)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.byObjectName(c, h.receiveFile)
|
h.byBucketname(c, h.receiveFile)
|
||||||
} else {
|
} else {
|
||||||
h.byAddress(c, h.receiveFile)
|
h.byAddress(c, h.receiveFile)
|
||||||
}
|
}
|
||||||
|
@ -45,20 +48,26 @@ func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
|
||||||
h.byAttribute(c, h.receiveFile)
|
h.byAttribute(c, h.receiveFile)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) search(ctx context.Context, cnrID *cid.ID, key, val string, op object.SearchMatchType) (ResObjectSearch, error) {
|
func (h *Handler) search(ctx context.Context, cid *cid.ID, key, val string, op object.SearchMatchType) (pool.ResObjectSearch, error) {
|
||||||
filters := object.NewSearchFilters()
|
filters := object.NewSearchFilters()
|
||||||
filters.AddRootFilter()
|
filters.AddRootFilter()
|
||||||
filters.AddFilter(key, val, op)
|
filters.AddFilter(key, val, op)
|
||||||
|
|
||||||
prm := PrmObjectSearch{
|
var prm pool.PrmObjectSearch
|
||||||
PrmAuth: PrmAuth{
|
prm.SetContainerID(*cid)
|
||||||
BearerToken: bearerToken(ctx),
|
prm.SetFilters(filters)
|
||||||
},
|
if btoken := bearerToken(ctx); btoken != nil {
|
||||||
Container: *cnrID,
|
prm.UseBearer(*btoken)
|
||||||
Filters: filters,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return h.frostfs.SearchObjects(ctx, prm)
|
return h.pool.SearchObjects(ctx, prm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) getContainer(ctx context.Context, cnrID cid.ID) (container.Container, error) {
|
||||||
|
var prm pool.PrmContainerGet
|
||||||
|
prm.SetContainerID(cnrID)
|
||||||
|
|
||||||
|
return h.pool.GetContainer(ctx, prm)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
|
func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
|
||||||
|
@ -82,26 +91,32 @@ func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer,
|
||||||
// DownloadZipped handles zip by prefix requests.
|
// DownloadZipped handles zip by prefix requests.
|
||||||
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
|
func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
|
||||||
scid, _ := c.UserValue("cid").(string)
|
scid, _ := c.UserValue("cid").(string)
|
||||||
prefix, _ := c.UserValue("prefix").(string)
|
prefix, _ := url.QueryUnescape(c.UserValue("prefix").(string))
|
||||||
|
log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix))
|
||||||
prefix, err := url.QueryUnescape(prefix)
|
|
||||||
if err != nil {
|
|
||||||
h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()), zap.Error(err))
|
|
||||||
response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log := h.log.With(zap.String("cid", scid), zap.String("prefix", prefix), zap.Uint64("id", c.ID()))
|
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
containerID, err := h.getContainerID(ctx, scid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logAndSendBucketError(c, log, err)
|
log.Error(logs.WrongContainerID, zap.Error(err))
|
||||||
|
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
resSearch, err := h.search(ctx, &bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
|
// check if the container exists here to be able to return a 404 error;
// otherwise we would get this error only at the object iteration step
// and the client would get 200 OK.
|
||||||
|
if _, err = h.getContainer(ctx, *containerID); err != nil {
|
||||||
|
log.Error(logs.CouldNotCheckContainerExistence, zap.Error(err))
|
||||||
|
if client.IsErrContainerNotFound(err) {
|
||||||
|
response.Error(c, "Not Found", fasthttp.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response.Error(c, "could not check container existence: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resSearch, err := h.search(ctx, containerID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
||||||
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
@ -123,7 +138,7 @@ func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
|
||||||
empty := true
|
empty := true
|
||||||
called := false
|
called := false
|
||||||
btoken := bearerToken(ctx)
|
btoken := bearerToken(ctx)
|
||||||
addr.SetContainer(bktInfo.CID)
|
addr.SetContainer(*containerID)
|
||||||
|
|
||||||
errIter := resSearch.Iterate(func(id oid.ID) bool {
|
errIter := resSearch.Iterate(func(id oid.ID) bool {
|
||||||
called = true
|
called = true
|
||||||
|
@ -153,14 +168,13 @@ func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
|
func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
|
||||||
prm := PrmObjectGet{
|
var prm pool.PrmObjectGet
|
||||||
PrmAuth: PrmAuth{
|
prm.SetAddress(addr)
|
||||||
BearerToken: btoken,
|
if btoken != nil {
|
||||||
},
|
prm.UseBearer(*btoken)
|
||||||
Address: addr,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resGet, err := h.frostfs.GetObject(ctx, prm)
|
resGet, err := h.pool.GetObject(ctx, prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("get FrostFS object: %v", err)
|
return fmt.Errorf("get FrostFS object: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,278 +0,0 @@
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
)
|
|
||||||
|
|
||||||
type TestFrostFS struct {
|
|
||||||
objects map[string]*object.Object
|
|
||||||
containers map[string]*container.Container
|
|
||||||
accessList map[string]bool
|
|
||||||
key *keys.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
|
|
||||||
return &TestFrostFS{
|
|
||||||
objects: make(map[string]*object.Object),
|
|
||||||
containers: make(map[string]*container.Container),
|
|
||||||
accessList: make(map[string]bool),
|
|
||||||
key: key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) ContainerID(name string) (*cid.ID, error) {
|
|
||||||
for id, cnr := range t.containers {
|
|
||||||
if container.Name(*cnr) == name {
|
|
||||||
var cnrID cid.ID
|
|
||||||
return &cnrID, cnrID.DecodeString(id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
|
|
||||||
t.containers[cnrID.EncodeToString()] = cnr
|
|
||||||
}
|
|
||||||
|
|
||||||
// AllowUserOperation grants access to object operations.
|
|
||||||
// Empty userID and objID means any user and object respectively.
|
|
||||||
func (t *TestFrostFS) AllowUserOperation(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) {
|
|
||||||
t.accessList[fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID)] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
|
|
||||||
for k, v := range t.containers {
|
|
||||||
if k == prm.ContainerID.EncodeToString() {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("container not found %s", prm.ContainerID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) requestOwner(btoken *bearer.Token) user.ID {
|
|
||||||
if btoken != nil {
|
|
||||||
return bearer.ResolveIssuer(*btoken)
|
|
||||||
}
|
|
||||||
|
|
||||||
var owner user.ID
|
|
||||||
user.IDFromKey(&owner, t.key.PrivateKey.PublicKey)
|
|
||||||
return owner
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) retrieveObject(addr oid.Address, btoken *bearer.Token) (*object.Object, error) {
|
|
||||||
sAddr := addr.EncodeToString()
|
|
||||||
|
|
||||||
if obj, ok := t.objects[sAddr]; ok {
|
|
||||||
owner := t.requestOwner(btoken)
|
|
||||||
|
|
||||||
if !t.isAllowed(addr.Container(), owner, acl.OpObjectGet, addr.Object()) {
|
|
||||||
return nil, ErrAccessDenied
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) HeadObject(_ context.Context, prm PrmObjectHead) (*object.Object, error) {
|
|
||||||
return t.retrieveObject(prm.Address, prm.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) GetObject(_ context.Context, prm PrmObjectGet) (*Object, error) {
|
|
||||||
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Object{
|
|
||||||
Header: *obj,
|
|
||||||
Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) RangeObject(_ context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
|
|
||||||
obj, err := t.retrieveObject(prm.Address, prm.BearerToken)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
off := prm.PayloadRange[0]
|
|
||||||
payload := obj.Payload()[off : off+prm.PayloadRange[1]]
|
|
||||||
return io.NopCloser(bytes.NewReader(payload)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
|
|
||||||
b := make([]byte, 32)
|
|
||||||
if _, err := io.ReadFull(rand.Reader, b); err != nil {
|
|
||||||
return oid.ID{}, err
|
|
||||||
}
|
|
||||||
var id oid.ID
|
|
||||||
id.SetSHA256(sha256.Sum256(b))
|
|
||||||
prm.Object.SetID(id)
|
|
||||||
|
|
||||||
attrs := prm.Object.Attributes()
|
|
||||||
if prm.ClientCut {
|
|
||||||
a := object.NewAttribute()
|
|
||||||
a.SetKey("s3-client-cut")
|
|
||||||
a.SetValue("true")
|
|
||||||
attrs = append(attrs, *a)
|
|
||||||
}
|
|
||||||
|
|
||||||
prm.Object.SetAttributes(attrs...)
|
|
||||||
|
|
||||||
if prm.Payload != nil {
|
|
||||||
all, err := io.ReadAll(prm.Payload)
|
|
||||||
if err != nil {
|
|
||||||
return oid.ID{}, err
|
|
||||||
}
|
|
||||||
prm.Object.SetPayload(all)
|
|
||||||
prm.Object.SetPayloadSize(uint64(len(all)))
|
|
||||||
var hash checksum.Checksum
|
|
||||||
checksum.Calculate(&hash, checksum.SHA256, all)
|
|
||||||
prm.Object.SetPayloadChecksum(hash)
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrID, _ := prm.Object.ContainerID()
|
|
||||||
objID, _ := prm.Object.ID()
|
|
||||||
|
|
||||||
owner := t.requestOwner(prm.BearerToken)
|
|
||||||
|
|
||||||
if !t.isAllowed(cnrID, owner, acl.OpObjectPut, objID) {
|
|
||||||
return oid.ID{}, ErrAccessDenied
|
|
||||||
}
|
|
||||||
|
|
||||||
addr := newAddress(cnrID, objID)
|
|
||||||
t.objects[addr.EncodeToString()] = prm.Object
|
|
||||||
return objID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type resObjectSearchMock struct {
|
|
||||||
res []oid.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *resObjectSearchMock) Read(buf []oid.ID) (int, error) {
|
|
||||||
for i := range buf {
|
|
||||||
if i > len(r.res)-1 {
|
|
||||||
return len(r.res), io.EOF
|
|
||||||
}
|
|
||||||
buf[i] = r.res[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
r.res = r.res[len(buf):]
|
|
||||||
|
|
||||||
return len(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *resObjectSearchMock) Iterate(f func(oid.ID) bool) error {
|
|
||||||
for _, id := range r.res {
|
|
||||||
if f(id) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *resObjectSearchMock) Close() {}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) (ResObjectSearch, error) {
|
|
||||||
if !t.isAllowed(prm.Container, t.requestOwner(prm.BearerToken), acl.OpObjectSearch, oid.ID{}) {
|
|
||||||
return nil, ErrAccessDenied
|
|
||||||
}
|
|
||||||
|
|
||||||
cidStr := prm.Container.EncodeToString()
|
|
||||||
var res []oid.ID
|
|
||||||
|
|
||||||
if len(prm.Filters) == 1 { // match root filter
|
|
||||||
for k, v := range t.objects {
|
|
||||||
if strings.Contains(k, cidStr) {
|
|
||||||
id, _ := v.ID()
|
|
||||||
res = append(res, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &resObjectSearchMock{res: res}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
filter := prm.Filters[1]
|
|
||||||
if len(prm.Filters) != 2 ||
|
|
||||||
filter.Operation() != object.MatchCommonPrefix && filter.Operation() != object.MatchStringEqual {
|
|
||||||
return nil, fmt.Errorf("unsupported filters")
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range t.objects {
|
|
||||||
if strings.Contains(k, cidStr) && isMatched(v.Attributes(), filter) {
|
|
||||||
id, _ := v.ID()
|
|
||||||
res = append(res, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resObjectSearchMock{res: res}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func isMatched(attributes []object.Attribute, filter object.SearchFilter) bool {
|
|
||||||
for _, attr := range attributes {
|
|
||||||
if attr.Key() == filter.Header() {
|
|
||||||
switch filter.Operation() {
|
|
||||||
case object.MatchStringEqual:
|
|
||||||
return attr.Value() == filter.Value()
|
|
||||||
case object.MatchCommonPrefix:
|
|
||||||
return strings.HasPrefix(attr.Value(), filter.Value())
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) GetEpochDurations(context.Context) (*utils.EpochDurations, error) {
|
|
||||||
return &utils.EpochDurations{
|
|
||||||
CurrentEpoch: 10,
|
|
||||||
MsPerBlock: 1000,
|
|
||||||
BlockPerEpoch: 100,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TestFrostFS) isAllowed(cnrID cid.ID, userID user.ID, op acl.Op, objID oid.ID) bool {
|
|
||||||
keysToCheck := []string{
|
|
||||||
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, objID),
|
|
||||||
fmt.Sprintf("%s/%s/%s/%s", cnrID, userID, op, oid.ID{}),
|
|
||||||
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, objID),
|
|
||||||
fmt.Sprintf("%s/%s/%s/%s", cnrID, user.ID{}, op, oid.ID{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range keysToCheck {
|
|
||||||
if t.accessList[key] {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
|
|
||||||
var addr oid.Address
|
|
||||||
addr.SetContainer(cnr)
|
|
||||||
addr.SetObject(obj)
|
|
||||||
return addr
|
|
||||||
}
|
|
|
@ -3,24 +3,18 @@ package handler
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
|
||||||
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
@ -29,152 +23,39 @@ import (
|
||||||
type Config interface {
|
type Config interface {
|
||||||
DefaultTimestamp() bool
|
DefaultTimestamp() bool
|
||||||
ZipCompression() bool
|
ZipCompression() bool
|
||||||
ClientCut() bool
|
|
||||||
BufferMaxSizeForPut() uint64
|
|
||||||
NamespaceHeader() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmContainer groups parameters of FrostFS.Container operation.
|
|
||||||
type PrmContainer struct {
|
|
||||||
// Container identifier.
|
|
||||||
ContainerID cid.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmAuth groups authentication parameters for the FrostFS operation.
|
|
||||||
type PrmAuth struct {
|
|
||||||
// Bearer token to be used for the operation. Overlaps PrivateKey. Optional.
|
|
||||||
BearerToken *bearer.Token
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmObjectHead groups parameters of FrostFS.HeadObject operation.
|
|
||||||
type PrmObjectHead struct {
|
|
||||||
// Authentication parameters.
|
|
||||||
PrmAuth
|
|
||||||
|
|
||||||
// Address to read the object header from.
|
|
||||||
Address oid.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmObjectGet groups parameters of FrostFS.GetObject operation.
|
|
||||||
type PrmObjectGet struct {
|
|
||||||
// Authentication parameters.
|
|
||||||
PrmAuth
|
|
||||||
|
|
||||||
// Address to read the object header from.
|
|
||||||
Address oid.Address
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmObjectRange groups parameters of FrostFS.RangeObject operation.
|
|
||||||
type PrmObjectRange struct {
|
|
||||||
// Authentication parameters.
|
|
||||||
PrmAuth
|
|
||||||
|
|
||||||
// Address to read the object header from.
|
|
||||||
Address oid.Address
|
|
||||||
|
|
||||||
// Offset-length range of the object payload to be read.
|
|
||||||
PayloadRange [2]uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object represents FrostFS object.
|
|
||||||
type Object struct {
|
|
||||||
// Object header (doesn't contain payload).
|
|
||||||
Header object.Object
|
|
||||||
|
|
||||||
// Object payload part encapsulated in io.Reader primitive.
|
|
||||||
// Returns ErrAccessDenied on read access violation.
|
|
||||||
Payload io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmObjectCreate groups parameters of FrostFS.CreateObject operation.
|
|
||||||
type PrmObjectCreate struct {
|
|
||||||
// Authentication parameters.
|
|
||||||
PrmAuth
|
|
||||||
|
|
||||||
Object *object.Object
|
|
||||||
|
|
||||||
// Object payload encapsulated in io.Reader primitive.
|
|
||||||
Payload io.Reader
|
|
||||||
|
|
||||||
// Enables client side object preparing.
|
|
||||||
ClientCut bool
|
|
||||||
|
|
||||||
// Disables using Tillich-Zémor hash for payload.
|
|
||||||
WithoutHomomorphicHash bool
|
|
||||||
|
|
||||||
// Sets max buffer size to read payload.
|
|
||||||
BufferMaxSize uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrmObjectSearch groups parameters of FrostFS.SearchObjects operation.
|
|
||||||
type PrmObjectSearch struct {
|
|
||||||
// Authentication parameters.
|
|
||||||
PrmAuth
|
|
||||||
|
|
||||||
// Container to select the objects from.
|
|
||||||
Container cid.ID
|
|
||||||
|
|
||||||
Filters object.SearchFilters
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResObjectSearch interface {
|
|
||||||
Read(buf []oid.ID) (int, error)
|
|
||||||
Iterate(f func(oid.ID) bool) error
|
|
||||||
Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrAccessDenied is returned from FrostFS in case of access violation.
|
|
||||||
ErrAccessDenied = errors.New("access denied")
|
|
||||||
// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
|
|
||||||
ErrGatewayTimeout = errors.New("gateway timeout")
|
|
||||||
)
|
|
||||||
|
|
||||||
// FrostFS represents virtual connection to FrostFS network.
|
|
||||||
type FrostFS interface {
|
|
||||||
Container(context.Context, PrmContainer) (*container.Container, error)
|
|
||||||
HeadObject(context.Context, PrmObjectHead) (*object.Object, error)
|
|
||||||
GetObject(context.Context, PrmObjectGet) (*Object, error)
|
|
||||||
RangeObject(context.Context, PrmObjectRange) (io.ReadCloser, error)
|
|
||||||
CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
|
|
||||||
SearchObjects(context.Context, PrmObjectSearch) (ResObjectSearch, error)
|
|
||||||
utils.EpochInfoFetcher
|
|
||||||
}
|
|
||||||
|
|
||||||
type ContainerResolver interface {
|
|
||||||
Resolve(ctx context.Context, name string) (*cid.ID, error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Handler struct {
|
type Handler struct {
|
||||||
log *zap.Logger
|
log *zap.Logger
|
||||||
frostfs FrostFS
|
pool *pool.Pool
|
||||||
ownerID *user.ID
|
ownerID *user.ID
|
||||||
config Config
|
config Config
|
||||||
containerResolver ContainerResolver
|
containerResolver *resolver.ContainerResolver
|
||||||
tree *tree.Tree
|
tree *tree.Tree
|
||||||
cache *cache.BucketCache
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type AppParams struct {
|
func New(params *utils.AppParams, config Config, tree *tree.Tree) *Handler {
|
||||||
Logger *zap.Logger
|
|
||||||
FrostFS FrostFS
|
|
||||||
Owner *user.ID
|
|
||||||
Resolver ContainerResolver
|
|
||||||
Cache *cache.BucketCache
|
|
||||||
}
|
|
||||||
|
|
||||||
func New(params *AppParams, config Config, tree *tree.Tree) *Handler {
|
|
||||||
return &Handler{
|
return &Handler{
|
||||||
log: params.Logger,
|
log: params.Logger,
|
||||||
frostfs: params.FrostFS,
|
pool: params.Pool,
|
||||||
ownerID: params.Owner,
|
ownerID: params.Owner,
|
||||||
config: config,
|
config: config,
|
||||||
containerResolver: params.Resolver,
|
containerResolver: params.Resolver,
|
||||||
tree: tree,
|
tree: tree,
|
||||||
cache: params.Cache,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getContainerID decodes the container id; if it is not a valid container id,
// it then tries to resolve the name using the provided resolver.
|
||||||
|
func (h *Handler) getContainerID(ctx context.Context, containerID string) (*cid.ID, error) {
|
||||||
|
cnrID := new(cid.ID)
|
||||||
|
err := cnrID.DecodeString(containerID)
|
||||||
|
if err != nil {
|
||||||
|
cnrID, err = h.containerResolver.Resolve(ctx, containerID)
|
||||||
|
}
|
||||||
|
return cnrID, err
|
||||||
|
}
|
||||||
|
|
||||||
// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
// byAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
||||||
// prepares request and object address to it.
|
// prepares request and object address to it.
|
||||||
func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
||||||
|
@ -186,9 +67,10 @@ func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, requ
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
|
cnrID, err := h.getContainerID(ctx, idCnr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logAndSendBucketError(c, log, err)
|
log.Error(logs.WrongContainerID, zap.Error(err))
|
||||||
|
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -200,43 +82,33 @@ func (h *Handler) byAddress(c *fasthttp.RequestCtx, f func(context.Context, requ
|
||||||
}
|
}
|
||||||
|
|
||||||
var addr oid.Address
|
var addr oid.Address
|
||||||
addr.SetContainer(bktInfo.CID)
|
addr.SetContainer(*cnrID)
|
||||||
addr.SetObject(*objID)
|
addr.SetObject(*objID)
|
||||||
|
|
||||||
f(ctx, *h.newRequest(c, log), addr)
|
f(ctx, *h.newRequest(c, log), addr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// byObjectName is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
// byBucketname is a wrapper for function (e.g. request.headObject, request.receiveFile) that
|
||||||
// prepares request and object address to it.
|
// prepares request and object address to it.
|
||||||
func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
func (h *Handler) byBucketname(req *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
||||||
var (
|
var (
|
||||||
bucketname = req.UserValue("cid").(string)
|
bucketname = req.UserValue("cid").(string)
|
||||||
key = req.UserValue("oid").(string)
|
key = req.UserValue("oid").(string)
|
||||||
log = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
|
log = h.log.With(zap.String("bucketname", bucketname), zap.String("key", key))
|
||||||
)
|
)
|
||||||
|
|
||||||
unescapedKey, err := url.QueryUnescape(key)
|
|
||||||
if err != nil {
|
|
||||||
logAndSendBucketError(req, log, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(req)
|
ctx := utils.GetContextFromRequest(req)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
|
cnrID, err := h.getContainerID(ctx, bucketname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logAndSendBucketError(req, log, err)
|
log.Error(logs.WrongContainerID, zap.Error(err))
|
||||||
|
response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
|
foundOid, err := h.tree.GetLatestVersion(ctx, cnrID, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, tree.ErrNodeAccessDenied) {
|
log.Error(logs.ObjectWasntFound, zap.Error(err))
|
||||||
response.Error(req, "Access Denied", fasthttp.StatusForbidden)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Error(logs.GetLatestObjectVersion, zap.Error(err))
|
|
||||||
|
|
||||||
response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
|
response.Error(req, "object wasn't found", fasthttp.StatusNotFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -247,7 +119,7 @@ func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context,
|
||||||
}
|
}
|
||||||
|
|
||||||
var addr oid.Address
|
var addr oid.Address
|
||||||
addr.SetContainer(bktInfo.CID)
|
addr.SetContainer(*cnrID)
|
||||||
addr.SetObject(foundOid.OID)
|
addr.SetObject(foundOid.OID)
|
||||||
|
|
||||||
f(ctx, *h.newRequest(req, log), addr)
|
f(ctx, *h.newRequest(req, log), addr)
|
||||||
|
@ -255,35 +127,23 @@ func (h *Handler) byObjectName(req *fasthttp.RequestCtx, f func(context.Context,
|
||||||
|
|
||||||
// byAttribute is a wrapper similar to byAddress.
|
// byAttribute is a wrapper similar to byAddress.
|
||||||
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
|
||||||
scid, _ := c.UserValue("cid").(string)
|
var (
|
||||||
key, _ := c.UserValue("attr_key").(string)
|
scid, _ = c.UserValue("cid").(string)
|
||||||
val, _ := c.UserValue("attr_val").(string)
|
key, _ = url.QueryUnescape(c.UserValue("attr_key").(string))
|
||||||
|
val, _ = url.QueryUnescape(c.UserValue("attr_val").(string))
|
||||||
key, err := url.QueryUnescape(key)
|
log = h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
|
||||||
if err != nil {
|
)
|
||||||
h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Uint64("id", c.ID()), zap.Error(err))
|
|
||||||
response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
val, err = url.QueryUnescape(val)
|
|
||||||
if err != nil {
|
|
||||||
h.log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Uint64("id", c.ID()), zap.Error(err))
|
|
||||||
response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
log := h.log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))
|
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(c)
|
ctx := utils.GetContextFromRequest(c)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
containerID, err := h.getContainerID(ctx, scid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logAndSendBucketError(c, log, err)
|
log.Error(logs.WrongContainerID, zap.Error(err))
|
||||||
|
response.Error(c, "wrong container id", fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := h.search(ctx, &bktInfo.CID, key, val, object.MatchStringEqual)
|
res, err := h.search(ctx, containerID, key, val, object.MatchStringEqual)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
|
||||||
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
|
@ -308,74 +168,8 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, re
|
||||||
}
|
}
|
||||||
|
|
||||||
var addrObj oid.Address
|
var addrObj oid.Address
|
||||||
addrObj.SetContainer(bktInfo.CID)
|
addrObj.SetContainer(*containerID)
|
||||||
addrObj.SetObject(buf[0])
|
addrObj.SetObject(buf[0])
|
||||||
|
|
||||||
f(ctx, *h.newRequest(c, log), addrObj)
|
f(ctx, *h.newRequest(c, log), addrObj)
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveContainer decodes the container id; if it is not a valid container id,
// it then tries to resolve the name using the provided resolver.
|
|
||||||
func (h *Handler) resolveContainer(ctx context.Context, containerID string) (*cid.ID, error) {
|
|
||||||
cnrID := new(cid.ID)
|
|
||||||
err := cnrID.DecodeString(containerID)
|
|
||||||
if err != nil {
|
|
||||||
cnrID, err = h.containerResolver.Resolve(ctx, containerID)
|
|
||||||
if err != nil && strings.Contains(err.Error(), "not found") {
|
|
||||||
err = fmt.Errorf("%w: %s", new(apistatus.ContainerNotFound), err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cnrID, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *zap.Logger) (*data.BucketInfo, error) {
|
|
||||||
ns, err := middleware.GetNamespace(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if bktInfo := h.cache.Get(ns, containerName); bktInfo != nil {
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrID, err := h.resolveContainer(ctx, containerName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo, err := h.readContainer(ctx, *cnrID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = h.cache.Put(bktInfo); err != nil {
|
|
||||||
log.Warn(logs.CouldntPutBucketIntoCache,
|
|
||||||
zap.String("bucket name", bktInfo.Name),
|
|
||||||
zap.Stringer("bucket cid", bktInfo.CID),
|
|
||||||
zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.BucketInfo, error) {
|
|
||||||
prm := PrmContainer{ContainerID: cnrID}
|
|
||||||
res, err := h.frostfs.Container(ctx, prm)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("get frostfs container '%s': %w", cnrID.String(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo := &data.BucketInfo{
|
|
||||||
CID: cnrID,
|
|
||||||
Name: cnrID.EncodeToString(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if domain := container.ReadDomain(*res); domain.Name() != "" {
|
|
||||||
bktInfo.Name = domain.Name()
|
|
||||||
bktInfo.Zone = domain.Zone()
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
|
|
||||||
|
|
||||||
return bktInfo, err
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,580 +0,0 @@
|
||||||
//go:build gofuzz
|
|
||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
|
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
|
||||||
go_fuzz_utils "github.com/trailofbits/go-fuzz-utils"
|
|
||||||
"github.com/valyala/fasthttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
fuzzSuccessExitCode = 0
|
|
||||||
fuzzFailExitCode = -1
|
|
||||||
)
|
|
||||||
|
|
||||||
func prepareStrings(tp *go_fuzz_utils.TypeProvider, count int) ([]string, error) {
|
|
||||||
array := make([]string, count)
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
err = tp.Reset()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
array[i], err = tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return array, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareBools(tp *go_fuzz_utils.TypeProvider, count int) ([]bool, error) {
|
|
||||||
array := make([]bool, count)
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
err = tp.Reset()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
array[i], err = tp.GetBool()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return array, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getRandomDeterministicPositiveIntInRange(tp *go_fuzz_utils.TypeProvider, max int) (int, error) {
|
|
||||||
count, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
count = count % max
|
|
||||||
if count < 0 {
|
|
||||||
count += max
|
|
||||||
}
|
|
||||||
return count, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func generateHeaders(tp *go_fuzz_utils.TypeProvider, r *fasthttp.Request, params []string) error {
|
|
||||||
count, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
count = count % len(params)
|
|
||||||
if count < 0 {
|
|
||||||
count += len(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < count; i++ {
|
|
||||||
position, err := tp.GetInt()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
position = position % len(params)
|
|
||||||
if position < 0 {
|
|
||||||
position += len(params)
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Header.Set(params[position], v)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func maybeFillRandom(tp *go_fuzz_utils.TypeProvider, initValue string) (string, error) {
|
|
||||||
rnd, err := tp.GetBool()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if rnd == true {
|
|
||||||
initValue, err = tp.GetString()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return initValue, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func upload(tp *go_fuzz_utils.TypeProvider) (context.Context, *handlerContext, cid.ID, *fasthttp.RequestCtx, string, string, string, error) {
|
|
||||||
hc, err := prepareHandlerContext()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
aclList := []acl.Basic{
|
|
||||||
acl.Private,
|
|
||||||
acl.PrivateExtended,
|
|
||||||
acl.PublicRO,
|
|
||||||
acl.PublicROExtended,
|
|
||||||
acl.PublicRW,
|
|
||||||
acl.PublicRWExtended,
|
|
||||||
acl.PublicAppend,
|
|
||||||
acl.PublicAppendExtended,
|
|
||||||
}
|
|
||||||
|
|
||||||
pos, err := getRandomDeterministicPositiveIntInRange(tp, len(aclList))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
acl := aclList[pos]
|
|
||||||
|
|
||||||
strings, err := prepareStrings(tp, 6)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
bktName := strings[0]
|
|
||||||
objFileName := strings[1]
|
|
||||||
valAttr := strings[2]
|
|
||||||
keyAttr := strings[3]
|
|
||||||
|
|
||||||
if len(bktName) == 0 {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", errors.New("not enough buckets")
|
|
||||||
}
|
|
||||||
|
|
||||||
cnrID, cnr, err := hc.prepareContainer(bktName, acl)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
hc.frostfs.SetContainer(cnrID, cnr)
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
ctx = middleware.SetNamespace(ctx, "")
|
|
||||||
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
r.SetUserValue("cid", cnrID.EncodeToString())
|
|
||||||
|
|
||||||
attributes := map[string]string{
|
|
||||||
object.AttributeFileName: objFileName,
|
|
||||||
keyAttr: valAttr,
|
|
||||||
}
|
|
||||||
|
|
||||||
var buff bytes.Buffer
|
|
||||||
w := multipart.NewWriter(&buff)
|
|
||||||
fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
content, err := tp.GetBytes()
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(fw, bytes.NewReader(content)); err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = w.Close(); err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.Request.SetBodyStream(&buff, buff.Len())
|
|
||||||
r.Request.Header.Set("Content-Type", w.FormDataContentType())
|
|
||||||
r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)
|
|
||||||
|
|
||||||
err = generateHeaders(tp, &r.Request, []string{"X-Attribute-", "X-Attribute-DupKey", "X-Attribute-MyAttribute", "X-Attribute-System-DupKey", "X-Attribute-System-Expiration-Epoch1", "X-Attribute-SYSTEM-Expiration-Epoch2", "X-Attribute-system-Expiration-Epoch3", "X-Attribute-User-Attribute", "X-Attribute-", "X-Attribute-FileName", "X-Attribute-FROSTFS", "X-Attribute-neofs", "X-Attribute-SYSTEM", "X-Attribute-System-Expiration-Duration", "X-Attribute-System-Expiration-Epoch", "X-Attribute-System-Expiration-RFC3339", "X-Attribute-System-Expiration-Timestamp", "X-Attribute-Timestamp", "X-Attribute-" + strings[4], "X-Attribute-System-" + strings[5]})
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
hc.Handler().Upload(r)
|
|
||||||
|
|
||||||
if r.Response.StatusCode() != http.StatusOK {
|
|
||||||
return nil, nil, cid.ID{}, nil, "", "", "", errors.New("error on upload")
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctx, hc, cnrID, r, objFileName, keyAttr, valAttr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func InitFuzzUpload() {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func DoFuzzUpload(input []byte) int {
|
|
||||||
// FUZZER INIT
|
|
||||||
if len(input) < 100 {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, _, _, _, _, err = upload(tp)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
return fuzzSuccessExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func FuzzUpload(f *testing.F) {
|
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
|
||||||
DoFuzzUpload(data)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func downloadOrHead(tp *go_fuzz_utils.TypeProvider, ctx context.Context, hc *handlerContext, cnrID cid.ID, resp *fasthttp.RequestCtx, filename string) (*fasthttp.RequestCtx, error) {
|
|
||||||
|
|
||||||
var putRes putResponse
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if r := recover(); r != nil {
|
|
||||||
panic(resp)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
data := resp.Response.Body()
|
|
||||||
err := json.Unmarshal(data, &putRes)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
|
|
||||||
attr := object.NewAttribute()
|
|
||||||
attr.SetKey(object.AttributeFilePath)
|
|
||||||
|
|
||||||
filename, err = maybeFillRandom(tp, filename)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
attr.SetValue(filename)
|
|
||||||
obj.SetAttributes(append(obj.Attributes(), *attr)...)
|
|
||||||
|
|
||||||
r := new(fasthttp.RequestCtx)
|
|
||||||
utils.SetContextToRequest(ctx, r)
|
|
||||||
|
|
||||||
cid := cnrID.EncodeToString()
|
|
||||||
cid, err = maybeFillRandom(tp, cid)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
oid := putRes.ObjectID
|
|
||||||
oid, err = maybeFillRandom(tp, oid)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
r.SetUserValue("cid", cid)
|
|
||||||
r.SetUserValue("oid", oid)
|
|
||||||
|
|
||||||
rnd, err := tp.GetBool()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if rnd == true {
|
|
||||||
r.SetUserValue("download", "true")
|
|
||||||
}
|
|
||||||
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func InitFuzzGet() {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func DoFuzzGet(input []byte) int {
|
|
||||||
// FUZZER INIT
|
|
||||||
if len(input) < 100 {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
tp, err := go_fuzz_utils.NewTypeProvider(input)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
|
|
||||||
if err != nil {
|
|
||||||
return fuzzFailExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
hc.Handler().DownloadByAddressOrBucketName(r)
|
|
||||||
|
|
||||||
return fuzzSuccessExitCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func FuzzGet(f *testing.F) {
|
|
||||||
f.Fuzz(func(t *testing.T, data []byte) {
|
|
||||||
DoFuzzGet(data)
|
|
||||||
})
|
|
||||||
}
|
|
func InitFuzzHead() {
}

func DoFuzzHead(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, resp, filename, _, _, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	r, err := downloadOrHead(tp, ctx, hc, cnrID, resp, filename)
	if err != nil {
		return fuzzFailExitCode
	}

	hc.Handler().HeadByAddressOrBucketName(r)

	return fuzzSuccessExitCode
}

func FuzzHead(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzHead(data)
	})
}

func InitFuzzDownloadByAttribute() {
}

func DoFuzzDownloadByAttribute(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	attrKey, err = maybeFillRandom(tp, attrKey)
	if err != nil {
		return fuzzFailExitCode
	}

	attrVal, err = maybeFillRandom(tp, attrVal)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("attr_key", attrKey)
	r.SetUserValue("attr_val", attrVal)

	hc.Handler().DownloadByAttribute(r)

	return fuzzSuccessExitCode
}

func FuzzDownloadByAttribute(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDownloadByAttribute(data)
	})
}

func InitFuzzHeadByAttribute() {
}

func DoFuzzHeadByAttribute(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, attrKey, attrVal, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	attrKey, err = maybeFillRandom(tp, attrKey)
	if err != nil {
		return fuzzFailExitCode
	}

	attrVal, err = maybeFillRandom(tp, attrVal)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("attr_key", attrKey)
	r.SetUserValue("attr_val", attrVal)

	hc.Handler().HeadByAttribute(r)

	return fuzzSuccessExitCode
}

func FuzzHeadByAttribute(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzHeadByAttribute(data)
	})
}

func InitFuzzDownloadZipped() {
}

func DoFuzzDownloadZipped(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx, hc, cnrID, _, _, _, _, err := upload(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	cid := cnrID.EncodeToString()
	cid, err = maybeFillRandom(tp, cid)
	if err != nil {
		return fuzzFailExitCode
	}

	prefix := ""
	prefix, err = maybeFillRandom(tp, prefix)
	if err != nil {
		return fuzzFailExitCode
	}

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", cid)
	r.SetUserValue("prefix", prefix)

	hc.Handler().DownloadZipped(r)

	return fuzzSuccessExitCode
}

func FuzzDownloadZipped(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDownloadZipped(data)
	})
}
func InitFuzzStoreBearerTokenAppCtx() {
}

func DoFuzzStoreBearerTokenAppCtx(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := go_fuzz_utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	prefix := ""
	prefix, err = maybeFillRandom(tp, prefix)
	if err != nil {
		return fuzzFailExitCode
	}

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)

	strings, err := prepareStrings(tp, 3)
	if err != nil {
		return fuzzFailExitCode
	}

	rand, err := prepareBools(tp, 2)
	if err != nil {
		return fuzzFailExitCode
	}

	// Exercise the header-only, cookie-only and header+cookie code paths.
	if rand[0] {
		r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
	} else if rand[1] {
		r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
	} else {
		r.Request.Header.Set(fasthttp.HeaderAuthorization, "Bearer"+strings[0])
		r.Request.Header.SetCookie(fasthttp.HeaderAuthorization, "Bearer"+strings[1])
	}

	tokens.StoreBearerTokenAppCtx(ctx, r)

	return fuzzSuccessExitCode
}

func FuzzStoreBearerTokenAppCtx(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzStoreBearerTokenAppCtx(data)
	})
}
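These harnesses are ordinary Go native fuzz targets, so they can be driven with the standard toolchain, e.g. go test -run=^$ -fuzz=FuzzHead -fuzztime=1m from the package that contains them (the exact package path is not shown in this diff). A minimal seeding sketch, assuming it lives in the same package as the harnesses above; the seed only has to be long enough to pass the 100-byte guard in the Do* functions:

func FuzzHeadWithSeed(f *testing.F) {
	// Seed the corpus so the engine starts past the len(input) < 100 guard.
	f.Add(make([]byte, 128))
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzHead(data)
	})
}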
@ -1,296 +0,0 @@ (deleted file)
package handler

import (
	"archive/zip"
	"bytes"
	"context"
	"encoding/json"
	"io"
	"mime/multipart"
	"net/http"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
	"github.com/stretchr/testify/require"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

type treeClientMock struct {
}

func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
	return nil, nil
}

type configMock struct {
}

func (c *configMock) DefaultTimestamp() bool {
	return false
}

func (c *configMock) ZipCompression() bool {
	return false
}

func (c *configMock) ClientCut() bool {
	return false
}

func (c *configMock) BufferMaxSizeForPut() uint64 {
	return 0
}

func (c *configMock) NamespaceHeader() string {
	return ""
}

type handlerContext struct {
	key   *keys.PrivateKey
	owner user.ID

	h       *Handler
	frostfs *TestFrostFS
	tree    *treeClientMock
	cfg     *configMock
}

func (hc *handlerContext) Handler() *Handler {
	return hc.h
}

func prepareHandlerContext() (*handlerContext, error) {
	logger, err := zap.NewDevelopment()
	if err != nil {
		return nil, err
	}

	key, err := keys.NewPrivateKey()
	if err != nil {
		return nil, err
	}

	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	testFrostFS := NewTestFrostFS(key)

	testResolver := &resolver.Resolver{Name: "test_resolver"}
	testResolver.SetResolveFunc(func(_ context.Context, name string) (*cid.ID, error) {
		return testFrostFS.ContainerID(name)
	})

	params := &AppParams{
		Logger:   logger,
		FrostFS:  testFrostFS,
		Owner:    &owner,
		Resolver: testResolver,
		Cache: cache.NewBucketCache(&cache.Config{
			Size:     1,
			Lifetime: 1,
			Logger:   logger,
		}),
	}

	treeMock := &treeClientMock{}
	cfgMock := &configMock{}

	handler := New(params, cfgMock, tree.NewTree(treeMock))

	return &handlerContext{
		key:     key,
		owner:   owner,
		h:       handler,
		frostfs: testFrostFS,
		tree:    treeMock,
		cfg:     cfgMock,
	}, nil
}

func (hc *handlerContext) prepareContainer(name string, basicACL acl.Basic) (cid.ID, *container.Container, error) {
	var pp netmap.PlacementPolicy
	err := pp.DecodeString("REP 1")
	if err != nil {
		return cid.ID{}, nil, err
	}

	var cnr container.Container
	cnr.Init()
	cnr.SetOwner(hc.owner)
	cnr.SetPlacementPolicy(pp)
	cnr.SetBasicACL(basicACL)

	var domain container.Domain
	domain.SetName(name)
	container.WriteDomain(&cnr, domain)
	container.SetName(&cnr, name)
	container.SetCreationTime(&cnr, time.Now())

	cnrID := cidtest.ID()

	for op := acl.OpObjectGet; op < acl.OpObjectHash; op++ {
		hc.frostfs.AllowUserOperation(cnrID, hc.owner, op, oid.ID{})
		if basicACL.IsOpAllowed(op, acl.RoleOthers) {
			hc.frostfs.AllowUserOperation(cnrID, user.ID{}, op, oid.ID{})
		}
	}

	return cnrID, &cnr, nil
}

func TestBasic(t *testing.T) {
	hc, err := prepareHandlerContext()
	require.NoError(t, err)

	bktName := "bucket"
	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
	require.NoError(t, err)
	hc.frostfs.SetContainer(cnrID, cnr)

	ctx := context.Background()
	ctx = middleware.SetNamespace(ctx, "")

	content := "hello"
	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
	require.NoError(t, err)

	hc.Handler().Upload(r)
	require.Equal(t, r.Response.StatusCode(), http.StatusOK)

	var putRes putResponse
	err = json.Unmarshal(r.Response.Body(), &putRes)
	require.NoError(t, err)

	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
	attr := object.NewAttribute()
	attr.SetKey(object.AttributeFilePath)
	attr.SetValue(objFileName)
	obj.SetAttributes(append(obj.Attributes(), *attr)...)

	t.Run("get", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().DownloadByAddressOrBucketName(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head", func(t *testing.T) {
		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
		hc.Handler().HeadByAddressOrBucketName(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("get by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().DownloadByAttribute(r)
		require.Equal(t, content, string(r.Response.Body()))
	})

	t.Run("head by attribute", func(t *testing.T) {
		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
		hc.Handler().HeadByAttribute(r)
		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
	})

	t.Run("zip", func(t *testing.T) {
		r = prepareGetZipped(ctx, bktName, "")
		hc.Handler().DownloadZipped(r)

		readerAt := bytes.NewReader(r.Response.Body())
		zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
		require.NoError(t, err)
		require.Len(t, zipReader.File, 1)
		require.Equal(t, objFileName, zipReader.File[0].Name)
		f, err := zipReader.File[0].Open()
		require.NoError(t, err)
		defer func() {
			inErr := f.Close()
			require.NoError(t, inErr)
		}()
		data, err := io.ReadAll(f)
		require.NoError(t, err)
		require.Equal(t, content, string(data))
	})
}

func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", bucket)
	return r, fillMultipartBody(r, content)
}

func prepareGetRequest(ctx context.Context, bucket, objID string) *fasthttp.RequestCtx {
	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", bucket)
	r.SetUserValue("oid", objID)
	return r
}

func prepareGetByAttributeRequest(ctx context.Context, bucket, attrKey, attrVal string) *fasthttp.RequestCtx {
	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", bucket)
	r.SetUserValue("attr_key", attrKey)
	r.SetUserValue("attr_val", attrVal)
	return r
}

func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.RequestCtx {
	r := new(fasthttp.RequestCtx)
	utils.SetContextToRequest(ctx, r)
	r.SetUserValue("cid", bucket)
	r.SetUserValue("prefix", prefix)
	return r
}

const (
	keyAttr     = "User-Attribute"
	valAttr     = "user value"
	objFileName = "newFile.txt"
)

func fillMultipartBody(r *fasthttp.RequestCtx, content string) error {
	attributes := map[string]string{
		object.AttributeFileName: objFileName,
		keyAttr:                  valAttr,
	}

	var buff bytes.Buffer
	w := multipart.NewWriter(&buff)
	fw, err := w.CreateFormFile("file", attributes[object.AttributeFileName])
	if err != nil {
		return err
	}

	if _, err = io.Copy(fw, bytes.NewBufferString(content)); err != nil {
		return err
	}

	if err = w.Close(); err != nil {
		return err
	}

	r.Request.SetBodyStream(&buff, buff.Len())
	r.Request.Header.Set("Content-Type", w.FormDataContentType())
	r.Request.Header.Set("X-Attribute-"+keyAttr, valAttr)

	return nil
}
@ -11,6 +11,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -29,14 +30,13 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
|
||||||
|
|
||||||
btoken := bearerToken(ctx)
|
btoken := bearerToken(ctx)
|
||||||
|
|
||||||
prm := PrmObjectHead{
|
var prm pool.PrmObjectHead
|
||||||
PrmAuth: PrmAuth{
|
prm.SetAddress(objectAddress)
|
||||||
BearerToken: btoken,
|
if btoken != nil {
|
||||||
},
|
prm.UseBearer(*btoken)
|
||||||
Address: objectAddress,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
obj, err := h.frostfs.HeadObject(ctx, prm)
|
obj, err := h.pool.HeadObject(ctx, prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
req.handleFrostFSErr(err, start)
|
req.handleFrostFSErr(err, start)
|
||||||
return
|
return
|
||||||
|
@ -70,19 +70,22 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
idsToResponse(&req.Response, obj)
|
idsToResponse(&req.Response, &obj)
|
||||||
|
|
||||||
if len(contentType) == 0 {
|
if len(contentType) == 0 {
|
||||||
contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
|
contentType, _, err = readContentType(obj.PayloadSize(), func(sz uint64) (io.Reader, error) {
|
||||||
prmRange := PrmObjectRange{
|
var prmRange pool.PrmObjectRange
|
||||||
PrmAuth: PrmAuth{
|
prmRange.SetAddress(objectAddress)
|
||||||
BearerToken: btoken,
|
prmRange.SetLength(sz)
|
||||||
},
|
if btoken != nil {
|
||||||
Address: objectAddress,
|
prmRange.UseBearer(*btoken)
|
||||||
PayloadRange: [2]uint64{0, sz},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return h.frostfs.RangeObject(ctx, prmRange)
|
resObj, err := h.pool.ObjectRange(ctx, prmRange)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resObj, nil
|
||||||
})
|
})
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
req.handleFrostFSErr(err, start)
|
req.handleFrostFSErr(err, start)
|
||||||
|
@ -107,7 +110,7 @@ func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
|
||||||
|
|
||||||
err := id.DecodeString(test)
|
err := id.DecodeString(test)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.byObjectName(c, h.headObject)
|
h.byBucketname(c, h.headObject)
|
||||||
} else {
|
} else {
|
||||||
h.byAddress(c, h.headObject)
|
h.byAddress(c, h.headObject)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,26 +0,0 @@ (deleted file)
package middleware

import (
	"context"
	"fmt"
)

// keyWrapper is wrapper for context keys.
type keyWrapper string

const nsKey = keyWrapper("namespace")

// GetNamespace extract namespace from context.
func GetNamespace(ctx context.Context) (string, error) {
	ns, ok := ctx.Value(nsKey).(string)
	if !ok {
		return "", fmt.Errorf("couldn't get namespace from context")
	}

	return ns, nil
}

// SetNamespace sets namespace in the context.
func SetNamespace(ctx context.Context, ns string) context.Context {
	return context.WithValue(ctx, nsKey, ns)
}
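A minimal round-trip sketch of the helpers deleted above, as they are used elsewhere in this change (SetNamespace in the fuzz harness, GetNamespace in the resolvers); the main wrapper is illustration only, and the internal import path is reachable only from inside the gateway module.

package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
)

func main() {
	// Store the namespace once (normally done by request middleware)...
	ctx := middleware.SetNamespace(context.Background(), "ns-1")

	// ...and read it back further down the call chain.
	ns, err := middleware.GetNamespace(ctx)
	if err != nil {
		fmt.Println("namespace missing:", err)
		return
	}
	fmt.Println("namespace:", ns) // prints "namespace: ns-1"
}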
@ -14,6 +14,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -55,14 +56,13 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objectAddress oi
|
||||||
filename string
|
filename string
|
||||||
)
|
)
|
||||||
|
|
||||||
prm := PrmObjectGet{
|
var prm pool.PrmObjectGet
|
||||||
PrmAuth: PrmAuth{
|
prm.SetAddress(objectAddress)
|
||||||
BearerToken: bearerToken(ctx),
|
if btoken := bearerToken(ctx); btoken != nil {
|
||||||
},
|
prm.UseBearer(*btoken)
|
||||||
Address: objectAddress,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rObj, err := h.frostfs.GetObject(ctx, prm)
|
rObj, err := h.pool.GetObject(ctx, prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
req.handleFrostFSErr(err, start)
|
req.handleFrostFSErr(err, start)
|
||||||
return
|
return
|
||||||
|
|
|
@ -35,7 +35,7 @@ func TestDetector(t *testing.T) {
 	} {
 		t.Run(tc.Name, func(t *testing.T) {
 			contentType, data, err := readContentType(uint64(len(tc.Expected)),
-				func(uint64) (io.Reader, error) {
+				func(sz uint64) (io.Reader, error) {
 					return strings.NewReader(tc.Expected), nil
 				},
 			)
@ -15,6 +15,7 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -56,9 +57,10 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
||||||
|
|
||||||
ctx := utils.GetContextFromRequest(req)
|
ctx := utils.GetContextFromRequest(req)
|
||||||
|
|
||||||
bktInfo, err := h.getBucketInfo(ctx, scid, log)
|
idCnr, err := h.getContainerID(ctx, scid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logAndSendBucketError(req, log, err)
|
log.Error(logs.WrongContainerID, zap.Error(err))
|
||||||
|
response.Error(req, "wrong container id", fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -97,7 +99,7 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = utils.PrepareExpirationHeader(req, h.frostfs, filtered, now); err != nil {
|
if err = utils.PrepareExpirationHeader(req, h.pool, filtered, now); err != nil {
|
||||||
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
|
log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
|
||||||
response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
response.Error(req, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
|
@ -127,28 +129,26 @@ func (h *Handler) Upload(req *fasthttp.RequestCtx) {
|
||||||
}
|
}
|
||||||
|
|
||||||
obj := object.New()
|
obj := object.New()
|
||||||
obj.SetContainerID(bktInfo.CID)
|
obj.SetContainerID(*idCnr)
|
||||||
obj.SetOwnerID(*h.ownerID)
|
obj.SetOwnerID(h.ownerID)
|
||||||
obj.SetAttributes(attributes...)
|
obj.SetAttributes(attributes...)
|
||||||
|
|
||||||
prm := PrmObjectCreate{
|
var prm pool.PrmObjectPut
|
||||||
PrmAuth: PrmAuth{
|
prm.SetHeader(*obj)
|
||||||
BearerToken: h.fetchBearerToken(ctx),
|
prm.SetPayload(file)
|
||||||
},
|
|
||||||
Object: obj,
|
bt := h.fetchBearerToken(ctx)
|
||||||
Payload: file,
|
if bt != nil {
|
||||||
ClientCut: h.config.ClientCut(),
|
prm.UseBearer(*bt)
|
||||||
WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
|
|
||||||
BufferMaxSize: h.config.BufferMaxSizeForPut(),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
|
if idObj, err = h.pool.PutObject(ctx, prm); err != nil {
|
||||||
h.handlePutFrostFSErr(req, err)
|
h.handlePutFrostFSErr(req, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
addr.SetObject(idObj)
|
addr.SetObject(idObj)
|
||||||
addr.SetContainer(bktInfo.CID)
|
addr.SetContainer(*idCnr)
|
||||||
|
|
||||||
// Try to return the response, otherwise, if something went wrong, throw an error.
|
// Try to return the response, otherwise, if something went wrong, throw an error.
|
||||||
if err = newPutResponse(addr).encode(req); err != nil {
|
if err = newPutResponse(addr).encode(req); err != nil {
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
|
|
||||||
"github.com/valyala/fasthttp"
|
"github.com/valyala/fasthttp"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -59,13 +58,3 @@ func isValidValue(s string) bool {
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
|
|
||||||
log.Error(logs.CouldntGetBucket, zap.Error(err))
|
|
||||||
|
|
||||||
if client.IsErrContainerNotFound(err) {
|
|
||||||
response.Error(c, "Not Found", fasthttp.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
|
|
||||||
}
|
|
||||||
|
|
|
@ -4,12 +4,14 @@ const (
|
||||||
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
|
CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
|
||||||
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
|
CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
|
||||||
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
|
CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
|
||||||
|
WrongContainerID = "wrong container id" // Error in ../../downloader/download.go and uploader/upload.go
|
||||||
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
|
WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
|
||||||
GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
|
ObjectWasntFound = "object wasn't found" // Error in ../../downloader/download.go
|
||||||
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
|
ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
|
||||||
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
|
CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
|
||||||
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
|
ObjectNotFound = "object not found" // Error in ../../downloader/download.go
|
||||||
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
|
ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
|
||||||
|
CouldNotCheckContainerExistence = "could not check container existence" // Error in ../../downloader/download.go
|
||||||
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
|
FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
|
||||||
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
|
IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
|
||||||
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
|
ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
|
||||||
|
@ -19,7 +21,6 @@ const (
|
||||||
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
|
ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
|
||||||
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
|
ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
|
||||||
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
|
CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
|
||||||
CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
|
|
||||||
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
|
IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
|
||||||
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
|
IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
|
||||||
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
|
CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
|
||||||
|
@ -67,14 +68,6 @@ const (
|
||||||
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
|
FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
|
||||||
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
|
FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
|
||||||
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
|
AddedStoragePeer = "added storage peer" // Info in ../../settings.go
|
||||||
CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
|
AnonRequestSkipIAMValidation = "anon request, skip IAM validation" // Debug in ../../app.go
|
||||||
CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
|
IAMValidationFailed = "IAM validation failed" // Error in ../../app.go
|
||||||
InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
|
|
||||||
InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
|
|
||||||
InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
|
|
||||||
FailedToUnescapeQuery = "failed to unescape query"
|
|
||||||
ServerReconnecting = "reconnecting server..."
|
|
||||||
ServerReconnectedSuccessfully = "server reconnected successfully"
|
|
||||||
ServerReconnectFailed = "failed to reconnect server"
|
|
||||||
WarnDuplicateAddress = "duplicate address"
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -40,9 +40,6 @@ func (ms *Service) ShutDown(ctx context.Context) {
 	ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
 	err := ms.Shutdown(ctx)
 	if err != nil {
-		ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
-		if err = ms.Close(); err != nil {
-			ms.log.Panic(logs.CantShutDownService, zap.Error(err))
-		}
+		ms.log.Panic(logs.CantShutDownService)
 	}
 }
resolver/frostfs.go (new file, 35 lines)
@ -0,0 +1,35 @@
package resolver

import (
	"context"
	"errors"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
)

// FrostFSResolver represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type FrostFSResolver struct {
	pool *pool.Pool
}

// NewFrostFSResolver creates new FrostFSResolver using provided pool.Pool.
func NewFrostFSResolver(p *pool.Pool) *FrostFSResolver {
	return &FrostFSResolver{pool: p}
}

// SystemDNS implements resolver.FrostFS interface method.
func (x *FrostFSResolver) SystemDNS(ctx context.Context) (string, error) {
	networkInfo, err := x.pool.NetworkInfo(ctx)
	if err != nil {
		return "", fmt.Errorf("read network info via client: %w", err)
	}

	domain := networkInfo.RawNetworkParameter("SystemDNS")
	if domain == nil {
		return "", errors.New("system DNS parameter not found or empty")
	}

	return string(domain), nil
}
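A small usage sketch for the new file above, assuming p is an already-dialed *pool.Pool (pool construction and dialing are omitted here and are not shown in this hunk):

// resolveSystemDNS wraps a dialed pool and reads the SystemDNS network parameter,
// which the DNS resolver uses as the root zone for container names.
func resolveSystemDNS(ctx context.Context, p *pool.Pool) {
	r := resolver.NewFrostFSResolver(p)

	domain, err := r.SystemDNS(ctx)
	if err != nil {
		fmt.Println("SystemDNS lookup failed:", err)
		return
	}
	fmt.Println("container zone root:", domain)
}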
@ -6,7 +6,6 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
|
||||||
|
@ -29,17 +28,15 @@ type FrostFS interface {
|
||||||
SystemDNS(context.Context) (string, error)
|
SystemDNS(context.Context) (string, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type Settings interface {
|
|
||||||
FormContainerZone(ns string) (zone string, isDefault bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
FrostFS FrostFS
|
FrostFS FrostFS
|
||||||
RPCAddress string
|
RPCAddress string
|
||||||
Settings Settings
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ContainerResolver struct {
|
type ContainerResolver struct {
|
||||||
|
rpcAddress string
|
||||||
|
frostfs FrostFS
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
resolvers []*Resolver
|
resolvers []*Resolver
|
||||||
}
|
}
|
||||||
|
@ -64,7 +61,9 @@ func NewContainerResolver(resolverNames []string, cfg *Config) (*ContainerResolv
|
||||||
}
|
}
|
||||||
|
|
||||||
return &ContainerResolver{
|
return &ContainerResolver{
|
||||||
resolvers: resolvers,
|
rpcAddress: cfg.RPCAddress,
|
||||||
|
frostfs: cfg.FrostFS,
|
||||||
|
resolvers: resolvers,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -107,7 +106,7 @@ func (r *ContainerResolver) Resolve(ctx context.Context, cnrName string) (*cid.I
|
||||||
return nil, ErrNoResolvers
|
return nil, ErrNoResolvers
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *ContainerResolver) UpdateResolvers(resolverNames []string, cfg *Config) error {
|
func (r *ContainerResolver) UpdateResolvers(resolverNames []string) error {
|
||||||
r.mu.Lock()
|
r.mu.Lock()
|
||||||
defer r.mu.Unlock()
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
@ -115,7 +114,7 @@ func (r *ContainerResolver) UpdateResolvers(resolverNames []string, cfg *Config)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resolvers, err := createResolvers(resolverNames, cfg)
|
resolvers, err := createResolvers(resolverNames, &Config{FrostFS: r.frostfs, RPCAddress: r.rpcAddress})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -141,43 +140,29 @@ func (r *ContainerResolver) equals(resolverNames []string) bool {
|
||||||
func newResolver(name string, cfg *Config) (*Resolver, error) {
|
func newResolver(name string, cfg *Config) (*Resolver, error) {
|
||||||
switch name {
|
switch name {
|
||||||
case DNSResolver:
|
case DNSResolver:
|
||||||
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
|
return NewDNSResolver(cfg.FrostFS)
|
||||||
case NNSResolver:
|
case NNSResolver:
|
||||||
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
|
return NewNNSResolver(cfg.RPCAddress)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unknown resolver: %s", name)
|
return nil, fmt.Errorf("unknown resolver: %s", name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
|
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
|
||||||
if frostFS == nil {
|
if frostFS == nil {
|
||||||
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
|
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
|
||||||
}
|
}
|
||||||
if settings == nil {
|
|
||||||
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
|
|
||||||
}
|
|
||||||
|
|
||||||
var dns ns.DNS
|
var dns ns.DNS
|
||||||
|
|
||||||
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
||||||
var err error
|
domain, err := frostFS.SystemDNS(ctx)
|
||||||
|
|
||||||
namespace, err := middleware.GetNamespace(ctx)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
zone, isDefault := settings.FormContainerZone(namespace)
|
domain = name + "." + domain
|
||||||
if isDefault {
|
|
||||||
zone, err = frostFS.SystemDNS(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
domain := name + "." + zone
|
|
||||||
cnrID, err := dns.ResolveContainerName(domain)
|
cnrID, err := dns.ResolveContainerName(domain)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
|
return nil, fmt.Errorf("couldn't resolve container '%s' as '%s': %w", name, domain, err)
|
||||||
}
|
}
|
||||||
|
@ -190,32 +175,17 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewNNSResolver(rpcAddress string, settings Settings) (*Resolver, error) {
|
func NewNNSResolver(rpcAddress string) (*Resolver, error) {
|
||||||
if rpcAddress == "" {
|
|
||||||
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
|
|
||||||
}
|
|
||||||
if settings == nil {
|
|
||||||
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
|
|
||||||
}
|
|
||||||
|
|
||||||
var nns ns.NNS
|
var nns ns.NNS
|
||||||
|
|
||||||
if err := nns.Dial(rpcAddress); err != nil {
|
if err := nns.Dial(rpcAddress); err != nil {
|
||||||
return nil, fmt.Errorf("could not dial nns: %w", err)
|
return nil, fmt.Errorf("could not dial nns: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
resolveFunc := func(ctx context.Context, name string) (*cid.ID, error) {
|
resolveFunc := func(_ context.Context, name string) (*cid.ID, error) {
|
||||||
var d container.Domain
|
var d container.Domain
|
||||||
d.SetName(name)
|
d.SetName(name)
|
||||||
|
|
||||||
namespace, err := middleware.GetNamespace(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
zone, _ := settings.FormContainerZone(namespace)
|
|
||||||
d.SetZone(zone)
|
|
||||||
|
|
||||||
cnrID, err := nns.ResolveContainerDomain(d)
|
cnrID, err := nns.ResolveContainerDomain(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
|
return nil, fmt.Errorf("couldn't resolve container '%s': %w", name, err)
|
||||||
|
|
|
@ -23,29 +23,19 @@ func makeTestCookie(value []byte) *fasthttp.RequestHeader {
|
||||||
func makeTestHeader(value []byte) *fasthttp.RequestHeader {
|
func makeTestHeader(value []byte) *fasthttp.RequestHeader {
|
||||||
header := new(fasthttp.RequestHeader)
|
header := new(fasthttp.RequestHeader)
|
||||||
if value != nil {
|
if value != nil {
|
||||||
header.Set(fasthttp.HeaderAuthorization, string(value))
|
header.Set(fasthttp.HeaderAuthorization, bearerTokenHdr+" "+string(value))
|
||||||
}
|
}
|
||||||
return header
|
return header
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeBearer(value string) string {
|
func Test_fromCookie(t *testing.T) {
|
||||||
return bearerTokenHdr + " " + value
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBearerTokenFromCookie(t *testing.T) {
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
actual []byte
|
actual []byte
|
||||||
expect []byte
|
expect []byte
|
||||||
}{
|
}{
|
||||||
{
|
{name: "empty"},
|
||||||
name: "empty",
|
{name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "normal",
|
|
||||||
actual: []byte("TOKEN"),
|
|
||||||
expect: []byte("TOKEN"),
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
|
@ -55,31 +45,14 @@ func TestBearerTokenFromCookie(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBearerTokenFromHeader(t *testing.T) {
|
func Test_fromHeader(t *testing.T) {
|
||||||
validToken := "token"
|
|
||||||
tokenWithoutPrefix := "invalid-token"
|
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
actual []byte
|
actual []byte
|
||||||
expect []byte
|
expect []byte
|
||||||
}{
|
}{
|
||||||
{
|
{name: "empty"},
|
||||||
name: "empty",
|
{name: "normal", actual: []byte("TOKEN"), expect: []byte("TOKEN")},
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "token without the bearer prefix",
|
|
||||||
actual: []byte(tokenWithoutPrefix),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "token without payload",
|
|
||||||
actual: []byte(makeBearer("")),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "normal",
|
|
||||||
actual: []byte(makeBearer(validToken)),
|
|
||||||
expect: []byte(validToken),
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
|
@ -89,7 +62,7 @@ func TestBearerTokenFromHeader(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFetchBearerToken(t *testing.T) {
|
func Test_fetchBearerToken(t *testing.T) {
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var uid user.ID
|
var uid user.ID
|
||||||
|
@ -102,77 +75,43 @@ func TestFetchBearerToken(t *testing.T) {
|
||||||
require.NotEmpty(t, t64)
|
require.NotEmpty(t, t64)
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
name string
|
name string
|
||||||
|
|
||||||
cookie string
|
cookie string
|
||||||
header string
|
header string
|
||||||
|
|
||||||
error string
|
error string
|
||||||
nilCtx bool
|
|
||||||
expect *bearer.Token
|
expect *bearer.Token
|
||||||
}{
|
}{
|
||||||
{
|
{name: "empty"},
|
||||||
name: "empty",
|
|
||||||
},
|
{name: "bad base64 header", header: "WRONG BASE64", error: "can't base64-decode bearer token"},
|
||||||
{
|
{name: "bad base64 cookie", cookie: "WRONG BASE64", error: "can't base64-decode bearer token"},
|
||||||
name: "nil context",
|
|
||||||
nilCtx: true,
|
{name: "header token unmarshal error", header: "dGVzdAo=", error: "can't unmarshal bearer token"},
|
||||||
},
|
{name: "cookie token unmarshal error", cookie: "dGVzdAo=", error: "can't unmarshal bearer token"},
|
||||||
{
|
|
||||||
name: "bad base64 header",
|
|
||||||
header: "WRONG BASE64",
|
|
||||||
error: "can't base64-decode bearer token",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "bad base64 cookie",
|
|
||||||
cookie: "WRONG BASE64",
|
|
||||||
error: "can't base64-decode bearer token",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "header token unmarshal error",
|
|
||||||
header: "dGVzdAo=",
|
|
||||||
error: "can't unmarshal bearer token",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "cookie token unmarshal error",
|
|
||||||
cookie: "dGVzdAo=",
|
|
||||||
error: "can't unmarshal bearer token",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "bad header and cookie",
|
name: "bad header and cookie",
|
||||||
header: "WRONG BASE64",
|
header: "WRONG BASE64",
|
||||||
cookie: "dGVzdAo=",
|
cookie: "dGVzdAo=",
|
||||||
error: "can't unmarshal bearer token",
|
error: "can't unmarshal bearer token",
|
||||||
},
|
},
|
||||||
|
|
||||||
{
|
{
|
||||||
name: "bad header, but good cookie",
|
name: "bad header, but good cookie",
|
||||||
header: "dGVzdAo=",
|
header: "dGVzdAo=",
|
||||||
cookie: t64,
|
cookie: t64,
|
||||||
expect: tkn,
|
expect: tkn,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "bad cookie, but good header",
|
{name: "ok for header", header: t64, expect: tkn},
|
||||||
header: t64,
|
{name: "ok for cookie", cookie: t64, expect: tkn},
|
||||||
cookie: "dGVzdAo=",
|
|
||||||
expect: tkn,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "ok for header",
|
|
||||||
header: t64,
|
|
||||||
expect: tkn,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "ok for cookie",
|
|
||||||
cookie: t64,
|
|
||||||
expect: tkn,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range cases {
|
for _, tt := range cases {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
var ctx *fasthttp.RequestCtx
|
ctx := makeTestRequest(tt.cookie, tt.header)
|
||||||
if !tt.nilCtx {
|
|
||||||
ctx = makeTestRequest(tt.cookie, tt.header)
|
|
||||||
}
|
|
||||||
|
|
||||||
actual, err := fetchBearerToken(ctx)
|
actual, err := fetchBearerToken(ctx)
|
||||||
|
|
||||||
if tt.error == "" {
|
if tt.error == "" {
|
||||||
|
@ -200,7 +139,7 @@ func makeTestRequest(cookie, header string) *fasthttp.RequestCtx {
|
||||||
return ctx
|
return ctx
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCheckAndPropagateBearerToken(t *testing.T) {
|
func Test_checkAndPropagateBearerToken(t *testing.T) {
|
||||||
key, err := keys.NewPrivateKey()
|
key, err := keys.NewPrivateKey()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
var uid user.ID
|
var uid user.ID
|
||||||
|
@ -223,85 +162,3 @@ func TestCheckAndPropagateBearerToken(t *testing.T) {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, tkn, actual)
|
require.Equal(t, tkn, actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestLoadBearerToken(t *testing.T) {
|
|
||||||
ctx := context.Background()
|
|
||||||
token := new(bearer.Token)
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
appCtx context.Context
|
|
||||||
error string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "token is missing in the context",
|
|
||||||
appCtx: ctx,
|
|
||||||
error: "found empty bearer token",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "normal",
|
|
||||||
appCtx: context.WithValue(ctx, bearerTokenKey, token),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range cases {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
tkn, err := LoadBearerToken(tt.appCtx)
|
|
||||||
|
|
||||||
if tt.error == "" {
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, token, tkn)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
require.Contains(t, err.Error(), tt.error)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestStoreBearerTokenAppCtx(t *testing.T) {
|
|
||||||
key, err := keys.NewPrivateKey()
|
|
||||||
require.NoError(t, err)
|
|
||||||
var uid user.ID
|
|
||||||
user.IDFromKey(&uid, key.PrivateKey.PublicKey)
|
|
||||||
|
|
||||||
tkn := new(bearer.Token)
|
|
||||||
tkn.ForUser(uid)
|
|
||||||
|
|
||||||
t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
|
|
||||||
require.NotEmpty(t, t64)
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
req *fasthttp.RequestCtx
|
|
||||||
error string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "invalid token",
|
|
||||||
req: makeTestRequest("dGVzdAo=", ""),
|
|
||||||
error: "can't unmarshal bearer token",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "normal",
|
|
||||||
req: makeTestRequest(t64, ""),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range cases {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
ctx, err := StoreBearerTokenAppCtx(context.Background(), tt.req)
|
|
||||||
|
|
||||||
if tt.error == "" {
|
|
||||||
require.NoError(t, err)
|
|
||||||
actualToken, ok := ctx.Value(bearerTokenKey).(*bearer.Token)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, tkn, actualToken)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
require.Contains(t, err.Error(), tt.error)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
39
tree/tree.go
39
tree/tree.go
|
@ -73,7 +73,6 @@ type Meta interface {
|
||||||
|
|
||||||
type NodeResponse interface {
|
type NodeResponse interface {
|
||||||
GetMeta() []Meta
|
GetMeta() []Meta
|
||||||
GetTimestamp() uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
|
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
|
||||||
|
@ -136,7 +135,7 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
|
||||||
TreeID: versionTree,
|
TreeID: versionTree,
|
||||||
Path: path,
|
Path: path,
|
||||||
Meta: meta,
|
Meta: meta,
|
||||||
LatestOnly: false,
|
LatestOnly: true,
|
||||||
AllAttrs: false,
|
AllAttrs: false,
|
||||||
}
|
}
|
||||||
nodes, err := c.service.GetNodes(ctx, p)
|
nodes, err := c.service.GetNodes(ctx, p)
|
||||||
|
@ -144,43 +143,11 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
latestNode, err := getLatestNode(nodes)
|
if len(nodes) == 0 {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return newNodeVersion(latestNode)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getLatestNode(nodes []NodeResponse) (NodeResponse, error) {
|
|
||||||
var (
|
|
||||||
maxCreationTime uint64
|
|
||||||
targetIndexNode = -1
|
|
||||||
)
|
|
||||||
|
|
||||||
for i, node := range nodes {
|
|
||||||
currentCreationTime := node.GetTimestamp()
|
|
||||||
if checkExistOID(node.GetMeta()) && currentCreationTime > maxCreationTime {
|
|
||||||
maxCreationTime = currentCreationTime
|
|
||||||
targetIndexNode = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if targetIndexNode == -1 {
|
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, layer.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return nodes[targetIndexNode], nil
|
return newNodeVersion(nodes[0])
|
||||||
}
|
|
||||||
|
|
||||||
func checkExistOID(meta []Meta) bool {
|
|
||||||
for _, kv := range meta {
|
|
||||||
if kv.GetKey() == "OID" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// pathFromName splits name by '/'.
|
// pathFromName splits name by '/'.
|
||||||
|
|
|
@ -1,143 +0,0 @@
|
||||||
package tree
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
type nodeMeta struct {
|
|
||||||
key string
|
|
||||||
value []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m nodeMeta) GetKey() string {
|
|
||||||
return m.key
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m nodeMeta) GetValue() []byte {
|
|
||||||
return m.value
|
|
||||||
}
|
|
||||||
|
|
||||||
type nodeResponse struct {
|
|
||||||
meta []nodeMeta
|
|
||||||
timestamp uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n nodeResponse) GetTimestamp() uint64 {
|
|
||||||
return n.timestamp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n nodeResponse) GetMeta() []Meta {
|
|
||||||
res := make([]Meta, len(n.meta))
|
|
||||||
for i, value := range n.meta {
|
|
||||||
res[i] = value
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestGetLatestNode(t *testing.T) {
|
|
||||||
for _, tc := range []struct {
|
|
||||||
name string
|
|
||||||
nodes []NodeResponse
|
|
||||||
exceptedOID string
|
|
||||||
error bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "empty",
|
|
||||||
nodes: []NodeResponse{},
|
|
||||||
error: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one node of the object version",
|
|
||||||
nodes: []NodeResponse{
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 1,
|
|
||||||
meta: []nodeMeta{
|
|
||||||
{
|
|
||||||
key: oidKV,
|
|
||||||
value: []byte("oid1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
exceptedOID: "oid1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one node of the object version and one node of the secondary object",
|
|
||||||
nodes: []NodeResponse{
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 3,
|
|
||||||
meta: []nodeMeta{},
|
|
||||||
},
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 1,
|
|
||||||
meta: []nodeMeta{
|
|
||||||
{
|
|
||||||
key: oidKV,
|
|
||||||
value: []byte("oid1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
exceptedOID: "oid1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "all nodes represent a secondary object",
|
|
||||||
nodes: []NodeResponse{
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 3,
|
|
||||||
meta: []nodeMeta{},
|
|
||||||
},
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 5,
|
|
||||||
meta: []nodeMeta{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
error: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "several nodes of different types and with different timestamp",
|
|
||||||
nodes: []NodeResponse{
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 1,
|
|
||||||
meta: []nodeMeta{
|
|
||||||
{
|
|
||||||
key: oidKV,
|
|
||||||
value: []byte("oid1"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 3,
|
|
||||||
meta: []nodeMeta{},
|
|
||||||
},
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 4,
|
|
||||||
meta: []nodeMeta{
|
|
||||||
{
|
|
||||||
key: oidKV,
|
|
||||||
value: []byte("oid2"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
nodeResponse{
|
|
||||||
timestamp: 6,
|
|
||||||
meta: []nodeMeta{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
exceptedOID: "oid2",
|
|
||||||
},
|
|
||||||
} {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
actualNode, err := getLatestNode(tc.nodes)
|
|
||||||
if tc.error {
|
|
||||||
require.Error(t, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, tc.exceptedOID, string(actualNode.GetMeta()[0].GetValue()))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -11,18 +11,10 @@ import (
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
"unicode"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
|
||||||
)
|
)
|
||||||
|
|
||||||
type EpochDurations struct {
|
|
||||||
CurrentEpoch uint64
|
|
||||||
MsPerBlock int64
|
|
||||||
BlockPerEpoch uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type EpochInfoFetcher interface {
|
|
||||||
GetEpochDurations(context.Context) (*EpochDurations, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
UserAttributeHeaderPrefix = "X-Attribute-"
|
UserAttributeHeaderPrefix = "X-Attribute-"
|
||||||
)
|
)
|
||||||
|
@ -159,7 +151,7 @@ func title(str string) string {
|
||||||
return string(r0) + str[size:]
|
return string(r0) + str[size:]
|
||||||
}
|
}
|
||||||
|
|
||||||
func PrepareExpirationHeader(ctx context.Context, epochFetcher EpochInfoFetcher, headers map[string]string, now time.Time) error {
|
func PrepareExpirationHeader(ctx context.Context, p *pool.Pool, headers map[string]string, now time.Time) error {
|
||||||
formatsNum := 0
|
formatsNum := 0
|
||||||
index := -1
|
index := -1
|
||||||
for i, transformer := range transformers {
|
for i, transformer := range transformers {
|
||||||
|
@ -173,7 +165,7 @@ func PrepareExpirationHeader(ctx context.Context, epochFetcher EpochInfoFetcher,
|
||||||
case 0:
|
case 0:
|
||||||
return nil
|
return nil
|
||||||
case 1:
|
case 1:
|
||||||
epochDuration, err := epochFetcher.GetEpochDurations(ctx)
|
epochDuration, err := GetEpochDurations(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't get epoch durations from network info: %w", err)
|
return fmt.Errorf("couldn't get epoch durations from network info: %w", err)
|
||||||
}
|
}
|
||||||
|
|
utils/params.go (new file, 15 lines)
@ -0,0 +1,15 @@
package utils

import (
	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"go.uber.org/zap"
)

type AppParams struct {
	Logger   *zap.Logger
	Pool     *pool.Pool
	Owner    *user.ID
	Resolver *resolver.ContainerResolver
}
@ -30,12 +30,12 @@ func (c *httpCarrier) Set(key string, value string) {
 func (c *httpCarrier) Keys() []string {
 	dict := make(map[string]interface{})
 	c.r.Request.Header.VisitAll(
-		func(key, _ []byte) {
+		func(key, value []byte) {
 			dict[string(key)] = true
 		},
 	)
 	c.r.Response.Header.VisitAll(
-		func(key, _ []byte) {
+		func(key, value []byte) {
 			dict[string(key)] = true
 		},
 	)
@ -2,10 +2,36 @@ package utils

 import (
 	"context"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
 	"github.com/valyala/fasthttp"
 )

+type EpochDurations struct {
+	CurrentEpoch  uint64
+	MsPerBlock    int64
+	BlockPerEpoch uint64
+}
+
+func GetEpochDurations(ctx context.Context, p *pool.Pool) (*EpochDurations, error) {
+	networkInfo, err := p.NetworkInfo(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	res := &EpochDurations{
+		CurrentEpoch:  networkInfo.CurrentEpoch(),
+		MsPerBlock:    networkInfo.MsPerBlock(),
+		BlockPerEpoch: networkInfo.EpochDuration(),
+	}
+
+	if res.BlockPerEpoch == 0 {
+		return nil, fmt.Errorf("EpochDuration is empty")
+	}
+	return res, nil
+}
+
 // SetContextToRequest adds new context to fasthttp request.
 func SetContextToRequest(ctx context.Context, c *fasthttp.RequestCtx) {
 	c.SetUserValue("context", ctx)
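The values returned by GetEpochDurations are what expiration handling needs to turn a wall-clock lifetime into an epoch number. A sketch of that conversion, assuming "time" is imported; the rounding here is illustrative and the gateway's PrepareExpirationHeader may round differently:

// expirationEpoch converts a desired object lifetime into an absolute epoch,
// using milliseconds-per-block and blocks-per-epoch from the network info.
func expirationEpoch(d *EpochDurations, lifetime time.Duration) uint64 {
	msPerEpoch := d.MsPerBlock * int64(d.BlockPerEpoch) // BlockPerEpoch is non-zero by construction
	epochs := (lifetime.Milliseconds() + msPerEpoch - 1) / msPerEpoch // round up so the object lives at least `lifetime`
	return d.CurrentEpoch + uint64(epochs)
}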