Compare commits

No commits in common. "master" and "v0.31.0" have entirely different histories. In the hunks below, "-" lines come from master and "+" lines from v0.31.0.

54 changed files with 1938 additions and 2962 deletions

@@ -1,27 +0,0 @@
-on:
-  pull_request:
-  push:
-  workflow_dispatch:
-
-jobs:
-  image:
-    name: OCI image
-    runs-on: docker
-    container: git.frostfs.info/truecloudlab/env:oci-image-builder-bookworm
-    steps:
-      - name: Clone git repo
-        uses: actions/checkout@v3
-
-      - name: Build OCI image
-        run: make image
-
-      - name: Push image to OCI registry
-        run: |
-          echo "$REGISTRY_PASSWORD" \
-            | docker login --username truecloudlab --password-stdin git.frostfs.info
-          make image-push
-        if: >-
-          startsWith(github.ref, 'refs/tags/v') &&
-          (github.event_name == 'workflow_dispatch' || github.event_name == 'push')
-        env:
-          REGISTRY_PASSWORD: ${{secrets.FORGEJO_OCI_REGISTRY_PUSH_TOKEN}}

@@ -43,19 +43,3 @@ jobs:
 
       - name: Run tests
         run: make test
-
-  integration:
-    name: Integration tests
-    runs-on: oci-runner
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Set up Go
-        uses: actions/setup-go@v3
-        with:
-          go-version: '1.23'
-
-      - name: Run integration tests
-        run: |-
-          podman-service.sh
-          make integration-test

@@ -16,7 +16,7 @@ jobs:
      - name: Setup Go
        uses: actions/setup-go@v3
        with:
-         go-version: '1.22.12'
+         go-version: '1.22'

      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest

CHANGELOG.md (38 changed lines)

@@ -4,38 +4,6 @@ This document outlines major changes between releases.
 
 ## [Unreleased]
 
-### Added
-- Add handling quota limit reached error (#187)
-- Add slash clipping for FileName attribute (#174)
-
-## [0.32.3] - 2025-02-05
-
-### Added
-- Add slash clipping for FileName attribute (#174)
-
-## [0.32.2] - 2025-02-03
-
-### Fixed
-- Possible memory leak in gRPC client (#202)
-
-## [0.32.1] - 2025-01-27
-
-### Fixed
-- SIGHUP panic (#198)
-
-## [0.32.0] - Khumbu - 2024-12-20
-
-### Fixed
-- Getting S3 object with FrostFS Object ID-like key (#166)
-- Ignore delete marked objects in versioned bucket in index page (#181)
-
-### Added
-- Metric of dropped logs by log sampler (#150)
-- Fallback FileName attribute search during FilePath attribute search (#174)
-
-### Changed
-- Updated tree service pool without api-go dependency (#178)
-
 ## [0.31.0] - Rongbuk - 2024-11-20
 
 ### Fixed

@@ -202,8 +170,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.1...v0.30.2
 [0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.2...v0.30.3
 [0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.30.3...v0.31.0
-[0.32.0]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...v0.32.0
-[0.32.1]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.0...v0.32.1
-[0.32.2]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.1...v0.32.2
-[0.32.3]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.2...v0.32.3
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.32.3...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-http-gw/compare/v0.31.0...master

@@ -1,3 +1 @@
-.* @TrueCloudLab/storage-services-developers @TrueCloudLab/storage-services-committers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
+.* @alexvanin @dkirillov

Makefile (2 changed lines)

@@ -7,7 +7,7 @@ LINT_VERSION ?= 1.60.3
 TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
 BUILD ?= $(shell date -u --iso=seconds)
 
-HUB_IMAGE ?= git.frostfs.info/truecloudlab/frostfs-http-gw
+HUB_IMAGE ?= truecloudlab/frostfs-http-gw
 HUB_TAG ?= "$(shell echo ${VERSION} | sed 's/^v//')"
 
 METRICS_DUMP_OUT ?= ./metrics-dump.json

README.md (141 changed lines)

@@ -38,7 +38,7 @@ version Show current version
 ```
 
 Or you can also use a [Docker
-image](https://git.frostfs.info/TrueCloudLab/-/packages/container/frostfs-http-gw) provided for the released
+image](https://hub.docker.com/r/truecloudlab/frostfs-http-gw) provided for the released
 (and occasionally unreleased) versions of the gateway (`:latest` points to the
 latest stable release).

@@ -217,8 +217,41 @@ Also, in case of downloading, you need to have a file inside a container.
 ### NNS
 
 In all download/upload routes you can use container name instead of its id (`$CID`).
-Read more about it in [docs/nns.md](./docs/nns.md).
+
+Steps to start using name resolving:
+
+1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
+
+```yaml
+rpc_endpoint: http://morph-chain.frostfs.devenv:30333
+resolve_order:
+  - nns
+```
+
+2. Make sure your container is registered in the NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
+you can check if your container (e.g. with the name `container-name`) is registered in NNS:
+
+```shell
+$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
+  http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
+
+0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
+
+$ docker exec -it morph_chain neo-go \
+  contract testinvokefunction \
+  -r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
+  resolve string:container-name.container int:16 \
+  | jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
+  | base64 -d && echo
+
+7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
+```
+
+3. Use the container name instead of its `$CID`. For example:
+
+```shell
+$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
+```
+
 #### Create a container
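
For readers calling the gateway from code rather than curl, here is a minimal Go sketch of step 3 above; the endpoint, container name, and attribute values are the same placeholders the README uses, not real identifiers.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Fetch an object through the gateway by NNS container name and FileName
	// attribute. "localhost:8082", "container-name" and "object-name" are the
	// placeholder values from the README examples above.
	resp, err := http.Get("http://localhost:8082/get_by_attribute/container-name/FileName/object-name")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%s, %d bytes\n", resp.Status, len(body))
}
```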

@@ -429,7 +462,109 @@ object ID, like this:
 #### Authentication
 
-Read more about request authentication in [docs/authentication.md](./docs/authentication.md)
+You can always upload files to public containers (open for anyone to put
+objects into), but for restricted containers you need to explicitly allow PUT
+operations for a request signed with your HTTP Gateway keys.
+
+If you don't want to manage the gateway's secret keys and adjust policies when
+the gateway configuration changes (new gate, key rotation, etc.) or you plan to use
+public services, there is an option to let your application backend (or you)
+issue Bearer Tokens and pass them from the client via the gate down to the FrostFS level
+to grant access.
+
+A FrostFS Bearer Token is basically a container owner-signed policy (refer to the FrostFS
+documentation for more details). There are two options to pass them to the gateway:
+* "Authorization" header with "Bearer" type and base64-encoded token in
+  the credentials field
+* "Bearer" cookie with base64-encoded token contents
+
+For example, you have a mobile application frontend with a backend part storing
+data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
+Bearer token and provides it to the frontend. Then, the mobile app may generate
+some data and upload it via any available FrostFS HTTP Gateway by adding
+the corresponding header to the upload request. Accessing policy-protected data
+works the same way.
+
+##### Example
+In order to generate a bearer token, you need to have a wallet (which will be used to sign the token).
+
+1. Suppose you have a container with a private policy for the wallet key
+
+```
+$ frostfs-cli container create -r <endpoint> --wallet <wallet> --policy <policy> --basic-acl 0 --await
+CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
+
+$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
+  --target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
+  --rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
+  --chain-id <chainID>
+```
+
+2. Form a Bearer token (10000 is the lifetime expiration in epochs) to impersonate
+   the HTTP Gateway request as a wallet-signed request, and save it to **bearer.json**:
+```
+{
+    "body": {
+        "allowImpersonate": true,
+        "lifetime": {
+            "exp": "10000",
+            "nbf": "0",
+            "iat": "0"
+        }
+    },
+    "signature": null
+}
+```
+
+3. Sign it with the wallet:
+```
+$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
+```
+
+4. Encode to base64 to use in the header:
+```
+$ base64 -w 0 signed.json
+# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
+```
+
+After that, the Bearer token can be used:
+
+```
+$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
+  http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
+# output:
+# {
+#        "object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
+#        "container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
+# }
+```
+
+##### Note: Bearer Token owner
+
+You can specify the exact key that can use the Bearer Token (the gateway wallet address).
+To do this, encode the wallet address in base64 format
+
+```
+$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
+# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
+```
+
+Then specify this value in the Bearer Token JSON
+```
+{
+    "body": {
+        "ownerID": {
+            "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
+        },
+...
+```
+
+##### Note: Policy override
+
+Instead of impersonation, you can define the set of policies that will be applied
+to the request sender. This allows restricting access to specific operations and
+specific objects without giving full impersonation control to the token user.
+
 ### Metrics and Pprof
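
The same upload flow can be driven from Go. A minimal sketch, assuming a `signed.json` produced by step 3 sits in the working directory and a gateway listens locally; the container ID is the placeholder from the curl example above, and the file content is a stand-in.

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	// Base64-encode the wallet-signed token, exactly what `base64 -w 0 signed.json` does.
	signed, err := os.ReadFile("signed.json")
	if err != nil {
		panic(err)
	}
	token := base64.StdEncoding.EncodeToString(signed)

	// Build the multipart body the gateway's /upload route expects.
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", "cat.jpeg")
	if err != nil {
		panic(err)
	}
	if _, err := part.Write([]byte("...image bytes...")); err != nil { // stand-in payload
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", "Bearer "+token) // or a "Bearer" cookie instead

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```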

VERSION (2 changed lines)

@@ -1 +1 @@
-v0.32.3
+v0.31.0

@@ -16,6 +16,7 @@ import (
     "syscall"
     "time"
 
+    v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"

@@ -25,11 +26,11 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/templates"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/metrics"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
+    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
     "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-    v2container "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"

@@ -44,7 +45,6 @@ import (
     "github.com/valyala/fasthttp"
     "go.opentelemetry.io/otel/trace"
     "go.uber.org/zap"
-    "go.uber.org/zap/zapcore"
     "golang.org/x/exp/slices"
 )

@@ -52,36 +52,33 @@ type (
     app struct {
         ctx      context.Context
         log      *zap.Logger
+        logLevel zap.AtomicLevel
         pool     *pool.Pool
         treePool *treepool.Pool
         key      *keys.PrivateKey
         owner    *user.ID
-        cfg      *appCfg
+        cfg      *viper.Viper
         webServer *fasthttp.Server
         webDone   chan struct{}
         resolver  *resolver.ContainerResolver
         metrics   *gateMetrics
         services  []*metrics.Service
         settings  *appSettings
-        loggerSettings *loggerSettings
-        bucketCache    *cache.BucketCache
 
         servers       []Server
         unbindServers []ServerInfo
         mu            sync.RWMutex
     }
 
-    loggerSettings struct {
-        mu         sync.RWMutex
-        appMetrics *metrics.GateMetrics
-    }
-
     // App is an interface for the main gateway function.
     App interface {
         Wait()
         Serve()
     }
 
+    // Option is an application option.
+    Option func(a *app)
+
     gateMetrics struct {
         logger   *zap.Logger
         provider *metrics.GateMetrics

@@ -94,11 +91,10 @@ type (
         reconnectInterval time.Duration
         dialerSource      *internalnet.DialerSource
         workerPoolSize    int
-        logLevelConfig    *logLevelConfig
 
         mu                 sync.RWMutex
         defaultTimestamp   bool
-        archiveCompression bool
+        zipCompression     bool
         clientCut          bool
         returnIndexPage    bool
         indexPageTemplate  string

@@ -111,16 +107,6 @@ type (
         corsExposeHeaders    []string
         corsAllowCredentials bool
         corsMaxAge           int
-        enableFilepathFallback bool
-    }
-
-    tagsConfig struct {
-        tagLogs sync.Map
-    }
-
-    logLevelConfig struct {
-        logLevel   zap.AtomicLevel
-        tagsConfig *tagsConfig
     }
 
     CORS struct {

@@ -133,114 +119,55 @@ type (
     }
 )
 
-func newLogLevel(v *viper.Viper) zap.AtomicLevel {
-    ll, err := getLogLevel(v)
-    if err != nil {
-        panic(err.Error())
-    }
-    atomicLogLevel := zap.NewAtomicLevel()
-    atomicLogLevel.SetLevel(ll)
-    return atomicLogLevel
-}
-
-func newTagsConfig(v *viper.Viper, ll zapcore.Level) *tagsConfig {
-    var t tagsConfig
-    if err := t.update(v, ll); err != nil {
-        // panic here is analogue of the similar panic during common log level initialization.
-        panic(err.Error())
-    }
-    return &t
-}
-
-func newLogLevelConfig(lvl zap.AtomicLevel, tagsConfig *tagsConfig) *logLevelConfig {
-    return &logLevelConfig{
-        logLevel:   lvl,
-        tagsConfig: tagsConfig,
-    }
-}
-
-func (l *logLevelConfig) update(cfg *viper.Viper, log *zap.Logger) {
-    if lvl, err := getLogLevel(cfg); err != nil {
-        log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
-    } else {
-        l.logLevel.SetLevel(lvl)
-    }
-
-    if err := l.tagsConfig.update(cfg, l.logLevel.Level()); err != nil {
-        log.Warn(logs.TagsLogConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
-    }
-}
-
-func (t *tagsConfig) LevelEnabled(tag string, tgtLevel zapcore.Level) bool {
-    lvl, ok := t.tagLogs.Load(tag)
-    if !ok {
-        return false
-    }
-
-    return lvl.(zapcore.Level).Enabled(tgtLevel)
-}
-
-func (t *tagsConfig) update(cfg *viper.Viper, ll zapcore.Level) error {
-    tags, err := fetchLogTagsConfig(cfg, ll)
-    if err != nil {
-        return err
-    }
-
-    t.tagLogs.Range(func(key, value any) bool {
-        k := key.(string)
-        v := value.(zapcore.Level)
-
-        if lvl, ok := tags[k]; ok {
-            if lvl != v {
-                t.tagLogs.Store(key, lvl)
-            }
-        } else {
-            t.tagLogs.Delete(key)
-            delete(tags, k)
-        }
-        return true
-    })
-
-    for k, v := range tags {
-        t.tagLogs.Store(k, v)
-    }
-
-    return nil
-}
-
-func newApp(ctx context.Context, cfg *appCfg) App {
-    logSettings := &loggerSettings{}
-    logLevel := newLogLevel(cfg.config())
-    tagConfig := newTagsConfig(cfg.config(), logLevel.Level())
-    logConfig := newLogLevelConfig(logLevel, tagConfig)
-    log := pickLogger(cfg.config(), logConfig.logLevel, logSettings, tagConfig)
-
-    a := &app{
-        ctx:            ctx,
-        log:            log.logger,
-        cfg:            cfg,
-        loggerSettings: logSettings,
-        webServer:      new(fasthttp.Server),
-        webDone:        make(chan struct{}),
-        bucketCache:    cache.NewBucketCache(getBucketCacheOptions(cfg.config(), log.logger), cfg.config().GetBool(cfgFeaturesTreePoolNetmapSupport)),
-    }
-
-    a.initAppSettings(logConfig)
+// WithLogger returns Option to set a specific logger.
+func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
+    return func(a *app) {
+        if l == nil {
+            return
+        }
+        a.log = l
+        a.logLevel = lvl
+    }
+}
+
+// WithConfig returns Option to use specific Viper configuration.
+func WithConfig(c *viper.Viper) Option {
+    return func(a *app) {
+        if c == nil {
+            return
+        }
+        a.cfg = c
+    }
+}
+
+func newApp(ctx context.Context, opt ...Option) App {
+    a := &app{
+        ctx:       ctx,
+        log:       zap.L(),
+        cfg:       viper.GetViper(),
+        webServer: new(fasthttp.Server),
+        webDone:   make(chan struct{}),
+    }
+    for i := range opt {
+        opt[i](a)
+    }
+
+    a.initAppSettings()
 
     // -- setup FastHTTP server --
     a.webServer.Name = "frost-http-gw"
-    a.webServer.ReadBufferSize = a.config().GetInt(cfgWebReadBufferSize)
-    a.webServer.WriteBufferSize = a.config().GetInt(cfgWebWriteBufferSize)
-    a.webServer.ReadTimeout = a.config().GetDuration(cfgWebReadTimeout)
-    a.webServer.WriteTimeout = a.config().GetDuration(cfgWebWriteTimeout)
+    a.webServer.ReadBufferSize = a.cfg.GetInt(cfgWebReadBufferSize)
+    a.webServer.WriteBufferSize = a.cfg.GetInt(cfgWebWriteBufferSize)
+    a.webServer.ReadTimeout = a.cfg.GetDuration(cfgWebReadTimeout)
+    a.webServer.WriteTimeout = a.cfg.GetDuration(cfgWebWriteTimeout)
     a.webServer.DisableHeaderNamesNormalizing = true
     a.webServer.NoDefaultServerHeader = true
     a.webServer.NoDefaultContentType = true
-    a.webServer.MaxRequestBodySize = a.config().GetInt(cfgWebMaxRequestBodySize)
+    a.webServer.MaxRequestBodySize = a.cfg.GetInt(cfgWebMaxRequestBodySize)
     a.webServer.DisablePreParseMultipartForm = true
-    a.webServer.StreamRequestBody = a.config().GetBool(cfgWebStreamRequestBody)
+    a.webServer.StreamRequestBody = a.cfg.GetBool(cfgWebStreamRequestBody)
     // -- -- -- -- -- -- -- -- -- -- -- -- -- --
-    a.initPools(ctx)
+    a.pool, a.treePool, a.key = getPools(ctx, a.log, a.cfg, a.settings.dialerSource)
 
     var owner user.ID
     user.IDFromKey(&owner, a.key.PrivateKey.PublicKey)
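
The WithLogger/WithConfig constructors on the v0.31.0 side use the functional-options pattern. A self-contained sketch of that pattern with purely illustrative names (server, withName, withPort are not from this codebase):

```go
package main

import "fmt"

type server struct {
	name string
	port int
}

// option mirrors the Option func(a *app) shape in the diff.
type option func(*server)

func withName(name string) option {
	return func(s *server) { s.name = name }
}

func withPort(port int) option {
	return func(s *server) {
		if port > 0 { // guard against bad input, like the nil checks in WithLogger/WithConfig
			s.port = port
		}
	}
}

func newServer(opts ...option) *server {
	// Defaults play the role of zap.L() / viper.GetViper() in newApp.
	s := &server{name: "default", port: 8080}
	for _, opt := range opts {
		opt(s)
	}
	return s
}

func main() {
	fmt.Printf("%+v\n", *newServer(withName("gw"), withPort(0))) // {name:gw port:8080}
}
```

The pattern keeps the constructor signature stable while letting callers override only what they need; the master side drops it in favor of an explicit appCfg parameter.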

@@ -255,23 +182,18 @@ func newApp(ctx context.Context, cfg *appCfg) App {
     return a
 }
 
-func (a *app) config() *viper.Viper {
-    return a.cfg.config()
-}
-
-func (a *app) initAppSettings(lc *logLevelConfig) {
+func (a *app) initAppSettings() {
     a.settings = &appSettings{
-        reconnectInterval: fetchReconnectInterval(a.config()),
-        dialerSource:      getDialerSource(a.log, a.config()),
-        workerPoolSize:    a.config().GetInt(cfgWorkerPoolSize),
-        logLevelConfig:    lc,
+        reconnectInterval: fetchReconnectInterval(a.cfg),
+        dialerSource:      getDialerSource(a.log, a.cfg),
+        workerPoolSize:    a.cfg.GetInt(cfgWorkerPoolSize),
     }
-    a.settings.update(a.config(), a.log)
+    a.settings.update(a.cfg, a.log)
 }
 
 func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
     defaultTimestamp := v.GetBool(cfgUploaderHeaderEnableDefaultTimestamp)
-    archiveCompression := fetchArchiveCompression(v)
+    zipCompression := v.GetBool(cfgZipCompression)
     returnIndexPage := v.GetBool(cfgIndexPageEnabled)
     clientCut := v.GetBool(cfgClientCut)
     bufferMaxSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)

@@ -284,13 +206,12 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
     corsExposeHeaders := v.GetStringSlice(cfgCORSExposeHeaders)
     corsAllowCredentials := v.GetBool(cfgCORSAllowCredentials)
     corsMaxAge := fetchCORSMaxAge(v)
-    enableFilepathFallback := v.GetBool(cfgFeaturesEnableFilepathFallback)
 
     s.mu.Lock()
     defer s.mu.Unlock()
 
     s.defaultTimestamp = defaultTimestamp
-    s.archiveCompression = archiveCompression
+    s.zipCompression = zipCompression
     s.returnIndexPage = returnIndexPage
     s.clientCut = clientCut
     s.bufferMaxSizeForPut = bufferMaxSizeForPut

@@ -304,23 +225,6 @@ func (s *appSettings) update(v *viper.Viper, l *zap.Logger) {
     s.corsExposeHeaders = corsExposeHeaders
     s.corsAllowCredentials = corsAllowCredentials
     s.corsMaxAge = corsMaxAge
-    s.enableFilepathFallback = enableFilepathFallback
-}
-
-func (s *loggerSettings) DroppedLogsInc() {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-
-    if s.appMetrics != nil {
-        s.appMetrics.DroppedLogsInc()
-    }
-}
-
-func (s *loggerSettings) setMetrics(appMetrics *metrics.GateMetrics) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    s.appMetrics = appMetrics
 }
 
 func (s *appSettings) DefaultTimestamp() bool {

@@ -329,10 +233,10 @@ func (s *appSettings) DefaultTimestamp() bool {
     return s.defaultTimestamp
 }
 
-func (s *appSettings) ArchiveCompression() bool {
+func (s *appSettings) ZipCompression() bool {
     s.mu.RLock()
     defer s.mu.RUnlock()
-    return s.archiveCompression
+    return s.zipCompression
 }
 
 func (s *appSettings) IndexPageEnabled() bool {
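
Both sides of this diff guard appSettings with a sync.RWMutex: new values are computed outside the lock in update(), swapped in under Lock, and read through per-field getters under RLock. A stripped-down sketch of that pattern:

```go
package main

import (
	"fmt"
	"sync"
)

type settings struct {
	mu             sync.RWMutex
	zipCompression bool
}

// update swaps in pre-computed values under the write lock,
// mirroring appSettings.update in the diff.
func (s *settings) update(zip bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.zipCompression = zip
}

// ZipCompression is a read-side getter under the read lock,
// like the ZipCompression/ArchiveCompression getters above.
func (s *settings) ZipCompression() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.zipCompression
}

func main() {
	s := &settings{}
	s.update(true)
	fmt.Println(s.ZipCompression()) // true
}
```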

@@ -402,36 +306,29 @@ func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool)
     return ns + ".ns", false
 }
 
-func (s *appSettings) EnableFilepathFallback() bool {
-    s.mu.RLock()
-    defer s.mu.RUnlock()
-    return s.enableFilepathFallback
-}
-
 func (a *app) initResolver() {
     var err error
     a.resolver, err = resolver.NewContainerResolver(a.getResolverConfig())
     if err != nil {
-        a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Fatal(logs.FailedToCreateResolver, zap.Error(err))
     }
 }
 
 func (a *app) getResolverConfig() ([]string, *resolver.Config) {
     resolveCfg := &resolver.Config{
         FrostFS:    frostfs.NewResolverFrostFS(a.pool),
-        RPCAddress: a.config().GetString(cfgRPCEndpoint),
+        RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
         Settings:   a.settings,
     }
 
-    order := a.config().GetStringSlice(cfgResolveOrder)
+    order := a.cfg.GetStringSlice(cfgResolveOrder)
     if resolveCfg.RPCAddress == "" {
         order = remove(order, resolver.NNSResolver)
-        a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided, logs.TagField(logs.TagApp))
+        a.log.Warn(logs.ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided)
    }
 
     if len(order) == 0 {
-        a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty,
-            logs.TagField(logs.TagApp))
+        a.log.Info(logs.ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty)
     }
 
     return order, resolveCfg
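
getResolverConfig drops the NNS resolver from the resolve order when no RPC endpoint is configured. The remove helper itself is not shown in this diff; a plausible order-preserving sketch of what such a helper does:

```go
package main

import "fmt"

// remove returns list without any occurrence of value, keeping order.
// This is an assumed implementation; the real one is outside this diff.
func remove(list []string, value string) []string {
	out := make([]string, 0, len(list))
	for _, v := range list {
		if v != value {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	fmt.Println(remove([]string{"nns", "dns"}, "nns")) // [dns]
}
```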

@@ -439,14 +336,13 @@ func (a *app) getResolverConfig() ([]string, *resolver.Config) {
 
 func (a *app) initMetrics() {
     gateMetricsProvider := metrics.NewGateMetrics(a.pool)
-    a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.config().GetBool(cfgPrometheusEnabled))
+    a.metrics = newGateMetrics(a.log, gateMetricsProvider, a.cfg.GetBool(cfgPrometheusEnabled))
     a.metrics.SetHealth(metrics.HealthStatusStarting)
-    a.loggerSettings.setMetrics(a.metrics.provider)
 }
 
 func newGateMetrics(logger *zap.Logger, provider *metrics.GateMetrics, enabled bool) *gateMetrics {
     if !enabled {
-        logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
+        logger.Warn(logs.MetricsAreDisabled)
     }
     return &gateMetrics{
         logger: logger,

@@ -464,7 +360,7 @@ func (m *gateMetrics) isEnabled() bool {
 
 func (m *gateMetrics) SetEnabled(enabled bool) {
     if !enabled {
-        m.logger.Warn(logs.MetricsAreDisabled, logs.TagField(logs.TagApp))
+        m.logger.Warn(logs.MetricsAreDisabled)
     }
 
     m.mu.Lock()

@@ -527,7 +423,7 @@ func getFrostFSKey(cfg *viper.Viper, log *zap.Logger) (*keys.PrivateKey, error)
     walletPath := cfg.GetString(cfgWalletPath)
 
     if len(walletPath) == 0 {
-        log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun, logs.TagField(logs.TagApp))
+        log.Info(logs.NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun)
         key, err := keys.NewPrivateKey()
         if err != nil {
             return nil, err

@@ -584,10 +480,7 @@ func getKeyFromWallet(w *wallet.Wallet, addrStr string, password *string) (*keys
 }
 
 func (a *app) Wait() {
-    a.log.Info(logs.StartingApplication,
-        zap.String("app_name", "frostfs-http-gw"),
-        zap.String("version", Version),
-        logs.TagField(logs.TagApp))
+    a.log.Info(logs.StartingApplication, zap.String("app_name", "frostfs-http-gw"), zap.String("version", Version))
 
     a.metrics.SetVersion(Version)
     a.setHealthStatus()

@@ -606,10 +499,10 @@ func (a *app) Serve() {
         close(a.webDone)
     }()
 
-    handle := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
+    handler := handler.New(a.AppParams(), a.settings, tree.NewTree(frostfs.NewPoolWrapper(a.treePool)), workerPool)
 
     // Configure router.
-    a.configureRouter(handle)
+    a.configureRouter(handler)
 
     a.startServices()
     a.initServers(a.ctx)

@@ -618,10 +511,10 @@ func (a *app) Serve() {
 
     for i := range servs {
         go func(i int) {
-            a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()), logs.TagField(logs.TagApp))
+            a.log.Info(logs.StartingServer, zap.String("address", servs[i].Address()))
             if err := a.webServer.Serve(servs[i].Listener()); err != nil && err != http.ErrServerClosed {
                 a.metrics.MarkUnhealthy(servs[i].Address())
-                a.log.Fatal(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
+                a.log.Fatal(logs.ListenAndServe, zap.Error(err))
             }
         }(i)
     }

@@ -643,7 +536,7 @@ LOOP:
         }
     }
 
-    a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()), logs.TagField(logs.TagApp))
+    a.log.Info(logs.ShuttingDownWebServer, zap.Error(a.webServer.Shutdown()))
 
     a.metrics.Shutdown()
     a.stopServices()

@@ -653,7 +546,7 @@ LOOP:
 func (a *app) initWorkerPool() *ants.Pool {
     workerPool, err := ants.NewPool(a.settings.workerPoolSize)
     if err != nil {
-        a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Fatal(logs.FailedToCreateWorkerPool, zap.Error(err))
     }
     return workerPool
 }
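
initWorkerPool sizes an ants goroutine pool from config. A minimal usage sketch of the same library; the pool size and task count here are arbitrary:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/panjf2000/ants/v2"
)

func main() {
	// Create a bounded worker pool, as initWorkerPool does with workerPoolSize.
	pool, err := ants.NewPool(4)
	if err != nil {
		panic(err)
	}
	defer pool.Release()

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		i := i // capture the loop variable for the closure
		if err := pool.Submit(func() {
			defer wg.Done()
			fmt.Println("task", i)
		}); err != nil {
			panic(err)
		}
	}
	wg.Wait()
}
```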

@@ -664,33 +557,37 @@ func (a *app) shutdownTracing() {
     defer cancel()
 
     if err := tracing.Shutdown(shdnCtx); err != nil {
-        a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Warn(logs.FailedToShutdownTracing, zap.Error(err))
     }
 }
 
 func (a *app) configReload(ctx context.Context) {
-    a.log.Info(logs.SIGHUPConfigReloadStarted, logs.TagField(logs.TagApp))
-    if !a.config().IsSet(cmdConfig) && !a.config().IsSet(cmdConfigDir) {
-        a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed, logs.TagField(logs.TagApp))
+    a.log.Info(logs.SIGHUPConfigReloadStarted)
+    if !a.cfg.IsSet(cmdConfig) && !a.cfg.IsSet(cmdConfigDir) {
+        a.log.Warn(logs.FailedToReloadConfigBecauseItsMissed)
         return
     }
-    if err := a.cfg.reload(); err != nil {
-        a.log.Warn(logs.FailedToReloadConfig, zap.Error(err), logs.TagField(logs.TagApp))
+    if err := readInConfig(a.cfg); err != nil {
+        a.log.Warn(logs.FailedToReloadConfig, zap.Error(err))
         return
     }
 
-    a.settings.logLevelConfig.update(a.cfg.settings, a.log)
+    if lvl, err := getLogLevel(a.cfg); err != nil {
+        a.log.Warn(logs.LogLevelWontBeUpdated, zap.Error(err))
+    } else {
+        a.logLevel.SetLevel(lvl)
+    }
 
-    if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.config(), a.log)); err != nil {
-        a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err), logs.TagField(logs.TagApp))
+    if err := a.settings.dialerSource.Update(fetchMultinetConfig(a.cfg, a.log)); err != nil {
+        a.log.Warn(logs.MultinetConfigWontBeUpdated, zap.Error(err))
     }
 
     if err := a.resolver.UpdateResolvers(a.getResolverConfig()); err != nil {
-        a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Warn(logs.FailedToUpdateResolvers, zap.Error(err))
     }
 
     if err := a.updateServers(); err != nil {
-        a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Warn(logs.FailedToReloadServerParameters, zap.Error(err))
     }
 
     a.setRuntimeParameters()
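
configReload is driven by SIGHUP. A minimal sketch of the signal plumbing that would invoke such a method; reload here is a stand-in for the real reread-and-reapply logic, not code from this repository:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Subscribe to SIGHUP; each delivery triggers one reload pass.
	sighup := make(chan os.Signal, 1)
	signal.Notify(sighup, syscall.SIGHUP)

	reload := func() {
		// stand-in for configReload: re-read config, update settings, log outcome
		fmt.Println("SIGHUP config reload started / completed")
	}

	// Blocks forever, handling reloads as signals arrive (send SIGHUP to test).
	for range sighup {
		reload()
	}
}
```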

@@ -698,24 +595,22 @@ func (a *app) configReload(ctx context.Context) {
     a.stopServices()
     a.startServices()
 
-    a.settings.update(a.config(), a.log)
+    a.settings.update(a.cfg, a.log)
 
-    a.metrics.SetEnabled(a.config().GetBool(cfgPrometheusEnabled))
+    a.metrics.SetEnabled(a.cfg.GetBool(cfgPrometheusEnabled))
     a.initTracing(ctx)
     a.setHealthStatus()
 
-    a.log.Info(logs.SIGHUPConfigReloadCompleted, logs.TagField(logs.TagApp))
+    a.log.Info(logs.SIGHUPConfigReloadCompleted)
 }
 
 func (a *app) startServices() {
-    a.services = a.services[:0]
-
-    pprofConfig := metrics.Config{Enabled: a.config().GetBool(cfgPprofEnabled), Address: a.config().GetString(cfgPprofAddress)}
+    pprofConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPprofEnabled), Address: a.cfg.GetString(cfgPprofAddress)}
     pprofService := metrics.NewPprofService(a.log, pprofConfig)
     a.services = append(a.services, pprofService)
     go pprofService.Start()
 
-    prometheusConfig := metrics.Config{Enabled: a.config().GetBool(cfgPrometheusEnabled), Address: a.config().GetString(cfgPrometheusAddress)}
+    prometheusConfig := metrics.Config{Enabled: a.cfg.GetBool(cfgPrometheusEnabled), Address: a.cfg.GetString(cfgPrometheusAddress)}
     prometheusService := metrics.NewPrometheusService(a.log, prometheusConfig)
     a.services = append(a.services, prometheusService)
     go prometheusService.Start()

@@ -730,32 +625,30 @@ func (a *app) stopServices() {
     }
 }
 
-func (a *app) configureRouter(h *handler.Handler) {
+func (a *app) configureRouter(handler *handler.Handler) {
     r := router.New()
     r.RedirectTrailingSlash = true
     r.NotFound = func(r *fasthttp.RequestCtx) {
-        handler.ResponseError(r, "Not found", fasthttp.StatusNotFound)
+        response.Error(r, "Not found", fasthttp.StatusNotFound)
     }
     r.MethodNotAllowed = func(r *fasthttp.RequestCtx) {
-        handler.ResponseError(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
+        response.Error(r, "Method Not Allowed", fasthttp.StatusMethodNotAllowed)
     }
 
-    r.POST("/upload/{cid}", a.addMiddlewares(h.Upload))
+    r.POST("/upload/{cid}", a.addMiddlewares(handler.Upload))
     r.OPTIONS("/upload/{cid}", a.addPreflight())
-    a.log.Info(logs.AddedPathUploadCid, logs.TagField(logs.TagApp))
-    r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(h.DownloadByAddressOrBucketName))
-    r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(h.HeadByAddressOrBucketName))
+    a.log.Info(logs.AddedPathUploadCid)
+    r.GET("/get/{cid}/{oid:*}", a.addMiddlewares(handler.DownloadByAddressOrBucketName))
+    r.HEAD("/get/{cid}/{oid:*}", a.addMiddlewares(handler.HeadByAddressOrBucketName))
     r.OPTIONS("/get/{cid}/{oid:*}", a.addPreflight())
-    a.log.Info(logs.AddedPathGetCidOid, logs.TagField(logs.TagApp))
-    r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.DownloadByAttribute))
-    r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(h.HeadByAttribute))
+    a.log.Info(logs.AddedPathGetCidOid)
+    r.GET("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.DownloadByAttribute))
+    r.HEAD("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addMiddlewares(handler.HeadByAttribute))
     r.OPTIONS("/get_by_attribute/{cid}/{attr_key}/{attr_val:*}", a.addPreflight())
-    a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal, logs.TagField(logs.TagApp))
-    r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadZip))
+    a.log.Info(logs.AddedPathGetByAttributeCidAttrKeyAttrVal)
+    r.GET("/zip/{cid}/{prefix:*}", a.addMiddlewares(handler.DownloadZipped))
     r.OPTIONS("/zip/{cid}/{prefix:*}", a.addPreflight())
-    r.GET("/tar/{cid}/{prefix:*}", a.addMiddlewares(h.DownloadTar))
-    r.OPTIONS("/tar/{cid}/{prefix:*}", a.addPreflight())
-    a.log.Info(logs.AddedPathZipCidPrefix, logs.TagField(logs.TagApp))
+    a.log.Info(logs.AddedPathZipCidPrefix)
 
     a.webServer.Handler = r.Handler
 }
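
configureRouter wires fasthttp/router handlers through middleware wrappers (addMiddlewares in the diff). A runnable sketch of that shape with a placeholder path, port, and a trivial logging middleware; it is not the gateway's actual handler chain:

```go
package main

import (
	"github.com/fasthttp/router"
	"github.com/valyala/fasthttp"
)

// logging wraps a handler, like the middleware chain built by addMiddlewares.
func logging(h fasthttp.RequestHandler) fasthttp.RequestHandler {
	return func(ctx *fasthttp.RequestCtx) {
		// a real middleware would attach a request logger/context here
		h(ctx)
	}
}

func main() {
	r := router.New()
	r.RedirectTrailingSlash = true
	r.GET("/get/{cid}/{oid:*}", logging(func(ctx *fasthttp.RequestCtx) {
		// path parameters are exposed as user values by the router
		ctx.WriteString("cid=" + ctx.UserValue("cid").(string))
	}))
	if err := fasthttp.ListenAndServe(":8082", r.Handler); err != nil {
		panic(err)
	}
}
```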

@@ -844,11 +737,14 @@ func (a *app) logger(h fasthttp.RequestHandler) fasthttp.RequestHandler {
         reqCtx = utils.SetReqLog(reqCtx, log)
         utils.SetContextToRequest(reqCtx, req)
 
-        log.Info(logs.Request, zap.String("remote", req.RemoteAddr().String()),
+        fields := []zap.Field{
+            zap.String("remote", req.RemoteAddr().String()),
             zap.ByteString("method", req.Method()),
             zap.ByteString("path", req.Path()),
             zap.ByteString("query", req.QueryArgs().QueryString()),
-            logs.TagField(logs.TagDatapath))
+        }
+
+        log.Info(logs.Request, fields...)
         h(req)
     }
 }

@@ -892,8 +788,8 @@ func (a *app) tokenizer(h fasthttp.RequestHandler) fasthttp.RequestHandler {
         if err != nil {
             log := utils.GetReqLogOrDefault(reqCtx, a.log)
 
-            log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err), logs.TagField(logs.TagDatapath))
-            handler.ResponseError(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
+            log.Error(logs.CouldNotFetchAndStoreBearerToken, zap.Error(err))
+            response.Error(req, "could not fetch and store bearer token: "+err.Error(), fasthttp.StatusBadRequest)
             return
         }
         utils.SetContextToRequest(appCtx, req)

@@ -934,12 +830,12 @@ func (a *app) AppParams() *handler.AppParams {
         FrostFS:  frostfs.NewFrostFS(a.pool),
         Owner:    a.owner,
         Resolver: a.resolver,
-        Cache:    a.bucketCache,
+        Cache:    cache.NewBucketCache(getCacheOptions(a.cfg, a.log)),
     }
 }
 
 func (a *app) initServers(ctx context.Context) {
-    serversInfo := fetchServers(a.config(), a.log)
+    serversInfo := fetchServers(a.cfg, a.log)
 
     a.servers = make([]Server, 0, len(serversInfo))
     for _, serverInfo := range serversInfo {

@@ -951,22 +847,22 @@ func (a *app) initServers(ctx context.Context) {
         if err != nil {
             a.unbindServers = append(a.unbindServers, serverInfo)
             a.metrics.MarkUnhealthy(serverInfo.Address)
-            a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err), logs.TagField(logs.TagApp))...)
+            a.log.Warn(logs.FailedToAddServer, append(fields, zap.Error(err))...)
             continue
         }
         a.metrics.MarkHealthy(serverInfo.Address)
 
         a.servers = append(a.servers, srv)
-        a.log.Info(logs.AddServer, append(fields, logs.TagField(logs.TagApp))...)
+        a.log.Info(logs.AddServer, fields...)
     }
 
     if len(a.servers) == 0 {
-        a.log.Fatal(logs.NoHealthyServers, logs.TagField(logs.TagApp))
+        a.log.Fatal(logs.NoHealthyServers)
     }
 }
 
 func (a *app) updateServers() error {
-    serversInfo := fetchServers(a.config(), a.log)
+    serversInfo := fetchServers(a.cfg, a.log)
 
     a.mu.Lock()
     defer a.mu.Unlock()

@@ -979,8 +875,8 @@ func (a *app) updateServers() error {
             if err := ser.UpdateCert(serverInfo.TLS.CertFile, serverInfo.TLS.KeyFile); err != nil {
                 return fmt.Errorf("failed to update tls certs: %w", err)
             }
-            }
             found = true
+            }
         } else if unbind := a.updateUnbindServerInfo(serverInfo); unbind {
             found = true
         }

@@ -1024,60 +920,58 @@ func (a *app) initTracing(ctx context.Context) {
         instanceID = a.servers[0].Address()
     }
     cfg := tracing.Config{
-        Enabled:   a.config().GetBool(cfgTracingEnabled),
-        Exporter:  tracing.Exporter(a.config().GetString(cfgTracingExporter)),
-        Endpoint:  a.config().GetString(cfgTracingEndpoint),
+        Enabled:   a.cfg.GetBool(cfgTracingEnabled),
+        Exporter:  tracing.Exporter(a.cfg.GetString(cfgTracingExporter)),
+        Endpoint:  a.cfg.GetString(cfgTracingEndpoint),
         Service:    "frostfs-http-gw",
         InstanceID: instanceID,
         Version:    Version,
     }
 
-    if trustedCa := a.config().GetString(cfgTracingTrustedCa); trustedCa != "" {
+    if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
         caBytes, err := os.ReadFile(trustedCa)
         if err != nil {
-            a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
+            a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
             return
         }
         certPool := x509.NewCertPool()
         ok := certPool.AppendCertsFromPEM(caBytes)
         if !ok {
-            a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"),
-                logs.TagField(logs.TagApp))
+            a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
             return
         }
         cfg.ServerCaCertPool = certPool
     }
 
-    attributes, err := fetchTracingAttributes(a.config())
+    attributes, err := fetchTracingAttributes(a.cfg)
     if err != nil {
-        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
         return
     }
     cfg.Attributes = attributes
 
     updated, err := tracing.Setup(ctx, cfg)
     if err != nil {
-        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err), logs.TagField(logs.TagApp))
+        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
     }
     if updated {
-        a.log.Info(logs.TracingConfigUpdated, logs.TagField(logs.TagApp))
+        a.log.Info(logs.TracingConfigUpdated)
     }
 }
 
 func (a *app) setRuntimeParameters() {
     if len(os.Getenv("GOMEMLIMIT")) != 0 {
         // default limit < yaml limit < app env limit < GOMEMLIMIT
-        a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT, logs.TagField(logs.TagApp))
+        a.log.Warn(logs.RuntimeSoftMemoryDefinedWithGOMEMLIMIT)
         return
     }
 
-    softMemoryLimit := fetchSoftMemoryLimit(a.config())
+    softMemoryLimit := fetchSoftMemoryLimit(a.cfg)
     previous := debug.SetMemoryLimit(softMemoryLimit)
     if softMemoryLimit != previous {
         a.log.Info(logs.RuntimeSoftMemoryLimitUpdated,
             zap.Int64("new_value", softMemoryLimit),
-            zap.Int64("old_value", previous),
-            logs.TagField(logs.TagApp))
+            zap.Int64("old_value", previous))
     }
 }
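
initTracing optionally loads a trusted CA bundle into an x509.CertPool before enabling the exporter. The same standard-library steps in isolation, with "ca.pem" as a placeholder path:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

func main() {
	// Read the PEM bundle, as initTracing does with the configured trusted CA path.
	caBytes, err := os.ReadFile("ca.pem")
	if err != nil {
		panic(err)
	}

	// Build a cert pool; AppendCertsFromPEM reports whether any cert was parsed,
	// which is exactly the !ok branch logged as "can't fill cert pool by ca cert".
	pool := x509.NewCertPool()
	if ok := pool.AppendCertsFromPEM(caBytes); !ok {
		panic("can't fill cert pool by ca cert")
	}

	// The pool then backs TLS verification for the tracing exporter connection.
	cfg := &tls.Config{RootCAs: pool}
	fmt.Println("pool ready:", cfg.RootCAs != nil)
}
```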

@@ -1103,32 +997,34 @@ func (a *app) tryReconnect(ctx context.Context, sr *fasthttp.Server) bool {
     a.mu.Lock()
     defer a.mu.Unlock()
 
-    a.log.Info(logs.ServerReconnecting, logs.TagField(logs.TagApp))
+    a.log.Info(logs.ServerReconnecting)
     var failedServers []ServerInfo
 
     for _, serverInfo := range a.unbindServers {
+        fields := []zap.Field{
+            zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
+            zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
+        }
+
         srv, err := newServer(ctx, serverInfo)
         if err != nil {
-            a.log.Warn(logs.ServerReconnectFailed, zap.Error(err), logs.TagField(logs.TagApp))
+            a.log.Warn(logs.ServerReconnectFailed, zap.Error(err))
             failedServers = append(failedServers, serverInfo)
             a.metrics.MarkUnhealthy(serverInfo.Address)
             continue
         }
 
         go func() {
-            a.log.Info(logs.StartingServer, zap.String("address", srv.Address()), logs.TagField(logs.TagApp))
+            a.log.Info(logs.StartingServer, zap.String("address", srv.Address()))
             a.metrics.MarkHealthy(serverInfo.Address)
             if err = sr.Serve(srv.Listener()); err != nil && !errors.Is(err, http.ErrServerClosed) {
-                a.log.Warn(logs.ListenAndServe, zap.Error(err), logs.TagField(logs.TagApp))
+                a.log.Warn(logs.ListenAndServe, zap.Error(err))
                 a.metrics.MarkUnhealthy(serverInfo.Address)
             }
         }()
 
         a.servers = append(a.servers, srv)
-        a.log.Info(logs.ServerReconnectedSuccessfully,
-            zap.String("address", serverInfo.Address), zap.Bool("tls enabled", serverInfo.TLS.Enabled),
-            zap.String("tls cert", serverInfo.TLS.CertFile), zap.String("tls key", serverInfo.TLS.KeyFile),
-            logs.TagField(logs.TagApp))
+        a.log.Info(logs.ServerReconnectedSuccessfully, fields...)
     }
 
     a.unbindServers = failedServers

@@ -14,11 +14,10 @@ import (
     "net/http"
     "os"
     "sort"
-    "strings"
     "testing"
     "time"
 
-    containerv2 "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/container"
+    containerv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"

@@ -29,12 +28,13 @@ import (
     oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-    docker "github.com/docker/docker/api/types/container"
     "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
     "github.com/nspcc-dev/neo-go/pkg/wallet"
+    "github.com/spf13/viper"
     "github.com/stretchr/testify/require"
     "github.com/testcontainers/testcontainers-go"
     "github.com/testcontainers/testcontainers-go/wait"
+    "go.uber.org/zap/zapcore"
 )
@@ -50,12 +50,11 @@ const (

 func TestIntegration(t *testing.T) {
     rootCtx := context.Background()
-    aioImage := "git.frostfs.info/truecloudlab/frostfs-aio:"
+    aioImage := "truecloudlab/frostfs-aio:"
     versions := []string{
         "1.2.7",
         "1.3.0",
         "1.5.0",
-        "1.6.5",
     }
     key, err := keys.NewPrivateKeyFromHex("1dd37fba80fec4e6a6f13fd708d8dcb3b29def768017052f6c930fa1c5d90bbb")
     require.NoError(t, err)
@@ -72,28 +71,21 @@ func TestIntegration(t *testing.T) {
     ctx, cancel2 := context.WithCancel(rootCtx)

     aioContainer := createDockerContainer(ctx, t, aioImage+version)
-    if strings.HasPrefix(version, "1.6") {
-        registerUser(t, ctx, aioContainer, file.Name())
-    }

-    // See the logs from the command execution.
     server, cancel := runServer(file.Name())
     clientPool := getPool(ctx, t, key)
-    CID, err := createContainer(ctx, t, clientPool, ownerID)
+    CID, err := createContainer(ctx, t, clientPool, ownerID, version)
     require.NoError(t, err, version)

-    jsonToken, binaryToken := makeBearerTokens(t, key, ownerID, version)
+    token := makeBearerToken(t, key, ownerID, version)

-    t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID) })
-    t.Run("put with json bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, jsonToken) })
-    t.Run("put with json bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, jsonToken) })
-    t.Run("put with binary bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, binaryToken) })
-    t.Run("put with binary bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, binaryToken) })
+    t.Run("simple put "+version, func(t *testing.T) { simplePut(ctx, t, clientPool, CID, version) })
+    t.Run("put with bearer token in header"+version, func(t *testing.T) { putWithBearerTokenInHeader(ctx, t, clientPool, CID, token) })
+    t.Run("put with bearer token in cookie"+version, func(t *testing.T) { putWithBearerTokenInCookie(ctx, t, clientPool, CID, token) })
     t.Run("put with duplicate keys "+version, func(t *testing.T) { putWithDuplicateKeys(t, CID) })
-    t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID) })
-    t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID) })
-    t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID) })
-    t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID) })
+    t.Run("simple get "+version, func(t *testing.T) { simpleGet(ctx, t, clientPool, ownerID, CID, version) })
+    t.Run("get by attribute "+version, func(t *testing.T) { getByAttr(ctx, t, clientPool, ownerID, CID, version) })
+    t.Run("get zip "+version, func(t *testing.T) { getZip(ctx, t, clientPool, ownerID, CID, version) })
+    t.Run("test namespaces "+version, func(t *testing.T) { checkNamespaces(ctx, t, clientPool, ownerID, CID, version) })

     cancel()
     server.Wait()
@@ -107,16 +99,17 @@ func runServer(pathToWallet string) (App, context.CancelFunc) {
     cancelCtx, cancel := context.WithCancel(context.Background())

     v := getDefaultConfig()
-    v.config().Set(cfgWalletPath, pathToWallet)
-    v.config().Set(cfgWalletPassphrase, "")
+    v.Set(cfgWalletPath, pathToWallet)
+    v.Set(cfgWalletPassphrase, "")

-    application := newApp(cancelCtx, v)
+    l, lvl := newStdoutLogger(v, zapcore.DebugLevel)
+    application := newApp(cancelCtx, WithConfig(v), WithLogger(l, lvl))
     go application.Serve()

     return application, cancel
 }

-func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID) {
+func simplePut(ctx context.Context, t *testing.T, p *pool.Pool, CID cid.ID, version string) {
     url := testHost + "/upload/" + CID.String()
     makePutRequestAndCheck(ctx, t, p, CID, url)

@@ -264,7 +257,7 @@ func putWithDuplicateKeys(t *testing.T, CID cid.ID) {
     require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 }

-func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
+func simpleGet(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
     content := "content of file"
     attributes := map[string]string{
         "some-attr": "some-get-value",
@@ -311,7 +304,7 @@ func checkGetByAttrResponse(t *testing.T, resp *http.Response, content string, a
     }
 }

-func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
+func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
     keyAttr, valAttr := "some-attr", "some-get-by-attr-value"
     content := "content of file"
     attributes := map[string]string{keyAttr: valAttr}
@@ -333,7 +326,7 @@ func getByAttr(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
     checkGetByAttrResponse(t, resp, content, expectedAttr)
 }

-func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
+func getZip(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
     names := []string{"zipfolder/dir/name1.txt", "zipfolder/name2.txt"}
     contents := []string{"content of file1", "content of file2"}
     attributes1 := map[string]string{object.AttributeFilePath: names[0]}
@@ -398,7 +391,7 @@ func checkZip(t *testing.T, data []byte, length int64, names, contents []string)
     }
 }

-func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID) {
+func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, CID cid.ID, version string) {
     content := "content of file"
     attributes := map[string]string{
         "some-attr": "some-get-value",
@@ -435,12 +428,10 @@ func checkNamespaces(ctx context.Context, t *testing.T, clientPool *pool.Pool, o
 func createDockerContainer(ctx context.Context, t *testing.T, image string) testcontainers.Container {
     req := testcontainers.ContainerRequest{
         Image:      image,
-        WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(2 * time.Minute),
+        WaitingFor: wait.NewLogStrategy("aio container started").WithStartupTimeout(30 * time.Second),
         Name:       "aio",
         Hostname:   "aio",
-        HostConfigModifier: func(hc *docker.HostConfig) {
-            hc.NetworkMode = "host"
-        },
+        NetworkMode: "host",
     }
     aioC, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
         ContainerRequest: req,
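For context on the hunk above: older testcontainers-go releases exposed a top-level NetworkMode field on ContainerRequest (the v0.31.0 side), while newer ones route host configuration through HostConfigModifier (the master side). A minimal sketch of the HostConfigModifier style; the helper name is invented for illustration, the field values come from the test above:

    import (
        docker "github.com/docker/docker/api/types/container"
        "github.com/testcontainers/testcontainers-go"
    )

    // hostNetworkRequest builds a ContainerRequest that shares the host
    // network stack, mirroring the HostConfigModifier usage above.
    func hostNetworkRequest(image string) testcontainers.ContainerRequest {
        return testcontainers.ContainerRequest{
            Image:    image,
            Name:     "aio",
            Hostname: "aio",
            HostConfigModifier: func(hc *docker.HostConfig) {
                // Equivalent to the removed top-level NetworkMode field.
                hc.NetworkMode = "host"
            },
        }
    }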
@@ -451,14 +442,14 @@ func createDockerContainer(ctx context.Context, t *testing.T, image string) test
     return aioC
 }

-func getDefaultConfig() *appCfg {
+func getDefaultConfig() *viper.Viper {
     v := settings()
-    v.config().SetDefault(cfgPeers+".0.address", "localhost:8080")
-    v.config().SetDefault(cfgPeers+".0.weight", 1)
-    v.config().SetDefault(cfgPeers+".0.priority", 1)
+    v.SetDefault(cfgPeers+".0.address", "localhost:8080")
+    v.SetDefault(cfgPeers+".0.weight", 1)
+    v.SetDefault(cfgPeers+".0.priority", 1)

-    v.config().SetDefault(cfgRPCEndpoint, "http://localhost:30333")
-    v.config().SetDefault("server.0.address", testListenAddress)
+    v.SetDefault(cfgRPCEndpoint, "http://localhost:30333")
+    v.SetDefault("server.0.address", testListenAddress)

     return v
 }

@@ -477,7 +468,7 @@ func getPool(ctx context.Context, t *testing.T, key *keys.PrivateKey) *pool.Pool
     return clientPool
 }

-func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID) (cid.ID, error) {
+func createContainer(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID user.ID, version string) (cid.ID, error) {
     var policy netmap.PlacementPolicy
     err := policy.DecodeString("REP 1")
     require.NoError(t, err)
@@ -537,19 +528,7 @@ func putObject(ctx context.Context, t *testing.T, clientPool *pool.Pool, ownerID
     return id.ObjectID
 }

-func registerUser(t *testing.T, ctx context.Context, aioContainer testcontainers.Container, pathToWallet string) {
-    err := aioContainer.CopyFileToContainer(ctx, pathToWallet, "/usr/wallet.json", 644)
-    require.NoError(t, err)
-
-    _, _, err = aioContainer.Exec(ctx, []string{
-        "/usr/bin/frostfs-s3-authmate", "register-user",
-        "--wallet", "/usr/wallet.json",
-        "--rpc-endpoint", "http://localhost:30333",
-        "--contract-wallet", "/config/s3-gw-wallet.json"})
-    require.NoError(t, err)
-}
-
-func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) (jsonTokenBase64, binaryTokenBase64 string) {
+func makeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID, version string) string {
     tkn := new(bearer.Token)
     tkn.ForUser(ownerID)
     tkn.SetExp(10000)
@@ -563,16 +542,10 @@ func makeBearerTokens(t *testing.T, key *keys.PrivateKey, ownerID user.ID, versi
     err := tkn.Sign(key.PrivateKey)
     require.NoError(t, err)

-    jsonToken, err := tkn.MarshalJSON()
-    require.NoError(t, err)
-
-    jsonTokenBase64 = base64.StdEncoding.EncodeToString(jsonToken)
-    binaryTokenBase64 = base64.StdEncoding.EncodeToString(tkn.Marshal())
-
-    require.NotEmpty(t, jsonTokenBase64)
-    require.NotEmpty(t, binaryTokenBase64)
-
-    return
+    t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
+    require.NotEmpty(t, t64)
+
+    return t64
 }

 func makeTempWallet(t *testing.T, key *keys.PrivateKey, path string) {
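The makeBearerTokens variant on master boils down to encoding one signed token twice, since the gateway accepts both a JSON and a binary encoding. A condensed, self-contained sketch of those two encodings, using only calls visible in the hunks above:

    import (
        "encoding/base64"
        "testing"

        "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
        "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
        "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
        "github.com/stretchr/testify/require"
    )

    // encodeBearerToken signs a token and returns its JSON and binary forms,
    // each base64-encoded the way the gateway expects them in a header or cookie.
    func encodeBearerToken(t *testing.T, key *keys.PrivateKey, ownerID user.ID) (jsonB64, binB64 string) {
        tkn := new(bearer.Token)
        tkn.ForUser(ownerID)
        tkn.SetExp(10000)
        require.NoError(t, tkn.Sign(key.PrivateKey))

        jsonToken, err := tkn.MarshalJSON()
        require.NoError(t, err)

        return base64.StdEncoding.EncodeToString(jsonToken),
            base64.StdEncoding.EncodeToString(tkn.Marshal())
    }

Either string is then what the putWithBearerTokenInHeader and putWithBearerTokenInCookie subtests pass to the gateway.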
@@ -1,174 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "os"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-    "git.frostfs.info/TrueCloudLab/zapjournald"
-    "github.com/spf13/viper"
-    "github.com/ssgreg/journald"
-    "go.uber.org/zap"
-    "go.uber.org/zap/zapcore"
-)
-
-func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
-    var lvl zapcore.Level
-    lvlStr := v.GetString(cfgLoggerLevel)
-    err := lvl.UnmarshalText([]byte(lvlStr))
-    if err != nil {
-        return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
-            "value should be one of %v", lvlStr, err, [...]zapcore.Level{
-            zapcore.DebugLevel,
-            zapcore.InfoLevel,
-            zapcore.WarnLevel,
-            zapcore.ErrorLevel,
-            zapcore.DPanicLevel,
-            zapcore.PanicLevel,
-            zapcore.FatalLevel,
-        })
-    }
-    return lvl, nil
-}
-
-var _ zapcore.Core = (*zapCoreTagFilterWrapper)(nil)
-
-type zapCoreTagFilterWrapper struct {
-    core     zapcore.Core
-    settings TagFilterSettings
-    extra    []zap.Field
-}
-
-type TagFilterSettings interface {
-    LevelEnabled(tag string, lvl zapcore.Level) bool
-}
-
-func (c *zapCoreTagFilterWrapper) Enabled(level zapcore.Level) bool {
-    return c.core.Enabled(level)
-}
-
-func (c *zapCoreTagFilterWrapper) With(fields []zapcore.Field) zapcore.Core {
-    return &zapCoreTagFilterWrapper{
-        core:     c.core.With(fields),
-        settings: c.settings,
-        extra:    append(c.extra, fields...),
-    }
-}
-
-func (c *zapCoreTagFilterWrapper) Check(entry zapcore.Entry, checked *zapcore.CheckedEntry) *zapcore.CheckedEntry {
-    if c.core.Enabled(entry.Level) {
-        return checked.AddCore(entry, c)
-    }
-    return checked
-}
-
-func (c *zapCoreTagFilterWrapper) Write(entry zapcore.Entry, fields []zapcore.Field) error {
-    if c.shouldSkip(entry, fields) || c.shouldSkip(entry, c.extra) {
-        return nil
-    }
-
-    return c.core.Write(entry, fields)
-}
-
-func (c *zapCoreTagFilterWrapper) shouldSkip(entry zapcore.Entry, fields []zap.Field) bool {
-    for _, field := range fields {
-        if field.Key == logs.TagFieldName && field.Type == zapcore.StringType {
-            if !c.settings.LevelEnabled(field.String, entry.Level) {
-                return true
-            }
-            break
-        }
-    }
-
-    return false
-}
-
-func (c *zapCoreTagFilterWrapper) Sync() error {
-    return c.core.Sync()
-}
-
-func applyZapCoreMiddlewares(core zapcore.Core, v *viper.Viper, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) zapcore.Core {
-    core = &zapCoreTagFilterWrapper{
-        core:     core,
-        settings: tagSetting,
-    }
-
-    if v.GetBool(cfgLoggerSamplingEnabled) {
-        core = zapcore.NewSamplerWithOptions(core,
-            v.GetDuration(cfgLoggerSamplingInterval),
-            v.GetInt(cfgLoggerSamplingInitial),
-            v.GetInt(cfgLoggerSamplingThereafter),
-            zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
-                if dec&zapcore.LogDropped > 0 {
-                    loggerSettings.DroppedLogsInc()
-                }
-            }))
-    }
-
-    return core
-}
-
-func newLogEncoder() zapcore.Encoder {
-    c := zap.NewProductionEncoderConfig()
-    c.EncodeTime = zapcore.ISO8601TimeEncoder
-
-    return zapcore.NewConsoleEncoder(c)
-}
-
-// newStdoutLogger constructs a zap.Logger instance for current application.
-// Panics on failure.
-//
-// Logger is built from zap's production logging configuration with:
//   - parameterized level (debug by default)
-//   - console encoding
-//   - ISO8601 time encoding
-//
-// Logger records a stack trace for all messages at or above fatal level.
-//
-// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
-func newStdoutLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
-    stdout := zapcore.AddSync(os.Stderr)
-
-    consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, lvl)
-    consoleOutCore = applyZapCoreMiddlewares(consoleOutCore, v, loggerSettings, tagSetting)
-
-    return &Logger{
-        logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
-        lvl:    lvl,
-    }
-}
-
-func newJournaldLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSetting TagFilterSettings) *Logger {
-    encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
-
-    core := zapjournald.NewCore(lvl, encoder, &journald.Journal{}, zapjournald.SyslogFields)
-    coreWithContext := core.With([]zapcore.Field{
-        zapjournald.SyslogFacility(zapjournald.LogDaemon),
-        zapjournald.SyslogIdentifier(),
-        zapjournald.SyslogPid(),
-    })
-
-    coreWithContext = applyZapCoreMiddlewares(coreWithContext, v, loggerSettings, tagSetting)
-
-    return &Logger{
-        logger: zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
-        lvl:    lvl,
-    }
-}
-
-type LoggerAppSettings interface {
-    DroppedLogsInc()
-}
-
-func pickLogger(v *viper.Viper, lvl zap.AtomicLevel, loggerSettings LoggerAppSettings, tagSettings TagFilterSettings) *Logger {
-    dest := v.GetString(cfgLoggerDestination)
-
-    switch dest {
-    case destinationStdout:
-        return newStdoutLogger(v, lvl, loggerSettings, tagSettings)
-    case destinationJournald:
-        return newJournaldLogger(v, lvl, loggerSettings, tagSettings)
-    default:
-        panic(fmt.Sprintf("wrong destination for logger: %s", dest))
-    }
-}
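The logger.go deleted above (the master side of the diff) filters log entries by a tag field before they reach the underlying core. A sketch of how that wrapper would be wired up, assuming it compiles in the same package as the code above; staticTagSettings is a hypothetical TagFilterSettings implementation, not part of the gateway:

    import (
        "os"

        "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
        "go.uber.org/zap"
        "go.uber.org/zap/zapcore"
    )

    // staticTagSettings maps a tag to its minimum enabled level;
    // unknown tags are disabled entirely.
    type staticTagSettings map[string]zapcore.Level

    func (s staticTagSettings) LevelEnabled(tag string, lvl zapcore.Level) bool {
        min, ok := s[tag]
        return ok && lvl >= min
    }

    func tagFilterExample() *zap.Logger {
        base := zapcore.NewCore(
            zapcore.NewConsoleEncoder(zap.NewProductionEncoderConfig()),
            zapcore.AddSync(os.Stderr),
            zap.NewAtomicLevelAt(zapcore.DebugLevel),
        )
        // Wrap the core the same way applyZapCoreMiddlewares does above.
        core := &zapCoreTagFilterWrapper{
            core:     base,
            settings: staticTagSettings{logs.TagApp: zapcore.InfoLevel},
        }
        return zap.New(core)
    }

With such a logger, an info entry carrying logs.TagField(logs.TagApp) is written, while a debug entry with the same tag is dropped by shouldSkip.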
@@ -8,9 +8,10 @@ import (

 func main() {
     globalContext, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
-    cfg := settings()
+    v := settings()
+    logger, atomicLevel := pickLogger(v)

-    application := newApp(globalContext, cfg)
+    application := newApp(globalContext, WithLogger(logger, atomicLevel), WithConfig(v))
     go application.Serve()
     application.Wait()
 }
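v0.31.0 passes dependencies to newApp through functional options (WithConfig, WithLogger), where master hands over a single configuration value. A sketch of that option pattern; the app struct fields are assumptions for illustration, only the option names come from the diff:

    import (
        "context"

        "github.com/spf13/viper"
        "go.uber.org/zap"
    )

    // app's fields here are assumed; the gateway's actual struct is not shown in this diff.
    type app struct {
        ctx      context.Context
        cfg      *viper.Viper
        log      *zap.Logger
        logLevel zap.AtomicLevel
    }

    // Option mutates the application during construction.
    type Option func(a *app)

    func WithConfig(v *viper.Viper) Option {
        return func(a *app) { a.cfg = v }
    }

    func WithLogger(l *zap.Logger, lvl zap.AtomicLevel) Option {
        return func(a *app) { a.log, a.logLevel = l, lvl }
    }

    func newApp(ctx context.Context, opts ...Option) *app {
        a := &app{ctx: ctx}
        for _, opt := range opts {
            opt(a) // apply options in the order given at the call site
        }
        return a
    }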
@@ -12,19 +12,20 @@ import (
     "sort"
     "strconv"
     "strings"
-    "sync"
     "time"

     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
     internalnet "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/net"
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/service/frostfs"
     "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
     grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
+    "git.frostfs.info/TrueCloudLab/zapjournald"
+    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
     "github.com/spf13/pflag"
     "github.com/spf13/viper"
+    "github.com/ssgreg/journald"
     "github.com/valyala/fasthttp"
     "go.uber.org/zap"
     "go.uber.org/zap/zapcore"
@@ -109,11 +110,6 @@ const (
     cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
     cfgLoggerSamplingInterval   = "logger.sampling.interval"

-    cfgLoggerTags           = "logger.tags"
-    cfgLoggerTagsPrefixTmpl = cfgLoggerTags + ".%d."
-    cfgLoggerTagsNameTmpl   = cfgLoggerTagsPrefixTmpl + "name"
-    cfgLoggerTagsLevelTmpl  = cfgLoggerTagsPrefixTmpl + "level"
-
     // Wallet.
     cfgWalletPassphrase = "wallet.passphrase"
     cfgWalletPath       = "wallet.path"
@@ -132,13 +128,8 @@ const (
     cfgResolveOrder = "resolve_order"

     // Zip compression.
-    //
-    // Deprecated: Use cfgArchiveCompression instead.
     cfgZipCompression = "zip.compression"

-    // Archive compression.
-    cfgArchiveCompression = "archive.compression"
-
     // Runtime.
     cfgSoftMemoryLimit = "runtime.soft_memory_limit"

@@ -153,7 +144,6 @@ const (
     // Caching.
     cfgBucketsCacheLifetime = "cache.buckets.lifetime"
     cfgBucketsCacheSize     = "cache.buckets.size"
-    cfgNetmapCacheLifetime  = "cache.netmap.lifetime"

     // Bucket resolving options.
     cfgResolveNamespaceHeader = "resolve_bucket.namespace_header"
@@ -174,10 +164,6 @@ const (
     cfgMultinetFallbackDelay = "multinet.fallback_delay"
     cfgMultinetSubnets       = "multinet.subnets"

-    // Feature.
-    cfgFeaturesEnableFilepathFallback = "features.enable_filepath_fallback"
-    cfgFeaturesTreePoolNetmapSupport  = "features.tree_pool_netmap_support"
-
     // Command line args.
     cmdHelp    = "help"
     cmdVersion = "version"
@@ -196,79 +182,14 @@ var ignore = map[string]struct{}{
     cmdVersion: {},
 }

-var defaultTags = []string{logs.TagApp, logs.TagDatapath, logs.TagExternalStorage, logs.TagExternalStorageTree}
-
-type Logger struct {
-    logger *zap.Logger
-    lvl    zap.AtomicLevel
-}
-
-type appCfg struct {
-    flags *pflag.FlagSet
-
-    mu       sync.RWMutex
-    settings *viper.Viper
-}
-
-func (a *appCfg) reload() error {
-    old := a.config()
-
-    v, err := newViper(a.flags)
-    if err != nil {
-        return err
-    }
-
-    if old.IsSet(cmdConfig) {
-        v.Set(cmdConfig, old.Get(cmdConfig))
-    }
-    if old.IsSet(cmdConfigDir) {
-        v.Set(cmdConfigDir, old.Get(cmdConfigDir))
-    }
-
-    if err = readInConfig(v); err != nil {
-        return err
-    }
-
-    a.setConfig(v)
-    return nil
-}
-
-func (a *appCfg) config() *viper.Viper {
-    a.mu.RLock()
-    defer a.mu.RUnlock()
-
-    return a.settings
-}
-
-func (a *appCfg) setConfig(v *viper.Viper) {
-    a.mu.Lock()
-    a.settings = v
-    a.mu.Unlock()
-}
-
-func newViper(flags *pflag.FlagSet) (*viper.Viper, error) {
+func settings() *viper.Viper {
     v := viper.New()

     v.AutomaticEnv()
     v.SetEnvPrefix(Prefix)
     v.AllowEmptyEnv(true)
     v.SetConfigType("yaml")
     v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))

-    if err := bindFlags(v, flags); err != nil {
-        return nil, err
-    }
-
-    setDefaults(v, flags)
-
-    if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
-        v.Set(cfgServer+".0."+cfgTLSEnabled, true)
-    }
-
-    return v, nil
-}
-
-func settings() *appCfg {
     // flags setup:
     flags := pflag.NewFlagSet("commandline", pflag.ExitOnError)
     flags.SetOutput(os.Stdout)
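The appCfg type added on master (above) pairs the parsed flag set with a mutex-guarded viper instance so the whole configuration can be rebuilt and swapped atomically. A sketch of a reload loop driven by SIGHUP; the signal wiring is an assumption, while reload() and config() come from the code above:

    import (
        "os"
        "os/signal"
        "syscall"

        "go.uber.org/zap"
    )

    // watchConfig rebuilds the configuration on SIGHUP via appCfg.reload().
    // Concurrent readers keep calling cfg.config(), which is RWMutex-protected.
    func watchConfig(cfg *appCfg, log *zap.Logger) {
        sighup := make(chan os.Signal, 1)
        signal.Notify(sighup, syscall.SIGHUP)
        go func() {
            for range sighup {
                if err := cfg.reload(); err != nil {
                    // Keep serving with the previous config on failure.
                    log.Warn("config reload failed", zap.Error(err))
                }
            }
        }()
    }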
@@ -292,17 +213,92 @@ func settings() *appCfg {
     flags.String(cmdListenAddress, "0.0.0.0:8080", "addresses to listen")
     flags.String(cfgTLSCertFile, "", "TLS certificate path")
     flags.String(cfgTLSKeyFile, "", "TLS key path")
-    flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")
+    peers := flags.StringArrayP(cfgPeers, "p", nil, "FrostFS nodes")

-    flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")
+    resolveMethods := flags.StringSlice(cfgResolveOrder, []string{resolver.NNSResolver, resolver.DNSResolver}, "set container name resolve order")

+    // set defaults:
+
+    // logger:
+    v.SetDefault(cfgLoggerLevel, "debug")
+    v.SetDefault(cfgLoggerDestination, "stdout")
+    v.SetDefault(cfgLoggerSamplingEnabled, false)
+    v.SetDefault(cfgLoggerSamplingThereafter, 100)
+    v.SetDefault(cfgLoggerSamplingInitial, 100)
+    v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
+
+    // pool:
+    v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
+
+    // frostfs:
+    v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
+
+    // web-server:
+    v.SetDefault(cfgWebReadBufferSize, 4096)
+    v.SetDefault(cfgWebWriteBufferSize, 4096)
+    v.SetDefault(cfgWebReadTimeout, time.Minute*10)
+    v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
+    v.SetDefault(cfgWebStreamRequestBody, true)
+    v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
+
+    v.SetDefault(cfgWorkerPoolSize, 1000)
+    // upload header
+    v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
+
+    // zip:
+    v.SetDefault(cfgZipCompression, false)
+
+    // metrics
+    v.SetDefault(cfgPprofAddress, "localhost:8083")
+    v.SetDefault(cfgPrometheusAddress, "localhost:8084")
+
+    // resolve bucket
+    v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
+    v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
+
+    // multinet
+    v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
+
+    // Binding flags
+    if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
+        panic(err)
+    }
+    if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
+        panic(err)
+    }
+
+    if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
+        panic(err)
+    }
+
+    if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
+        panic(err)
+    }
+
+    if err := v.BindPFlags(flags); err != nil {
+        panic(err)
+    }
+
+    if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
+        panic(err)
+    }
+    if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
+        panic(err)
+    }
+    if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
+        panic(err)
+    }
+
     if err := flags.Parse(os.Args); err != nil {
         panic(err)
     }

-    v, err := newViper(flags)
-    if err != nil {
-        panic(fmt.Errorf("bind flags: %w", err))
+    if v.IsSet(cfgServer+".0."+cfgTLSKeyFile) && v.IsSet(cfgServer+".0."+cfgTLSCertFile) {
+        v.Set(cfgServer+".0."+cfgTLSEnabled, true)
+    }

+    if resolveMethods != nil {
+        v.SetDefault(cfgResolveOrder, *resolveMethods)
     }

     switch {
@@ -347,97 +343,15 @@ func settings() *appCfg {
         panic(err)
     }

-    return &appCfg{
-        flags:    flags,
-        settings: v,
-    }
-}
-
-func setDefaults(v *viper.Viper, flags *pflag.FlagSet) {
-    // set defaults:
-
-    // logger:
-    v.SetDefault(cfgLoggerLevel, "debug")
-    v.SetDefault(cfgLoggerDestination, "stdout")
-    v.SetDefault(cfgLoggerSamplingEnabled, false)
-    v.SetDefault(cfgLoggerSamplingThereafter, 100)
-    v.SetDefault(cfgLoggerSamplingInitial, 100)
-    v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)
-
-    // pool:
-    v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
-
-    // frostfs:
-    v.SetDefault(cfgBufferMaxSizeForPut, defaultBufferMaxSizeForPut)
-
-    // web-server:
-    v.SetDefault(cfgWebReadBufferSize, 4096)
-    v.SetDefault(cfgWebWriteBufferSize, 4096)
-    v.SetDefault(cfgWebReadTimeout, time.Minute*10)
-    v.SetDefault(cfgWebWriteTimeout, time.Minute*5)
-    v.SetDefault(cfgWebStreamRequestBody, true)
-    v.SetDefault(cfgWebMaxRequestBodySize, fasthttp.DefaultMaxRequestBodySize)
-
-    v.SetDefault(cfgWorkerPoolSize, 1000)
-    // upload header
-    v.SetDefault(cfgUploaderHeaderEnableDefaultTimestamp, false)
-
-    // metrics
-    v.SetDefault(cfgPprofAddress, "localhost:8083")
-    v.SetDefault(cfgPrometheusAddress, "localhost:8084")
-
-    // resolve bucket
-    v.SetDefault(cfgResolveNamespaceHeader, defaultNamespaceHeader)
-    v.SetDefault(cfgResolveDefaultNamespaces, []string{"", "root"})
-
-    // multinet
-    v.SetDefault(cfgMultinetFallbackDelay, defaultMultinetFallbackDelay)
-
-    if resolveMethods, err := flags.GetStringSlice(cfgResolveOrder); err == nil {
-        v.SetDefault(cfgResolveOrder, resolveMethods)
-    }
-
-    if peers, err := flags.GetStringArray(cfgPeers); err == nil {
-        for i := range peers {
-            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", peers[i])
+    if peers != nil && len(*peers) > 0 {
+        for i := range *peers {
+            v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".address", (*peers)[i])
             v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".weight", 1)
             v.SetDefault(cfgPeers+"."+strconv.Itoa(i)+".priority", 1)
         }
     }
-}
-
-func bindFlags(v *viper.Viper, flags *pflag.FlagSet) error {
-    // Binding flags
-    if err := v.BindPFlag(cfgPprofEnabled, flags.Lookup(cmdPprof)); err != nil {
-        return err
-    }
-    if err := v.BindPFlag(cfgPrometheusEnabled, flags.Lookup(cmdMetrics)); err != nil {
-        return err
-    }
-
-    if err := v.BindPFlag(cfgWalletPath, flags.Lookup(cmdWallet)); err != nil {
-        return err
-    }
-
-    if err := v.BindPFlag(cfgWalletAddress, flags.Lookup(cmdAddress)); err != nil {
-        return err
-    }
-
-    if err := v.BindPFlags(flags); err != nil {
-        return err
-    }
-
-    if err := v.BindPFlag(cfgServer+".0.address", flags.Lookup(cmdListenAddress)); err != nil {
-        return err
-    }
-    if err := v.BindPFlag(cfgServer+".0."+cfgTLSKeyFile, flags.Lookup(cfgTLSKeyFile)); err != nil {
-        return err
-    }
-    if err := v.BindPFlag(cfgServer+".0."+cfgTLSCertFile, flags.Lookup(cfgTLSCertFile)); err != nil {
-        return err
-    }
-
-    return nil
+    return v
 }

 func readInConfig(v *viper.Viper) error {
@@ -504,33 +418,107 @@ func mergeConfig(v *viper.Viper, fileName string) error {
     return v.MergeConfig(cfgFile)
 }

-func fetchLogTagsConfig(v *viper.Viper, defaultLvl zapcore.Level) (map[string]zapcore.Level, error) {
-    res := make(map[string]zapcore.Level)
-
-    for i := 0; ; i++ {
-        name := v.GetString(fmt.Sprintf(cfgLoggerTagsNameTmpl, i))
-        if name == "" {
-            break
-        }
-
-        lvl := defaultLvl
-        level := v.GetString(fmt.Sprintf(cfgLoggerTagsLevelTmpl, i))
-        if level != "" {
-            if err := lvl.Set(level); err != nil {
-                return nil, fmt.Errorf("failed to parse log tags config, unknown level: '%s'", level)
-            }
-        }
-
-        res[name] = lvl
-    }
-
-    if len(res) == 0 && !v.IsSet(cfgLoggerTags) {
-        for _, tag := range defaultTags {
-            res[tag] = defaultLvl
-        }
-    }
-
-    return res, nil
+func pickLogger(v *viper.Viper) (*zap.Logger, zap.AtomicLevel) {
+    lvl, err := getLogLevel(v)
+    if err != nil {
+        panic(err)
+    }
+
+    dest := v.GetString(cfgLoggerDestination)
+
+    switch dest {
+    case destinationStdout:
+        return newStdoutLogger(v, lvl)
+    case destinationJournald:
+        return newJournaldLogger(v, lvl)
+    default:
+        panic(fmt.Sprintf("wrong destination for logger: %s", dest))
+    }
+}
+
+// newStdoutLogger constructs a zap.Logger instance for current application.
+// Panics on failure.
+//
+// Logger is built from zap's production logging configuration with:
+//   - parameterized level (debug by default)
+//   - console encoding
+//   - ISO8601 time encoding
+//
+// Logger records a stack trace for all messages at or above fatal level.
+//
+// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
+func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
+    stdout := zapcore.AddSync(os.Stderr)
+    level := zap.NewAtomicLevelAt(lvl)
+
+    consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)
+    consoleOutCore = samplingEnabling(v, consoleOutCore)
+
+    l := zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
+    return l, level
+}
+
+func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) (*zap.Logger, zap.AtomicLevel) {
+    level := zap.NewAtomicLevelAt(lvl)
+
+    encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)
+
+    core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
+    coreWithContext := core.With([]zapcore.Field{
+        zapjournald.SyslogFacility(zapjournald.LogDaemon),
+        zapjournald.SyslogIdentifier(),
+        zapjournald.SyslogPid(),
+    })
+
+    coreWithContext = samplingEnabling(v, coreWithContext)
+
+    l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))
+
+    return l, level
+}
+
+func newLogEncoder() zapcore.Encoder {
+    c := zap.NewProductionEncoderConfig()
+    c.EncodeTime = zapcore.ISO8601TimeEncoder
+
+    return zapcore.NewConsoleEncoder(c)
+}
+
+func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
+    // Zap samples by logging the first cgfLoggerSamplingInitial entries with a given level
+    // and message within the specified time interval.
+    // In the above config, only the first cgfLoggerSamplingInitial log entries with the same level and message
+    // are recorded in cfgLoggerSamplingInterval interval. Every other log entry will be dropped within the interval since
+    // cfgLoggerSamplingThereafter is specified here.
+    if v.GetBool(cfgLoggerSamplingEnabled) {
+        core = zapcore.NewSamplerWithOptions(
+            core,
+            v.GetDuration(cfgLoggerSamplingInterval),
+            v.GetInt(cfgLoggerSamplingInitial),
+            v.GetInt(cfgLoggerSamplingThereafter),
+        )
+    }
+
+    return core
+}
+
+func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
+    var lvl zapcore.Level
+    lvlStr := v.GetString(cfgLoggerLevel)
+    err := lvl.UnmarshalText([]byte(lvlStr))
+    if err != nil {
+        return lvl, fmt.Errorf("incorrect logger level configuration %s (%v), "+
+            "value should be one of %v", lvlStr, err, [...]zapcore.Level{
+            zapcore.DebugLevel,
+            zapcore.InfoLevel,
+            zapcore.WarnLevel,
+            zapcore.ErrorLevel,
+            zapcore.DPanicLevel,
+            zapcore.PanicLevel,
+            zapcore.FatalLevel,
+        })
+    }
+    return lvl, nil
 }

 func fetchReconnectInterval(cfg *viper.Viper) time.Duration {
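Master's applyZapCoreMiddlewares (in the deleted logger.go shown earlier) extends the samplingEnabling above with a SamplerHook that counts dropped entries for a metric. The same construction in isolation, with the config keys replaced by their default literal values:

    import (
        "time"

        "go.uber.org/zap/zapcore"
    )

    // sampledCore logs the first 100 entries per message and level in every
    // one-second window, then every 100th entry after that; droppedInc runs
    // for each entry the sampler discards.
    func sampledCore(core zapcore.Core, droppedInc func()) zapcore.Core {
        return zapcore.NewSamplerWithOptions(core,
            time.Second, // cfgLoggerSamplingInterval
            100,         // cfgLoggerSamplingInitial
            100,         // cfgLoggerSamplingThereafter
            zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
                if dec&zapcore.LogDropped > 0 {
                    droppedInc()
                }
            }))
    }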
@@ -546,19 +534,20 @@ func fetchIndexPageTemplate(v *viper.Viper, l *zap.Logger) (string, bool) {
     if !v.GetBool(cfgIndexPageEnabled) {
         return "", false
     }

     reader, err := os.Open(v.GetString(cfgIndexPageTemplatePath))
     if err != nil {
-        l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
+        l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
         return "", true
     }

     tmpl, err := io.ReadAll(reader)
     if err != nil {
-        l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err), logs.TagField(logs.TagApp))
+        l.Warn(logs.FailedToReadIndexPageTemplate, zap.Error(err))
         return "", true
     }

-    l.Info(logs.SetCustomIndexPageTemplate, logs.TagField(logs.TagApp))
+    l.Info(logs.SetCustomIndexPageTemplate)
     return string(tmpl), true
 }

@@ -599,7 +588,7 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
     }

     if _, ok := seen[serverInfo.Address]; ok {
-        log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address), logs.TagField(logs.TagApp))
+        log.Warn(logs.WarnDuplicateAddress, zap.String("address", serverInfo.Address))
         continue
     }
     seen[serverInfo.Address] = struct{}{}
@@ -609,10 +598,10 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
     return servers
 }

-func (a *app) initPools(ctx context.Context) {
-    key, err := getFrostFSKey(a.config(), a.log)
+func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper, dialSource *internalnet.DialerSource) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
+    key, err := getFrostFSKey(cfg, logger)
     if err != nil {
-        a.log.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err), logs.TagField(logs.TagApp))
+        logger.Fatal(logs.CouldNotLoadFrostFSPrivateKey, zap.Error(err))
     }

     var prm pool.InitParameters
@@ -620,84 +609,77 @@ func (a *app) initPools(ctx context.Context) {

     prm.SetKey(&key.PrivateKey)
     prmTree.SetKey(key)
-    a.log.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())),
-        logs.TagField(logs.TagApp))
+    logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))

-    for _, peer := range fetchPeers(a.log, a.config()) {
+    for _, peer := range fetchPeers(logger, cfg) {
         prm.AddNode(peer)
         prmTree.AddNode(peer)
     }

-    connTimeout := a.config().GetDuration(cfgConTimeout)
+    connTimeout := cfg.GetDuration(cfgConTimeout)
     if connTimeout <= 0 {
         connTimeout = defaultConnectTimeout
     }
     prm.SetNodeDialTimeout(connTimeout)
     prmTree.SetNodeDialTimeout(connTimeout)

-    streamTimeout := a.config().GetDuration(cfgStreamTimeout)
+    streamTimeout := cfg.GetDuration(cfgStreamTimeout)
     if streamTimeout <= 0 {
         streamTimeout = defaultStreamTimeout
     }
     prm.SetNodeStreamTimeout(streamTimeout)
     prmTree.SetNodeStreamTimeout(streamTimeout)

-    healthCheckTimeout := a.config().GetDuration(cfgReqTimeout)
+    healthCheckTimeout := cfg.GetDuration(cfgReqTimeout)
     if healthCheckTimeout <= 0 {
         healthCheckTimeout = defaultRequestTimeout
     }
     prm.SetHealthcheckTimeout(healthCheckTimeout)
     prmTree.SetHealthcheckTimeout(healthCheckTimeout)

-    rebalanceInterval := a.config().GetDuration(cfgRebalance)
+    rebalanceInterval := cfg.GetDuration(cfgRebalance)
     if rebalanceInterval <= 0 {
         rebalanceInterval = defaultRebalanceTimer
     }
     prm.SetClientRebalanceInterval(rebalanceInterval)
     prmTree.SetClientRebalanceInterval(rebalanceInterval)

-    errorThreshold := a.config().GetUint32(cfgPoolErrorThreshold)
+    errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
     if errorThreshold <= 0 {
         errorThreshold = defaultPoolErrorThreshold
     }
     prm.SetErrorThreshold(errorThreshold)
-    prm.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
-    prmTree.SetLogger(a.log.With(logs.TagField(logs.TagDatapath)))
+    prm.SetLogger(logger)
+    prmTree.SetLogger(logger)

-    prmTree.SetMaxRequestAttempts(a.config().GetInt(cfgTreePoolMaxAttempts))
+    prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))

     interceptors := []grpc.DialOption{
         grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
         grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
-        grpc.WithContextDialer(a.settings.dialerSource.GrpcContextDialer()),
+        grpc.WithContextDialer(dialSource.GrpcContextDialer()),
     }
     prm.SetGRPCDialOptions(interceptors...)
     prmTree.SetGRPCDialOptions(interceptors...)

     p, err := pool.NewPool(prm)
     if err != nil {
-        a.log.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
+        logger.Fatal(logs.FailedToCreateConnectionPool, zap.Error(err))
     }

     if err = p.Dial(ctx); err != nil {
-        a.log.Fatal(logs.FailedToDialConnectionPool, zap.Error(err), logs.TagField(logs.TagApp))
-    }
-
-    if a.config().GetBool(cfgFeaturesTreePoolNetmapSupport) {
-        prmTree.SetNetMapInfoSource(frostfs.NewSource(frostfs.NewFrostFS(p), cache.NewNetmapCache(getNetmapCacheOptions(a.config(), a.log)), a.bucketCache, a.log))
+        logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
     }

     treePool, err := treepool.NewPool(prmTree)
     if err != nil {
-        a.log.Fatal(logs.FailedToCreateTreePool, zap.Error(err), logs.TagField(logs.TagApp))
+        logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
     }
     if err = treePool.Dial(ctx); err != nil {
-        a.log.Fatal(logs.FailedToDialTreePool, zap.Error(err), logs.TagField(logs.TagApp))
+        logger.Fatal(logs.FailedToDialTreePool, zap.Error(err))
     }

-    a.pool = p
-    a.treePool = treePool
-    a.key = key
+    return p, treePool, key
 }

 func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
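To condense the long hunk above: both initPools (master) and getPools (v0.31.0) configure the object pool and the tree pool symmetrically and differ mainly in where the logger, config, and dialer come from. A compressed sketch of the shared flow; prmTree's type name is assumed from the setters used above, and the timeout/threshold defaulting is omitted:

    import (
        "context"

        "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
        treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
        "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
        "go.uber.org/zap"
        "google.golang.org/grpc"
    )

    // buildPools condenses the flow above: identical key, peers, logger and
    // gRPC dial options are applied to both pools, then each pool is dialed.
    func buildPools(ctx context.Context, logger *zap.Logger, key *keys.PrivateKey,
        peers []pool.NodeParam, dial []grpc.DialOption) (*pool.Pool, *treepool.Pool, error) {

        var prm pool.InitParameters
        var prmTree treepool.InitParameters

        prm.SetKey(&key.PrivateKey)
        prmTree.SetKey(key)
        for _, peer := range peers {
            prm.AddNode(peer)
            prmTree.AddNode(peer)
        }
        prm.SetLogger(logger)
        prmTree.SetLogger(logger)
        prm.SetGRPCDialOptions(dial...)
        prmTree.SetGRPCDialOptions(dial...)

        p, err := pool.NewPool(prm)
        if err != nil {
            return nil, nil, err
        }
        if err = p.Dial(ctx); err != nil {
            return nil, nil, err
        }
        tp, err := treepool.NewPool(prmTree)
        if err != nil {
            return nil, nil, err
        }
        if err = tp.Dial(ctx); err != nil {
            return nil, nil, err
        }
        return p, tp, nil
    }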
@@ -723,8 +705,7 @@ func fetchPeers(l *zap.Logger, v *viper.Viper) []pool.NodeParam {
         l.Info(logs.AddedStoragePeer,
             zap.Int("priority", priority),
             zap.String("address", address),
-            zap.Float64("weight", weight),
-            logs.TagField(logs.TagApp))
+            zap.Float64("weight", weight))
     }

     return nodes
@@ -739,7 +720,7 @@ func fetchSoftMemoryLimit(cfg *viper.Viper) int64 {
     return int64(softMemoryLimit)
 }

-func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
+func getCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
     cacheCfg := cache.DefaultBucketConfig(l)

     cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgBucketsCacheLifetime, cacheCfg.Lifetime)
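fetchCacheLifetime, continued in the next hunk, applies the same defaulting rule seen for the pool timeouts: accept a configured value only when it is set and valid, otherwise log and fall back. The rule as a standalone helper; the function name is hypothetical, not part of the gateway:

    import (
        "time"

        "github.com/spf13/viper"
        "go.uber.org/zap"
    )

    // durationOrDefault mirrors the fetchCacheLifetime pattern: use the
    // configured duration only when it is set and positive, else keep def.
    func durationOrDefault(v *viper.Viper, l *zap.Logger, key string, def time.Duration) time.Duration {
        if v.IsSet(key) {
            if d := v.GetDuration(key); d > 0 {
                return d
            }
            l.Error("invalid lifetime, using default value",
                zap.String("parameter", key),
                zap.Duration("default", def))
        }
        return def
    }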
@@ -748,14 +729,6 @@ func getBucketCacheOptions(v *viper.Viper, l *zap.Logger) *cache.Config {
     return cacheCfg
 }

-func getNetmapCacheOptions(v *viper.Viper, l *zap.Logger) *cache.NetmapCacheConfig {
-    cacheCfg := cache.DefaultNetmapConfig(l)
-
-    cacheCfg.Lifetime = fetchCacheLifetime(v, l, cfgNetmapCacheLifetime, cacheCfg.Lifetime)
-
-    return cacheCfg
-}
-
 func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue time.Duration) time.Duration {
     if v.IsSet(cfgEntry) {
         lifetime := v.GetDuration(cfgEntry)
@@ -763,8 +736,7 @@ func fetchCacheLifetime(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultV
             l.Error(logs.InvalidLifetimeUsingDefaultValue,
                 zap.String("parameter", cfgEntry),
                 zap.Duration("value in config", lifetime),
-                zap.Duration("default", defaultValue),
-                logs.TagField(logs.TagApp))
+                zap.Duration("default", defaultValue))
         } else {
             return lifetime
         }
@@ -780,8 +752,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
             l.Error(logs.InvalidCacheSizeUsingDefaultValue,
                 zap.String("parameter", cfgEntry),
                 zap.Int("value in config", size),
-                zap.Int("default", defaultValue),
-                logs.TagField(logs.TagApp))
+                zap.Int("default", defaultValue))
         } else {
             return size
         }
@@ -793,7 +764,7 @@ func fetchCacheSize(v *viper.Viper, l *zap.Logger, cfgEntry string, defaultValue
 func getDialerSource(logger *zap.Logger, cfg *viper.Viper) *internalnet.DialerSource {
     source, err := internalnet.NewDialerSource(fetchMultinetConfig(cfg, logger))
     if err != nil {
-        logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err), logs.TagField(logs.TagApp))
+        logger.Fatal(logs.FailedToLoadMultinetConfig, zap.Error(err))
     }
     return source
 }
@@ -844,10 +815,3 @@ func fetchTracingAttributes(v *viper.Viper) (map[string]string, error) {

     return attributes, nil
 }
-
-func fetchArchiveCompression(v *viper.Viper) bool {
-    if v.IsSet(cfgZipCompression) {
-        return v.GetBool(cfgZipCompression)
-    }
-    return v.GetBool(cfgArchiveCompression)
-}
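fetchArchiveCompression, removed above when going to v0.31.0, is a deprecation shim: the legacy zip.compression key wins whenever the operator still sets it explicitly, otherwise archive.compression applies. The same behavior in isolation, with the config constants replaced by their literal keys:

    import "github.com/spf13/viper"

    // archiveCompression prefers the deprecated zip.compression key when it
    // is explicitly set, matching fetchArchiveCompression above.
    func archiveCompression(v *viper.Viper) bool {
        if v.IsSet("zip.compression") { // deprecated: superseded by archive.compression
            return v.GetBool("zip.compression")
        }
        return v.GetBool("archive.compression")
    }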
@@ -1,60 +0,0 @@
-package main
-
-import (
-    "os"
-    "testing"
-    "time"
-
-    "git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
-    "github.com/stretchr/testify/require"
-)
-
-func TestConfigReload(t *testing.T) {
-    f, err := os.CreateTemp("", "conf")
-    require.NoError(t, err)
-    defer func() {
-        require.NoError(t, os.Remove(f.Name()))
-    }()
-
-    confData := `
-pprof:
-  enabled: true
-
-resolve_bucket:
-  default_namespaces: [""]
-
-resolve_order:
-  - nns
-`
-
-    _, err = f.WriteString(confData)
-    require.NoError(t, err)
-    require.NoError(t, f.Close())
-
-    cfg := settings()
-
-    require.NoError(t, cfg.flags.Parse([]string{"--config", f.Name(), "--connect_timeout", "15s"}))
-    require.NoError(t, cfg.reload())
-
-    require.True(t, cfg.config().GetBool(cfgPprofEnabled))
-    require.Equal(t, []string{""}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
-    require.Equal(t, []string{resolver.NNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
-    require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
-
-    require.NoError(t, os.Truncate(f.Name(), 0))
-    require.NoError(t, cfg.reload())
-
-    require.False(t, cfg.config().GetBool(cfgPprofEnabled))
-    require.Equal(t, []string{"", "root"}, cfg.config().GetStringSlice(cfgResolveDefaultNamespaces))
-    require.Equal(t, []string{resolver.NNSResolver, resolver.DNSResolver}, cfg.config().GetStringSlice(cfgResolveOrder))
-    require.Equal(t, 15*time.Second, cfg.config().GetDuration(cfgConTimeout))
-}
-
-func TestSetTLSEnabled(t *testing.T) {
-    cfg := settings()
-
-    require.NoError(t, cfg.flags.Parse([]string{"--" + cfgTLSCertFile, "tls.crt", "--" + cfgTLSKeyFile, "tls.key"}))
-    require.NoError(t, cfg.reload())
-
-    require.True(t, cfg.config().GetBool(cfgServer+".0."+cfgTLSEnabled))
-}
@@ -20,8 +20,6 @@ HTTP_GW_LOGGER_SAMPLING_ENABLED=false
 HTTP_GW_LOGGER_SAMPLING_INITIAL=100
 HTTP_GW_LOGGER_SAMPLING_THEREAFTER=100
 HTTP_GW_LOGGER_SAMPLING_INTERVAL=1s
-HTTP_GW_LOGGER_TAGS_0_NAME=app
-HTTP_GW_LOGGER_TAGS_1_NAME=datapath
 
 HTTP_GW_SERVER_0_ADDRESS=0.0.0.0:443
 HTTP_GW_SERVER_0_TLS_ENABLED=false
@@ -99,13 +97,9 @@ HTTP_GW_REBALANCE_TIMER=30s
 # The number of errors on connection after which node is considered as unhealthy
 HTTP_GW_POOL_ERROR_THRESHOLD=100
 
-# Enable archive compression to download files by common prefix.
-# DEPRECATED: Use HTTP_GW_ARCHIVE_COMPRESSION instead.
+# Enable zip compression to download files by common prefix.
 HTTP_GW_ZIP_COMPRESSION=false
 
-# Enable archive compression to download files by common prefix.
-HTTP_GW_ARCHIVE_COMPRESSION=false
-
 HTTP_GW_TRACING_ENABLED=true
 HTTP_GW_TRACING_ENDPOINT="localhost:4317"
 HTTP_GW_TRACING_EXPORTER="otlp_grpc"
@@ -127,8 +121,6 @@ HTTP_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
 # Cache which contains mapping of bucket name to bucket info
 HTTP_GW_CACHE_BUCKETS_LIFETIME=1m
 HTTP_GW_CACHE_BUCKETS_SIZE=1000
-# Cache which stores netmap
-HTTP_GW_CACHE_NETMAP_LIFETIME=1m
 
 # Header to determine zone to resolve bucket name
 HTTP_GW_RESOLVE_BUCKET_NAMESPACE_HEADER=X-Frostfs-Namespace
@@ -167,8 +159,3 @@ HTTP_GW_WORKER_POOL_SIZE=1000
 HTTP_GW_INDEX_PAGE_ENABLED=false
 # Index page template path
 HTTP_GW_INDEX_PAGE_TEMPLATE_PATH=internal/handler/templates/index.gotmpl
-
-# Enable using fallback path to search for a object by attribute
-HTTP_GW_FEATURES_ENABLE_FILEPATH_FALLBACK=false
-# Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
-HTTP_GW_FEATURES_TREE_POOL_NETMAP_SUPPORT=true
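For orientation: gateways in this family typically bind such `HTTP_GW_*` variables through viper with an environment prefix and a key replacer. The exact wiring lives in the settings code, not in this diff, so the following is a sketch under that assumption:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	// Assumed mapping: HTTP_GW_POOL_ERROR_THRESHOLD -> pool_error_threshold.
	os.Setenv("HTTP_GW_POOL_ERROR_THRESHOLD", "100")

	v := viper.New()
	v.SetEnvPrefix("HTTP_GW")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	v.AutomaticEnv()

	fmt.Println(v.GetInt("pool_error_threshold")) // 100
}
```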
@@ -29,10 +29,6 @@ logger:
     initial: 100
     thereafter: 100
     interval: 1s
-  tags:
-    - name: app
-    - name: datapath
-      level: debug
 
 server:
   - address: 0.0.0.0:8080
@@ -120,19 +116,13 @@ pool_error_threshold: 100 # The number of errors on connection after which node
 # Number of workers in handler's worker pool
 worker_pool_size: 1000
 
-# Enables index page to see objects list for specified container and prefix
+# Enable index page to see objects list for specified container and prefix
 index_page:
   enabled: false
   template_path: internal/handler/templates/index.gotmpl
 
-# Deprecated: Use archive.compression instead
 zip:
-  # Enables zip compression to download files by common prefix.
-  compression: false
-
-archive:
-  # Enables archive compression to download files by common prefix.
-  compression: false
+  compression: false # Enable zip compression to download files by common prefix.
 
 runtime:
   soft_memory_limit: 1gb
@@ -153,9 +143,6 @@ cache:
   buckets:
     lifetime: 1m
     size: 1000
-  # Cache which stores netmap
-  netmap:
-    lifetime: 1m
 
 resolve_bucket:
   namespace_header: X-Frostfs-Namespace
@@ -185,9 +172,3 @@ multinet:
   source_ips:
     - 1.2.3.4
     - 1.2.3.5
-
-features:
-  # Enable using fallback path to search for a object by attribute
-  enable_filepath_fallback: false
-  # Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service
-  tree_pool_netmap_support: true
docs/api.md (23 lines changed)
@@ -1,14 +1,14 @@
 # HTTP Gateway Specification
 
 | Route                                           | Description                                      |
-|-------------------------------------------------|--------------------------------------------------|
+|-------------------------------------------------|----------------------------------------------|
 | `/upload/{cid}`                                 | [Put object](#put-object)                        |
 | `/get/{cid}/{oid}`                              | [Get object](#get-object)                        |
 | `/get_by_attribute/{cid}/{attr_key}/{attr_val}` | [Search object](#search-object)                  |
-| `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`    | [Download objects in archive](#download-archive) |
+| `/zip/{cid}/{prefix}`                           | [Download objects in archive](#download-zip)  |
 
 **Note:** `cid` parameter can be base58 encoded container ID or container name
-(the name must be registered in NNS, see appropriate section in [nns.md](./nns.md)).
+(the name must be registered in NNS, see appropriate section in [README](../README.md#nns)).
 
 Route parameters can be:
 
@@ -18,7 +18,7 @@ Route parameters can be:
 
 ### Bearer token
 
-All routes can accept [bearer token](./authentication.md) from:
+All routes can accept [bearer token](../README.md#authentication) from:
 
 * `Authorization` header with `Bearer` type and base64-encoded token in
   credentials field
@@ -57,12 +57,10 @@ Upload file as object with attributes to FrostFS.
 ###### Headers
 
 | Header                 | Description |
-|------------------------|-----------------------------------------------------------------------------------------------------------------------------------|
+|------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
 | Common headers         | See [bearer token](#bearer-token). |
 | `X-Attribute-System-*` | Used to set system FrostFS object attributes <br/> (e.g. use "X-Attribute-System-Expiration-Epoch" to set `__SYSTEM__EXPIRATION_EPOCH` attribute). |
 | `X-Attribute-*`        | Used to set regular object attributes <br/> (e.g. use "X-Attribute-My-Tag" to set `My-Tag` attribute). |
-| `X-Explode-Archive`    | If set, gate tries to read files from uploading `tar` archive and creates an object for each file in it. Uploading `tar` could be compressed via Gzip by setting a `Content-Encoding` header. Sets a `FilePath` attribute as a relative path from archive root and a `FileName` as the last path element of the `FilePath`. |
-| `Content-Encoding`     | If set and value is `gzip`, gate will handle uploading file as a `Gzip` compressed `tar` file. |
 | `Date`                 | This header is used to calculate the right `__SYSTEM__EXPIRATION` attribute for object. If the header is missing, the current server time is used. |
 
 There are some reserved headers type of `X-Attribute-FROSTFS-*` (headers are arranged in descending order of priority):
@@ -271,9 +269,9 @@ If more than one object is found, an arbitrary one will be used to get attributes
 | 400 | Some error occurred during operation. |
 | 404 | Container or object not found. |
 
-## Download archive
+## Download zip
 
-Route: `/zip/{cid}/{prefix}`, `/tar/{cid}/{prefix}`
+Route: `/zip/{cid}/{prefix}`
 
 | Route parameter | Type      | Description                                             |
 |-----------------|-----------|---------------------------------------------------------|
@@ -284,13 +282,12 @@ Route: `/zip/{cid}/{prefix}`
 
 #### GET
 
-Find objects by prefix for `FilePath` attributes. Return found objects in zip or tar archive.
+Find objects by prefix for `FilePath` attributes. Return found objects in zip archive.
 Name of files in archive sets to `FilePath` attribute of objects.
 Time of files sets to time when object has started downloading.
-You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` or
-`/tar/{cid}/` route.
+You can download all files in container that have `FilePath` attribute by `/zip/{cid}/` route.
 
-Archive can be compressed (see http-gw [configuration](gate-configuration.md#archive-section)).
+Archive can be compressed (see http-gw [configuration](gate-configuration.md#zip-section)).
 
 ##### Request
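As a usage sketch of the archive route documented above, a client might stream the archive for a whole prefix like this. The gateway address and container ID are placeholders, and the bearer header is optional per the Bearer token section:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	const gateway = "http://localhost:8082"                       // assumed local gateway
	const cnrID = "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"  // placeholder container

	// Empty prefix: download all objects in the container that have FilePath.
	req, err := http.NewRequest(http.MethodGet, gateway+"/zip/"+cnrID+"/", nil)
	if err != nil {
		panic(err)
	}
	if tok := os.Getenv("FROSTFS_BEARER"); tok != "" {
		req.Header.Set("Authorization", "Bearer "+tok)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("archive.zip")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	n, _ := io.Copy(out, resp.Body)
	fmt.Println("wrote", n, "bytes, status", resp.Status)
}
```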
@@ -1,108 +0,0 @@
-# Request authentication
-
-HTTP Gateway does not authorize requests. Gateway converts HTTP request to a
-FrostFS request and signs it with its own private key.
-
-You can always upload files to public containers (open for anyone to put
-objects into), but for restricted containers you need to explicitly allow PUT
-operations for a request signed with your HTTP Gateway keys.
-
-If you don't want to manage gateway's secret keys and adjust policies when
-gateway configuration changes (new gate, key rotation, etc) or you plan to use
-public services, there is an option to let your application backend (or you) to
-issue Bearer Tokens and pass them from the client via gate down to FrostFS level
-to grant access.
-
-FrostFS Bearer Token basically is a container owner-signed policy (refer to FrostFS
-documentation for more details). There are two options to pass them to gateway:
-* "Authorization" header with "Bearer" type and base64-encoded token in
-  credentials field
-* "Bearer" cookie with base64-encoded token contents
-
-For example, you have a mobile application frontend with a backend part storing
-data in FrostFS. When a user authorizes in the mobile app, the backend issues a FrostFS
-Bearer token and provides it to the frontend. Then, the mobile app may generate
-some data and upload it via any available FrostFS HTTP Gateway by adding
-the corresponding header to the upload request. Accessing policy protected data
-works the same way.
-
-##### Example
-In order to generate a bearer token, you need to have wallet (which will be used to sign the token)
-
-1. Suppose you have a container with private policy for wallet key
-
-```
-$ frostfs-cli container create -r <endpoint> --wallet <wallet> -policy <policy> --basic-acl 0 --await
-CID: 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z
-
-$ frostfs-cli ape-manager add -r <endpoint> --wallet <wallet> \
-  --target-type container --target-name 9dfzyvq82JnFqp5svxcREf2iy6XNuifYcJPusEDnGK9Z \
-  --rule "allow Object.* RequestCondition:"\$Actor:publicKey"=03b09baabff3f6107c7e9acb8721a6fc5618d45b50247a314d82e548702cce8cd5 *" \
-  --chain-id <chainID>
-```
-
-2. Form a Bearer token (10000 is lifetime expiration in epoch) to impersonate
-   HTTP Gateway request as wallet signed request and save it to **bearer.json**:
-```
-{
-    "body": {
-        "allowImpersonate": true,
-        "lifetime": {
-            "exp": "10000",
-            "nbf": "0",
-            "iat": "0"
-        }
-    },
-    "signature": null
-}
-```
-
-3. Sign it with the wallet:
-```
-$ frostfs-cli util sign bearer-token --from bearer.json --to signed.json -w <wallet>
-```
-
-4. Encode to base64 to use in header:
-```
-$ base64 -w 0 signed.json
-# output: Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==
-```
-
-After that, the Bearer token can be used:
-
-```
-$ curl -F 'file=@cat.jpeg;filename=cat.jpeg' -H "Authorization: Bearer Ck4KKgoECAIQBhIiCiCZGdlbN7DPGPMg9rsWqV+p2XdMzUqknRiexewSFp8kmBIbChk17MUri6OJ0X5ftsHzy7NERDNFB4C92PcaGgMIkE4SZgohAxpsb7vfAso1F0X6hrm6WpRS14WsT3/Ct1SMoqRsT89KEkEEGxKi8GjKSf52YqhppgaOTQHbUsL3jn7SHLqS3ndAQ7NtAATnmRHleZw2V2xRRSRBQdjDC05KK83LhdSax72Fsw==" \
-  http://localhost:8082/upload/BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K
-# output:
-# {
-#	"object_id": "DhfES9nVrFksxGDD2jQLunGADfrXExxNwqXbDafyBn9X",
-#	"container_id": "BJeErH9MWmf52VsR1mLWKkgF3pRm3FkubYxM7TZkBP4K"
-# }
-```
-
-##### Note: Bearer Token owner
-
-You can specify exact key who can use Bearer Token (gateway wallet address).
-To do this, encode wallet address in base64 format
-
-```
-$ echo 'NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3' | base58 --decode | base64
-# output: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
-```
-
-Then specify this value in Bearer Token Json
-```
-{
-    "body": {
-        "ownerID": {
-            "value": "NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg=="
-        },
-        ...
-```
-
-##### Note: Policy override
-
-Instead of impersonation, you can define the set of policies that will be applied
-to the request sender. This allows to restrict access to specific operation and
-specific objects without giving full impersonation control to the token user.
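The `base58 --decode | base64` pipeline from the owner note above can be reproduced in Go; a sketch using `github.com/mr-tron/base58`, which is already in this module's dependency list (the printed value should match the documented output):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/mr-tron/base58"
)

func main() {
	// Wallet address from the example above.
	addr := "NhVtreTTCoqsMQV5Wp55fqnriiUCpEaKm3"

	raw, err := base58.Decode(addr)
	if err != nil {
		panic(err)
	}
	// Expected: NezFK4ujidF+X7bB88uzREQzRQeAvdj3Gg==
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```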
@@ -59,7 +59,7 @@ $ cat http.log
 | `resolve_bucket` | [Bucket name resolving configuration](#resolve_bucket-section) |
 | `index_page`     | [Index page configuration](#index_page-section)                |
 | `multinet`       | [Multinet configuration](#multinet-section)                    |
-| `features`       | [Features configuration](#features-section)                    |
 
 # General section
 
@@ -174,11 +174,6 @@ logger:
   initial: 100
   thereafter: 100
   interval: 1s
-  tags:
-    - name: "app"
-      level: info
-    - name: "datapath"
-    - name: "external_storage_tree"
 ```
 
 | Parameter | Type | SIGHUP reload | Default value | Description |
@@ -189,30 +184,6 @@ logger:
 | `sampling.initial`    | `int`      | no | '100' | Sampling count of first log entries.            |
 | `sampling.thereafter` | `int`      | no | '100' | Sampling count of entries after an `interval`.  |
 | `sampling.interval`   | `duration` | no | '1s'  | Sampling interval of messaging similar entries. |
-| `sampling.tags`       | `[]Tag`    | yes |      | Tagged log entries that should be additionally logged (available tags see in the next section). |
-
-## Tags
-
-There are additional log entries that can hurt performance and can be additionally logged by using `logger.tags`
-parameter. Available tags:
-
-```yaml
-tags:
-  - name: "app"
-    level: info
-```
-
-| Parameter | Type     | SIGHUP reload | Default value             | Description |
-|-----------|----------|---------------|---------------------------|-------------|
-| `name`    | `string` | yes           |                           | Tag name. Possible values see below in `Tag values` section. |
-| `level`   | `string` | yes           | Value from `logger.level` | Logging level for specific tag. Possible values: `debug`, `info`, `warn`, `dpanic`, `panic`, `fatal`. |
-
-### Tag values
-
-* `app` - common application logs (enabled by default).
-* `datapath` - main logic of application (enabled by default).
-* `external_storage` - external interaction with storage node (enabled by default).
-* `external_storage_tree` - external interaction with tree service in storage node (enabled by default).
 
 # `web` section
 
@@ -247,9 +218,8 @@ upload_header:
 |-------------------------|--------|---------------|---------------|-------------------------------------------------------------|
 | `use_default_timestamp` | `bool` | yes           | `false`       | Create timestamp for object if it isn't provided by header. |
 
 # `zip` section
-
-> **_DEPRECATED:_** Use archive section instead
 
 ```yaml
 zip:
@@ -260,17 +230,6 @@ zip:
 |---------------|--------|---------------|---------------|--------------------------------------------------------------|
 | `compression` | `bool` | yes           | `false`       | Enable zip compression when download files by common prefix. |
 
-# `archive` section
-
-```yaml
-archive:
-  compression: false
-```
-
-| Parameter     | Type   | SIGHUP reload | Default value | Description                                                       |
-|---------------|--------|---------------|---------------|------------------------------------------------------------------|
-| `compression` | `bool` | yes           | `false`       | Enable archive compression when download files by common prefix. |
-
 # `pprof` section
 
@@ -380,14 +339,12 @@ cache:
   buckets:
     lifetime: 1m
     size: 1000
-  netmap:
-    lifetime: 1m
 ```
 
 | Parameter | Type                              | Default value                   | Description |
-|-----------|-----------------------------------|---------------------------------|----------------------------------------------------------------------------|
+|-----------------|-----------------------------------|-----------------------------------|----------------------------------------------------------------------------------------|
 | `buckets` | [Cache config](#cache-subsection) | `lifetime: 60s`<br>`size: 1000` | Cache which contains mapping of bucket name to bucket info. |
-| `netmap`  | [Cache config](#cache-subsection) | `lifetime: 1m`                  | Cache which stores netmap. `netmap.size` isn't applicable for this cache.  |
 
 #### `cache` subsection
 
@@ -500,18 +457,3 @@ multinet:
 |--------------|------------|---------------|---------------|----------------------------------------------------------------------|
 | `mask`       | `string`   | yes           |               | Destination subnet.                                                   |
 | `source_ips` | `[]string` | yes           |               | Array of source IP addresses to use when dialing destination subnet.  |
 
-# `features` section
-
-Contains parameters for enabling features.
-
-```yaml
-features:
-  enable_filepath_fallback: true
-  tree_pool_netmap_support: true
-```
-
-| Parameter                           | Type   | SIGHUP reload | Default value | Description |
-|-------------------------------------|--------|---------------|---------------|-------------|
-| `features.enable_filepath_fallback` | `bool` | yes           | `false`       | Enable using fallback path to search for a object by attribute. If the value of the `FilePath` attribute in the request contains no `/` symbols or single leading `/` symbol and the object was not found, then an attempt is made to search for the object by the attribute `FileName`. |
-| `features.tree_pool_netmap_support` | `bool` | no            | `false`       | Enable using new version of tree pool, which uses netmap to select nodes, for requests to tree service. |
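A minimal sketch of reading the `features` flags documented above with viper; the key names come from the removed table, the YAML-in-memory setup is just for illustration:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	cfg := []byte(`
features:
  enable_filepath_fallback: true
  tree_pool_netmap_support: true
`)

	v := viper.New()
	v.SetConfigType("yaml")
	if err := v.ReadConfig(bytes.NewReader(cfg)); err != nil {
		panic(err)
	}

	fmt.Println(v.GetBool("features.enable_filepath_fallback")) // true
	fmt.Println(v.GetBool("features.tree_pool_netmap_support")) // true
}
```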
docs/nns.md (36 lines changed)
@@ -1,36 +0,0 @@
-# Nicename Resolving with NNS
-
-Steps to start using name resolving:
-
-1. Enable NNS resolving in config (`rpc_endpoint` must be a valid neo rpc node, see [configs](./config) for other examples):
-
-```yaml
-rpc_endpoint: http://morph-chain.frostfs.devenv:30333
-resolve_order:
-  - nns
-```
-
-2. Make sure your container is registered in NNS contract. If you use [frostfs-dev-env](https://git.frostfs.info/TrueCloudLab/frostfs-dev-env)
-   you can check if your container (e.g. with `container-name` name) is registered in NNS:
-
-```shell
-$ curl -s --data '{"id":1,"jsonrpc":"2.0","method":"getcontractstate","params":[1]}' \
-  http://morph-chain.frostfs.devenv:30333 | jq -r '.result.hash'
-
-0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667
-
-$ docker exec -it morph_chain neo-go \
-  contract testinvokefunction \
-  -r http://morph-chain.frostfs.devenv:30333 0x8e6c3cd4b976b28e84a3788f6ea9e2676c15d667 \
-  resolve string:container-name.container int:16 \
-  | jq -r '.stack[0].value | if type=="array" then .[0].value else . end' \
-  | base64 -d && echo
-
-7f3vvkw4iTiS5ZZbu5BQXEmJtETWbi3uUjLNaSs29xrL
-```
-
-3. Use container name instead of its `$CID`. For example:
-
-```shell
-$ curl http://localhost:8082/get_by_attribute/container-name/FileName/object-name
-```
go.mod (96 lines changed)
@@ -3,13 +3,13 @@ module git.frostfs.info/TrueCloudLab/frostfs-http-gw
 go 1.22
 
 require (
-	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241125133852-37bd75821121
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20250130095343-593dd77d841a
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20241011114054-f0fc40e116d1
+	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20241112082307-f17779933e88
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20241022124111-5361f0ecebd3
 	git.frostfs.info/TrueCloudLab/multinet v0.0.0-20241015075604-6cb0d80e0972
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
 	github.com/bluele/gcache v0.0.2
-	github.com/docker/docker v27.1.1+incompatible
-	github.com/docker/go-units v0.5.0
+	github.com/docker/go-units v0.4.0
 	github.com/fasthttp/router v1.4.1
 	github.com/nspcc-dev/neo-go v0.106.2
 	github.com/panjf2000/ants/v2 v2.5.0
@@ -19,120 +19,102 @@ require (
 	github.com/spf13/viper v1.15.0
 	github.com/ssgreg/journald v1.0.0
 	github.com/stretchr/testify v1.9.0
-	github.com/testcontainers/testcontainers-go v0.35.0
+	github.com/testcontainers/testcontainers-go v0.13.0
 	github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
 	github.com/valyala/fasthttp v1.34.0
-	go.opentelemetry.io/otel v1.31.0
-	go.opentelemetry.io/otel/trace v1.31.0
+	go.opentelemetry.io/otel v1.28.0
+	go.opentelemetry.io/otel/trace v1.28.0
 	go.uber.org/zap v1.27.0
 	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-	golang.org/x/net v0.30.0
-	golang.org/x/sys v0.28.0
-	google.golang.org/grpc v1.69.2
+	golang.org/x/net v0.26.0
+	golang.org/x/sys v0.22.0
+	google.golang.org/grpc v1.66.2
 )
 
 require (
-	dario.cat/mergo v1.0.0 // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e // indirect
 	git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 // indirect
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/Microsoft/go-winio v0.6.2 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
+	github.com/Microsoft/hcsshim v0.9.2 // indirect
 	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
 	github.com/andybalholm/brotli v1.0.4 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/containerd/containerd v1.7.18 // indirect
-	github.com/containerd/log v0.1.0 // indirect
-	github.com/containerd/platforms v0.2.1 // indirect
-	github.com/cpuguy83/dockercfg v0.3.2 // indirect
+	github.com/containerd/cgroups v1.0.3 // indirect
+	github.com/containerd/containerd v1.6.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
-	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/go-connections v0.5.0 // indirect
-	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/docker/distribution v2.8.1+incompatible // indirect
+	github.com/docker/docker v20.10.14+incompatible // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/uuid v1.6.0 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gorilla/websocket v1.5.1 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/ipfs/go-cid v0.0.7 // indirect
-	github.com/klauspost/compress v1.17.4 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
-	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/klauspost/compress v1.16.4 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
-	github.com/minio/sha256-simd v1.0.1 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/docker-image-spec v1.3.1 // indirect
-	github.com/moby/patternmatcher v0.6.0 // indirect
-	github.com/moby/sys/sequential v0.5.0 // indirect
-	github.com/moby/sys/user v0.1.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
+	github.com/moby/sys/mount v0.3.2 // indirect
+	github.com/moby/sys/mountinfo v0.6.1 // indirect
+	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/multiformats/go-base32 v0.1.0 // indirect
-	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multiaddr v0.14.0 // indirect
-	github.com/multiformats/go-multibase v0.2.0 // indirect
-	github.com/multiformats/go-multihash v0.2.3 // indirect
-	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
 	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
 	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0 // indirect
+	github.com/opencontainers/image-spec v1.0.2 // indirect
+	github.com/opencontainers/runc v1.1.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
 	github.com/prometheus/common v0.48.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/savsgio/gotils v0.0.0-20210617111740-97865ed5a873 // indirect
-	github.com/shirou/gopsutil/v3 v3.23.12 // indirect
-	github.com/shoenig/go-m1cpu v0.1.6 // indirect
-	github.com/sirupsen/logrus v1.9.3 // indirect
-	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
 	github.com/spf13/afero v1.9.3 // indirect
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/subosito/gotenv v1.4.2 // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
-	github.com/tklauser/go-sysconf v0.3.12 // indirect
-	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/twmb/murmur3 v1.1.8 // indirect
-	github.com/urfave/cli v1.22.12 // indirect
+	github.com/urfave/cli v1.22.5 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
-	github.com/yusufpapurcu/wmi v1.2.3 // indirect
 	go.etcd.io/bbolt v1.3.9 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect
 	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/metric v1.31.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.31.0 // indirect
+	go.opentelemetry.io/otel/metric v1.28.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20241015192408-796eee8c2d53 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect
-	google.golang.org/protobuf v1.36.1 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	lukechampine.com/blake3 v1.2.1 // indirect
 )
@@ -4,15 +4,13 @@ import (
 	"context"
 	"errors"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 )
 
 // TreeService provide interface to interact with tree service using s3 data models.
 type TreeService interface {
-	GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error)
-	GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error)
-	CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error
+	GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error)
 }
 
 var (
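For context, a no-op stub satisfying the master-side interface might look like this. The method set is taken verbatim from the removed lines; the package name and the local error sentinel are assumptions (the real code presumably uses the package's `ErrNodeNotFound` declared in the `var` block above):

```go
package layer // assumed package of the interface above

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// errNotFound stands in for the package's ErrNodeNotFound sentinel.
var errNotFound = errors.New("not found")

// stubTree is a hypothetical no-op TreeService implementation, useful in tests.
type stubTree struct{}

func (stubTree) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
	return nil, errNotFound
}

func (stubTree) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
	return nil, "", nil
}

func (stubTree) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
	return errNotFound
}

// Compile-time check that the stub satisfies the interface.
var _ TreeService = stubTree{}
```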
@@ -1,4 +1,4 @@
-package data
+package api
 
 import (
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -7,21 +7,12 @@ import (
 // NodeVersion represent node from tree service.
 type NodeVersion struct {
 	BaseNodeVersion
+	DeleteMarker bool
+	IsPrefixNode bool
 }
 
 // BaseNodeVersion is minimal node info from tree service.
 // Basically used for "system" object.
 type BaseNodeVersion struct {
-	ID  uint64
 	OID oid.ID
-	IsDeleteMarker bool
-}
-
-type NodeInfo struct {
-	Meta []NodeMeta
-}
-
-type NodeMeta interface {
-	GetKey() string
-	GetValue() []byte
 }
internal/cache/buckets.go (49 lines changed, vendored)
@@ -6,7 +6,6 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"github.com/bluele/gcache"
 	"go.uber.org/zap"
 )
@@ -14,7 +13,6 @@ import (
 // BucketCache contains cache with objects and the lifetime of cache entries.
 type BucketCache struct {
 	cache    gcache.Cache
-	cidCache gcache.Cache
 	logger   *zap.Logger
 }
 
@@ -42,45 +40,14 @@ func DefaultBucketConfig(logger *zap.Logger) *Config {
 }
 
 // NewBucketCache creates an object of BucketCache.
-func NewBucketCache(config *Config, cidCache bool) *BucketCache {
-	cache := &BucketCache{
-		cache:  gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build(),
-		logger: config.Logger,
-	}
-
-	if cidCache {
-		cache.cidCache = gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
-	}
-	return cache
+func NewBucketCache(config *Config) *BucketCache {
+	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).Build()
+	return &BucketCache{cache: gc, logger: config.Logger}
 }
 
 // Get returns a cached object.
 func (o *BucketCache) Get(ns, bktName string) *data.BucketInfo {
-	return o.get(formKey(ns, bktName))
-}
-
-func (o *BucketCache) GetByCID(cnrID cid.ID) *data.BucketInfo {
-	if o.cidCache == nil {
-		return nil
-	}
-
-	entry, err := o.cidCache.Get(cnrID)
-	if err != nil {
-		return nil
-	}
-
-	key, ok := entry.(string)
-	if !ok {
-		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", key)), logs.TagField(logs.TagDatapath))
-		return nil
-	}
-
-	return o.get(key)
-}
-
-func (o *BucketCache) get(key string) *data.BucketInfo {
-	entry, err := o.cache.Get(key)
+	entry, err := o.cache.Get(formKey(ns, bktName))
 	if err != nil {
 		return nil
 	}
@@ -88,7 +55,7 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
 	result, ok := entry.(*data.BucketInfo)
 	if !ok {
 		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
+			zap.String("expected", fmt.Sprintf("%T", result)))
 		return nil
 	}
 
@@ -97,12 +64,6 @@ func (o *BucketCache) get(key string) *data.BucketInfo {
 
 // Put puts an object to cache.
 func (o *BucketCache) Put(bkt *data.BucketInfo) error {
-	if o.cidCache != nil {
-		if err := o.cidCache.Set(bkt.CID, formKey(bkt.Zone, bkt.Name)); err != nil {
-			return err
-		}
-	}
-
	return o.cache.Set(formKey(bkt.Zone, bkt.Name), bkt)
 }
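A usage sketch of the master-side constructor and its optional CID index, put together from the API above (everything here follows from the removed lines; placing it in package `cache` is an assumption):

```go
package cache // sketch, as if placed next to the implementation above

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
	"go.uber.org/zap"
)

func ExampleBucketCache() {
	// Master-side constructor: the second argument enables the CID index.
	bc := NewBucketCache(DefaultBucketConfig(zap.NewNop()), true)

	bkt := &data.BucketInfo{Name: "my-bucket"} // CID left zero for brevity
	if err := bc.Put(bkt); err != nil {
		panic(err)
	}

	// Lookup by namespace+name, or by container ID via the extra index.
	fmt.Println(bc.Get("", "my-bucket") != nil) // true
	fmt.Println(bc.GetByCID(bkt.CID) != nil)    // true: Put recorded the CID key
}
```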
internal/cache/netmap.go (65 lines changed, vendored)
@@ -1,65 +0,0 @@
-package cache
-
-import (
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/bluele/gcache"
-	"go.uber.org/zap"
-)
-
-type (
-	// NetmapCache provides cache for netmap.
-	NetmapCache struct {
-		cache  gcache.Cache
-		logger *zap.Logger
-	}
-
-	// NetmapCacheConfig stores expiration params for cache.
-	NetmapCacheConfig struct {
-		Lifetime time.Duration
-		Logger   *zap.Logger
-	}
-)
-
-const (
-	DefaultNetmapCacheLifetime = time.Minute
-	netmapCacheSize            = 1
-	netmapKey                  = "netmap"
-)
-
-// DefaultNetmapConfig returns new default cache expiration values.
-func DefaultNetmapConfig(logger *zap.Logger) *NetmapCacheConfig {
-	return &NetmapCacheConfig{
-		Lifetime: DefaultNetmapCacheLifetime,
-		Logger:   logger,
-	}
-}
-
-// NewNetmapCache creates an object of NetmapCache.
-func NewNetmapCache(config *NetmapCacheConfig) *NetmapCache {
-	gc := gcache.New(netmapCacheSize).LRU().Expiration(config.Lifetime).Build()
-	return &NetmapCache{cache: gc, logger: config.Logger}
-}
-
-func (c *NetmapCache) Get() *netmap.NetMap {
-	entry, err := c.cache.Get(netmapKey)
-	if err != nil {
-		return nil
-	}
-
-	result, ok := entry.(netmap.NetMap)
-	if !ok {
-		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)), logs.TagField(logs.TagDatapath))
-		return nil
-	}
-
-	return &result
-}
-
-func (c *NetmapCache) Put(nm netmap.NetMap) error {
-	return c.cache.Set(netmapKey, nm)
-}
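A usage sketch of the removed cache, assembled from its own API above; it would only compile inside this module because the package is internal:

```go
package main

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"go.uber.org/zap"
)

func main() {
	c := cache.NewNetmapCache(cache.DefaultNetmapConfig(zap.NewNop()))

	var nm netmap.NetMap // empty netmap, just to exercise the API
	if err := c.Put(nm); err != nil {
		panic(err)
	}

	// Get returns nil once the configured lifetime (one minute by default)
	// has expired; right after Put the entry is still present.
	fmt.Println(c.Get() != nil) // true
}
```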
|
@ -2,7 +2,6 @@ package data
|
||||||
|
|
||||||
import (
|
import (
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type BucketInfo struct {
|
type BucketInfo struct {
|
||||||
|
@ -10,5 +9,4 @@ type BucketInfo struct {
|
||||||
Zone string // container zone from system attribute
|
Zone string // container zone from system attribute
|
||||||
CID cid.ID
|
CID cid.ID
|
||||||
HomomorphicHashDisabled bool
|
HomomorphicHashDisabled bool
|
||||||
PlacementPolicy netmap.PlacementPolicy
|
|
||||||
}
|
}
|
|
@@ -26,9 +26,7 @@ const (
 	attrOID          = "OID"
 	attrCreated      = "Created"
 	attrFileName     = "FileName"
-	attrFilePath     = "FilePath"
 	attrSize         = "Size"
-	attrDeleteMarker = "IsDeleteMarker"
 )
 
 type (
@@ -47,7 +45,6 @@ type (
 		Size           string
 		IsDir          bool
 		GetURL         string
-		IsDeleteMarker bool
 	}
 )
 
@@ -58,7 +55,6 @@ func newListObjectsResponseS3(attrs map[string]string) ResponseObject {
 		FileName: attrs[attrFileName],
 		Size:     attrs[attrSize],
 		IsDir:    attrs[attrOID] == "",
-		IsDeleteMarker: attrs[attrDeleteMarker] == "true",
 	}
 }
 
@@ -173,7 +169,7 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
 		objects: make([]ResponseObject, 0, len(nodes)),
 	}
 	for _, node := range nodes {
-		meta := node.Meta
+		meta := node.GetMeta()
 		if meta == nil {
 			continue
 		}
@@ -182,9 +178,6 @@ func (h *Handler) getDirObjectsS3(ctx context.Context, bucketInfo *data.BucketIn
 			attrs[m.GetKey()] = string(m.GetValue())
 		}
 		obj := newListObjectsResponseS3(attrs)
-		if obj.IsDeleteMarker {
-			continue
-		}
 		obj.FilePath = prefix + obj.FileName
 		obj.GetURL = "/get/" + bucketInfo.Name + urlencode(obj.FilePath)
 		result.objects = append(result.objects, obj)
@@ -230,7 +223,7 @@ func (h *Handler) getDirObjectsNative(ctx context.Context, bucketInfo *data.Buck
 	}
 	for objExt := range resp {
 		if objExt.Error != nil {
-			log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error), logs.TagField(logs.TagExternalStorage))
+			log.Error(logs.FailedToHeadObject, zap.Error(objExt.Error))
 			result.hasErrors = true
 			continue
 		}
@@ -273,7 +266,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
 			})
 			if err != nil {
 				wg.Done()
-				log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err), logs.TagField(logs.TagDatapath))
+				log.Warn(logs.FailedToSumbitTaskToPool, zap.Error(err))
 			}
 			select {
 			case <-ctx.Done():
@@ -283,7 +276,7 @@ func (h *Handler) headDirObjects(ctx context.Context, cnrID cid.ID, objectIDs Re
 		}
 		})
 		if err != nil {
-			log.Error(logs.FailedToIterateOverResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
+			log.Error(logs.FailedToIterateOverResponse, zap.Error(err))
 		}
 		wg.Wait()
 	}()
@ -1,22 +1,19 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
"archive/zip"
|
||||||
"bufio"
|
"bufio"
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
|
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
|
||||||
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
|
||||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
|
||||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||||
|
@@ -26,55 +23,28 @@ import (

 // DownloadByAddressOrBucketName handles download requests using simple cid/oid or bucketname/key format.
 func (h *Handler) DownloadByAddressOrBucketName(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAddressOrBucketName")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
-	cidParam := c.UserValue("cid").(string)
-	oidParam := c.UserValue("oid").(string)
-	downloadParam := c.QueryArgs().GetBool("download")
-
-	log := utils.GetReqLogOrDefault(ctx, h.log).With(
-		zap.String("cid", cidParam),
-		zap.String("oid", oidParam),
-	)
-
-	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-
-	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
-	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
-		log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
-			zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
-		logAndSendBucketError(c, log, checkS3Err)
-		return
-	}
-
-	req := newRequest(c, log)
-
-	var objID oid.ID
-	if checkS3Err == nil && shouldDownload(oidParam, downloadParam) {
-		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.receiveFile)
-	} else if err = objID.DecodeString(oidParam); err == nil {
-		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.receiveFile)
-	} else {
-		h.browseIndex(c, checkS3Err != nil)
-	}
-}
-
-func shouldDownload(oidParam string, downloadParam bool) bool {
-	return !isDir(oidParam) || downloadParam
+	oidURLParam := c.UserValue("oid").(string)
+	downloadQueryParam := c.QueryArgs().GetBool("download")
+
+	switch {
+	case isObjectID(oidURLParam):
+		h.byNativeAddress(c, h.receiveFile)
+	case !isContainerRoot(oidURLParam) && (downloadQueryParam || !isDir(oidURLParam)):
+		h.byS3Path(c, h.receiveFile)
+	default:
+		h.browseIndex(c)
+	}
+}
+
+func (h *Handler) newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) *request {
+	return &request{
+		RequestCtx: ctx,
+		log:        log,
+	}
 }

 // DownloadByAttribute handles attribute-based download requests.
 func (h *Handler) DownloadByAttribute(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadByAttribute")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
 	h.byAttribute(c, h.receiveFile)
 }
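The two versions above route the same URL shapes quite differently, so a compact model of the dispatch rule may help. This is a minimal, self-contained sketch, not gateway code: `looksLikeObjectID` is a toy stand-in for the real `isObjectID` check (which attempts to base58-decode the segment), and the container-root case is folded into `isDir`.

```go
package main

import "fmt"

// looksLikeObjectID is a toy stand-in for the gateway's isObjectID:
// the real check attempts to decode the segment as a FrostFS object ID.
func looksLikeObjectID(s string) bool { return len(s) == 44 }

func isDir(s string) bool { return s == "" || s[len(s)-1] == '/' }

// dispatch mirrors the shape of the switch in DownloadByAddressOrBucketName.
func dispatch(oidParam string, download bool) string {
	switch {
	case looksLikeObjectID(oidParam):
		return "byNativeAddress" // cid/oid form
	case oidParam != "" && (download || !isDir(oidParam)):
		return "byS3Path" // bucketname/key form
	default:
		return "browseIndex" // directory listing / index page
	}
}

func main() {
	fmt.Println(dispatch("docs/readme.txt", false)) // byS3Path
	fmt.Println(dispatch("docs/", false))           // browseIndex
	fmt.Println(dispatch("docs/", true))            // byS3Path (?download=true)
}
```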
@@ -94,64 +64,13 @@ func (h *Handler) search(ctx context.Context, cnrID cid.ID, key, val string, op
 	return h.frostfs.SearchObjects(ctx, prm)
 }

-// DownloadZip handles zip by prefix requests.
-func (h *Handler) DownloadZip(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadZip")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
-	scid, _ := c.UserValue("cid").(string)
-
-	log := utils.GetReqLogOrDefault(ctx, h.log)
-	bktInfo, err := h.getBucketInfo(ctx, scid, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
-	if err != nil {
-		return
-	}
-
-	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
-	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
-
-	c.SetBodyStreamWriter(h.getZipResponseWriter(ctx, log, resSearch, bktInfo))
-}
-
-func (h *Handler) getZipResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
-	return func(w *bufio.Writer) {
-		defer resSearch.Close()
-
-		buf := make([]byte, 3<<20)
-		zipWriter := zip.NewWriter(w)
-		var objectsWritten int
-
-		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
-			func(obj *object.Object) (io.Writer, error) {
-				objectsWritten++
-				return h.createZipFile(zipWriter, obj)
-			}),
-		)
-		if errIter != nil {
-			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
-			return
-		} else if objectsWritten == 0 {
-			log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
-		}
-		if err := zipWriter.Close(); err != nil {
-			log.Error(logs.CloseZipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
-		}
-	}
-}
-
-func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
+func (h *Handler) addObjectToZip(zw *zip.Writer, obj *object.Object) (io.Writer, error) {
 	method := zip.Store
-	if h.config.ArchiveCompression() {
+	if h.config.ZipCompression() {
 		method = zip.Deflate
 	}

-	filePath := getFilePath(obj)
+	filePath := getZipFilePath(obj)
 	if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
 		return nil, fmt.Errorf("invalid filepath '%s'", filePath)
 	}
@@ -163,143 +82,99 @@ func (h *Handler) createZipFile(zw *zip.Writer, obj *object.Object) (io.Writer,
 	})
 }

-// DownloadTar forms tar.gz from objects by prefix.
-func (h *Handler) DownloadTar(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.DownloadTar")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
-	scid, _ := c.UserValue("cid").(string)
-
-	log := utils.GetReqLogOrDefault(ctx, h.log)
-
-	bktInfo, err := h.getBucketInfo(ctx, scid, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	resSearch, err := h.searchObjectsByPrefix(c, log, bktInfo.CID)
-	if err != nil {
-		return
-	}
-
-	c.Response.Header.Set(fasthttp.HeaderContentType, "application/gzip")
-	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.tar.gz\"")
-
-	c.SetBodyStreamWriter(h.getTarResponseWriter(ctx, log, resSearch, bktInfo))
-}
-
-func (h *Handler) getTarResponseWriter(ctx context.Context, log *zap.Logger, resSearch ResObjectSearch, bktInfo *data.BucketInfo) func(w *bufio.Writer) {
-	return func(w *bufio.Writer) {
-		defer resSearch.Close()
-
-		compressionLevel := gzip.NoCompression
-		if h.config.ArchiveCompression() {
-			compressionLevel = gzip.DefaultCompression
-		}
-
-		// ignore error because it's not nil only if compressionLevel argument is invalid
-		gzipWriter, _ := gzip.NewWriterLevel(w, compressionLevel)
-		tarWriter := tar.NewWriter(gzipWriter)
-
-		defer func() {
-			if err := tarWriter.Close(); err != nil {
-				log.Error(logs.CloseTarWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
-			}
-			if err := gzipWriter.Close(); err != nil {
-				log.Error(logs.CloseGzipWriter, zap.Error(err), logs.TagField(logs.TagDatapath))
-			}
-		}()
-
-		var objectsWritten int
-		buf := make([]byte, 3<<20) // the same as for upload
-
-		errIter := resSearch.Iterate(h.putObjectToArchive(ctx, log, bktInfo.CID, buf,
-			func(obj *object.Object) (io.Writer, error) {
-				objectsWritten++
-				return h.createTarFile(tarWriter, obj)
-			}),
-		)
-		if errIter != nil {
-			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter), logs.TagField(logs.TagDatapath))
-		} else if objectsWritten == 0 {
-			log.Warn(logs.ObjectsNotFound, logs.TagField(logs.TagDatapath))
-		}
-	}
-}
-
-func (h *Handler) createTarFile(tw *tar.Writer, obj *object.Object) (io.Writer, error) {
-	filePath := getFilePath(obj)
-	if len(filePath) == 0 || filePath[len(filePath)-1] == '/' {
-		return nil, fmt.Errorf("invalid filepath '%s'", filePath)
-	}
-
-	return tw, tw.WriteHeader(&tar.Header{
-		Name: filePath,
-		Mode: 0655,
-		Size: int64(obj.PayloadSize()),
-	})
-}
-
-func (h *Handler) putObjectToArchive(ctx context.Context, log *zap.Logger, cnrID cid.ID, buf []byte, createArchiveHeader func(obj *object.Object) (io.Writer, error)) func(id oid.ID) bool {
-	return func(id oid.ID) bool {
-		log = log.With(zap.String("oid", id.EncodeToString()))
-
-		prm := PrmObjectGet{
-			PrmAuth: PrmAuth{
-				BearerToken: bearerToken(ctx),
-			},
-			Address: newAddress(cnrID, id),
-		}
-
-		resGet, err := h.frostfs.GetObject(ctx, prm)
-		if err != nil {
-			log.Error(logs.FailedToGetObject, zap.Error(err), logs.TagField(logs.TagExternalStorage))
-			return false
-		}
-
-		fileWriter, err := createArchiveHeader(&resGet.Header)
-		if err != nil {
-			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
-			return false
-		}
-
-		if err = writeToArchive(resGet, fileWriter, buf); err != nil {
-			log.Error(logs.FailedToAddObjectToArchive, zap.Error(err), logs.TagField(logs.TagDatapath))
-			return false
-		}
-
-		return false
-	}
-}
-
-func (h *Handler) searchObjectsByPrefix(c *fasthttp.RequestCtx, log *zap.Logger, cnrID cid.ID) (ResObjectSearch, error) {
-	scid, _ := c.UserValue("cid").(string)
-	prefix, _ := c.UserValue("prefix").(string)
-
-	ctx := utils.GetContextFromRequest(c)
-
-	prefix, err := url.QueryUnescape(prefix)
-	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix),
-			zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
-		return nil, err
-	}
-
-	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
-
-	resSearch, err := h.search(ctx, cnrID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
-	if err != nil {
-		log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
-		ResponseError(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
-		return nil, err
-	}
-	return resSearch, nil
-}
-
-func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
-	var err error
-	if _, err = io.CopyBuffer(objWriter, resGet.Payload, buf); err != nil {
+// DownloadZipped handles zip by prefix requests.
+func (h *Handler) DownloadZipped(c *fasthttp.RequestCtx) {
+	scid, _ := c.UserValue("cid").(string)
+	prefix, _ := c.UserValue("prefix").(string)
+
+	ctx := utils.GetContextFromRequest(c)
+	log := utils.GetReqLogOrDefault(ctx, h.log)
+
+	prefix, err := url.QueryUnescape(prefix)
+	if err != nil {
+		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("prefix", prefix), zap.Error(err))
+		response.Error(c, "could not unescape prefix: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	log = log.With(zap.String("cid", scid), zap.String("prefix", prefix))
+
+	bktInfo, err := h.getBucketInfo(ctx, scid, log)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	resSearch, err := h.search(ctx, bktInfo.CID, object.AttributeFilePath, prefix, object.MatchCommonPrefix)
+	if err != nil {
+		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
+		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	c.Response.Header.Set(fasthttp.HeaderContentType, "application/zip")
+	c.Response.Header.Set(fasthttp.HeaderContentDisposition, "attachment; filename=\"archive.zip\"")
+	c.Response.SetStatusCode(http.StatusOK)
+
+	c.SetBodyStreamWriter(func(w *bufio.Writer) {
+		defer resSearch.Close()
+
+		zipWriter := zip.NewWriter(w)
+
+		var bufZip []byte
+		var addr oid.Address
+
+		empty := true
+		called := false
+		btoken := bearerToken(ctx)
+		addr.SetContainer(bktInfo.CID)
+
+		errIter := resSearch.Iterate(func(id oid.ID) bool {
+			called = true
+
+			if empty {
+				bufZip = make([]byte, 3<<20) // the same as for upload
+			}
+			empty = false
+
+			addr.SetObject(id)
+			if err = h.zipObject(ctx, zipWriter, addr, btoken, bufZip); err != nil {
+				log.Error(logs.FailedToAddObjectToArchive, zap.String("oid", id.EncodeToString()), zap.Error(err))
+			}
+
+			return false
+		})
+		if errIter != nil {
+			log.Error(logs.IteratingOverSelectedObjectsFailed, zap.Error(errIter))
+		} else if !called {
+			log.Error(logs.ObjectsNotFound)
+		}
+
+		if err = zipWriter.Close(); err != nil {
+			log.Error(logs.CloseZipWriter, zap.Error(err))
+		}
+	})
+}
+
+func (h *Handler) zipObject(ctx context.Context, zipWriter *zip.Writer, addr oid.Address, btoken *bearer.Token, bufZip []byte) error {
+	prm := PrmObjectGet{
+		PrmAuth: PrmAuth{
+			BearerToken: btoken,
+		},
+		Address: addr,
+	}
+
+	resGet, err := h.frostfs.GetObject(ctx, prm)
+	if err != nil {
+		return fmt.Errorf("get FrostFS object: %v", err)
+	}
+
+	objWriter, err := h.addObjectToZip(zipWriter, &resGet.Header)
+	if err != nil {
+		return fmt.Errorf("zip create header: %v", err)
+	}
+
+	if _, err = io.CopyBuffer(objWriter, resGet.Payload, bufZip); err != nil {
 		return fmt.Errorf("copy object payload to zip file: %v", err)
 	}
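The master-side `getTarResponseWriter` above nests a tar writer inside a gzip writer, and the close order in its deferred cleanup is load-bearing: the tar trailer must reach the gzip stream before the gzip footer is written. A minimal stdlib sketch of the same nesting, independent of the gateway:

```go
package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
)

func main() {
	var out bytes.Buffer

	// Same nesting as getTarResponseWriter: gzip wraps the output
	// stream, tar wraps gzip. gzip.NewWriterLevel only errors on an
	// invalid level, so a stdlib constant is safe to pass unchecked.
	gzw, _ := gzip.NewWriterLevel(&out, gzip.DefaultCompression)
	tw := tar.NewWriter(gzw)

	payload := []byte("hello")
	// A header must be written before the file body, mirroring
	// createTarFile in the diff above.
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o655, Size: int64(len(payload))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(payload); err != nil {
		panic(err)
	}

	// Close order matters: the tar writer flushes its trailer into the
	// gzip writer, which must then be closed to emit the gzip footer.
	if err := tw.Close(); err != nil {
		panic(err)
	}
	if err := gzw.Close(); err != nil {
		panic(err)
	}

	fmt.Printf("archive is %d bytes\n", out.Len())
}
```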
@@ -307,10 +182,14 @@ func writeToArchive(resGet *Object, objWriter io.Writer, buf []byte) error {
 		return fmt.Errorf("object body close error: %w", err)
 	}

+	if err = zipWriter.Flush(); err != nil {
+		return fmt.Errorf("flush zip writer: %v", err)
+	}
+
 	return nil
 }

-func getFilePath(obj *object.Object) string {
+func getZipFilePath(obj *object.Object) string {
 	for _, attr := range obj.Attributes() {
 		if attr.Key() == object.AttributeFilePath {
 			return attr.Value()
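For a sense of how the streamed archive endpoint is consumed, here is a sketch of a client that fetches the zip and lists its entries. The gateway address and the `/zip/<cid>/<prefix>` route shape are assumptions for illustration only; check the deployed routing before relying on them.

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical gateway address, container ID, and prefix.
	url := "http://localhost:8080/zip/<cid>/photos"

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The gateway streams the archive, so buffer it before opening:
	// zip.NewReader needs random access (an io.ReaderAt).
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	zr, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
	if err != nil {
		panic(err)
	}
	for _, f := range zr.File {
		fmt.Printf("%s (%d bytes)\n", f.Name, f.UncompressedSize64)
	}
}
```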
@@ -50,8 +50,7 @@ func filterHeaders(l *zap.Logger, header *fasthttp.RequestHeader) (map[string]st
 		l.Debug(logs.AddAttributeToResultObject,
 			zap.String("key", k),
-			zap.String("val", v),
-			logs.TagField(logs.TagDatapath))
+			zap.String("val", v))
 	})

 	return result, err
@@ -11,10 +11,10 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
@@ -29,13 +29,12 @@ import (

 type Config interface {
 	DefaultTimestamp() bool
-	ArchiveCompression() bool
+	ZipCompression() bool
 	ClientCut() bool
 	IndexPageEnabled() bool
 	IndexPageTemplate() string
 	BufferMaxSizeForPut() uint64
 	NamespaceHeader() string
-	EnableFilepathFallback() bool
 }

 // PrmContainer groups parameters of FrostFS.Container operation.
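The `Config` interface above is what both versions inject into the handler. A minimal sketch of an implementation, assuming fixed values and reduced to a few of the methods shown (the real gateway wires these to its configuration file, which is not reproduced here):

```go
package main

import "fmt"

// Config mirrors a subset of the master-side interface from the diff above.
type Config interface {
	DefaultTimestamp() bool
	ArchiveCompression() bool
	IndexPageEnabled() bool
	EnableFilepathFallback() bool
}

// staticConfig is a hypothetical fixed-value implementation; field and
// type names here are illustrative, not taken from the repository.
type staticConfig struct {
	defaultTimestamp   bool
	archiveCompression bool
	indexPage          bool
	filepathFallback   bool
}

func (c staticConfig) DefaultTimestamp() bool       { return c.defaultTimestamp }
func (c staticConfig) ArchiveCompression() bool     { return c.archiveCompression }
func (c staticConfig) IndexPageEnabled() bool       { return c.indexPage }
func (c staticConfig) EnableFilepathFallback() bool { return c.filepathFallback }

func main() {
	var cfg Config = staticConfig{archiveCompression: true}
	fmt.Println("compress archives:", cfg.ArchiveCompression())
}
```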
@@ -140,8 +139,6 @@ var (
 	ErrAccessDenied = errors.New("access denied")
 	// ErrGatewayTimeout is returned from FrostFS in case of timeout, deadline exceeded etc.
 	ErrGatewayTimeout = errors.New("gateway timeout")
-	// ErrQuotaLimitReached is returned from FrostFS in case of quota exceeded.
-	ErrQuotaLimitReached = errors.New("quota limit reached")
 )

 // FrostFS represents virtual connection to FrostFS network.
@@ -167,7 +164,7 @@ type Handler struct {
 	ownerID           *user.ID
 	config            Config
 	containerResolver ContainerResolver
-	tree              layer.TreeService
+	tree              *tree.Tree
 	cache             *cache.BucketCache
 	workerPool        *ants.Pool
 }
@@ -180,7 +177,7 @@ type AppParams struct {
 	Cache   *cache.BucketCache
 }

-func New(params *AppParams, config Config, tree layer.TreeService, workerPool *ants.Pool) *Handler {
+func New(params *AppParams, config Config, tree *tree.Tree, workerPool *ants.Pool) *Handler {
 	return &Handler{
 		log:     params.Logger,
 		frostfs: params.FrostFS,
@@ -195,42 +192,77 @@ func New(params *AppParams, config Config, tree layer.TreeService, workerPool *a

 // byNativeAddress is a wrapper for function (e.g. request.headObject, request.receiveFile) that
 // prepares request and object address to it.
-func (h *Handler) byNativeAddress(ctx context.Context, req request, cnrID cid.ID, objID oid.ID, handler func(context.Context, request, oid.Address)) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byNativeAddress")
-	defer span.End()
-
-	addr := newAddress(cnrID, objID)
-	handler(ctx, req, addr)
+func (h *Handler) byNativeAddress(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	idCnr, _ := c.UserValue("cid").(string)
+	idObj, _ := url.PathUnescape(c.UserValue("oid").(string))
+
+	ctx := utils.GetContextFromRequest(c)
+	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
+	log := reqLog.With(zap.String("cid", idCnr), zap.String("oid", idObj))
+
+	bktInfo, err := h.getBucketInfo(ctx, idCnr, log)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	objID := new(oid.ID)
+	if err = objID.DecodeString(idObj); err != nil {
+		log.Error(logs.WrongObjectID, zap.Error(err))
+		response.Error(c, "wrong object id", fasthttp.StatusBadRequest)
+		return
+	}
+
+	addr := newAddress(bktInfo.CID, *objID)
+
+	f(ctx, *h.newRequest(c, log), addr)
 }

 // byS3Path is a wrapper for function (e.g. request.headObject, request.receiveFile) that
 // resolves object address from S3-like path <bucket name>/<object key>.
-func (h *Handler) byS3Path(ctx context.Context, req request, cnrID cid.ID, path string, handler func(context.Context, request, oid.Address)) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "handler.byS3Path")
-	defer span.End()
-
-	c, log := req.RequestCtx, req.log
-
-	foundOID, err := h.tree.GetLatestVersion(ctx, &cnrID, path)
-	if err != nil {
-		log.Error(logs.FailedToGetLatestVersionOfObject, zap.Error(err), zap.String("cid", cnrID.String()),
-			zap.String("path", path), logs.TagField(logs.TagExternalStorageTree))
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	if foundOID.IsDeleteMarker {
-		log.Error(logs.ObjectWasDeleted, logs.TagField(logs.TagExternalStorageTree))
-		ResponseError(c, "object deleted", fasthttp.StatusNotFound)
-		return
-	}
-
-	addr := newAddress(cnrID, foundOID.OID)
-	handler(ctx, newRequest(c, log), addr)
+func (h *Handler) byS3Path(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	bucketname := c.UserValue("cid").(string)
+	key := c.UserValue("oid").(string)
+
+	ctx := utils.GetContextFromRequest(c)
+	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
+	log := reqLog.With(zap.String("bucketname", bucketname), zap.String("key", key))
+
+	unescapedKey, err := url.QueryUnescape(key)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	bktInfo, err := h.getBucketInfo(ctx, bucketname, log)
+	if err != nil {
+		logAndSendBucketError(c, log, err)
+		return
+	}
+
+	foundOid, err := h.tree.GetLatestVersion(ctx, &bktInfo.CID, unescapedKey)
+	if err != nil {
+		if errors.Is(err, tree.ErrNodeAccessDenied) {
+			response.Error(c, "Access Denied", fasthttp.StatusForbidden)
+		} else {
+			response.Error(c, "object wasn't found", fasthttp.StatusNotFound)
+			log.Error(logs.GetLatestObjectVersion, zap.Error(err))
+		}
+		return
+	}
+	if foundOid.DeleteMarker {
+		log.Error(logs.ObjectWasDeleted)
+		response.Error(c, "object deleted", fasthttp.StatusNotFound)
+		return
+	}
+	addr := newAddress(bktInfo.CID, foundOid.OID)
+
+	f(ctx, *h.newRequest(c, log), addr)
 }

 // byAttribute is a wrapper similar to byNativeAddress.
-func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Context, request, oid.Address)) {
-	cidParam, _ := c.UserValue("cid").(string)
+func (h *Handler) byAttribute(c *fasthttp.RequestCtx, f func(context.Context, request, oid.Address)) {
+	scid, _ := c.UserValue("cid").(string)
 	key, _ := c.UserValue("attr_key").(string)
 	val, _ := c.UserValue("attr_val").(string)
@@ -239,92 +271,55 @@ func (h *Handler) byAttribute(c *fasthttp.RequestCtx, handler func(context.Conte

 	key, err := url.QueryUnescape(key)
 	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_key", key),
-			zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
+		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_key", key), zap.Error(err))
+		response.Error(c, "could not unescape attr_key: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}

 	val, err = url.QueryUnescape(val)
 	if err != nil {
-		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", cidParam), zap.String("attr_val", val),
-			zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
+		log.Error(logs.FailedToUnescapeQuery, zap.String("cid", scid), zap.String("attr_val", val), zap.Error(err))
+		response.Error(c, "could not unescape attr_val: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}

-	if key == attrFileName {
-		val = prepareFileName(val)
-	}
-
-	log = log.With(zap.String("cid", cidParam), zap.String("attr_key", key), zap.String("attr_val", val))
+	log = log.With(zap.String("cid", scid), zap.String("attr_key", key), zap.String("attr_val", val))

-	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
+	bktInfo, err := h.getBucketInfo(ctx, scid, log)
 	if err != nil {
 		logAndSendBucketError(c, log, err)
 		return
 	}

-	objID, err := h.findObjectByAttribute(ctx, log, bktInfo.CID, key, val)
-	if err != nil {
-		if errors.Is(err, io.EOF) {
-			ResponseError(c, err.Error(), fasthttp.StatusNotFound)
-			return
-		}
-
-		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-
-	var addr oid.Address
-	addr.SetContainer(bktInfo.CID)
-	addr.SetObject(objID)
-
-	handler(ctx, newRequest(c, log), addr)
-}
-
-func (h *Handler) findObjectByAttribute(ctx context.Context, log *zap.Logger, cnrID cid.ID, attrKey, attrVal string) (oid.ID, error) {
-	res, err := h.search(ctx, cnrID, attrKey, attrVal, object.MatchStringEqual)
+	res, err := h.search(ctx, bktInfo.CID, key, val, object.MatchStringEqual)
 	if err != nil {
-		log.Error(logs.CouldNotSearchForObjects, zap.Error(err), logs.TagField(logs.TagExternalStorage))
-		return oid.ID{}, fmt.Errorf("could not search for objects: %w", err)
+		log.Error(logs.CouldNotSearchForObjects, zap.Error(err))
+		response.Error(c, "could not search for objects: "+err.Error(), fasthttp.StatusBadRequest)
+		return
 	}
 	defer res.Close()

 	buf := make([]oid.ID, 1)

 	n, err := res.Read(buf)
 	if n == 0 {
-		switch {
-		case errors.Is(err, io.EOF) && h.needSearchByFileName(attrKey, attrVal):
-			log.Debug(logs.ObjectNotFoundByFilePathTrySearchByFileName, logs.TagField(logs.TagExternalStorage))
-			return h.findObjectByAttribute(ctx, log, cnrID, attrFileName, prepareFileName(attrVal))
-		case errors.Is(err, io.EOF):
-			log.Error(logs.ObjectNotFound, zap.Error(err), logs.TagField(logs.TagExternalStorage))
-			return oid.ID{}, fmt.Errorf("object not found: %w", err)
-		default:
-			log.Error(logs.ReadObjectListFailed, zap.Error(err), logs.TagField(logs.TagExternalStorage))
-			return oid.ID{}, fmt.Errorf("read object list failed: %w", err)
-		}
-	}
-
-	return buf[0], nil
-}
-
-func (h *Handler) needSearchByFileName(key, val string) bool {
-	if key != attrFilePath || !h.config.EnableFilepathFallback() {
-		return false
-	}
-
-	return strings.HasPrefix(val, "/") && strings.Count(val, "/") == 1 || !strings.Contains(val, "/")
-}
-
-func prepareFileName(fileName string) string {
-	if strings.HasPrefix(fileName, "/") {
-		return fileName[1:]
-	}
-
-	return fileName
+		if errors.Is(err, io.EOF) {
+			log.Error(logs.ObjectNotFound, zap.Error(err))
+			response.Error(c, "object not found", fasthttp.StatusNotFound)
+			return
+		}
+
+		log.Error(logs.ReadObjectListFailed, zap.Error(err))
+		response.Error(c, "read object list failed: "+err.Error(), fasthttp.StatusBadRequest)
+		return
+	}
+
+	var addrObj oid.Address
+	addrObj.SetContainer(bktInfo.CID)
+	addrObj.SetObject(buf[0])
+
+	f(ctx, *h.newRequest(c, log), addrObj)
 }

 // resolveContainer decode container id, if it's not a valid container id
@@ -353,16 +348,11 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *

 	cnrID, err := h.resolveContainer(ctx, containerName)
 	if err != nil {
-		log.Error(logs.CouldNotResolveContainerID, zap.Error(err), zap.String("cnrName", containerName),
-			logs.TagField(logs.TagDatapath))
 		return nil, err
 	}

 	bktInfo, err := h.readContainer(ctx, *cnrID)
 	if err != nil {
-		log.Error(logs.CouldNotGetContainerInfo, zap.Error(err), zap.String("cnrName", containerName),
-			zap.String("cnrName", cnrID.String()),
-			logs.TagField(logs.TagExternalStorage))
 		return nil, err
 	}
@@ -370,8 +360,7 @@ func (h *Handler) getBucketInfo(ctx context.Context, containerName string, log *
 		log.Warn(logs.CouldntPutBucketIntoCache,
 			zap.String("bucket name", bktInfo.Name),
 			zap.Stringer("bucket cid", bktInfo.CID),
-			zap.Error(err),
-			logs.TagField(logs.TagDatapath))
+			zap.Error(err))
 	}

 	return bktInfo, nil
@@ -395,16 +384,11 @@ func (h *Handler) readContainer(ctx context.Context, cnrID cid.ID) (*data.Bucket
 	}

 	bktInfo.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(*res)
-	bktInfo.PlacementPolicy = res.PlacementPolicy()

 	return bktInfo, err
 }

-func (h *Handler) browseIndex(c *fasthttp.RequestCtx, isNativeList bool) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.browseIndex")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
+func (h *Handler) browseIndex(c *fasthttp.RequestCtx) {
 	if !h.config.IndexPageEnabled() {
 		c.SetStatusCode(fasthttp.StatusNotFound)
 		return
@@ -413,6 +397,7 @@ func (h *Handler) browseIndex(c *fasthttp.RequestCtx) {
 	cidURLParam := c.UserValue("cid").(string)
 	oidURLParam := c.UserValue("oid").(string)

+	ctx := utils.GetContextFromRequest(c)
 	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
 	log := reqLog.With(zap.String("cid", cidURLParam), zap.String("oid", oidURLParam))
@@ -429,9 +414,18 @@ func (h *Handler) browseIndex(c *fasthttp.RequestCtx) {
 	}

 	listFunc := h.getDirObjectsS3
-	if isNativeList {
-		// tree probe failed, trying to use native
-		listFunc = h.getDirObjectsNative
+	isNativeList := false
+
+	err = h.tree.CheckSettingsNodeExist(ctx, bktInfo)
+	if err != nil {
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			// tree probe failed, try to use native
+			listFunc = h.getDirObjectsNative
+			isNativeList = true
+		} else {
+			logAndSendBucketError(c, log, err)
+			return
+		}
 	}

 	h.browseObjects(c, browseParams{
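Both versions fall back from the S3 tree listing to a native object search when the bucket has no settings node; the hunk above only moves where that probe happens. A simplified, self-contained model of the probe-and-fallback pattern, with `errNodeNotFound` standing in for `tree.ErrNodeNotFound` / `layer.ErrNodeNotFound` and stub listing functions in place of the real ones:

```go
package main

import (
	"errors"
	"fmt"
)

// errNodeNotFound stands in for the gateway's tree "not found" sentinel.
var errNodeNotFound = errors.New("node not found")

func listViaTree() ([]string, error) { return nil, errNodeNotFound }
func listNative() ([]string, error)  { return []string{"a.txt", "b.txt"}, nil }

// list models the browseIndex logic: probe the tree service first and
// fall back to a native object search only when the settings node is
// missing; any other error is surfaced to the caller.
func list() ([]string, error) {
	res, err := listViaTree()
	switch {
	case err == nil:
		return res, nil
	case errors.Is(err, errNodeNotFound):
		return listNative() // bucket was not created via the S3 gateway
	default:
		return nil, err
	}
}

func main() {
	res, err := list()
	fmt.Println(res, err)
}
```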
@@ -517,7 +517,7 @@ func DoFuzzDownloadZipped(input []byte) int {
 	r.SetUserValue("cid", cid)
 	r.SetUserValue("prefix", prefix)

-	hc.Handler().DownloadZip(r)
+	hc.Handler().DownloadZipped(r)

 	return fuzzSuccessExitCode
 }
@@ -14,8 +14,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler/middleware"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/resolver"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
@@ -32,41 +32,25 @@ import (
 	"go.uber.org/zap"
 )

-type treeServiceMock struct {
-	system map[string]map[string]*data.BaseNodeVersion
-}
-
-func newTreeService() *treeServiceMock {
-	return &treeServiceMock{
-		system: make(map[string]map[string]*data.BaseNodeVersion),
-	}
-}
-
-func (t *treeServiceMock) CheckSettingsNodeExists(context.Context, *data.BucketInfo) error {
-	_, ok := t.system["bucket-settings"]
-	if !ok {
-		return layer.ErrNodeNotFound
-	}
-	return nil
-}
-
-func (t *treeServiceMock) GetSubTreeByPrefix(context.Context, *data.BucketInfo, string, bool) ([]data.NodeInfo, string, error) {
-	return nil, "", nil
-}
-
-func (t *treeServiceMock) GetLatestVersion(context.Context, *cid.ID, string) (*data.NodeVersion, error) {
-	return nil, nil
-}
+type treeClientMock struct {
+}
+
+func (t *treeClientMock) GetNodes(context.Context, *tree.GetNodesParams) ([]tree.NodeResponse, error) {
+	return nil, nil
+}
+
+func (t *treeClientMock) GetSubTree(context.Context, *data.BucketInfo, string, []uint64, uint32, bool) ([]tree.NodeResponse, error) {
+	return nil, nil
+}

 type configMock struct {
-	additionalSearch bool
 }

 func (c *configMock) DefaultTimestamp() bool {
 	return false
 }

-func (c *configMock) ArchiveCompression() bool {
+func (c *configMock) ZipCompression() bool {
 	return false
 }
@@ -94,17 +78,13 @@ func (c *configMock) NamespaceHeader() string {
 	return ""
 }

-func (c *configMock) EnableFilepathFallback() bool {
-	return c.additionalSearch
-}
-
 type handlerContext struct {
 	key   *keys.PrivateKey
 	owner user.ID

 	h       *Handler
 	frostfs *TestFrostFS
-	tree    *treeServiceMock
+	tree    *treeClientMock
 	cfg     *configMock
 }
@@ -142,17 +122,17 @@ func prepareHandlerContext() (*handlerContext, error) {
 			Size:     1,
 			Lifetime: 1,
 			Logger:   logger,
-		}, false),
+		}),
 	}

-	treeMock := newTreeService()
+	treeMock := &treeClientMock{}
 	cfgMock := &configMock{}

-	workerPool, err := ants.NewPool(1)
+	workerPool, err := ants.NewPool(1000)
 	if err != nil {
 		return nil, err
 	}
-	handler := New(params, cfgMock, treeMock, workerPool)
+	handler := New(params, cfgMock, tree.NewTree(treeMock), workerPool)

 	return &handlerContext{
 		key: key,
@@ -219,8 +199,10 @@ func TestBasic(t *testing.T) {
 	require.NoError(t, err)

 	obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
-	attr := prepareObjectAttributes(object.AttributeFilePath, objFileName)
-	obj.SetAttributes(append(obj.Attributes(), attr)...)
+	attr := object.NewAttribute()
+	attr.SetKey(object.AttributeFilePath)
+	attr.SetValue(objFileName)
+	obj.SetAttributes(append(obj.Attributes(), *attr)...)

 	t.Run("get", func(t *testing.T) {
 		r = prepareGetRequest(ctx, cnrID.EncodeToString(), putRes.ObjectID)
@@ -239,10 +221,6 @@ func TestBasic(t *testing.T) {
 		r = prepareGetByAttributeRequest(ctx, bktName, keyAttr, valAttr)
 		hc.Handler().DownloadByAttribute(r)
 		require.Equal(t, content, string(r.Response.Body()))
-
-		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
-		hc.Handler().DownloadByAttribute(r)
-		require.Equal(t, content, string(r.Response.Body()))
 	})

 	t.Run("head by attribute", func(t *testing.T) {
@@ -250,16 +228,11 @@ func TestBasic(t *testing.T) {
 		hc.Handler().HeadByAttribute(r)
 		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
 		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
-
-		r = prepareGetByAttributeRequest(ctx, bktName, attrFileName, "/"+objFileName)
-		hc.Handler().HeadByAttribute(r)
-		require.Equal(t, putRes.ObjectID, string(r.Response.Header.Peek(hdrObjectID)))
-		require.Equal(t, putRes.ContainerID, string(r.Response.Header.Peek(hdrContainerID)))
 	})

 	t.Run("zip", func(t *testing.T) {
 		r = prepareGetZipped(ctx, bktName, "")
-		hc.Handler().DownloadZip(r)
+		hc.Handler().DownloadZipped(r)

 		readerAt := bytes.NewReader(r.Response.Body())
 		zipReader, err := zip.NewReader(readerAt, int64(len(r.Response.Body())))
@@ -278,178 +251,6 @@ func TestBasic(t *testing.T) {
 	})
 }

-func TestFindObjectByAttribute(t *testing.T) {
-	hc, err := prepareHandlerContext()
-	require.NoError(t, err)
-	hc.cfg.additionalSearch = true
-
-	bktName := "bucket"
-	cnrID, cnr, err := hc.prepareContainer(bktName, acl.PublicRWExtended)
-	require.NoError(t, err)
-	hc.frostfs.SetContainer(cnrID, cnr)
-
-	ctx := context.Background()
-	ctx = middleware.SetNamespace(ctx, "")
-
-	content := "hello"
-	r, err := prepareUploadRequest(ctx, cnrID.EncodeToString(), content)
-	require.NoError(t, err)
-
-	hc.Handler().Upload(r)
-	require.Equal(t, r.Response.StatusCode(), http.StatusOK)
-
-	var putRes putResponse
-	err = json.Unmarshal(r.Response.Body(), &putRes)
-	require.NoError(t, err)
-
-	testAttrVal1 := "/folder/cat.jpg"
-	testAttrVal2 := "cat.jpg"
-	testAttrVal3 := "test-attr-val3"
-
-	for _, tc := range []struct {
-		name             string
-		firstAttr        object.Attribute
-		secondAttr       object.Attribute
-		reqAttrKey       string
-		reqAttrValue     string
-		err              string
-		additionalSearch bool
-	}{
-		{
-			name:             "success search by FileName",
-			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey:       attrFileName,
-			reqAttrValue:     testAttrVal2,
-			additionalSearch: false,
-		},
-		{
-			name:             "failed search by FileName",
-			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey:       attrFileName,
-			reqAttrValue:     testAttrVal3,
-			err:              "not found",
-			additionalSearch: false,
-		},
-		{
-			name:             "success search by FilePath (with additional search)",
-			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey:       attrFilePath,
-			reqAttrValue:     testAttrVal2,
-			additionalSearch: true,
-		},
-		{
-			name:             "failed by FilePath (with additional search)",
-			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey:       attrFilePath,
-			reqAttrValue:     testAttrVal3,
-			err:              "not found",
-			additionalSearch: true,
-		},
-		{
-			name:             "success search by FilePath with leading slash (with additional search)",
-			firstAttr:        prepareObjectAttributes(attrFilePath, testAttrVal1),
-			secondAttr:       prepareObjectAttributes(attrFileName, testAttrVal2),
-			reqAttrKey:       attrFilePath,
-			reqAttrValue:     "/cat.jpg",
-			additionalSearch: true,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			obj := hc.frostfs.objects[putRes.ContainerID+"/"+putRes.ObjectID]
-			obj.SetAttributes(tc.firstAttr, tc.secondAttr)
-			hc.cfg.additionalSearch = tc.additionalSearch
-
-			objID, err := hc.Handler().findObjectByAttribute(ctx, hc.Handler().log, cnrID, tc.reqAttrKey, tc.reqAttrValue)
-			if tc.err != "" {
-				require.Error(t, err)
-				require.Contains(t, err.Error(), tc.err)
-				return
-			}
-
-			require.NoError(t, err)
-			require.Equal(t, putRes.ObjectID, objID.EncodeToString())
-		})
-	}
-}
-
-func TestNeedSearchByFileName(t *testing.T) {
-	hc, err := prepareHandlerContext()
-	require.NoError(t, err)
-
-	for _, tc := range []struct {
-		name             string
-		attrKey          string
-		attrVal          string
-		additionalSearch bool
-		expected         bool
-	}{
-		{
-			name:             "need search - not contains slash",
-			attrKey:          attrFilePath,
-			attrVal:          "cat.png",
-			additionalSearch: true,
-			expected:         true,
-		},
-		{
-			name:             "need search - single lead slash",
-			attrKey:          attrFilePath,
-			attrVal:          "/cat.png",
-			additionalSearch: true,
-			expected:         true,
-		},
-		{
-			name:             "don't need search - single slash but not lead",
-			attrKey:          attrFilePath,
-			attrVal:          "cats/cat.png",
-			additionalSearch: true,
-			expected:         false,
-		},
-		{
-			name:             "don't need search - more one slash",
-			attrKey:          attrFilePath,
-			attrVal:          "/cats/cat.png",
-			additionalSearch: true,
-			expected:         false,
-		},
-		{
-			name:             "don't need search - incorrect attribute key",
-			attrKey:          attrFileName,
-			attrVal:          "cat.png",
-			additionalSearch: true,
-			expected:         false,
-		},
-		{
-			name:             "don't need search - additional search disabled",
-			attrKey:          attrFilePath,
-			attrVal:          "cat.png",
-			additionalSearch: false,
-			expected:         false,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			hc.cfg.additionalSearch = tc.additionalSearch
-
-			res := hc.h.needSearchByFileName(tc.attrKey, tc.attrVal)
-			require.Equal(t, tc.expected, res)
-		})
-	}
-}
-
-func TestPrepareFileName(t *testing.T) {
-	fileName := "/cat.jpg"
-	expected := "cat.jpg"
-	actual := prepareFileName(fileName)
-	require.Equal(t, expected, actual)
-
-	fileName = "cat.jpg"
-	actual = prepareFileName(fileName)
-	require.Equal(t, expected, actual)
-}
-
 func prepareUploadRequest(ctx context.Context, bucket, content string) (*fasthttp.RequestCtx, error) {
 	r := new(fasthttp.RequestCtx)
 	utils.SetContextToRequest(ctx, r)
@@ -482,13 +283,6 @@ func prepareGetZipped(ctx context.Context, bucket, prefix string) *fasthttp.Requ
 	return r
 }

-func prepareObjectAttributes(attrKey, attrValue string) object.Attribute {
-	attr := object.NewAttribute()
-	attr.SetKey(attrKey)
-	attr.SetValue(attrValue)
-	return *attr
-}
-
 const (
 	keyAttr = "User-Attribute"
 	valAttr = "user value"
@@ -2,16 +2,13 @@ package handler

 import (
 	"context"
-	"errors"
 	"io"
 	"net/http"
 	"strconv"
 	"time"

-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/valyala/fasthttp"
@@ -46,11 +43,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
 	}

 	req.Response.Header.Set(fasthttp.HeaderContentLength, strconv.FormatUint(obj.PayloadSize(), 10))
-	var (
-		contentType string
-		filename    string
-		filepath    string
-	)
+	var contentType string
 	for _, attr := range obj.Attributes() {
 		key := attr.Key()
 		val := attr.Value()
@@ -68,22 +61,14 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
 				req.log.Info(logs.CouldntParseCreationDate,
 					zap.String("key", key),
 					zap.String("val", val),
-					zap.Error(err),
-					logs.TagField(logs.TagDatapath))
+					zap.Error(err))
 				continue
 			}
 			req.Response.Header.Set(fasthttp.HeaderLastModified, time.Unix(value, 0).UTC().Format(http.TimeFormat))
 		case object.AttributeContentType:
 			contentType = val
-		case object.AttributeFilePath:
-			filepath = val
-		case object.AttributeFileName:
-			filename = val
 		}
 	}
-	if filename == "" {
-		filename = filepath
-	}

 	idsToResponse(&req.Response, obj)
@@ -98,7 +83,7 @@ func (h *Handler) headObject(ctx context.Context, req request, objectAddress oid
 		}

 		return h.frostfs.RangeObject(ctx, prmRange)
-	}, filename)
+	})
 	if err != nil && err != io.EOF {
 		req.handleFrostFSErr(err, start)
 		return
@@ -117,47 +102,18 @@ func idsToResponse(resp *fasthttp.Response, obj *object.Object) {

 // HeadByAddressOrBucketName handles head requests using simple cid/oid or bucketname/key format.
 func (h *Handler) HeadByAddressOrBucketName(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAddressOrBucketName")
-	defer span.End()
-
-	cidParam, _ := c.UserValue("cid").(string)
-	oidParam, _ := c.UserValue("oid").(string)
-
-	log := utils.GetReqLogOrDefault(ctx, h.log).With(
-		zap.String("cid", cidParam),
-		zap.String("oid", oidParam),
-	)
-
-	bktInfo, err := h.getBucketInfo(ctx, cidParam, log)
-	if err != nil {
-		logAndSendBucketError(c, log, err)
-		return
-	}
-	checkS3Err := h.tree.CheckSettingsNodeExists(ctx, bktInfo)
-	if checkS3Err != nil && !errors.Is(checkS3Err, layer.ErrNodeNotFound) {
-		log.Error(logs.FailedToCheckIfSettingsNodeExist, zap.String("cid", bktInfo.CID.String()),
-			zap.Error(checkS3Err), logs.TagField(logs.TagExternalStorageTree))
-		logAndSendBucketError(c, log, checkS3Err)
-		return
-	}
-
-	req := newRequest(c, log)
-
-	var objID oid.ID
-	if checkS3Err == nil {
-		h.byS3Path(ctx, req, bktInfo.CID, oidParam, h.headObject)
-	} else if err = objID.DecodeString(oidParam); err == nil {
-		h.byNativeAddress(ctx, req, bktInfo.CID, objID, h.headObject)
+	test, _ := c.UserValue("oid").(string)
+	var id oid.ID
+
+	err := id.DecodeString(test)
+	if err != nil {
+		h.byS3Path(c, h.headObject)
 	} else {
-		logAndSendBucketError(c, log, checkS3Err)
+		h.byNativeAddress(c, h.headObject)
 	}
 }

 // HeadByAttribute handles attribute-based head requests.
 func (h *Handler) HeadByAttribute(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.HeadByAttribute")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
 	h.byAttribute(c, h.headObject)
 }
@@ -33,7 +33,7 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

 		name := part.FormName()
 		if name == "" {
-			l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
+			l.Debug(logs.IgnorePartEmptyFormName)
 			continue
 		}
@@ -41,10 +41,8 @@ func fetchMultipartFile(l *zap.Logger, r io.Reader, boundary string) (MultipartF

 		// ignore multipart/form-data values
 		if filename == "" {
-			l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
-			if err = part.Close(); err != nil {
-				l.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
-			}
+			l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
 			continue
 		}
@@ -112,7 +112,7 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

 		name := part.FormName()
 		if name == "" {
-			l.Debug(logs.IgnorePartEmptyFormName, logs.TagField(logs.TagDatapath))
+			l.Debug(logs.IgnorePartEmptyFormName)
 			continue
 		}
@@ -120,7 +120,8 @@ func fetchMultipartFileDefault(l *zap.Logger, r io.Reader, boundary string) (Mul

 		// ignore multipart/form-data values
 		if filename == "" {
-			l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name), logs.TagField(logs.TagDatapath))
+			l.Debug(logs.IgnorePartEmptyFilename, zap.String("form", name))
+
 			continue
 		}
@@ -4,14 +4,13 @@ import (
 	"bytes"
 	"context"
 	"io"
-	"mime"
 	"net/http"
 	"path"
 	"strconv"
-	"strings"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -26,7 +25,7 @@ type readCloser struct {

 // initializes io.Reader with the limited size and detects Content-Type from it.
 // Returns r's error directly. Also returns the processed data.
-func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), filename string) (string, []byte, error) {
+func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error)) (string, []byte, error) {
 	if maxSize > sizeToDetectType {
 		maxSize = sizeToDetectType
 	}
@@ -45,20 +44,7 @@ func readContentType(maxSize uint64, rInit func(uint64) (io.Reader, error), file

 	buf = buf[:n]

-	contentType := http.DetectContentType(buf)
-
-	// Since the detector detects the "text/plain" content type for various types of text files,
-	// including CSS, JavaScript, and CSV files,
-	// we'll determine the final content type based on the file's extension.
-	if strings.HasPrefix(contentType, "text/plain") {
-		ext := path.Ext(filename)
-		// If the file doesn't have a file extension, we'll keep the content type as is.
-		if len(ext) > 0 {
-			contentType = mime.TypeByExtension(ext)
-		}
-	}
-
-	return contentType, buf, err // to not lose io.EOF
+	return http.DetectContentType(buf), buf, err // to not lose io.EOF
 }

 type getMultiobjectBodyParams struct {
@ -110,8 +96,7 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
|
||||||
if err = req.setTimestamp(val); err != nil {
|
if err = req.setTimestamp(val); err != nil {
|
||||||
req.log.Error(logs.CouldntParseCreationDate,
|
req.log.Error(logs.CouldntParseCreationDate,
|
||||||
zap.String("val", val),
|
zap.String("val", val),
|
||||||
zap.Error(err),
|
zap.Error(err))
|
||||||
logs.TagField(logs.TagDatapath))
|
|
||||||
}
|
}
|
||||||
case object.AttributeContentType:
|
case object.AttributeContentType:
|
||||||
contentType = val
|
contentType = val
|
||||||
|
@ -143,10 +128,10 @@ func (h *Handler) receiveFile(ctx context.Context, req request, objAddress oid.A
|
||||||
|
|
||||||
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
|
contentType, payloadHead, err = readContentType(payloadSize, func(uint64) (io.Reader, error) {
|
||||||
return payload, nil
|
return payload, nil
|
||||||
}, filename)
|
})
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err), logs.TagField(logs.TagDatapath))
|
req.log.Error(logs.CouldNotDetectContentTypeFromPayload, zap.Error(err))
|
||||||
ResponseError(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
|
response.Error(req.RequestCtx, "could not detect Content-Type from payload: "+err.Error(), fasthttp.StatusBadRequest)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
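Note: the extension-based refinement removed in this hunk (master side) layers mime.TypeByExtension over http.DetectContentType, because the sniffer reports "text/plain" for CSS, JavaScript, and CSV files. A standalone sketch of that logic using only the standard library; the extra empty-result guard is a small hardening not present in the original:

package main

import (
	"fmt"
	"mime"
	"net/http"
	"path"
	"strings"
)

// detectContentType sniffs the first bytes of a payload, then, if the
// sniffer fell back to "text/plain", refines the answer by file extension.
// Files without an extension keep the sniffed type, as in the removed code.
func detectContentType(head []byte, filename string) string {
	contentType := http.DetectContentType(head)
	if strings.HasPrefix(contentType, "text/plain") {
		if ext := path.Ext(filename); ext != "" {
			// Guard against unknown extensions (assumption: better to keep
			// the sniffed type than return an empty string).
			if byExt := mime.TypeByExtension(ext); byExt != "" {
				contentType = byExt
			}
		}
	}
	return contentType
}

func main() {
	css := []byte("body { color: red; }")
	fmt.Println(detectContentType(css, "style.css")) // text/css; charset=utf-8
	fmt.Println(detectContentType(css, "style"))     // text/plain; charset=utf-8
}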
@@ -10,16 +10,8 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-const (
-	txtContentType = "text/plain; charset=utf-8"
-	cssContentType = "text/css; charset=utf-8"
-	htmlContentType = "text/html; charset=utf-8"
-	javascriptContentType = "text/javascript; charset=utf-8"
-
-	htmlBody = "<!DOCTYPE html><html ><head><meta charset=\"utf-8\"><title>Test Html</title>"
-)
-
 func TestDetector(t *testing.T) {
+	txtContentType := "text/plain; charset=utf-8"
 	sb := strings.Builder{}
 	for i := 0; i < 10; i++ {
 		sb.WriteString("Some txt content. Content-Type must be detected properly by detector.")
@@ -27,63 +19,30 @@ func TestDetector(t *testing.T) {
 
 	for _, tc := range []struct {
 		Name string
-		ExpectedContentType string
-		Content string
-		FileName string
+		ContentType string
+		Expected string
 	}{
 		{
 			Name: "less than 512b",
-			ExpectedContentType: txtContentType,
-			Content: sb.String()[:256],
-			FileName: "test.txt",
+			ContentType: txtContentType,
+			Expected: sb.String()[:256],
 		},
 		{
 			Name: "more than 512b",
-			ExpectedContentType: txtContentType,
-			Content: sb.String(),
-			FileName: "test.txt",
-		},
-		{
-			Name: "css content type",
-			ExpectedContentType: cssContentType,
-			Content: sb.String(),
-			FileName: "test.css",
-		},
-		{
-			Name: "javascript content type",
-			ExpectedContentType: javascriptContentType,
-			Content: sb.String(),
-			FileName: "test.js",
-		},
-		{
-			Name: "html content type by file content",
-			ExpectedContentType: htmlContentType,
-			Content: htmlBody,
-			FileName: "test.detect-by-content",
-		},
-		{
-			Name: "html content type by file extension",
-			ExpectedContentType: htmlContentType,
-			Content: sb.String(),
-			FileName: "test.html",
-		},
-		{
-			Name: "empty file extension",
-			ExpectedContentType: txtContentType,
-			Content: sb.String(),
-			FileName: "test",
+			ContentType: txtContentType,
+			Expected: sb.String(),
 		},
 	} {
 		t.Run(tc.Name, func(t *testing.T) {
-			contentType, data, err := readContentType(uint64(len(tc.Content)),
+			contentType, data, err := readContentType(uint64(len(tc.Expected)),
 				func(uint64) (io.Reader, error) {
-					return strings.NewReader(tc.Content), nil
-				}, tc.FileName,
+					return strings.NewReader(tc.Expected), nil
+				},
 			)
 
 			require.NoError(t, err)
-			require.Equal(t, tc.ExpectedContentType, contentType)
-			require.True(t, strings.HasPrefix(tc.Content, string(data)))
+			require.Equal(t, tc.ContentType, contentType)
+			require.True(t, strings.HasPrefix(tc.Expected, string(data)))
		})
	}
}
@@ -1,23 +1,17 @@
 package handler
 
 import (
-	"archive/tar"
-	"bytes"
-	"compress/gzip"
 	"context"
 	"encoding/json"
-	"errors"
 	"io"
 	"net/http"
-	"path/filepath"
 	"strconv"
 	"time"
 
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -28,7 +22,6 @@ import (
 const (
 	jsonHeader = "application/json; charset=UTF-8"
 	drainBufSize = 4096
-	explodeArchiveHeader = "X-Explode-Archive"
 )
 
 type putResponse struct {
@@ -51,16 +44,17 @@ func (pr *putResponse) encode(w io.Writer) error {
 
 // Upload handles multipart upload request.
 func (h *Handler) Upload(c *fasthttp.RequestCtx) {
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.Upload")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-	var file MultipartFile
+	var (
+		file MultipartFile
+		idObj oid.ID
+		addr oid.Address
+	)
 
 	scid, _ := c.UserValue("cid").(string)
 	bodyStream := c.RequestBodyStream()
 	drainBuf := make([]byte, drainBufSize)
 
+	ctx := utils.GetContextFromRequest(c)
 	reqLog := utils.GetReqLogOrDefault(ctx, h.log)
 	log := reqLog.With(zap.String("cid", scid))
 
@@ -70,84 +64,76 @@ func (h *Handler) Upload(c *fasthttp.RequestCtx) {
 		return
 	}
 
+	defer func() {
+		// If the temporary reader can be closed - let's close it.
+		if file == nil {
+			return
+		}
+		err := file.Close()
+		log.Debug(
+			logs.CloseTemporaryMultipartFormFile,
+			zap.Stringer("address", addr),
+			zap.String("filename", file.FileName()),
+			zap.Error(err),
+		)
+	}()
+
 	boundary := string(c.Request.Header.MultipartFormBoundary())
 	if file, err = fetchMultipartFile(log, bodyStream, boundary); err != nil {
-		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
+		log.Error(logs.CouldNotReceiveMultipartForm, zap.Error(err))
+		response.Error(c, "could not receive multipart/form: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
 
 	filtered, err := filterHeaders(log, &c.Request.Header)
 	if err != nil {
-		log.Error(logs.FailedToFilterHeaders, zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, err.Error(), fasthttp.StatusBadRequest)
+		log.Error(logs.CouldNotProcessHeaders, zap.Error(err))
+		response.Error(c, err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
 
-	if c.Request.Header.Peek(explodeArchiveHeader) != nil {
-		h.explodeArchive(request{c, log}, bktInfo, file, filtered)
+	now := time.Now()
+	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
+		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
+			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err))
 		} else {
-		h.uploadSingleObject(request{c, log}, bktInfo, file, filtered)
-	}
-
-	// Multipart is multipart and thus can contain more than one part which
-	// we ignore at the moment. Also, when dealing with chunked encoding
-	// the last zero-length chunk might be left unread (because multipart
-	// reader only cares about its boundary and doesn't look further) and
-	// it will be (erroneously) interpreted as the start of the next
-	// pipelined header. Thus, we need to drain the body buffer.
-	for {
-		_, err = bodyStream.Read(drainBuf)
-		if err == io.EOF || errors.Is(err, io.ErrUnexpectedEOF) {
-			break
+			now = parsed
 		}
 	}
 
-func (h *Handler) uploadSingleObject(req request, bkt *data.BucketInfo, file MultipartFile, filtered map[string]string) {
-	c, log := req.RequestCtx, req.log
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.uploadSingleObject")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
-	setIfNotExist(filtered, object.AttributeFileName, file.FileName())
-
-	attributes, err := h.extractAttributes(c, log, filtered)
-	if err != nil {
-		log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
+	if err = utils.PrepareExpirationHeader(c, h.frostfs, filtered, now); err != nil {
+		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err))
+		response.Error(c, "could not prepare expiration header: "+err.Error(), fasthttp.StatusBadRequest)
 		return
 	}
 
-	idObj, err := h.uploadObject(c, bkt, attributes, file)
-	if err != nil {
-		h.handlePutFrostFSErr(c, err, log)
-		return
+	attributes := make([]object.Attribute, 0, len(filtered))
+	// prepares attributes from filtered headers
+	for key, val := range filtered {
+		attribute := object.NewAttribute()
+		attribute.SetKey(key)
+		attribute.SetValue(val)
+		attributes = append(attributes, *attribute)
 	}
-	log.Debug(logs.ObjectUploaded,
-		zap.String("oid", idObj.EncodeToString()),
-		zap.String("FileName", file.FileName()),
-		logs.TagField(logs.TagExternalStorage),
-	)
-
-	addr := newAddress(bkt.CID, idObj)
-	c.Response.Header.SetContentType(jsonHeader)
-	// Try to return the response, otherwise, if something went wrong, throw an error.
-	if err = newPutResponse(addr).encode(c); err != nil {
-		log.Error(logs.CouldNotEncodeResponse, zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not encode response", fasthttp.StatusBadRequest)
-		return
+	// sets FileName attribute if it wasn't set from header
+	if _, ok := filtered[object.AttributeFileName]; !ok {
+		filename := object.NewAttribute()
+		filename.SetKey(object.AttributeFileName)
+		filename.SetValue(file.FileName())
+		attributes = append(attributes, *filename)
 	}
+	// sets Timestamp attribute if it wasn't set from header and enabled by settings
+	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
+		timestamp := object.NewAttribute()
+		timestamp.SetKey(object.AttributeTimestamp)
+		timestamp.SetValue(strconv.FormatInt(time.Now().Unix(), 10))
+		attributes = append(attributes, *timestamp)
	}
 
-func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, attrs []object.Attribute, file io.Reader) (oid.ID, error) {
-	ctx := utils.GetContextFromRequest(c)
-
 	obj := object.New()
-	obj.SetContainerID(bkt.CID)
+	obj.SetContainerID(bktInfo.CID)
 	obj.SetOwnerID(*h.ownerID)
-	obj.SetAttributes(attrs...)
+	obj.SetAttributes(attributes...)
 
 	prm := PrmObjectCreate{
 		PrmAuth: PrmAuth{
@@ -156,135 +142,48 @@ func (h *Handler) uploadObject(c *fasthttp.RequestCtx, bkt *data.BucketInfo, att
 		Object: obj,
 		Payload: file,
 		ClientCut: h.config.ClientCut(),
-		WithoutHomomorphicHash: bkt.HomomorphicHashDisabled,
+		WithoutHomomorphicHash: bktInfo.HomomorphicHashDisabled,
 		BufferMaxSize: h.config.BufferMaxSizeForPut(),
 	}
 
-	idObj, err := h.frostfs.CreateObject(ctx, prm)
-	if err != nil {
-		return oid.ID{}, err
-	}
-
-	return idObj, nil
-}
-
-func (h *Handler) extractAttributes(c *fasthttp.RequestCtx, log *zap.Logger, filtered map[string]string) ([]object.Attribute, error) {
-	ctx := utils.GetContextFromRequest(c)
-	now := time.Now()
-	if rawHeader := c.Request.Header.Peek(fasthttp.HeaderDate); rawHeader != nil {
-		if parsed, err := time.Parse(http.TimeFormat, string(rawHeader)); err != nil {
-			log.Warn(logs.CouldNotParseClientTime, zap.String("Date header", string(rawHeader)), zap.Error(err),
-				logs.TagField(logs.TagDatapath))
-		} else {
-			now = parsed
-		}
-	}
-	if err := utils.PrepareExpirationHeader(ctx, h.frostfs, filtered, now); err != nil {
-		log.Error(logs.CouldNotPrepareExpirationHeader, zap.Error(err), logs.TagField(logs.TagDatapath))
-		return nil, err
-	}
-	attributes := make([]object.Attribute, 0, len(filtered))
-	// prepares attributes from filtered headers
-	for key, val := range filtered {
-		attribute := newAttribute(key, val)
-		attributes = append(attributes, attribute)
-	}
-	// sets Timestamp attribute if it wasn't set from header and enabled by settings
-	if _, ok := filtered[object.AttributeTimestamp]; !ok && h.config.DefaultTimestamp() {
-		timestamp := newAttribute(object.AttributeTimestamp, strconv.FormatInt(time.Now().Unix(), 10))
-		attributes = append(attributes, timestamp)
-	}
-
-	return attributes, nil
-}
-
-func newAttribute(key string, val string) object.Attribute {
-	attr := object.NewAttribute()
-	attr.SetKey(key)
-	attr.SetValue(val)
-	return *attr
-}
-
-// explodeArchive read files from archive and creates objects for each of them.
-// Sets FilePath attribute with name from tar.Header.
-func (h *Handler) explodeArchive(req request, bkt *data.BucketInfo, file io.ReadCloser, filtered map[string]string) {
-	c, log := req.RequestCtx, req.log
-
-	ctx, span := tracing.StartSpanFromContext(utils.GetContextFromRequest(c), "handler.explodeArchive")
-	defer span.End()
-	utils.SetContextToRequest(ctx, c)
-
-	// remove user attributes which vary for each file in archive
-	// to guarantee that they won't appear twice
-	delete(filtered, object.AttributeFileName)
-	delete(filtered, object.AttributeFilePath)
-
-	commonAttributes, err := h.extractAttributes(c, log, filtered)
-	if err != nil {
-		log.Error(logs.FailedToGetAttributes, zap.Error(err), logs.TagField(logs.TagDatapath))
-		ResponseError(c, "could not extract attributes: "+err.Error(), fasthttp.StatusBadRequest)
-		return
-	}
-	attributes := commonAttributes
-
-	reader := file
-	if bytes.EqualFold(c.Request.Header.Peek(fasthttp.HeaderContentEncoding), []byte("gzip")) {
-		log.Debug(logs.GzipReaderSelected, logs.TagField(logs.TagDatapath))
-		gzipReader, err := gzip.NewReader(file)
-		if err != nil {
-			log.Error(logs.FailedToCreateGzipReader, zap.Error(err), logs.TagField(logs.TagDatapath))
-			ResponseError(c, "could read gzip file: "+err.Error(), fasthttp.StatusBadRequest)
-			return
-		}
-		defer func() {
-			if err := gzipReader.Close(); err != nil {
-				log.Warn(logs.FailedToCloseReader, zap.Error(err), logs.TagField(logs.TagDatapath))
-			}
-		}()
-		reader = gzipReader
-	}
-
-	tarReader := tar.NewReader(reader)
-	for {
-		obj, err := tarReader.Next()
-		if errors.Is(err, io.EOF) {
-			break
-		} else if err != nil {
-			log.Error(logs.FailedToReadFileFromTar, zap.Error(err), logs.TagField(logs.TagDatapath))
-			ResponseError(c, "could not get next entry: "+err.Error(), fasthttp.StatusBadRequest)
-			return
-		}
-
-		if isDir(obj.Name) {
-			continue
-		}
-
-		// set varying attributes
-		attributes = attributes[:len(commonAttributes)]
-		fileName := filepath.Base(obj.Name)
-		attributes = append(attributes, newAttribute(object.AttributeFilePath, obj.Name))
-		attributes = append(attributes, newAttribute(object.AttributeFileName, fileName))
-
-		idObj, err := h.uploadObject(c, bkt, attributes, tarReader)
-		if err != nil {
+	if idObj, err = h.frostfs.CreateObject(ctx, prm); err != nil {
 		h.handlePutFrostFSErr(c, err, log)
 		return
 	}
 
-		log.Debug(logs.ObjectUploaded,
-			zap.String("oid", idObj.EncodeToString()),
-			zap.String("FileName", fileName),
-			logs.TagField(logs.TagExternalStorage),
-		)
+	addr.SetObject(idObj)
+	addr.SetContainer(bktInfo.CID)
+
+	// Try to return the response, otherwise, if something went wrong, throw an error.
+	if err = newPutResponse(addr).encode(c); err != nil {
+		log.Error(logs.CouldNotEncodeResponse, zap.Error(err))
+		response.Error(c, "could not encode response", fasthttp.StatusBadRequest)
+
+		return
 	}
+	// Multipart is multipart and thus can contain more than one part which
+	// we ignore at the moment. Also, when dealing with chunked encoding
+	// the last zero-length chunk might be left unread (because multipart
+	// reader only cares about its boundary and doesn't look further) and
+	// it will be (erroneously) interpreted as the start of the next
+	// pipelined header. Thus we need to drain the body buffer.
+	for {
+		_, err = bodyStream.Read(drainBuf)
+		if err == io.EOF || err == io.ErrUnexpectedEOF {
+			break
+		}
	}
+	// Report status code and content type.
+	c.Response.SetStatusCode(fasthttp.StatusOK)
+	c.Response.Header.SetContentType(jsonHeader)
 }
 
 func (h *Handler) handlePutFrostFSErr(r *fasthttp.RequestCtx, err error, log *zap.Logger) {
-	statusCode, msg, additionalFields := formErrorResponse("could not store file in frostfs", err)
+	statusCode, msg, additionalFields := response.FormErrorResponse("could not store file in frostfs", err)
 	logFields := append([]zap.Field{zap.Error(err)}, additionalFields...)
 
-	log.Error(logs.CouldNotStoreFileInFrostfs, append(logFields, logs.TagField(logs.TagExternalStorage))...)
-	ResponseError(r, msg, statusCode)
+	log.Error(logs.CouldNotStoreFileInFrostfs, logFields...)
+	response.Error(r, msg, statusCode)
 }
 
 func (h *Handler) fetchBearerToken(ctx context.Context) *bearer.Token {
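Note: the explodeArchive handler removed above walks a (possibly gzip-compressed) tar stream and uploads each regular file, skipping directory entries by their trailing slash. A trimmed, standalone sketch of that traversal under the same conventions; the callback and the archive.tar.gz input path are illustrative stand-ins for the per-entry object upload:

package main

import (
	"archive/tar"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"path/filepath"
	"strings"
)

// isDir matches the helper in this diff: empty names and names ending
// in "/" are treated as directory entries.
func isDir(name string) bool {
	return name == "" || strings.HasSuffix(name, "/")
}

// explode reads tar entries from r (transparently gunzipping when asked)
// and hands each regular file's path, base name, and payload to upload.
func explode(r io.Reader, gzipped bool, upload func(filePath, fileName string, body io.Reader) error) error {
	if gzipped {
		gz, err := gzip.NewReader(r)
		if err != nil {
			return fmt.Errorf("read gzip: %w", err)
		}
		defer gz.Close()
		r = gz
	}
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if errors.Is(err, io.EOF) {
			return nil
		} else if err != nil {
			return fmt.Errorf("next entry: %w", err)
		}
		if isDir(hdr.Name) {
			continue
		}
		// FilePath keeps the full archive path, FileName just the base,
		// matching the attributes set by the removed handler code.
		if err := upload(hdr.Name, filepath.Base(hdr.Name), tr); err != nil {
			return err
		}
	}
}

func main() {
	f, err := os.Open("archive.tar.gz") // assumed input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	err = explode(f, true, func(p, n string, body io.Reader) error {
		sz, _ := io.Copy(io.Discard, body)
		fmt.Printf("would upload %s (FileName=%s, %d bytes)\n", p, n, sz)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}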
@@ -2,16 +2,14 @@ package handler
 
 import (
 	"context"
-	"errors"
-	"fmt"
 	"strings"
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
-	sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -24,23 +22,16 @@ type request struct {
 	log *zap.Logger
 }
 
-func newRequest(ctx *fasthttp.RequestCtx, log *zap.Logger) request {
-	return request{
-		RequestCtx: ctx,
-		log: log,
-	}
-}
-
 func (r *request) handleFrostFSErr(err error, start time.Time) {
 	logFields := []zap.Field{
 		zap.Stringer("elapsed", time.Since(start)),
 		zap.Error(err),
 	}
-	statusCode, msg, additionalFields := formErrorResponse("could not receive object", err)
+	statusCode, msg, additionalFields := response.FormErrorResponse("could not receive object", err)
 	logFields = append(logFields, additionalFields...)
 
-	r.log.Error(logs.CouldNotReceiveObject, append(logFields, logs.TagField(logs.TagExternalStorage))...)
-	ResponseError(r.RequestCtx, msg, statusCode)
+	r.log.Error(logs.CouldNotReceiveObject, logFields...)
+	response.Error(r.RequestCtx, msg, statusCode)
 }
 
 func bearerToken(ctx context.Context) *bearer.Token {
@@ -51,7 +42,16 @@ func bearerToken(ctx context.Context) *bearer.Token {
 }
 
 func isDir(name string) bool {
-	return name == "" || strings.HasSuffix(name, "/")
+	return strings.HasSuffix(name, "/")
+}
+
+func isObjectID(s string) bool {
+	var objID oid.ID
+	return objID.DecodeString(s) == nil
+}
+
+func isContainerRoot(key string) bool {
+	return key == ""
 }
 
 func loadAttributes(attrs []object.Attribute) map[string]string {
@@ -85,13 +85,13 @@ func isValidValue(s string) bool {
 }
 
 func logAndSendBucketError(c *fasthttp.RequestCtx, log *zap.Logger, err error) {
-	log.Error(logs.CouldNotGetBucket, zap.Error(err), logs.TagField(logs.TagDatapath))
+	log.Error(logs.CouldntGetBucket, zap.Error(err))
 
 	if client.IsErrContainerNotFound(err) {
-		ResponseError(c, "Not Found", fasthttp.StatusNotFound)
+		response.Error(c, "Not Found", fasthttp.StatusNotFound)
 		return
 	}
-	ResponseError(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
+	response.Error(c, "could not get bucket: "+err.Error(), fasthttp.StatusBadRequest)
 }
 
 func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
@@ -100,43 +100,3 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
 	addr.SetObject(obj)
 	return addr
 }
-
-// setIfNotExist sets key value to map if key is not present yet.
-func setIfNotExist(m map[string]string, key, value string) {
-	if _, ok := m[key]; !ok {
-		m[key] = value
-	}
-}
-
-func ResponseError(r *fasthttp.RequestCtx, msg string, code int) {
-	r.Error(msg+"\n", code)
-}
-
-func formErrorResponse(message string, err error) (int, string, []zap.Field) {
-	var (
-		msg string
-		statusCode int
-		logFields []zap.Field
-	)
-
-	st := new(sdkstatus.ObjectAccessDenied)
-
-	switch {
-	case errors.As(err, &st):
-		statusCode = fasthttp.StatusForbidden
-		reason := st.Reason()
-		msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
-		logFields = append(logFields, zap.String("error_detail", reason))
-	case errors.Is(err, ErrQuotaLimitReached):
-		statusCode = fasthttp.StatusConflict
-		msg = fmt.Sprintf("%s: %v", message, err)
-	case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
-		statusCode = fasthttp.StatusNotFound
-		msg = "Not Found"
-	default:
-		statusCode = fasthttp.StatusBadRequest
-		msg = fmt.Sprintf("%s: %v", message, err)
-	}
-
-	return statusCode, msg, logFields
-}
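Note: the removed formErrorResponse maps storage errors onto HTTP statuses with errors.As/errors.Is. A reduced sketch of the same dispatch using local stand-ins for the SDK types (the real code matches sdkstatus.ObjectAccessDenied and the gateway's ErrQuotaLimitReached sentinel; the Not Found arm is elided here):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Stand-ins for the SDK/gateway types used by the removed code.
var errQuotaLimitReached = errors.New("quota limit reached")

type accessDeniedError struct{ reason string }

func (e *accessDeniedError) Error() string { return "access denied: " + e.reason }

// formErrorResponse picks an HTTP status and message for a storage error,
// mirroring the removed switch: 403 for access denied, 409 for quota,
// 400 for everything else.
func formErrorResponse(message string, err error) (int, string) {
	var denied *accessDeniedError
	switch {
	case errors.As(err, &denied):
		return http.StatusForbidden, fmt.Sprintf("%s: %v: %s", message, err, denied.reason)
	case errors.Is(err, errQuotaLimitReached):
		return http.StatusConflict, fmt.Sprintf("%s: %v", message, err)
	default:
		return http.StatusBadRequest, fmt.Sprintf("%s: %v", message, err)
	}
}

func main() {
	code, msg := formErrorResponse("could not store file", fmt.Errorf("put: %w", errQuotaLimitReached))
	fmt.Println(code, msg) // 409 could not store file: put: quota limit reached
}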
@@ -1,131 +1,90 @@
 package logs
 
-import "go.uber.org/zap"
-
 const (
-	TagFieldName = "tag"
-
-	TagApp = "app"
-	TagDatapath = "datapath"
-	TagExternalStorage = "external_storage"
-	TagExternalStorageTree = "external_storage_tree"
-)
-
-func TagField(tag string) zap.Field {
-	return zap.String(TagFieldName, tag)
-}
-
-// Log messages with the "app" tag.
-const (
-	ServiceIsRunning = "service is running"
-	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port"
-	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled"
-	ShuttingDownService = "shutting down service"
-	CantShutDownService = "can't shut down service"
-	CantGracefullyShutDownService = "can't gracefully shut down service, force stop"
-	FailedToCreateResolver = "failed to create resolver"
-	FailedToCreateWorkerPool = "failed to create worker pool"
-	StartingApplication = "starting application"
-	StartingServer = "starting server"
-	ListenAndServe = "listen and serve"
-	ShuttingDownWebServer = "shutting down web server"
-	FailedToShutdownTracing = "failed to shutdown tracing"
-	AddedPathUploadCid = "added path /upload/{cid}"
-	AddedPathGetCidOid = "added path /get/{cid}/{oid}"
-	AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}"
-	AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}"
-	FailedToAddServer = "failed to add server"
-	AddServer = "add server"
-	NoHealthyServers = "no healthy servers"
-	FailedToInitializeTracing = "failed to initialize tracing"
-	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped"
-	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated"
-	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key"
-	UsingCredentials = "using credentials"
-	FailedToCreateConnectionPool = "failed to create connection pool"
-	FailedToDialConnectionPool = "failed to dial connection pool"
-	FailedToCreateTreePool = "failed to create tree pool"
-	FailedToDialTreePool = "failed to dial tree pool"
+	CouldntParseCreationDate = "couldn't parse creation date" // Info in ../../downloader/*
+	CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload" // Error in ../../downloader/download.go
+	CouldNotReceiveObject = "could not receive object" // Error in ../../downloader/download.go
+	WrongObjectID = "wrong object id" // Error in ../../downloader/download.go
+	GetLatestObjectVersion = "get latest object version" // Error in ../../downloader/download.go
+	ObjectWasDeleted = "object was deleted" // Error in ../../downloader/download.go
+	CouldNotSearchForObjects = "could not search for objects" // Error in ../../downloader/download.go
+	ObjectNotFound = "object not found" // Error in ../../downloader/download.go
+	ReadObjectListFailed = "read object list failed" // Error in ../../downloader/download.go
+	FailedToAddObjectToArchive = "failed to add object to archive" // Error in ../../downloader/download.go
+	IteratingOverSelectedObjectsFailed = "iterating over selected objects failed" // Error in ../../downloader/download.go
+	ObjectsNotFound = "objects not found" // Error in ../../downloader/download.go
+	CloseZipWriter = "close zip writer" // Error in ../../downloader/download.go
+	ServiceIsRunning = "service is running" // Info in ../../metrics/service.go
+	ServiceCouldntStartOnConfiguredPort = "service couldn't start on configured port" // Warn in ../../metrics/service.go
+	ServiceHasntStartedSinceItsDisabled = "service hasn't started since it's disabled" // Info in ../../metrics/service.go
+	ShuttingDownService = "shutting down service" // Info in ../../metrics/service.go
+	CantShutDownService = "can't shut down service" // Panic in ../../metrics/service.go
+	CantGracefullyShutDownService = "can't gracefully shut down service, force stop" // Error in ../../metrics/service.go
+	IgnorePartEmptyFormName = "ignore part, empty form name" // Debug in ../../uploader/upload.go
+	IgnorePartEmptyFilename = "ignore part, empty filename" // Debug in ../../uploader/upload.go
+	CloseTemporaryMultipartFormFile = "close temporary multipart/form file" // Debug in ../../uploader/upload.go
+	CouldNotReceiveMultipartForm = "could not receive multipart/form" // Error in ../../uploader/upload.go
+	CouldNotProcessHeaders = "could not process headers" // Error in ../../uploader/upload.go
+	CouldNotParseClientTime = "could not parse client time" // Warn in ../../uploader/upload.go
+	CouldNotPrepareExpirationHeader = "could not prepare expiration header" // Error in ../../uploader/upload.go
+	CouldNotEncodeResponse = "could not encode response" // Error in ../../uploader/upload.go
+	CouldNotStoreFileInFrostfs = "could not store file in frostfs" // Error in ../../uploader/upload.go
+	AddAttributeToResultObject = "add attribute to result object" // Debug in ../../uploader/filter.go
+	FailedToCreateResolver = "failed to create resolver" // Fatal in ../../app.go
+	FailedToCreateWorkerPool = "failed to create worker pool" // Fatal in ../../app.go
+	FailedToReadIndexPageTemplate = "failed to read index page template" // Error in ../../app.go
+	SetCustomIndexPageTemplate = "set custom index page template" // Info in ../../app.go
+	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty" // Info in ../../app.go
+	MetricsAreDisabled = "metrics are disabled" // Warn in ../../app.go
+	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run" // Info in ../../app.go
+	StartingApplication = "starting application" // Info in ../../app.go
+	StartingServer = "starting server" // Info in ../../app.go
+	ListenAndServe = "listen and serve" // Fatal in ../../app.go
+	ShuttingDownWebServer = "shutting down web server" // Info in ../../app.go
+	FailedToShutdownTracing = "failed to shutdown tracing" // Warn in ../../app.go
+	SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../app.go
+	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed" // Warn in ../../app.go
+	FailedToReloadConfig = "failed to reload config" // Warn in ../../app.go
+	LogLevelWontBeUpdated = "log level won't be updated" // Warn in ../../app.go
+	FailedToUpdateResolvers = "failed to update resolvers" // Warn in ../../app.go
+	FailedToReloadServerParameters = "failed to reload server parameters" // Warn in ../../app.go
+	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed" // Info in ../../app.go
+	AddedPathUploadCid = "added path /upload/{cid}" // Info in ../../app.go
+	AddedPathGetCidOid = "added path /get/{cid}/{oid}" // Info in ../../app.go
+	AddedPathGetByAttributeCidAttrKeyAttrVal = "added path /get_by_attribute/{cid}/{attr_key}/{attr_val:*}" // Info in ../../app.go
+	AddedPathZipCidPrefix = "added path /zip/{cid}/{prefix}" // Info in ../../app.go
+	Request = "request" // Info in ../../app.go
+	CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token" // Error in ../../app.go
+	FailedToAddServer = "failed to add server" // Warn in ../../app.go
+	AddServer = "add server" // Info in ../../app.go
+	NoHealthyServers = "no healthy servers" // Fatal in ../../app.go
+	FailedToInitializeTracing = "failed to initialize tracing" // Warn in ../../app.go
+	TracingConfigUpdated = "tracing config updated" // Info in ../../app.go
+	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided" // Warn in ../../app.go
+	RuntimeSoftMemoryDefinedWithGOMEMLIMIT = "soft runtime memory defined with GOMEMLIMIT environment variable, config value skipped" // Warn in ../../app.go
+	RuntimeSoftMemoryLimitUpdated = "soft runtime memory limit value updated" // Info in ../../app.go
+	CouldNotLoadFrostFSPrivateKey = "could not load FrostFS private key" // Fatal in ../../settings.go
+	UsingCredentials = "using credentials" // Info in ../../settings.go
+	FailedToCreateConnectionPool = "failed to create connection pool" // Fatal in ../../settings.go
+	FailedToDialConnectionPool = "failed to dial connection pool" // Fatal in ../../settings.go
+	FailedToCreateTreePool = "failed to create tree pool" // Fatal in ../../settings.go
+	FailedToDialTreePool = "failed to dial tree pool" // Fatal in ../../settings.go
+	AddedStoragePeer = "added storage peer" // Info in ../../settings.go
+	CouldntGetBucket = "could not get bucket" // Error in ../handler/utils.go
+	CouldntPutBucketIntoCache = "couldn't put bucket info into cache" // Warn in ../handler/handler.go
+	FailedToSumbitTaskToPool = "failed to submit task to pool" // Error in ../handler/browse.go
+	FailedToHeadObject = "failed to head object" // Error in ../handler/browse.go
+	FailedToIterateOverResponse = "failed to iterate over search response" // Error in ../handler/browse.go
+	InvalidCacheEntryType = "invalid cache entry type" // Warn in ../cache/buckets.go
+	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)" // Error in ../../cmd/http-gw/settings.go
+	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value" // Error in ../../cmd/http-gw/settings.go
+	FailedToUnescapeQuery = "failed to unescape query"
 	ServerReconnecting = "reconnecting server..."
 	ServerReconnectedSuccessfully = "server reconnected successfully"
 	ServerReconnectFailed = "failed to reconnect server"
+	WarnDuplicateAddress = "duplicate address"
 	MultinetDialSuccess = "multinet dial successful"
 	MultinetDialFail = "multinet dial failed"
-	ContainerResolverWillBeDisabledBecauseOfResolversResolverOrderIsEmpty = "container resolver will be disabled because of resolvers 'resolver_order' is empty"
-	MetricsAreDisabled = "metrics are disabled"
-	NoWalletPathSpecifiedCreatingEphemeralKeyAutomaticallyForThisRun = "no wallet path specified, creating ephemeral key automatically for this run"
-	SIGHUPConfigReloadStarted = "SIGHUP config reload started"
-	FailedToReloadConfigBecauseItsMissed = "failed to reload config because it's missed"
-	FailedToReloadConfig = "failed to reload config"
-	FailedToUpdateResolvers = "failed to update resolvers"
-	FailedToReloadServerParameters = "failed to reload server parameters"
-	SIGHUPConfigReloadCompleted = "SIGHUP config reload completed"
-	TracingConfigUpdated = "tracing config updated"
-	ResolverNNSWontBeUsedSinceRPCEndpointIsntProvided = "resolver nns won't be used since rpc_endpoint isn't provided"
-	AddedStoragePeer = "added storage peer"
-	InvalidLifetimeUsingDefaultValue = "invalid lifetime, using default value (in seconds)"
-	InvalidCacheSizeUsingDefaultValue = "invalid cache size, using default value"
-	WarnDuplicateAddress = "duplicate address"
 	FailedToLoadMultinetConfig = "failed to load multinet config"
 	MultinetConfigWontBeUpdated = "multinet config won't be updated"
-	LogLevelWontBeUpdated = "log level won't be updated"
-	TagsLogConfigWontBeUpdated = "tags log config won't be updated"
-	FailedToReadIndexPageTemplate = "failed to read index page template"
-	SetCustomIndexPageTemplate = "set custom index page template"
-)
-
-// Log messages with the "datapath" tag.
-const (
-	CouldntParseCreationDate = "couldn't parse creation date"
-	CouldNotDetectContentTypeFromPayload = "could not detect Content-Type from payload"
-	FailedToAddObjectToArchive = "failed to add object to archive"
-	CloseZipWriter = "close zip writer"
-	IgnorePartEmptyFormName = "ignore part, empty form name"
-	IgnorePartEmptyFilename = "ignore part, empty filename"
-	CouldNotParseClientTime = "could not parse client time"
-	CouldNotPrepareExpirationHeader = "could not prepare expiration header"
-	CouldNotEncodeResponse = "could not encode response"
-	AddAttributeToResultObject = "add attribute to result object"
-	Request = "request"
-	CouldNotFetchAndStoreBearerToken = "could not fetch and store bearer token"
-	CouldntPutBucketIntoCache = "couldn't put bucket info into cache"
-	FailedToIterateOverResponse = "failed to iterate over search response"
-	InvalidCacheEntryType = "invalid cache entry type"
-	FailedToUnescapeQuery = "failed to unescape query"
-	CouldntCacheNetmap = "couldn't cache netmap"
-	FailedToCloseReader = "failed to close reader"
-	FailedToFilterHeaders = "failed to filter headers"
-	FailedToReadFileFromTar = "failed to read file from tar"
-	FailedToGetAttributes = "failed to get attributes"
-	CloseGzipWriter = "close gzip writer"
-	CloseTarWriter = "close tar writer"
-	FailedToCreateGzipReader = "failed to create gzip reader"
-	GzipReaderSelected = "gzip reader selected"
-	CouldNotReceiveMultipartForm = "could not receive multipart/form"
-	ObjectsNotFound = "objects not found"
-	IteratingOverSelectedObjectsFailed = "iterating over selected objects failed"
-	CouldNotGetBucket = "could not get bucket"
-	CouldNotResolveContainerID = "could not resolve container id"
-	FailedToSumbitTaskToPool = "failed to submit task to pool"
-)
-
-// Log messages with the "external_storage" tag.
-const (
-	CouldNotReceiveObject = "could not receive object"
-	CouldNotSearchForObjects = "could not search for objects"
-	ObjectNotFound = "object not found"
-	ReadObjectListFailed = "read object list failed"
-	CouldNotStoreFileInFrostfs = "could not store file in frostfs"
-	FailedToHeadObject = "failed to head object"
-	ObjectNotFoundByFilePathTrySearchByFileName = "object not found by filePath attribute, try search by fileName"
-	FailedToGetObject = "failed to get object"
-	ObjectUploaded = "object uploaded"
-	CouldNotGetContainerInfo = "could not get container info"
-)
-
-// Log messages with the "external_storage_tree" tag.
-const (
-	ObjectWasDeleted = "object was deleted"
-	FailedToGetLatestVersionOfObject = "failed to get latest version of object"
-	FailedToCheckIfSettingsNodeExist = "Failed to check if settings node exists"
 )
 
@@ -17,11 +17,9 @@ func (l LogEventHandler) DialPerformed(sourceIP net.Addr, _, address string, err
 		sourceIPString = sourceIP.Network() + "://" + sourceIP.String()
 	}
 	if err == nil {
-		l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString),
-			zap.String("destination", address), logs.TagField(logs.TagApp))
+		l.logger.Debug(logs.MultinetDialSuccess, zap.String("source", sourceIPString), zap.String("destination", address))
 	} else {
-		l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString),
-			zap.String("destination", address), logs.TagField(logs.TagApp))
+		l.logger.Debug(logs.MultinetDialFail, zap.String("source", sourceIPString), zap.String("destination", address), zap.Error(err))
 	}
 }
 
@@ -9,10 +9,8 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/utils"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
@@ -36,9 +34,6 @@ func NewFrostFS(p *pool.Pool) *FrostFS {
 
 // Container implements frostfs.FrostFS interface method.
 func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContainer) (*container.Container, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.Container")
-	defer span.End()
-
 	prm := pool.PrmContainerGet{
 		ContainerID: containerPrm.ContainerID,
 	}
@@ -53,9 +48,6 @@ func (x *FrostFS) Container(ctx context.Context, containerPrm handler.PrmContain
 
 // CreateObject implements frostfs.FrostFS interface method.
 func (x *FrostFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.CreateObject")
-	defer span.End()
-
 	var prmPut pool.PrmObjectPut
 	prmPut.SetHeader(*prm.Object)
 	prmPut.SetPayload(prm.Payload)
@@ -90,9 +82,6 @@ func (x payloadReader) Read(p []byte) (int, error) {
 
 // HeadObject implements frostfs.FrostFS interface method.
 func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*object.Object, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.HeadObject")
-	defer span.End()
-
 	var prmHead pool.PrmObjectHead
 	prmHead.SetAddress(prm.Address)
 
@@ -110,9 +99,6 @@ func (x *FrostFS) HeadObject(ctx context.Context, prm handler.PrmObjectHead) (*o
 
 // GetObject implements frostfs.FrostFS interface method.
 func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*handler.Object, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetObject")
-	defer span.End()
-
 	var prmGet pool.PrmObjectGet
 	prmGet.SetAddress(prm.Address)
 
@@ -133,9 +119,6 @@ func (x *FrostFS) GetObject(ctx context.Context, prm handler.PrmObjectGet) (*han
 
 // RangeObject implements frostfs.FrostFS interface method.
 func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (io.ReadCloser, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.RangeObject")
-	defer span.End()
-
 	var prmRange pool.PrmObjectRange
 	prmRange.SetAddress(prm.Address)
 	prmRange.SetOffset(prm.PayloadRange[0])
@@ -155,9 +138,6 @@ func (x *FrostFS) RangeObject(ctx context.Context, prm handler.PrmObjectRange) (
 
 // SearchObjects implements frostfs.FrostFS interface method.
 func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch) (handler.ResObjectSearch, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SearchObjects")
-	defer span.End()
-
 	var prmSearch pool.PrmObjectSearch
 	prmSearch.SetContainerID(prm.Container)
 	prmSearch.SetFilters(prm.Filters)
@@ -176,9 +156,6 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm handler.PrmObjectSearch
 
 // GetEpochDurations implements frostfs.FrostFS interface method.
 func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetEpochDurations")
-	defer span.End()
-
 	networkInfo, err := x.pool.NetworkInfo(ctx)
 	if err != nil {
 		return nil, err
@@ -196,18 +173,6 @@ func (x *FrostFS) GetEpochDurations(ctx context.Context) (*utils.EpochDurations,
 	return res, nil
 }
 
-func (x *FrostFS) NetmapSnapshot(ctx context.Context) (netmap.NetMap, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.NetmapSnapshot")
-	defer span.End()
-
-	netmapSnapshot, err := x.pool.NetMapSnapshot(ctx)
-	if err != nil {
-		return netmapSnapshot, handleObjectError("get netmap via connection pool", err)
-	}
-
-	return netmapSnapshot, nil
-}
-
 // ResolverFrostFS represents virtual connection to the FrostFS network.
 // It implements resolver.FrostFS.
 type ResolverFrostFS struct {
@@ -221,9 +186,6 @@ func NewResolverFrostFS(p *pool.Pool) *ResolverFrostFS {
 
 // SystemDNS implements resolver.FrostFS interface method.
 func (x *ResolverFrostFS) SystemDNS(ctx context.Context) (string, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.SystemDNS")
-	defer span.End()
-
 	networkInfo, err := x.pool.NetworkInfo(ctx)
 	if err != nil {
 		return "", handleObjectError("read network info via client", err)
@@ -243,10 +205,6 @@ func handleObjectError(msg string, err error) error {
 	}
 
 	if reason, ok := IsErrObjectAccessDenied(err); ok {
-		if strings.Contains(reason, "limit reached") {
-			return fmt.Errorf("%s: %w: %s", msg, handler.ErrQuotaLimitReached, reason)
-		}
-
 		return fmt.Errorf("%s: %w: %s", msg, handler.ErrAccessDenied, reason)
 	}
 
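Note: every storage method on the master side of this file opens a span with tracing.StartSpanFromContext(ctx, name) and closes it with defer span.End(); the helper comes from the git.frostfs.info/TrueCloudLab/frostfs-observability/tracing import removed above. A hedged sketch of the same wrap written against the plain OpenTelemetry API (assumption: the observability helper behaves like otel.Tracer(...).Start, which the usage in this diff is consistent with):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// startSpan mirrors the shape of tracing.StartSpanFromContext used in the
// diff: derive a child context carrying the span, and return both.
func startSpan(ctx context.Context, name string) (context.Context, trace.Span) {
	return otel.Tracer("frostfs-http-gw").Start(ctx, name)
}

// container shows the instrumentation pattern added on master: open the
// span first, defer its End, then do the work with the derived ctx so the
// span propagates into downstream pool calls.
func container(ctx context.Context) error {
	ctx, span := startSpan(ctx, "frostfs.Container")
	defer span.End()

	_ = ctx // the pool request would receive this ctx
	fmt.Println("fetching container")
	return nil
}

func main() {
	// Without an SDK configured, otel falls back to a no-op tracer,
	// so this sketch runs as-is.
	_ = container(context.Background())
}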
@@ -1,83 +0,0 @@
-package frostfs
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
-	"github.com/stretchr/testify/require"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-func TestHandleObjectError(t *testing.T) {
-	msg := "some msg"
-
-	t.Run("nil error", func(t *testing.T) {
-		err := handleObjectError(msg, nil)
-		require.Nil(t, err)
-	})
-
-	t.Run("simple access denied", func(t *testing.T) {
-		reason := "some reason"
-		inputErr := new(apistatus.ObjectAccessDenied)
-		inputErr.WriteReason(reason)
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrAccessDenied)
-		require.Contains(t, err.Error(), reason)
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("access denied - quota reached", func(t *testing.T) {
-		reason := "Quota limit reached"
-		inputErr := new(apistatus.ObjectAccessDenied)
-		inputErr.WriteReason(reason)
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrQuotaLimitReached)
-		require.Contains(t, err.Error(), reason)
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("simple timeout", func(t *testing.T) {
-		inputErr := errors.New("timeout")
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), inputErr.Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("deadline exceeded", func(t *testing.T) {
-		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
-		defer cancel()
-		<-ctx.Done()
-
-		err := handleObjectError(msg, ctx.Err())
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), ctx.Err().Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("grpc deadline exceeded", func(t *testing.T) {
-		inputErr := fmt.Errorf("wrap grpc error: %w", status.Error(codes.DeadlineExceeded, "error"))
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, handler.ErrGatewayTimeout)
-		require.Contains(t, err.Error(), inputErr.Error())
-		require.Contains(t, err.Error(), msg)
-	})
-
-	t.Run("unknown error", func(t *testing.T) {
-		inputErr := errors.New("unknown error")
-
-		err := handleObjectError(msg, inputErr)
-		require.ErrorIs(t, err, inputErr)
-		require.Contains(t, err.Error(), msg)
-	})
-}
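Note: the deleted test pins down one subtle behaviour worth keeping: a gRPC status error is still classified as a gateway timeout after being wrapped with `%w`, because recent grpc-go versions recover the status through `errors.As`. A small self-contained sketch of that recovery, independent of the gateway code (assuming a grpc-go version where `status.FromError` unwraps):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Wrap a gRPC error the way handleObjectError's callers might.
	inner := status.Error(codes.DeadlineExceeded, "storage node timeout")
	wrapped := fmt.Errorf("get object: %w", inner)

	// status.FromError walks the wrap chain, so the code survives %w.
	if st, ok := status.FromError(wrapped); ok && st.Code() == codes.DeadlineExceeded {
		fmt.Println("classify as gateway timeout:", st.Message())
	}
}
```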
@@ -9,7 +9,6 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )
 
@@ -75,9 +74,6 @@ var (
 )
 
 func (x *FrostFS) InitMultiObjectReader(ctx context.Context, p handler.PrmInitMultiObjectReader) (io.Reader, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitMultiObjectReader")
-	defer span.End()
-
 	combinedObj, err := x.GetObject(ctx, handler.PrmObjectGet{
 		PrmAuth: handler.PrmAuth{BearerToken: p.Bearer},
 		Address: p.Addr,
@@ -219,9 +215,6 @@ func (x *MultiObjectReader) Read(p []byte) (n int, err error) {
 // InitFrostFSObjectPayloadReader initializes payload reader of the FrostFS object.
 // Zero range corresponds to full payload (panics if only offset is set).
 func (x *FrostFS) InitFrostFSObjectPayloadReader(ctx context.Context, p GetFrostFSParams) (io.ReadCloser, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.InitFrostFSObjectPayloadReader")
-	defer span.End()
-
 	var prmAuth handler.PrmAuth
 
 	if p.Off+p.Ln != 0 {
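Note: the only change in this file is instrumentation: master opens a tracing span at the top of every SDK-facing method and closes it on return, so each FrostFS call shows up as a child span of the HTTP request. The two-line prologue is uniform; a sketch of applying it to an arbitrary method, using the same `frostfs-observability/tracing` calls the diff shows (the receiver and method body are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
)

type service struct{}

func (s *service) NetworkInfo(ctx context.Context) (string, error) {
	// Derive a child span; the returned ctx carries it, so any nested
	// call made with this ctx attaches to the same trace.
	ctx, span := tracing.StartSpanFromContext(ctx, "example.NetworkInfo")
	defer span.End()

	_ = ctx // a real method would pass ctx to the SDK call here
	return "network info", nil
}

func main() {
	info, err := (&service{}).NetworkInfo(context.Background())
	fmt.Println(info, err)
}
```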
@@ -9,21 +9,20 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/tree"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
-	apitree "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/api/tree"
 	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
+	grpcService "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree/service"
 )
 
 type GetNodeByPathResponseInfoWrapper struct {
-	response *apitree.GetNodeByPathResponseInfo
+	response *grpcService.GetNodeByPathResponse_Info
 }
 
 func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
-	return []uint64{n.response.GetNodeID()}
+	return []uint64{n.response.GetNodeId()}
 }
 
 func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
-	return []uint64{n.response.GetParentID()}
+	return []uint64{n.response.GetParentId()}
 }
 
 func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
@@ -31,8 +30,8 @@ func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
 }
 
 func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {
-	res := make([]tree.Meta, len(n.response.GetMeta()))
-	for i, value := range n.response.GetMeta() {
+	res := make([]tree.Meta, len(n.response.Meta))
+	for i, value := range n.response.Meta {
 		res[i] = value
 	}
 	return res
@@ -47,9 +46,6 @@ func NewPoolWrapper(p *treepool.Pool) *PoolWrapper {
 }
 
 func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([]tree.NodeResponse, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetNodes")
-	defer span.End()
-
 	poolPrm := treepool.GetNodesParams{
 		CID:    prm.CnrID,
 		TreeID: prm.TreeID,
@@ -97,9 +93,6 @@ func handleError(err error) error {
 }
 
 func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "frostfs.GetSubTree")
-	defer span.End()
-
 	order := treepool.NoneOrder
 	if sort {
 		order = treepool.AscendingOrder
@@ -140,15 +133,15 @@ func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo,
 }
 
 type GetSubTreeResponseBodyWrapper struct {
-	response *apitree.GetSubTreeResponseBody
+	response *grpcService.GetSubTreeResponse_Body
 }
 
 func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
-	return n.response.GetNodeID()
+	return n.response.GetNodeId()
 }
 
 func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
-	resp := n.response.GetParentID()
+	resp := n.response.GetParentId()
 	if resp == nil {
 		// storage sends nil that should be interpreted as []uint64{0}
 		// due to protobuf compatibility, see 'GetSubTree' function
@@ -162,8 +155,8 @@ func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
 }
 
 func (n GetSubTreeResponseBodyWrapper) GetMeta() []tree.Meta {
-	res := make([]tree.Meta, len(n.response.GetMeta()))
-	for i, value := range n.response.GetMeta() {
+	res := make([]tree.Meta, len(n.response.Meta))
+	for i, value := range n.response.Meta {
 		res[i] = value
 	}
 	return res
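Note: both sides of this diff implement the same adapter pattern: tree logic consumes a neutral `tree.NodeResponse` interface, and a thin wrapper translates whichever SDK response type is current (Go-style `GetNodeID()` getters on master, protobuf-style `GetNodeId()` in v0.31.0) into that interface. A self-contained sketch with a stand-in response type:

```go
package main

import "fmt"

// nodeResponse mirrors the role of tree.NodeResponse: what tree logic consumes.
type nodeResponse interface {
	GetNodeID() []uint64
	GetParentID() []uint64
}

// protoInfo stands in for a generated protobuf message with proto-style getters.
type protoInfo struct{ nodeID, parentID uint64 }

func (p *protoInfo) GetNodeId() uint64   { return p.nodeID }
func (p *protoInfo) GetParentId() uint64 { return p.parentID }

// infoWrapper adapts naming and scalar shape, like GetNodeByPathResponseInfoWrapper.
type infoWrapper struct{ response *protoInfo }

func (w infoWrapper) GetNodeID() []uint64   { return []uint64{w.response.GetNodeId()} }
func (w infoWrapper) GetParentID() []uint64 { return []uint64{w.response.GetParentId()} }

func main() {
	var n nodeResponse = infoWrapper{response: &protoInfo{nodeID: 7, parentID: 3}}
	fmt.Println(n.GetNodeID(), n.GetParentID()) // [7] [3]
}
```

Swapping the SDK type then only touches the wrapper, which is exactly what this diff does.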
@@ -1,69 +0,0 @@
-package frostfs
-
-import (
-	"context"
-	"fmt"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/cache"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/handler"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/logs"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"go.uber.org/zap"
-)
-
-type Source struct {
-	frostFS     *FrostFS
-	netmapCache *cache.NetmapCache
-	bucketCache *cache.BucketCache
-	log         *zap.Logger
-}
-
-func NewSource(frostFS *FrostFS, netmapCache *cache.NetmapCache, bucketCache *cache.BucketCache, log *zap.Logger) *Source {
-	return &Source{
-		frostFS:     frostFS,
-		netmapCache: netmapCache,
-		bucketCache: bucketCache,
-		log:         log,
-	}
-}
-
-func (s *Source) NetMapSnapshot(ctx context.Context) (netmap.NetMap, error) {
-	cachedNetmap := s.netmapCache.Get()
-	if cachedNetmap != nil {
-		return *cachedNetmap, nil
-	}
-
-	netmapSnapshot, err := s.frostFS.NetmapSnapshot(ctx)
-	if err != nil {
-		return netmap.NetMap{}, fmt.Errorf("get netmap: %w", err)
-	}
-
-	if err = s.netmapCache.Put(netmapSnapshot); err != nil {
-		s.log.Warn(logs.CouldntCacheNetmap, zap.Error(err), logs.TagField(logs.TagDatapath))
-	}
-
-	return netmapSnapshot, nil
-}
-
-func (s *Source) PlacementPolicy(ctx context.Context, cnrID cid.ID) (netmap.PlacementPolicy, error) {
-	info := s.bucketCache.GetByCID(cnrID)
-	if info != nil {
-		return info.PlacementPolicy, nil
-	}
-
-	prm := handler.PrmContainer{
-		ContainerID: cnrID,
-	}
-	res, err := s.frostFS.Container(ctx, prm)
-	if err != nil {
-		return netmap.PlacementPolicy{}, fmt.Errorf("get container: %w", err)
-	}
-
-	// We don't put container back to the cache to keep cache
-	// coherent to the requests made by users. FrostFS Source
-	// is being used by SDK Tree Pool and it should not fill cache
-	// with possibly irrelevant container values.
-
-	return res.PlacementPolicy(), nil
-}
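Note: the deleted `Source` (added on master for the tree pool) uses a plain cache-aside policy for the netmap: return the cached snapshot if present, otherwise fetch, write back, and treat a cache write failure as log-only. Containers are deliberately not written back, so this path cannot pollute the user-facing bucket cache. The netmap half reduces to the following sketch, with generic stand-ins for the cache and the FrostFS client:

```go
package main

import (
	"context"
	"fmt"
)

type netMap struct{ epoch uint64 }

type snapshotCache struct{ cached *netMap }

func (c *snapshotCache) Get() *netMap        { return c.cached }
func (c *snapshotCache) Put(nm netMap) error { c.cached = &nm; return nil }

// fetchSnapshot stands in for FrostFS.NetmapSnapshot.
func fetchSnapshot(ctx context.Context) (netMap, error) {
	return netMap{epoch: 42}, nil
}

// netMapSnapshot mirrors Source.NetMapSnapshot: cache hit wins, and a
// failed cache write is reported but never fails the request.
func netMapSnapshot(ctx context.Context, cache *snapshotCache) (netMap, error) {
	if nm := cache.Get(); nm != nil {
		return *nm, nil
	}

	nm, err := fetchSnapshot(ctx)
	if err != nil {
		return netMap{}, fmt.Errorf("get netmap: %w", err)
	}

	if err := cache.Put(nm); err != nil {
		fmt.Println("couldn't cache netmap:", err) // the real code logs a warning
	}

	return nm, nil
}

func main() {
	c := &snapshotCache{}
	nm, _ := netMapSnapshot(context.Background(), c)
	fmt.Println("epoch:", nm.epoch)
}
```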
@@ -76,15 +76,6 @@ var appMetricsDesc = map[string]map[string]Description{
 			VariableLabels: []string{"endpoint"},
 		},
 	},
-	statisticSubsystem: {
-		droppedLogs: Description{
-			Type:      dto.MetricType_COUNTER,
-			Namespace: namespace,
-			Subsystem: statisticSubsystem,
-			Name:      droppedLogs,
-			Help:      "Dropped logs (by sampling) count",
-		},
-	},
 }
 
 type Description struct {
@@ -157,12 +148,3 @@ func mustNewGaugeVec(description Description) *prometheus.GaugeVec {
 		description.VariableLabels,
 	)
 }
-
-func mustNewCounter(description Description) prometheus.Counter {
-	if description.Type != dto.MetricType_COUNTER {
-		panic("invalid metric type")
-	}
-	return prometheus.NewCounter(
-		prometheus.CounterOpts(newOpts(description)),
-	)
-}
@@ -14,13 +14,11 @@ const (
 	stateSubsystem  = "state"
 	poolSubsystem   = "pool"
 	serverSubsystem = "server"
-	statisticSubsystem = "statistic"
 )
 
 const (
 	healthMetric      = "health"
 	versionInfoMetric = "version_info"
-	droppedLogs       = "dropped_logs"
 )
 
 const (
@@ -37,6 +35,8 @@ const (
 	methodGetContainer    = "get_container"
 	methodListContainer   = "list_container"
 	methodDeleteContainer = "delete_container"
+	methodGetContainerEacl = "get_container_eacl"
+	methodSetContainerEacl = "set_container_eacl"
 	methodEndpointInfo    = "endpoint_info"
 	methodNetworkInfo     = "network_info"
 	methodPutObject       = "put_object"
@@ -69,7 +69,6 @@ type GateMetrics struct {
 	stateMetrics
 	poolMetricsCollector
 	serverMetrics
-	statisticMetrics
 }
 
 type stateMetrics struct {
@@ -77,10 +76,6 @@ type stateMetrics struct {
 	versionInfo *prometheus.GaugeVec
 }
 
-type statisticMetrics struct {
-	droppedLogs prometheus.Counter
-}
-
 type poolMetricsCollector struct {
 	scraper       StatisticScraper
 	overallErrors prometheus.Gauge
@@ -101,14 +96,10 @@ func NewGateMetrics(p StatisticScraper) *GateMetrics {
 	serverMetric := newServerMetrics()
 	serverMetric.register()
 
-	statsMetric := newStatisticMetrics()
-	statsMetric.register()
-
 	return &GateMetrics{
 		stateMetrics:         *stateMetric,
 		poolMetricsCollector: *poolMetric,
 		serverMetrics:        *serverMetric,
-		statisticMetrics:     *statsMetric,
 	}
 }
 
@@ -116,7 +107,6 @@ func (g *GateMetrics) Unregister() {
 	g.stateMetrics.unregister()
 	prometheus.Unregister(&g.poolMetricsCollector)
 	g.serverMetrics.unregister()
-	g.statisticMetrics.unregister()
 }
 
 func newStateMetrics() *stateMetrics {
@@ -126,20 +116,6 @@ func newStateMetrics() *stateMetrics {
 	}
 }
 
-func newStatisticMetrics() *statisticMetrics {
-	return &statisticMetrics{
-		droppedLogs: mustNewCounter(appMetricsDesc[statisticSubsystem][droppedLogs]),
-	}
-}
-
-func (s *statisticMetrics) register() {
-	prometheus.MustRegister(s.droppedLogs)
-}
-
-func (s *statisticMetrics) unregister() {
-	prometheus.Unregister(s.droppedLogs)
-}
-
 func (m stateMetrics) register() {
 	prometheus.MustRegister(m.healthCheck)
 	prometheus.MustRegister(m.versionInfo)
@@ -158,13 +134,6 @@ func (m stateMetrics) SetVersion(ver string) {
 	m.versionInfo.WithLabelValues(ver).Set(1)
 }
 
-func (s *statisticMetrics) DroppedLogsInc() {
-	if s == nil {
-		return
-	}
-	s.droppedLogs.Inc()
-}
-
 func newPoolMetricsCollector(p StatisticScraper) *poolMetricsCollector {
 	return &poolMetricsCollector{
 		scraper: p,
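Note: on master the `dropped_logs` counter is meant to be fed by zap's log sampler, per the changelog entry "Metric of dropped logs by log sampler". The nil-receiver guard in `DroppedLogsInc` lets the hook be installed even before metrics exist. A sketch of one plausible wiring through `zapcore.SamplerHook`; the gateway's actual hook registration may differ:

```go
package main

import (
	"io"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

type statisticMetrics struct{ droppedLogs prometheus.Counter }

// DroppedLogsInc tolerates a nil receiver, so the sampler hook can be
// registered before the metrics object is constructed.
func (s *statisticMetrics) DroppedLogsInc() {
	if s == nil {
		return
	}
	s.droppedLogs.Inc()
}

func main() {
	stats := &statisticMetrics{
		droppedLogs: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "frostfs_http_gw", // assumed namespace for the sketch
			Subsystem: "statistic",
			Name:      "dropped_logs",
			Help:      "Dropped logs (by sampling) count",
		}),
	}
	prometheus.MustRegister(stats.droppedLogs)

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(io.Discard), // throwaway sink for the sketch
		zap.InfoLevel,
	)

	// Keep the first entry per second, drop the rest, and count the drops.
	sampled := zapcore.NewSamplerWithOptions(base, time.Second, 1, 0,
		zapcore.SamplerHook(func(_ zapcore.Entry, decision zapcore.SamplingDecision) {
			if decision&zapcore.LogDropped > 0 {
				stats.DroppedLogsInc()
			}
		}))

	log := zap.New(sampled)
	for i := 0; i < 100; i++ {
		log.Info("spam")
	}
}
```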
@@ -25,24 +25,24 @@ type Config struct {
 // Start runs http service with the exposed endpoint on the configured port.
 func (ms *Service) Start() {
 	if ms.enabled {
-		ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
+		ms.log.Info(logs.ServiceIsRunning, zap.String("endpoint", ms.Addr))
 		err := ms.ListenAndServe()
 		if err != nil && err != http.ErrServerClosed {
-			ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort, logs.TagField(logs.TagApp))
+			ms.log.Warn(logs.ServiceCouldntStartOnConfiguredPort)
 		}
 	} else {
-		ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled, logs.TagField(logs.TagApp))
+		ms.log.Info(logs.ServiceHasntStartedSinceItsDisabled)
 	}
 }
 
 // ShutDown stops the service.
 func (ms *Service) ShutDown(ctx context.Context) {
-	ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr), logs.TagField(logs.TagApp))
+	ms.log.Info(logs.ShuttingDownService, zap.String("endpoint", ms.Addr))
 	err := ms.Shutdown(ctx)
 	if err != nil {
-		ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
+		ms.log.Error(logs.CantGracefullyShutDownService, zap.Error(err))
 		if err = ms.Close(); err != nil {
-			ms.log.Panic(logs.CantShutDownService, zap.Error(err), logs.TagField(logs.TagApp))
+			ms.log.Panic(logs.CantShutDownService, zap.Error(err))
 		}
 	}
 }
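Note: the only difference here is that master appends `logs.TagField(logs.TagApp)` to every entry, giving operators a stable key to separate application-lifecycle logs from datapath logs. In zap terms a tag is just a constant field; a sketch where the tag values come from the diff but the field key "tag" is an assumption:

```go
package main

import "go.uber.org/zap"

const (
	tagApp      = "app"
	tagDatapath = "datapath"
)

// tagField mirrors the role of logs.TagField: a constant zap field
// used to filter entries by subsystem.
func tagField(tag string) zap.Field {
	return zap.String("tag", tag)
}

func main() {
	log, _ := zap.NewProduction()
	defer func() { _ = log.Sync() }()

	log.Info("metrics service is running",
		zap.String("endpoint", ":8086"),
		tagField(tagApp))
}
```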
41 response/utils.go Normal file
@@ -0,0 +1,41 @@
+package response
+
+import (
+	"errors"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
+	sdkstatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
+	"github.com/valyala/fasthttp"
+	"go.uber.org/zap"
+)
+
+func Error(r *fasthttp.RequestCtx, msg string, code int) {
+	r.Error(msg+"\n", code)
+}
+
+func FormErrorResponse(message string, err error) (int, string, []zap.Field) {
+	var (
+		msg        string
+		statusCode int
+		logFields  []zap.Field
+	)
+
+	st := new(sdkstatus.ObjectAccessDenied)
+
+	switch {
+	case errors.As(err, &st):
+		statusCode = fasthttp.StatusForbidden
+		reason := st.Reason()
+		msg = fmt.Sprintf("%s: %v: %s", message, err, reason)
+		logFields = append(logFields, zap.String("error_detail", reason))
+	case client.IsErrObjectNotFound(err) || client.IsErrContainerNotFound(err):
+		statusCode = fasthttp.StatusNotFound
+		msg = "Not Found"
+	default:
+		statusCode = fasthttp.StatusBadRequest
+		msg = fmt.Sprintf("%s: %v", message, err)
+	}
+
+	return statusCode, msg, logFields
+}
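Note: `FormErrorResponse` centralizes the error-to-HTTP mapping (403 with the access-denial reason, 404 for a missing object or container, 400 otherwise) so that handlers only log and write. A plausible call site; the handler name, log message, and the `response` import path are assumptions inferred from the file location:

```go
package handlers

import (
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/response"
	"github.com/valyala/fasthttp"
	"go.uber.org/zap"
)

// getObject sketches a fasthttp handler that delegates error mapping
// to response.FormErrorResponse; fetchObject is a placeholder.
func getObject(log *zap.Logger, ctx *fasthttp.RequestCtx) {
	if err := fetchObject(ctx); err != nil {
		statusCode, msg, logFields := response.FormErrorResponse("could not get object", err)

		log.Error("request failed", append(logFields, zap.Error(err))...)
		response.Error(ctx, msg, statusCode)
		return
	}
}

func fetchObject(*fasthttp.RequestCtx) error { return errors.New("not implemented") }
```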
@@ -82,23 +82,15 @@ func fetchBearerToken(ctx *fasthttp.RequestCtx) (*bearer.Token, error) {
 		tkn = new(bearer.Token)
 	)
 	for _, parse := range []fromHandler{BearerTokenFromHeader, BearerTokenFromCookie} {
-		buf = parse(&ctx.Request.Header)
-		if buf == nil {
-			continue
-		}
-
-		data, err := base64.StdEncoding.DecodeString(string(buf))
-		if err != nil {
-			lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
-			continue
-		}
-
-		if err = tkn.Unmarshal(data); err != nil {
-			if err = tkn.UnmarshalJSON(data); err != nil {
-				lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
-				continue
-			}
-		}
+		if buf = parse(&ctx.Request.Header); buf == nil {
+			continue
+		} else if data, err := base64.StdEncoding.DecodeString(string(buf)); err != nil {
+			lastErr = fmt.Errorf("can't base64-decode bearer token: %w", err)
+			continue
+		} else if err = tkn.Unmarshal(data); err != nil {
+			lastErr = fmt.Errorf("can't unmarshal bearer token: %w", err)
+			continue
+		}
 
 		return tkn, nil
 	}
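Note: both branches read a base64-encoded bearer token from the request header or a cookie; master additionally falls back to `UnmarshalJSON`, so either the binary or the JSON encoding of a token is accepted there, while v0.31.0 accepts only the binary form. Producing the two encodings with the SDK looks roughly like this (signing is omitted, and a real token must be signed and carry a proper owner before the gateway accepts it):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
)

func main() {
	var tkn bearer.Token
	var owner user.ID // zero value for the sketch; normally derived from a key
	tkn.ForUser(owner)

	// Binary protobuf encoding: accepted by both branches.
	binB64 := base64.StdEncoding.EncodeToString(tkn.Marshal())

	// JSON encoding: accepted on master via the UnmarshalJSON fallback.
	jsonBytes, err := tkn.MarshalJSON()
	if err != nil {
		panic(err)
	}
	jsonB64 := base64.StdEncoding.EncodeToString(jsonBytes)

	fmt.Println("header value: Bearer " + binB64)
	fmt.Println("cookie value:", jsonB64)
}
```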
@@ -98,14 +98,8 @@ func TestFetchBearerToken(t *testing.T) {
 	tkn := new(bearer.Token)
 	tkn.ForUser(uid)
 
-	jsonToken, err := tkn.MarshalJSON()
-	require.NoError(t, err)
-
-	jsonTokenBase64 := base64.StdEncoding.EncodeToString(jsonToken)
-	binaryTokenBase64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
-
-	require.NotEmpty(t, jsonTokenBase64)
-	require.NotEmpty(t, binaryTokenBase64)
+	t64 := base64.StdEncoding.EncodeToString(tkn.Marshal())
+	require.NotEmpty(t, t64)
 
 	cases := []struct {
 		name string
@@ -149,47 +143,25 @@ func TestFetchBearerToken(t *testing.T) {
 			error: "can't unmarshal bearer token",
 		},
 		{
-			name:   "bad header, but good cookie with binary token",
+			name:   "bad header, but good cookie",
 			header: "dGVzdAo=",
-			cookie: binaryTokenBase64,
+			cookie: t64,
 			expect: tkn,
 		},
 		{
-			name:   "bad cookie, but good header with binary token",
-			header: binaryTokenBase64,
+			name:   "bad cookie, but good header",
+			header: t64,
 			cookie: "dGVzdAo=",
 			expect: tkn,
 		},
 		{
-			name:   "bad header, but good cookie with json token",
-			header: "dGVzdAo=",
-			cookie: jsonTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "bad cookie, but good header with json token",
-			header: jsonTokenBase64,
-			cookie: "dGVzdAo=",
-			expect: tkn,
-		},
-		{
-			name:   "ok for header with binary token",
-			header: binaryTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "ok for cookie with binary token",
-			cookie: binaryTokenBase64,
-			expect: tkn,
-		},
-		{
-			name:   "ok for header with json token",
-			header: jsonTokenBase64,
+			name:   "ok for header",
+			header: t64,
 			expect: tkn,
 		},
 		{
-			name:   "ok for cookie with json token",
-			cookie: jsonTokenBase64,
+			name:   "ok for cookie",
+			cookie: t64,
 			expect: tkn,
 		},
 	}
63 tree/tree.go
@@ -6,9 +6,9 @@ import (
 	"fmt"
 	"strings"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-http-gw/internal/layer"
-	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )
@@ -118,7 +118,7 @@ func (n *treeNode) FileName() (string, bool) {
 	return value, ok
 }
 
-func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) {
+func newNodeVersion(node NodeResponse) (*api.NodeVersion, error) {
 	tNode, err := newTreeNode(node)
 	if err != nil {
 		return nil, fmt.Errorf("invalid tree node: %w", err)
@@ -127,30 +127,20 @@ func newNodeVersion(node NodeResponse) (*data.NodeVersion, error) {
 	return newNodeVersionFromTreeNode(tNode), nil
 }
 
-func newNodeVersionFromTreeNode(treeNode *treeNode) *data.NodeVersion {
+func newNodeVersionFromTreeNode(treeNode *treeNode) *api.NodeVersion {
 	_, isDeleteMarker := treeNode.Get(isDeleteMarkerKV)
-	version := &data.NodeVersion{
-		BaseNodeVersion: data.BaseNodeVersion{
+	size, _ := treeNode.Get(sizeKV)
+	version := &api.NodeVersion{
+		BaseNodeVersion: api.BaseNodeVersion{
 			OID: treeNode.ObjID,
-			IsDeleteMarker: isDeleteMarker,
 		},
+		DeleteMarker: isDeleteMarker,
+		IsPrefixNode: size == "",
 	}
 
 	return version
 }
 
-func newNodeInfo(node NodeResponse) data.NodeInfo {
-	nodeMeta := node.GetMeta()
-	nodeInfo := data.NodeInfo{
-		Meta: make([]data.NodeMeta, 0, len(nodeMeta)),
-	}
-	for _, meta := range nodeMeta {
-		nodeInfo.Meta = append(nodeInfo.Meta, meta)
-	}
-
-	return nodeInfo
-}
-
 func newMultiNode(nodes []NodeResponse) (*multiSystemNode, error) {
 	var (
 		err error
@@ -190,10 +180,7 @@ func (m *multiSystemNode) Old() []*treeNode {
 	return m.nodes[1:]
 }
 
-func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*data.NodeVersion, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetLatestVersion")
-	defer span.End()
-
+func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName string) (*api.NodeVersion, error) {
 	nodes, err := c.GetVersions(ctx, cnrID, objectName)
 	if err != nil {
 		return nil, err
@@ -208,9 +195,6 @@ func (c *Tree) GetLatestVersion(ctx context.Context, cnrID *cid.ID, objectName s
 }
 
 func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string) ([]NodeResponse, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetVersions")
-	defer span.End()
-
 	meta := []string{oidKV, isDeleteMarkerKV, sizeKV}
 	path := pathFromName(objectName)
 
@@ -226,10 +210,7 @@ func (c *Tree) GetVersions(ctx context.Context, cnrID *cid.ID, objectName string
 	return c.service.GetNodes(ctx, p)
 }
 
-func (c *Tree) CheckSettingsNodeExists(ctx context.Context, bktInfo *data.BucketInfo) error {
-	ctx, span := tracing.StartSpanFromContext(ctx, "tree.CheckSettingsNodeExists")
-	defer span.End()
-
+func (c *Tree) CheckSettingsNodeExist(ctx context.Context, bktInfo *data.BucketInfo) error {
 	_, err := c.getSystemNode(ctx, bktInfo, settingsFileName)
 	if err != nil {
 		return err
@@ -255,7 +236,7 @@ func (c *Tree) getSystemNode(ctx context.Context, bktInfo *data.BucketInfo, name
 	nodes = filterMultipartNodes(nodes)
 
 	if len(nodes) == 0 {
-		return nil, layer.ErrNodeNotFound
+		return nil, ErrNodeNotFound
 	}
 
 	return newMultiNode(nodes)
@@ -317,17 +298,14 @@ func pathFromName(objectName string) []string {
 	return strings.Split(objectName, separator)
 }
 
-func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]data.NodeInfo, string, error) {
-	ctx, span := tracing.StartSpanFromContext(ctx, "tree.GetSubTreeByPrefix")
-	defer span.End()
-
+func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
 	rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, versionTree, prefix)
 	if err != nil {
 		return nil, "", err
 	}
 	subTree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, rootID, 2, false)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
+		if errors.Is(err, layer.ErrNodeNotFound) {
 			return nil, "", nil
 		}
 		return nil, "", err
@@ -362,23 +340,14 @@ func (c *Tree) GetSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
 		nodesMap[fileName] = nodes
 	}
 
-	result := make([]data.NodeInfo, 0, len(subTree))
+	result := make([]NodeResponse, 0, len(subTree))
 	for _, nodes := range nodesMap {
-		result = append(result, nodeResponseToNodeInfo(nodes)...)
+		result = append(result, nodes...)
 	}
 
 	return result, strings.TrimSuffix(prefix, tailPrefix), nil
 }
 
-func nodeResponseToNodeInfo(nodes []NodeResponse) []data.NodeInfo {
-	nodesInfo := make([]data.NodeInfo, 0, len(nodes))
-	for _, node := range nodes {
-		nodesInfo = append(nodesInfo, newNodeInfo(node))
-	}
-
-	return nodesInfo
-}
-
 func (c *Tree) determinePrefixNode(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) ([]uint64, string, error) {
 	rootID := []uint64{0}
 	path := strings.Split(prefix, separator)
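Note: `GetVersions` turns an object name into a tree path with `pathFromName` (a split on the separator) and fetches every version node together with its OID, delete-marker flag, and size; `GetLatestVersion` then picks one of those nodes. A sketch of one plausible selection rule, combining a max-timestamp choice with the delete-marker awareness these meta keys exist for; the real function's policy may differ in detail:

```go
package main

import "fmt"

type nodeVersion struct {
	oid            string
	timestamp      uint64
	isDeleteMarker bool
}

// latestVersion picks the newest non-delete-marker version. Skipping
// markers matches the spirit of master's index-page fix ("ignore delete
// marked objects in versioned bucket"), though GetLatestVersion itself
// may resolve markers differently.
func latestVersion(nodes []nodeVersion) (nodeVersion, bool) {
	var (
		best  nodeVersion
		found bool
	)
	for _, n := range nodes {
		if n.isDeleteMarker {
			continue
		}
		if !found || n.timestamp > best.timestamp {
			best, found = n, true
		}
	}
	return best, found
}

func main() {
	nodes := []nodeVersion{
		{oid: "A", timestamp: 10},
		{oid: "B", timestamp: 30, isDeleteMarker: true},
		{oid: "C", timestamp: 20},
	}
	if v, ok := latestVersion(nodes); ok {
		fmt.Println("latest:", v.oid) // C
	}
}
```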