forked from TrueCloudLab/frostfs-s3-gw

Compare commits: feature/lo...master (43 commits)
Commits:

d919e6cce2
056f168d77
9bdfe2a016
d6b506f6d9
a2e0b92575
b08f476ea7
f4275d837a
664f83b2b7
136b5521fe
a5f670d904
d76c4fe2a2
0637133c61
bf00fa6aa9
ff690ce996
534ae7f0f1
77673797f9
9e1766ff74
e73f11c251
5cb77018f8
fa68a4ce40
0644067496
481520705a
28723f4a68
20719bd85c
4f27e34974
3dc989d7fe
69e77aecc9
c34680d157
f5326b9f04
51c5c227c2
c506620199
6cb0026007
971006a28c
527e0dc612
3213dd7236
a031777a1b
b2a5da8247
ec349e4523
977a20760b
2948d1f942
c0011ebb8d
456319d2f1
1d965b23ab
92 changed files with 7072 additions and 1420 deletions
@@ -1,4 +1,4 @@
-FROM golang:1.21 as builder
+FROM golang:1.22 AS builder
 ARG BUILD=now
 ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
@@ -6,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.20', '1.21' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.23'

       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
@@ -10,7 +10,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.23'
          cache: true

       - name: Install linters
@@ -24,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.20', '1.21' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
      - uses: actions/checkout@v3
@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.21'
+          go-version: '1.23'

       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
@ -12,7 +12,8 @@ run:
|
||||||
# output configuration options
|
# output configuration options
|
||||||
output:
|
output:
|
||||||
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
format: tab
|
formats:
|
||||||
|
- format: tab
|
||||||
|
|
||||||
# all available settings of specific linters
|
# all available settings of specific linters
|
||||||
linters-settings:
|
linters-settings:
|
||||||
|
|
CHANGELOG.md | 49
@@ -4,28 +4,67 @@ This document outlines major changes between releases.

 ## [Unreleased]

+### Added
+- Add support for virtual hosted style addressing (#446, #449)
+- Support new param `frostfs.graceful_close_on_switch_timeout` (#475)
+- Support patch object method (#479)
+
+### Changed
+- Update go version to go1.19 (#470)
+
+## [0.30.0] - Kangshung - 2024-07-19
+
 ### Fixed
 - Fix HTTP/2 requests (#341)
 - Fix Decoder.CharsetReader is nil (#379)
 - Fix flaky ACL encode test (#340)
+- Docs grammar (#432)

 ### Added
 - Add new `reconnect_interval` config param for server rebinding (#291)
 - Support `GetBucketPolicyStatus` (#301)
 - Support request IP filter with policy (#371, #377)
-- Support tag checks in policies (#357, #365, #392)
+- Support tag checks in policies (#357, #365, #392, #403, #411)
 - Support IAM-MFA checks (#367)
 - More docs (#334, #353)
 - Add `register-user` command to `authmate` (#414)
+- `User` field in request log (#396)
+- Erasure coding support in placement policy (#400)
+- Improved test coverage (#402)

 ### Changed
 - Update dependencies noted by govulncheck (#368)
-- Improve test coverate (#380, #387)
+- Improve test coverage (#380, #387)
 - Support updated naming in native policy JSON (#385)
+- Improve determining AccessBox latest version (#335)
+- Don't set full_control policy for bucket owner (#407)

 ### Removed
 - Remove control api (#406)
 - Remove notifications (#401)
+- Remove `layer.Client` interface (#410)
+- Remove extended ACL related code (#372)
+
+## [0.29.3] - 2024-07-19
+
+### Fixed
+- Support tree split environment when multiple nodes
+  may be part of the same sub path (#430)
+- Collision of multipart name and system data in the tree (#430)
+- Workaround for removal of multiple null versions in unversioned bucket (#430)
+
+## [0.29.2] - 2024-07-03
+
+### Fixed
+- Parsing of put-bucket-setting retry configuration (#398)
+
+## [0.29.1] - 2024-06-20
+
+### Fixed
+- OPTIONS request processing for object operations (#399)
+
+### Added
+- Retries of put-bucket-setting operation during container creation (#398)

 ## [0.29.0] - Zemu - 2024-05-27

@@ -198,4 +237,8 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.28.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.0...v0.28.1
 [0.28.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.1...v0.28.2
 [0.29.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.28.2...v0.29.0
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...master
+[0.29.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.0...v0.29.1
+[0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
+[0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
+[0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...master
Makefile | 40
@@ -3,9 +3,9 @@
 # Common variables
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
-GO_VERSION ?= 1.20
-LINT_VERSION ?= 1.56.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
+GO_VERSION ?= 1.22
+LINT_VERSION ?= 1.60.1
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
 BINDIR = bin

 METRICS_DUMP_OUT ?= ./metrics-dump.json

@@ -23,6 +23,12 @@ OUTPUT_LINT_DIR ?= $(shell pwd)/bin
 LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
 TMP_DIR := .cache

+# Variables for fuzzing
+FUZZ_NGFUZZ_DIR ?= ""
+FUZZ_TIMEOUT ?= 30
+FUZZ_FUNCTIONS ?= "all"
+FUZZ_AUX ?= ""
+
 .PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc

 # .deb package versioning

@@ -76,6 +82,34 @@ cover:
 	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
 	@go tool cover -html=coverage.txt -o coverage.html

+# Run fuzzing
+CLANG := $(shell which clang-17 2>/dev/null)
+
+.PHONY: check-clang all
+check-clang:
+ifeq ($(CLANG),)
+	@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
+	@exit 1
+endif
+
+.PHONY: check-ngfuzz all
+check-ngfuzz:
+	@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
+		echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
+		exit 1; \
+	fi
+
+.PHONY: install-fuzzing-deps
+install-fuzzing-deps: check-clang check-ngfuzz
+
+.PHONY: fuzz
+fuzz: install-fuzzing-deps
+	@START_PATH=$$(pwd); \
+	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
+	cd $(FUZZ_NGFUZZ_DIR) && \
+	./ngfuzz -clean && \
+	./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
+	./ngfuzz -report
+
 # Reformat code
 format:
 	@echo "⇒ Processing gofmt check"
README.md | 18
@@ -93,6 +93,24 @@ HTTP/1.1 200 OK

 Also, you can configure domains using `.env` variables or `yaml` file.

+## Fuzzing
+To run fuzzing tests use the following command:
+
+```shell
+$ make fuzz
+```
+
+This command will install dependencies for the fuzzing process and run existing fuzzing tests.
+
+You can also use the following arguments:
+
+```
+FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
+FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
+FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
+FUZZ_NGFUZZ_DIR - path to ngfuzz tool
+```
+
 ## Documentation

 - [Configuration](./docs/configuration.md)
SECURITY.md | 26 (new file)
@@ -0,0 +1,26 @@
+# Security Policy
+
+
+## How To Report a Vulnerability
+
+If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
+
+**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
+
+Instead, you can report it using one of the following ways:
+
+* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
+
+Please include as much of the information listed below as you can to help us better understand and resolve the issue:
+
+* The type of issue (e.g., buffer overflow, or cross-site scripting)
+* Affected version(s)
+* Impact of the issue, including how an attacker might exploit the issue
+* Step-by-step instructions to reproduce the issue
+* The location of the affected source code (tag/branch/commit or direct URL)
+* Full paths of source file(s) related to the manifestation of the issue
+* Any special configuration required to reproduce the issue
+* Any log files that are related to this issue (if possible)
+* Proof-of-concept or exploit code (if possible)
+
+This information will help us triage your report more quickly.
VERSION | 2
@@ -1 +1 @@
-v0.29.0
+v0.30.0
@@ -270,7 +270,9 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 		return nil, fmt.Errorf("failed to parse x-amz-date field: %w", err)
 	}

-	addr, err := getAddress(submatches["access_key_id"])
+	accessKeyID := submatches["access_key_id"]
+
+	addr, err := getAddress(accessKeyID)
 	if err != nil {
 		return nil, err
 	}

@@ -283,14 +285,22 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	secret := box.Gate.SecretKey
 	service, region := submatches["service"], submatches["region"]

-	signature := signStr(secret, service, region, signatureDateTime, policy)
+	signature := SignStr(secret, service, region, signatureDateTime, policy)
 	reqSignature := MultipartFormValue(r, "x-amz-signature")
 	if signature != reqSignature {
 		return nil, fmt.Errorf("%w: %s != %s", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
 			reqSignature, signature)
 	}

-	return &middleware.Box{AccessBox: box, Attributes: attrs}, nil
+	return &middleware.Box{
+		AccessBox: box,
+		AuthHeaders: &middleware.AuthHeader{
+			AccessKeyID: accessKeyID,
+			Region:      region,
+			SignatureV4: signature,
+		},
+		Attributes: attrs,
+	}, nil
 }

 func cloneRequest(r *http.Request, authHeader *AuthHeader) *http.Request {

@@ -349,7 +359,7 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 	return nil
 }

-func signStr(secret, service, region string, t time.Time, strToSign string) string {
+func SignStr(secret, service, region string, t time.Time, strToSign string) string {
 	creds := deriveKey(secret, service, region, t)
 	signature := hmacSHA256(creds, []byte(strToSign))
 	return hex.EncodeToString(signature)
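Exporting signStr as SignStr lets code outside the auth package (the POST-policy tests further down, for instance) compute a SigV4 signature over a base64-encoded policy. A minimal sketch of a call site, assuming the module import path used throughout this diff and placeholder secret/policy values:

```go
package main

import (
	"fmt"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
)

func main() {
	// Base64-encoded POST policy document (placeholder value).
	policyBase64 := "eyJleHBpcmF0aW9uIjog..."

	// SignStr derives the SigV4 signing key for (date, region, service)
	// and returns the hex-encoded HMAC-SHA256 of the string to sign.
	sign := auth.SignStr("secret-key", "s3", "us-east-1", time.Now().UTC(), policyBase64)
	fmt.Println(sign)
}
```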
api/auth/center_fuzz_test.go | 88 (new file)
@@ -0,0 +1,88 @@
+//go:build gofuzz
+// +build gofuzz
+
+package auth
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	utils "github.com/trailofbits/go-fuzz-utils"
+)
+
+const (
+	fuzzSuccessExitCode = 0
+	fuzzFailExitCode    = -1
+)
+
+func InitFuzzAuthenticate() {
+}
+
+func DoFuzzAuthenticate(input []byte) int {
+	// FUZZER INIT
+	if len(input) < 100 {
+		return fuzzFailExitCode
+	}
+
+	tp, err := utils.NewTypeProvider(input)
+	if err != nil {
+		return fuzzFailExitCode
+	}
+
+	var accessKeyAddr oid.Address
+	err = tp.Fill(accessKeyAddr)
+	if err != nil {
+		return fuzzFailExitCode
+	}
+
+	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
+	secretKey, err := tp.GetString()
+	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
+
+	reqData := RequestData{
+		Method:   "GET",
+		Endpoint: "http://localhost:8084",
+		Bucket:   "my-bucket",
+		Object:   "@obj/name",
+	}
+	presignData := PresignData{
+		Service:  "s3",
+		Region:   "spb",
+		Lifetime: 10 * time.Minute,
+		SignTime: time.Now().UTC(),
+	}
+
+	req, err := PresignRequest(awsCreds, reqData, presignData)
+	if req == nil {
+		return fuzzFailExitCode
+	}
+
+	expBox := &accessbox.Box{
+		Gate: &accessbox.GateData{
+			SecretKey: secretKey,
+		},
+	}
+
+	mock := newTokensFrostfsMock()
+	mock.addBox(accessKeyAddr, expBox)
+
+	c := &Center{
+		cli:     mock,
+		reg:     NewRegexpMatcher(authorizationFieldRegexp),
+		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
+	}
+
+	_, _ = c.Authenticate(req)
+
+	return fuzzSuccessExitCode
+}
+
+func FuzzAuthenticate(f *testing.F) {
+	f.Fuzz(func(t *testing.T, data []byte) {
+		DoFuzzAuthenticate(data)
+	})
+}
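The harness above is built around go-fuzz-utils' TypeProvider, which deterministically carves typed values out of the fuzzer's raw byte input. A standalone sketch of that pattern (it uses only NewTypeProvider, GetString, and GetBool from the library's public API; the seed bytes here are arbitrary):

```go
package main

import (
	"fmt"

	utils "github.com/trailofbits/go-fuzz-utils"
)

func main() {
	// In a real harness these bytes come from the fuzzing engine.
	seed := []byte("a sufficiently long, arbitrary corpus entry ...............")

	tp, err := utils.NewTypeProvider(seed)
	if err != nil {
		panic(err) // too little data to derive values from
	}

	s, _ := tp.GetString() // consumes bytes from the seed, yields a string
	b, _ := tp.GetBool()   // consumes more bytes, yields a bool
	fmt.Println(s, b)
}
```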
@@ -115,7 +115,7 @@ func TestSignature(t *testing.T) {
 		panic(err)
 	}

-	signature := signStr(secret, "s3", "us-east-1", signTime, strToSign)
+	signature := SignStr(secret, "s3", "us-east-1", signTime, strToSign)
 	require.Equal(t, "dfbe886241d9e369cf4b329ca0f15eb27306c97aa1022cc0bb5a914c4ef87634", signature)
 }
@@ -434,7 +434,7 @@ func TestAuthenticate(t *testing.T) {

 func TestHTTPPostAuthenticate(t *testing.T) {
 	const (
-		policyBase64 = "eyAiZXhwaXJhdGlvbiI6ICIyMDA3LTEyLTAxVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJhY2wiOiAicHVibGljLXJlYWQiIH0sCiAgICB7ImJ1Y2tldCI6ICJqb2huc21pdGgiIH0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci9lcmljLyJdLAogIF0KfQ=="
+		policyBase64 = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXQpdfQ=="
 		invalidValue     = "invalid-value"
 		defaultFieldName = "file"
 		service          = "s3"

@@ -492,7 +492,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST valid",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -501,7 +501,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST valid with custom field name",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "files")
 			}(),

@@ -510,7 +510,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST valid with field name with a capital letter",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, "File")
 			}(),

@@ -530,7 +530,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST invalid signature date time",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, invalidValue, sign, defaultFieldName)
 			}(),

@@ -539,7 +539,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 		{
 			name: "HTTP POST invalid creds",
 			request: func() *http.Request {
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, invalidValue, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -550,7 +550,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST missing policy",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, "", creds, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -560,7 +560,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST invalid accessKeyId",
 			request: func() *http.Request {
 				creds := getCredsStr(invalidValue, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -570,7 +570,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST invalid accessKeyId - a non-existent box",
 			request: func() *http.Request {
 				creds := getCredsStr(invalidAccessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, policyBase64)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, policyBase64)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -580,7 +580,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			name: "HTTP POST invalid signature",
 			request: func() *http.Request {
 				creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
-				sign := signStr(secret.SecretKey, service, region, timeToSign, invalidValue)
+				sign := SignStr(secret.SecretKey, service, region, timeToSign, invalidValue)

 				return getRequestWithMultipartForm(t, policyBase64, creds, timeToSignStr, sign, defaultFieldName)
 			}(),

@@ -602,6 +602,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 			} else {
 				require.NoError(t, err)
 				require.Equal(t, secret.SecretKey, box.AccessBox.Gate.SecretKey)
+				require.Equal(t, accessKeyID, box.AuthHeaders.AccessKeyID)
 			}
 		})
 	}

@@ -616,7 +617,7 @@ func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldN
 	writer := multipart.NewWriter(body)
 	defer writer.Close()

-	err := writer.WriteField("Policy", policy)
+	err := writer.WriteField("policy", policy)
 	require.NoError(t, err)
 	err = writer.WriteField(AmzCredential, creds)
 	require.NoError(t, err)
api/cache/system.go | 20 (vendored)
@@ -88,6 +88,22 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
 	return result
 }

+func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
+	entry, err := o.cache.Get(key)
+	if err != nil {
+		return nil
+	}
+
+	result, ok := entry.(*data.LifecycleConfiguration)
+	if !ok {
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
+			zap.String("expected", fmt.Sprintf("%T", result)))
+		return nil
+	}
+
+	return result
+}
+
 func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
 	entry, err := o.cache.Get(key)
 	if err != nil {

@@ -133,6 +149,10 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
 	return o.cache.Set(key, obj)
 }

+func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
+	return o.cache.Set(key, obj)
+}
+
 func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
 	return o.cache.Set(key, settings)
 }
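The two new methods follow the cache's existing Get/Put pattern: Put stores the parsed configuration under a bucket-scoped key, Get returns nil on a miss or an entry-type mismatch. A sketch of the round trip, assuming an already-constructed *SystemCache (its constructor is not part of this diff) and the key helper added to the data package below:

```go
package example

import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

// roundTripLifecycle is a hypothetical helper, not gateway code.
func roundTripLifecycle(sysCache *cache.SystemCache, bkt *data.BucketInfo, cfg *data.LifecycleConfiguration) *data.LifecycleConfiguration {
	// The key is CID-scoped, mirroring the CORSObjectName change below.
	key := bkt.LifecycleConfigurationObjectName()

	if err := sysCache.PutLifecycleConfiguration(key, cfg); err != nil {
		return nil // a failed Set only degrades to a later cache miss
	}
	return sysCache.GetLifecycleConfiguration(key) // nil on miss or wrong entry type
}
```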
@@ -14,6 +14,7 @@ import (
 const (
 	bktSettingsObject          = ".s3-settings"
 	bktCORSConfigurationObject = ".s3-cors"
+	bktLifecycleConfigurationObject = ".s3-lifecycle"

 	VersioningUnversioned = "Unversioned"
 	VersioningEnabled     = "Enabled"

@@ -81,13 +82,28 @@ type (
 		VersionID             string
 		NoErrorOnDeleteMarker bool
 	}
+
+	// CreatedObjectInfo stores created object info.
+	CreatedObjectInfo struct {
+		ID            oid.ID
+		Size          uint64
+		HashSum       []byte
+		MD5Sum        []byte
+		CreationEpoch uint64
+	}
 )

 // SettingsObjectName is a system name for a bucket settings file.
 func (b *BucketInfo) SettingsObjectName() string { return bktSettingsObject }

 // CORSObjectName returns a system name for a bucket CORS configuration file.
-func (b *BucketInfo) CORSObjectName() string { return bktCORSConfigurationObject }
+func (b *BucketInfo) CORSObjectName() string {
+	return b.CID.EncodeToString() + bktCORSConfigurationObject
+}
+
+func (b *BucketInfo) LifecycleConfigurationObjectName() string {
+	return b.CID.EncodeToString() + bktLifecycleConfigurationObject
+}

 // VersionID returns object version from ObjectInfo.
 func (o *ObjectInfo) VersionID() string { return o.ID.EncodeToString() }
api/data/lifecycle.go | 54 (new file)
@@ -0,0 +1,54 @@
+package data
+
+import "encoding/xml"
+
+const (
+	LifecycleStatusEnabled  = "Enabled"
+	LifecycleStatusDisabled = "Disabled"
+)
+
+type (
+	LifecycleConfiguration struct {
+		XMLName xml.Name        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
+		Rules   []LifecycleRule `xml:"Rule"`
+	}
+
+	LifecycleRule struct {
+		Status                         string                          `xml:"Status,omitempty"`
+		AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
+		Expiration                     *LifecycleExpiration            `xml:"Expiration,omitempty"`
+		Filter                         *LifecycleRuleFilter            `xml:"Filter,omitempty"`
+		ID                             string                          `xml:"ID,omitempty"`
+		NonCurrentVersionExpiration    *NonCurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
+	}
+
+	AbortIncompleteMultipartUpload struct {
+		DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
+	}
+
+	LifecycleExpiration struct {
+		Date                      string `xml:"Date,omitempty"`
+		Days                      *int   `xml:"Days,omitempty"`
+		ExpiredObjectDeleteMarker *bool  `xml:"ExpiredObjectDeleteMarker,omitempty"`
+	}
+
+	LifecycleRuleFilter struct {
+		And                   *LifecycleRuleAndOperator `xml:"And,omitempty"`
+		ObjectSizeGreaterThan *uint64                   `xml:"ObjectSizeGreaterThan,omitempty"`
+		ObjectSizeLessThan    *uint64                   `xml:"ObjectSizeLessThan,omitempty"`
+		Prefix                string                    `xml:"Prefix,omitempty"`
+		Tag                   *Tag                      `xml:"Tag,omitempty"`
+	}
+
+	LifecycleRuleAndOperator struct {
+		ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
+		ObjectSizeLessThan    *uint64 `xml:"ObjectSizeLessThan,omitempty"`
+		Prefix                string  `xml:"Prefix,omitempty"`
+		Tags                  []Tag   `xml:"Tag"`
+	}
+
+	NonCurrentVersionExpiration struct {
+		NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
+		NonCurrentDays          *int `xml:"NoncurrentDays,omitempty"`
+	}
+)
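These structures mirror the S3 lifecycle schema, so a standard encoding/xml round trip is enough to parse a lifecycle configuration body. A sketch, assuming the module import path used throughout this diff:

```go
package main

import (
	"encoding/xml"
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

func main() {
	// An example S3 lifecycle document matching the structures above.
	raw := []byte(`<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-tmp</ID>
    <Status>Enabled</Status>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Expiration><Days>30</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`)

	var cfg data.LifecycleConfiguration
	if err := xml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Rules[0].ID, *cfg.Rules[0].Expiration.Days) // expire-tmp 30
}
```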
@@ -72,6 +72,7 @@ type BaseNodeVersion struct {
 	Created        *time.Time
 	Owner          *user.ID
 	IsDeleteMarker bool
+	CreationEpoch  uint64
 }

 func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {

@@ -110,6 +111,7 @@ type MultipartInfo struct {
 	Meta          map[string]string
 	CopiesNumbers []uint32
 	Finished      bool
+	CreationEpoch uint64
 }

 // PartInfo is upload information about part.

@@ -124,6 +126,14 @@ type PartInfo struct {
 	Created time.Time `json:"created"`
 }

+type PartInfoExtended struct {
+	PartInfo
+
+	// Timestamp is used to find the latest version of part info in case of tree split
+	// when there are multiple nodes for the same part.
+	Timestamp uint64
+}
+
 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
 	// ETag value contains SHA256 checksum which is used while getting object parts attributes.
@@ -187,6 +187,9 @@ const (
 	ErrInvalidRequestLargeCopy
 	ErrInvalidStorageClass
 	VersionIDMarkerWithoutKeyMarker
+	ErrInvalidRangeLength
+	ErrRangeOutOfBounds
+	ErrMissingContentRange

 	ErrMalformedJSON
 	ErrInsecureClientRequest

@@ -1739,6 +1742,24 @@ var errorCodes = errorCodeMap{
 		Description:    "Part number must be an integer between 1 and 10000, inclusive",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidRangeLength: {
+		ErrCode:        ErrInvalidRangeLength,
+		Code:           "InvalidRange",
+		Description:    "Provided range length must be equal to content length",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
+	ErrRangeOutOfBounds: {
+		ErrCode:        ErrRangeOutOfBounds,
+		Code:           "InvalidRange",
+		Description:    "Provided range is outside of object bounds",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
+	ErrMissingContentRange: {
+		ErrCode:        ErrMissingContentRange,
+		Code:           "MissingContentRange",
+		Description:    "Content-Range header is mandatory for this type of request",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	// Add your error structure here.
 }
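The three new codes back range validation for the patch-object method added in this changeset; both range errors deliberately reuse the S3 "InvalidRange" code with HTTP 416. A hypothetical validation helper (checkPatchRange is not from the diff) showing how they would surface through the existing GetAPIError lookup:

```go
package example

import (
	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
)

// checkPatchRange is a hypothetical helper, not gateway code: it maps the
// three new error codes onto range checks for a patch-style request.
func checkPatchRange(start, length, objSize, contentLen uint64, haveContentRange bool) error {
	if !haveContentRange {
		return apiErrors.GetAPIError(apiErrors.ErrMissingContentRange) // 400
	}
	if start+length > objSize {
		return apiErrors.GetAPIError(apiErrors.ErrRangeOutOfBounds) // 416
	}
	if length != contentLen {
		return apiErrors.GetAPIError(apiErrors.ErrInvalidRangeLength) // 416
	}
	return nil
}
```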
@@ -81,10 +81,17 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
 	p := &layer.DeleteObjectParams{
 		BktInfo:  bktInfo,
 		Objects:  versionedObject,
 		Settings: bktSettings,
+		NetworkInfo: networkInfo,
 	}
 	deletedObjects := h.obj.DeleteObjects(ctx, p)
 	deletedObject := deletedObjects[0]

@@ -181,10 +188,17 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
 		return
 	}

+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
 	p := &layer.DeleteObjectParams{
 		BktInfo:  bktInfo,
 		Objects:  toRemove,
 		Settings: bktSettings,
+		NetworkInfo: networkInfo,
 		IsMultiple: true,
 	}
 	deletedObjects := h.obj.DeleteObjects(ctx, p)

@@ -237,9 +251,18 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
 		sessionToken = boxData.Gate.SessionTokenForDelete()
 	}

+	skipObjCheck := false
+	if value, ok := r.Header[api.AmzForceBucketDelete]; ok {
+		s := value[0]
+		if s == "true" {
+			skipObjCheck = true
+		}
+	}
+
 	if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
 		BktInfo:      bktInfo,
 		SessionToken: sessionToken,
+		SkipCheck:    skipObjCheck,
 	}); err != nil {
 		h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
 		return
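On the wire, the force path is driven by a single request header (the api.AmzForceBucketDelete constant; its literal value is not shown in this diff, so "X-Amz-Force-Delete-Bucket" is assumed below). A hypothetical, unsigned client sketch; real requests would also need SigV4 signing:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Assumes a gateway listening on localhost:8084 and an assumed header name.
	req, err := http.NewRequest(http.MethodDelete, "http://localhost:8084/bucket-for-removal", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Amz-Force-Delete-Bucket", "true") // skips the bucket-empty check

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 204 No Content on success, 409 Conflict without the header
}
```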
@@ -85,6 +85,24 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {
 	deleteBucket(t, hc, bktName, http.StatusNoContent)
 }

+func TestForceDeleteBucket(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-for-removal", "object-to-delete"
+	bktInfo := createTestBucket(hc, bktName)
+
+	putObject(hc, bktName, objName)
+
+	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
+	require.NoError(t, err)
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(nodeVersion.OID)
+
+	deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
+	deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
+}
+
 func TestDeleteMultipleObjectCheckUniqueness(t *testing.T) {
 	hc := prepareHandlerContext(t)

@@ -471,6 +489,16 @@ func putBucketVersioning(t *testing.T, tc *handlerContext, bktName string, enabl
 	assertStatus(t, w, http.StatusOK)
 }

+func getBucketVersioning(hc *handlerContext, bktName string) *VersioningConfiguration {
+	w, r := prepareTestRequest(hc, bktName, "", nil)
+	hc.Handler().GetBucketVersioningHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
+
+	res := &VersioningConfiguration{}
+	parseTestResponse(hc.t, w, res)
+	return res
+}
+
 func deleteObject(t *testing.T, tc *handlerContext, bktName, objName, version string) (string, bool) {
 	query := make(url.Values)
 	query.Add(api.QueryVersionID, version)

@@ -507,6 +535,13 @@ func deleteObjectsBase(hc *handlerContext, bktName string, objVersions [][2]stri
 	return w
 }

+func deleteBucketForce(t *testing.T, tc *handlerContext, bktName string, code int, value string) {
+	w, r := prepareTestRequest(tc, bktName, "", nil)
+	r.Header.Set(api.AmzForceBucketDelete, value)
+	tc.Handler().DeleteBucketHandler(w, r)
+	assertStatus(t, w, code)
+}
+
 func deleteBucket(t *testing.T, tc *handlerContext, bktName string, code int) {
 	w, r := prepareTestRequest(tc, bktName, "", nil)
 	tc.Handler().DeleteBucketHandler(w, r)
@@ -37,7 +37,7 @@ func TestSimpleGetEncrypted(t *testing.T) {

 	objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
 	require.NoError(t, err)
-	obj, err := tc.MockedPool().ReadObject(tc.Context(), layer.PrmObjectRead{Container: bktInfo.CID, Object: objInfo.ID})
+	obj, err := tc.MockedPool().GetObject(tc.Context(), layer.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
 	require.NoError(t, err)
 	encryptedContent, err := io.ReadAll(obj.Payload)
 	require.NoError(t, err)
@@ -288,6 +288,21 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID
 	return w
 }

+func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
+	w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
+	assertStatus(hc.t, w, http.StatusNoContent)
+}
+
+func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
+	query := make(url.Values)
+	query.Set(uploadIDQuery, uploadID)
+
+	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
+	hc.Handler().AbortMultipartUploadHandler(w, r)
+
+	return w
+}
+
 func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
 	return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
 }
@@ -228,6 +228,14 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
 	return content
 }

+func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte {
+	w := getObjectBaseResponse(tc, bktName, objName, version)
+	assertStatus(tc.t, w, http.StatusOK)
+	content, err := io.ReadAll(w.Result().Body)
+	require.NoError(tc.t, err)
+	return content
+}
+
 func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
 	w := getObjectBaseResponse(hc, bktName, objName, version)
 	assertS3Error(hc.t, w, errors.GetAPIError(code))
api/handler/handler_fuzz_test.go | 998 (new file)
@ -0,0 +1,998 @@
|
||||||
|
//go:build gofuzz
|
||||||
|
// +build gofuzz
|
||||||
|
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/xml"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
tt "testing" // read https://github.com/AdamKorcz/go-118-fuzz-build?tab=readme-ov-file#workflow
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
|
||||||
|
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
|
||||||
|
utils "github.com/trailofbits/go-fuzz-utils"
|
||||||
|
"go.uber.org/zap/zaptest"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
fuzzBktName string
|
||||||
|
fuzzBox *accessbox.Box
|
||||||
|
fuzzHc *handlerContextBase
|
||||||
|
fuzzt *tt.T
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
fuzzSuccessExitCode = 0
|
||||||
|
fuzzFailExitCode = -1
|
||||||
|
)
|
||||||
|
|
||||||
|
func createTestBucketAndInitContext() {
|
||||||
|
fuzzt = new(tt.T)
|
||||||
|
|
||||||
|
log := zaptest.NewLogger(fuzzt)
|
||||||
|
var err error
|
||||||
|
fuzzHc, err = prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fuzzBktName = "bucket"
|
||||||
|
fuzzBox, _ = createAccessBox(fuzzt)
|
||||||
|
|
||||||
|
w := httptest.NewRecorder()
|
||||||
|
r := httptest.NewRequest(http.MethodPut, defaultURL, nil)
|
||||||
|
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
|
||||||
|
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
|
||||||
|
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
|
||||||
|
|
||||||
|
fuzzHc.Handler().CreateBucketHandler(w, r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareStrings(tp *utils.TypeProvider, count int) ([]string, error) {
|
||||||
|
array := make([]string, count)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
err = tp.Reset()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
array[i], err = tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return array, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func addMD5Header(tp *utils.TypeProvider, r *http.Request, rawBody []byte) error {
|
||||||
|
if len(rawBody) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
rand, err := tp.GetBool()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if rand == true {
|
||||||
|
var dst []byte
|
||||||
|
base64.StdEncoding.Encode(dst, rawBody)
|
||||||
|
hash := md5.Sum(dst)
|
||||||
|
r.Header.Set("Content-Md5", hex.EncodeToString(hash[:]))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateParams(tp *utils.TypeProvider, input string, params []string) (string, error) {
|
||||||
|
input += "?"
|
||||||
|
|
||||||
|
count, err := tp.GetInt()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
count = count % len(params)
|
||||||
|
if count < 0 {
|
||||||
|
count += len(params)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
position, err := tp.GetInt()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
position = position % len(params)
|
||||||
|
if position < 0 {
|
||||||
|
position += len(params)
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
input += params[position] + "=" + v + "&"
|
||||||
|
}
|
||||||
|
|
||||||
|
return input, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateHeaders(tp *utils.TypeProvider, r *http.Request, params []string) error {
|
||||||
|
count, err := tp.GetInt()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
count = count % len(params)
|
||||||
|
if count < 0 {
|
||||||
|
count += len(params)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
position, err := tp.GetInt()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
position = position % len(params)
|
||||||
|
if position < 0 {
|
||||||
|
position += len(params)
|
||||||
|
}
|
||||||
|
|
||||||
|
v, err := tp.GetString()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Header.Set(params[position], v)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func InitFuzzCreateBucketHandler() {
|
||||||
|
fuzzt = new(tt.T)
|
||||||
|
|
||||||
|
log := zaptest.NewLogger(fuzzt)
|
||||||
|
var err error
|
||||||
|
fuzzHc, err = prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fuzzBox, _ = createAccessBox(fuzzt)
|
||||||
|
}
|
||||||
|
|
||||||
|
func DoFuzzCreateBucketHandler(input []byte) int {
|
||||||
|
// FUZZER INIT
|
||||||
|
if len(input) < 100 {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
tp, err := utils.NewTypeProvider(input)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
strings, err := prepareStrings(tp, 4)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
bktName := strings[0]
|
||||||
|
body := strings[1]
|
||||||
|
|
||||||
|
bodyXml, err := xml.Marshal(body)
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
w := httptest.NewRecorder()
|
||||||
|
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(bodyXml))
|
||||||
|
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: ""}, "")
|
||||||
|
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
|
||||||
|
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
|
||||||
|
|
||||||
|
err = generateHeaders(tp, r, []string{"x-amz-acl", "x-amz-bucket-object-lock-enabled", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write", "x-amz-grant-write-acp", "x-amz-object-ownership"})
|
||||||
|
if err != nil {
|
||||||
|
return fuzzFailExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
fuzzHc.Handler().CreateBucketHandler(w, r)
|
||||||
|
|
||||||
|
return fuzzSuccessExitCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func FuzzCreateBucketHandler(f *testing.F) {
|
||||||
|
f.Fuzz(func(t *testing.T, data []byte) {
|
||||||
|
DoFuzzCreateBucketHandler(data)
|
||||||
|
})
|
||||||
|
}
func InitFuzzPutBucketCorsHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutBucketCorsHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var cors data.CORSConfiguration
	err = tp.Fill(&cors)
	if err != nil {
		return fuzzFailExitCode
	}

	bodyXml, err := xml.Marshal(cors)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPut, defaultURL+"?cors", bytes.NewReader(bodyXml))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutBucketCorsHandler(w, r)

	return fuzzSuccessExitCode
}

func FuzzPutBucketCorsHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutBucketCorsHandler(data)
	})
}

func InitFuzzPutBucketPolicyHandler() {
	createTestBucketAndInitContext()
}

func FuzzPutBucketPolicyHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutBucketPolicyHandler(data)
	})
}

func DoFuzzPutBucketPolicyHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var policy engineiam.Policy
	err = tp.Fill(&policy)
	if err != nil {
		return fuzzFailExitCode
	}

	bodyXml, err := xml.Marshal(policy)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPut, defaultURL+"?policy", bytes.NewReader(bodyXml))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-confirm-remove-self-bucket-access"})
	if err != nil {
		return fuzzFailExitCode
	}

	err = addMD5Header(tp, r, bodyXml)
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutBucketPolicyHandler(w, r)

	return fuzzSuccessExitCode
}
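Note that the policy body above is produced with xml.Marshal even though S3 bucket policies travel as JSON; whether the handler accepts the XML form depends on its decoder. A hedged sketch of the JSON alternative, assuming engineiam.Policy carries the usual json struct tags:

	// Assumed alternative encoding; would require "encoding/json" among the imports.
	bodyJSON, err := json.Marshal(policy)
	if err != nil {
		return fuzzFailExitCode
	}
	r := httptest.NewRequest(http.MethodPut, defaultURL+"?policy", bytes.NewReader(bodyJSON))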
func InitFuzzDeleteMultipleObjectsHandler() {
	createTestBucketAndInitContext()
}

func FuzzDeleteMultipleObjectsHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDeleteMultipleObjectsHandler(data)
	})
}

func DoFuzzDeleteMultipleObjectsHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var body DeleteObjectsRequest
	err = tp.Fill(&body)
	if err != nil {
		return fuzzFailExitCode
	}

	bodyXml, err := xml.Marshal(body)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPost, defaultURL+"?delete", bytes.NewReader(bodyXml))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention", "x-amz-mfa"})
	if err != nil {
		return fuzzFailExitCode
	}

	err = addMD5Header(tp, r, bodyXml)
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().DeleteMultipleObjectsHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzPostObject() {
	createTestBucketAndInitContext()
}

func FuzzPostObject(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPostObject(data)
	})
}

func postObject(tp *utils.TypeProvider) ([]byte, string, error) {
	strings, err := prepareStrings(tp, 2)
	if err != nil {
		return nil, "", err
	}

	bodyXml, err := xml.Marshal(strings[0])
	if err != nil {
		return nil, "", err
	}
	objName := strings[1]

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPost, defaultURL, bytes.NewReader(bodyXml))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"X-Amz-Grant-Read", "X-Amz-Grant-Full-Control", "X-Amz-Grant-Write", "X-Amz-Acl", "x-amz-expected-bucket-owner"})
	if err != nil {
		return nil, "", err
	}

	var file multipart.Form
	err = tp.Fill(&file)
	if err != nil {
		return nil, "", err
	}

	r.MultipartForm = &file

	fuzzHc.Handler().PostObject(w, r)

	return bodyXml, objName, nil
}
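postObject injects the fuzzed form by assigning r.MultipartForm directly, which skips the handler's own body parsing. For comparison, a minimal sketch that encodes a real multipart body so the standard parser runs; the form field names ("key", "file") are illustrative assumptions, not a documented contract of PostObject:

func buildMultipartRequest(objName string, payload []byte) (*http.Request, error) {
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	if err := mw.WriteField("key", objName); err != nil { // assumed field name
		return nil, err
	}
	fw, err := mw.CreateFormFile("file", objName) // assumed field name
	if err != nil {
		return nil, err
	}
	if _, err = fw.Write(payload); err != nil {
		return nil, err
	}
	if err = mw.Close(); err != nil {
		return nil, err
	}
	r := httptest.NewRequest(http.MethodPost, defaultURL, &buf)
	r.Header.Set("Content-Type", mw.FormDataContentType())
	return r, nil
}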
func DoFuzzPostObject(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, _, err = postObject(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	return fuzzSuccessExitCode
}

func InitFuzzDeleteBucketHandler() {
	createTestBucketAndInitContext()
}

func FuzzDeleteBucketHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDeleteBucketHandler(data)
	})
}

func DoFuzzDeleteBucketHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodDelete, defaultURL, nil)
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().DeleteBucketHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzDeleteBucketCorsHandler() {
	createTestBucketAndInitContext()
}

func FuzzDeleteBucketCorsHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDeleteBucketCorsHandler(data)
	})
}

func DoFuzzDeleteBucketCorsHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodDelete, defaultURL+"?cors", nil)
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().DeleteBucketCorsHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzDeleteBucketPolicyHandler() {
	createTestBucketAndInitContext()
}

func FuzzDeleteBucketPolicyHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDeleteBucketPolicyHandler(data)
	})
}

func DoFuzzDeleteBucketPolicyHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodDelete, defaultURL+"?policy", nil)
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().DeleteBucketPolicyHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzCopyObjectHandler() {
	createTestBucketAndInitContext()
}

func FuzzCopyObjectHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzCopyObjectHandler(data)
	})
}

func DoFuzzCopyObjectHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	var r *http.Request

	key, err := tp.GetString()
	if err != nil {
		return fuzzFailExitCode
	}

	params, err := generateParams(tp, key, []string{"versionId"})
	if err != nil {
		return fuzzFailExitCode
	}

	r = httptest.NewRequest(http.MethodPut, defaultURL+params, nil)
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-acl", "x-amz-checksum-algorithm", "x-amz-copy-source", "x-amz-copy-source-if-match", "x-amz-copy-source-if-modified-since", "x-amz-copy-source-if-none-match", "x-amz-copy-source-if-unmodified-since", "x-amz-copy-source-server-side-encryption-customer-algorithm", "x-amz-copy-source-server-side-encryption-customer-key", "x-amz-copy-source-server-side-encryption-customer-key-MD5", "x-amz-expected-bucket-owner", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write-acp", "x-amz-metadata-directive", "x-amz-object-lock-legal-hold", "x-amz-object-lock-mode", "x-amz-object-lock-retain-until-date", "x-amz-request-payer", "x-amz-server-side-encryption", "x-amz-server-side-encryption-aws-kms-key-id", "x-amz-server-side-encryption-bucket-key-enabled", "x-amz-server-side-encryption-context", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "x-amz-source-expected-bucket-owner", "x-amz-storage-class", "x-amz-tagging", "x-amz-tagging-directive", "x-amz-website-redirect-location"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().CopyObjectHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzDeleteObjectHandler() {
	createTestBucketAndInitContext()
}

func FuzzDeleteObjectHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzDeleteObjectHandler(data)
	})
}

func DoFuzzDeleteObjectHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, objName, err := postObject(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	var r *http.Request

	params, err := generateParams(tp, objName, []string{"versionId"})
	if err != nil {
		return fuzzFailExitCode
	}

	r = httptest.NewRequest(http.MethodDelete, defaultURL+params, nil)
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention", "x-amz-mfa"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().DeleteObjectHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzGetObjectHandler() {
	createTestBucketAndInitContext()
}

func FuzzGetObjectHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzGetObjectHandler(data)
	})
}

func DoFuzzGetObjectHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, objName, err := postObject(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	params, err := generateParams(tp, objName, []string{"versionId", "partNumber", "Range", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding"})
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()

	r := httptest.NewRequest(http.MethodGet, defaultURL+params, nil)
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "If-Match", "If-None-Match", "If-Modified-Since", "If-Unmodified-Since", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "Range"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().GetObjectHandler(w, r)

	return fuzzSuccessExitCode
}

func InitFuzzPutObjectHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutObjectHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	objName, err := tp.GetString()
	if err != nil {
		return fuzzFailExitCode
	}

	body, err := tp.GetBytes()
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()

	r := httptest.NewRequest(http.MethodPut, defaultURL+objName, bytes.NewReader(body))
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "X-Amz-Grant-Read", "X-Amz-Grant-Full-Control", "X-Amz-Grant-Write", "X-Amz-Acl", "X-Amz-Tagging", "Content-Type", "Cache-Control", "Expires", "Content-Language", "Content-Encoding", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "X-Amz-Content-Sha256", "X-Amz-Object-Lock-Legal-Hold", "X-Amz-Object-Lock-Mode", "X-Amz-Object-Lock-Retain-Until-Date", "X-Amz-Bypass-Governance-Retention", "X-Amz-Meta-*"})
	if err != nil {
		return fuzzFailExitCode
	}

	err = addMD5Header(tp, r, body)
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutObjectHandler(w, r)

	return fuzzSuccessExitCode
}
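addMD5Header is used throughout these harnesses but defined elsewhere. A sketch of what such a helper plausibly does, following the S3 convention of a base64-encoded MD5 digest in the Content-Md5 header; the real helper may also consult the TypeProvider to decide whether to set or corrupt the header:

// Requires "crypto/md5" and "encoding/base64" among the imports; illustration only.
func setContentMD5(r *http.Request, body []byte) {
	sum := md5.Sum(body)
	r.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))
}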
func FuzzPutObjectHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutObjectHandler(data)
	})
}

func InitFuzzPutObjectLegalHoldHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutObjectLegalHoldHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, objName, err := postObject(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	var hold data.LegalHold
	err = tp.Fill(&hold)
	if err != nil {
		return fuzzFailExitCode
	}

	rawBody, err := xml.Marshal(hold)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()

	r := httptest.NewRequest(http.MethodPut, defaultURL+objName+"?legal-hold", bytes.NewReader(rawBody))
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = addMD5Header(tp, r, rawBody)
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutObjectLegalHoldHandler(w, r)

	return fuzzSuccessExitCode
}

func FuzzPutObjectLegalHoldHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutObjectLegalHoldHandler(data)
	})
}

func InitFuzzPutBucketObjectLockConfigHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutBucketObjectLockConfigHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var hold data.ObjectLockConfiguration
	err = tp.Fill(&hold)
	if err != nil {
		return fuzzFailExitCode
	}

	rawBody, err := xml.Marshal(&hold)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()

	r := httptest.NewRequest(http.MethodPut, defaultURL+"?object-lock", bytes.NewReader(rawBody))
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = addMD5Header(tp, r, rawBody)
	if err != nil {
		return fuzzFailExitCode
	}

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bucket-object-lock-token"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutBucketObjectLockConfigHandler(w, r)

	return fuzzSuccessExitCode
}

func FuzzPutBucketObjectLockConfigHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutBucketObjectLockConfigHandler(data)
	})
}

func InitFuzzPutObjectRetentionHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutObjectRetentionHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	_, objName, err := postObject(tp)
	if err != nil {
		return fuzzFailExitCode
	}

	var retention data.Retention
	err = tp.Fill(&retention)
	if err != nil {
		return fuzzFailExitCode
	}

	rawBody, err := xml.Marshal(retention)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()

	r := httptest.NewRequest(http.MethodPut, defaultURL+objName+"?retention", bytes.NewReader(rawBody))
	if r == nil {
		return fuzzFailExitCode
	}

	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = addMD5Header(tp, r, rawBody)
	if err != nil {
		return fuzzFailExitCode
	}

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutObjectRetentionHandler(w, r)

	return fuzzSuccessExitCode
}

func FuzzPutObjectRetentionHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutObjectRetentionHandler(data)
	})
}

func InitFuzzPutBucketAclHandler() {
	createTestBucketAndInitContext()
}

func DoFuzzPutBucketAclHandler(input []byte) int {
	// FUZZER INIT
	if len(input) < 100 {
		return fuzzFailExitCode
	}

	tp, err := utils.NewTypeProvider(input)
	if err != nil {
		return fuzzFailExitCode
	}

	var policy AccessControlPolicy
	err = tp.Fill(&policy)
	if err != nil {
		return fuzzFailExitCode
	}

	rawBody, err := xml.Marshal(policy)
	if err != nil {
		return fuzzFailExitCode
	}

	w := httptest.NewRecorder()
	r := httptest.NewRequest(http.MethodPut, defaultURL+"?acl", bytes.NewReader(rawBody))
	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
	r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
	r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))

	err = addMD5Header(tp, r, rawBody)
	if err != nil {
		return fuzzFailExitCode
	}

	err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-acl", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write", "x-amz-grant-write-acp"})
	if err != nil {
		return fuzzFailExitCode
	}

	fuzzHc.Handler().PutBucketACLHandler(w, r)

	return fuzzSuccessExitCode
}

func FuzzPutBucketAclHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, data []byte) {
		DoFuzzPutBucketAclHandler(data)
	})
}
@@ -37,8 +37,12 @@ import (
 )

 type handlerContext struct {
-	owner user.ID
+	*handlerContextBase
 	t *testing.T
+}
+
+type handlerContextBase struct {
+	owner user.ID
 	h     *handler
 	tp    *layer.TestFrostFS
 	tree  *tree.Tree

@@ -50,19 +54,19 @@ type handlerContext struct {
 	cache *layer.Cache
 }

-func (hc *handlerContext) Handler() *handler {
+func (hc *handlerContextBase) Handler() *handler {
 	return hc.h
 }

-func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
+func (hc *handlerContextBase) MockedPool() *layer.TestFrostFS {
 	return hc.tp
 }

-func (hc *handlerContext) Layer() *layer.Layer {
+func (hc *handlerContextBase) Layer() *layer.Layer {
 	return hc.h.obj
 }

-func (hc *handlerContext) Context() context.Context {
+func (hc *handlerContextBase) Context() context.Context {
 	return hc.context
 }

@@ -136,22 +140,34 @@ func (c *configMock) RetryStrategy() RetryStrategy {
 }

 func prepareHandlerContext(t *testing.T) *handlerContext {
-	return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
+	hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
+	require.NoError(t, err)
+	return &handlerContext{
+		handlerContextBase: hc,
+		t:                  t,
+	}
 }

 func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
-	return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
+	hc, err := prepareHandlerContextBase(getMinCacheConfig(zap.NewExample()))
+	require.NoError(t, err)
+	return &handlerContext{
+		handlerContextBase: hc,
+		t:                  t,
+	}
 }

-func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
+func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBase, error) {
 	key, err := keys.NewPrivateKey()
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

-	l := zap.NewExample()
+	log := zap.NewExample()
 	tp := layer.NewTestFrostFS(key)

 	testResolver := &resolver.Resolver{Name: "test_resolver"}
-	testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
+	testResolver.SetResolveFunc(func(_ context.Context, _, name string) (cid.ID, error) {
 		return tp.ContainerID(name)
 	})

@@ -159,7 +175,9 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
 	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

 	memCli, err := tree.NewTreeServiceClientMemory()
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

 	treeMock := tree.NewTree(memCli, zap.NewExample())

@@ -176,32 +194,38 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
 	var pp netmap.PlacementPolicy
 	err = pp.DecodeString("REP 1")
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

 	cfg := &configMock{
 		defaultPolicy: pp,
 	}
 	h := &handler{
-		log: l,
-		obj: layer.NewLayer(l, tp, layerCfg),
+		log: log,
+		obj: layer.NewLayer(log, tp, layerCfg),
 		cfg: cfg,
 		ape: newAPEMock(),
 		frostfsid: newFrostfsIDMock(),
 	}

-	return &handlerContext{
+	accessBox, err := newTestAccessBox(key)
+	if err != nil {
+		return nil, err
+	}
+
+	return &handlerContextBase{
 		owner: owner,
-		t: t,
 		h: h,
 		tp: tp,
 		tree: treeMock,
-		context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: newTestAccessBox(t, key)}),
+		context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: accessBox}),
 		config: cfg,
 		layerFeatures: features,
 		treeMock: memCli,
 		cache: layerCfg.Cache,
-	}
+	}, nil
 }

 func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
@@ -119,21 +119,25 @@ func TestIsAvailableToResolve(t *testing.T) {
 	}
 }

-func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
+func newTestAccessBox(key *keys.PrivateKey) (*accessbox.Box, error) {
 	var err error
 	if key == nil {
 		key, err = keys.NewPrivateKey()
-		require.NoError(t, err)
+		if err != nil {
+			return nil, err
+		}
 	}

 	var btoken bearer.Token
 	btoken.SetImpersonate(true)
 	err = btoken.Sign(key.PrivateKey)
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

 	return &accessbox.Box{
 		Gate: &accessbox.GateData{
 			BearerToken: &btoken,
 		},
-	}
+	}, nil
 }
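Dropping *testing.T from these constructors is what lets the fuzz harnesses reuse them. A minimal sketch of how createTestBucketAndInitContext could wire the package-level fuzzHc and fuzzBox used by the fuzz file, assuming those globals have the types shown; the real implementation is not part of this diff:

func initFuzzContext() error {
	base, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
	if err != nil {
		return err
	}
	fuzzHc = base // assumed: var fuzzHc *handlerContextBase

	box, err := newTestAccessBox(nil)
	if err != nil {
		return err
	}
	fuzzBox = box // assumed: var fuzzBox *accessbox.Box
	return nil
}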
api/handler/lifecycle.go (new file, 235 lines)
@@ -0,0 +1,235 @@

package handler

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

const (
	maxRules                   = 1000
	maxRuleIDLen               = 255
	maxNewerNoncurrentVersions = 100
)

func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	cfg, err := h.obj.GetBucketLifecycleConfiguration(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket lifecycle configuration", reqInfo, err)
		return
	}

	if err = middleware.EncodeToResponse(w, cfg); err != nil {
		h.logAndSendError(w, "could not encode GetBucketLifecycle response", reqInfo, err)
		return
	}
}

func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	var buf bytes.Buffer

	tee := io.TeeReader(r.Body, &buf)
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	// Content-Md5 is required and should be set
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
	if _, ok := r.Header[api.ContentMD5]; !ok {
		h.logAndSendError(w, "missing Content-MD5", reqInfo, apiErr.GetAPIError(apiErr.ErrMissingContentMD5))
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	cfg := new(data.LifecycleConfiguration)
	if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
		h.logAndSendError(w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error()))
		return
	}

	if err = checkLifecycleConfiguration(cfg); err != nil {
		h.logAndSendError(w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error()))
		return
	}

	params := &layer.PutBucketLifecycleParams{
		BktInfo:         bktInfo,
		LifecycleCfg:    cfg,
		LifecycleReader: &buf,
		MD5Hash:         r.Header.Get(api.ContentMD5),
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
		h.logAndSendError(w, "could not put bucket lifecycle configuration", reqInfo, err)
		return
	}
}
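The io.TeeReader above lets the handler decode the XML once while retaining the exact bytes that were read, so the verbatim payload can be stored alongside the parsed configuration. A standalone illustration of that pattern (requires "encoding/xml"; not part of the handler):

// decodeAndCapture decodes an XML stream while keeping a copy of every byte
// the decoder consumed.
func decodeAndCapture(body io.Reader, dst any) ([]byte, error) {
	var buf bytes.Buffer
	if err := xml.NewDecoder(io.TeeReader(body, &buf)).Decode(dst); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}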
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	if err = h.obj.DeleteBucketLifecycleConfiguration(ctx, bktInfo); err != nil {
		h.logAndSendError(w, "could not delete bucket lifecycle configuration", reqInfo, err)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

func checkLifecycleConfiguration(cfg *data.LifecycleConfiguration) error {
	if len(cfg.Rules) > maxRules {
		return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
	}

	ids := make(map[string]struct{}, len(cfg.Rules))
	for _, rule := range cfg.Rules {
		if _, ok := ids[rule.ID]; ok && rule.ID != "" {
			return fmt.Errorf("duplicate 'ID': %s", rule.ID)
		}
		ids[rule.ID] = struct{}{}

		if len(rule.ID) > maxRuleIDLen {
			return fmt.Errorf("'ID' value cannot be longer than %d characters", maxRuleIDLen)
		}

		if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
			return fmt.Errorf("invalid lifecycle status: %s", rule.Status)
		}

		if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
			return fmt.Errorf("at least one action needs to be specified in a rule")
		}

		if rule.AbortIncompleteMultipartUpload != nil {
			if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
				*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
				return fmt.Errorf("days after initiation must be a positive integer: %d", *rule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
			}

			if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
				return fmt.Errorf("abort incomplete multipart upload cannot be specified with tags")
			}
		}

		if rule.Expiration != nil {
			if rule.Expiration.ExpiredObjectDeleteMarker != nil {
				if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
					return fmt.Errorf("expired object delete marker cannot be specified with days or date")
				}

				if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
					return fmt.Errorf("expired object delete marker cannot be specified with tags")
				}
			}

			if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
				return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
			}

			if _, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date); rule.Expiration.Date != "" && err != nil {
				return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
			}
		}

		if rule.NonCurrentVersionExpiration != nil {
			if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
				(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
					*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
				return fmt.Errorf("invalid value of newer noncurrent versions: %d", *rule.NonCurrentVersionExpiration.NewerNonCurrentVersions)
			}

			if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
				return fmt.Errorf("invalid value of noncurrent days: %d", *rule.NonCurrentVersionExpiration.NonCurrentDays)
			}
		}

		if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
			return err
		}
	}

	return nil
}

func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
	if filter == nil {
		return nil
	}

	var fields int

	if filter.And != nil {
		fields++
		for _, tag := range filter.And.Tags {
			err := checkTag(tag)
			if err != nil {
				return err
			}
		}

		if filter.And.ObjectSizeGreaterThan != nil && filter.And.ObjectSizeLessThan != nil &&
			*filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
			return fmt.Errorf("the maximum object size must be larger than the minimum object size")
		}
	}

	if filter.ObjectSizeGreaterThan != nil {
		fields++
	}

	if filter.ObjectSizeLessThan != nil {
		fields++
	}

	if filter.Prefix != "" {
		fields++
	}

	if filter.Tag != nil {
		fields++
		err := checkTag(*filter.Tag)
		if err != nil {
			return err
		}
	}

	if fields > 1 {
		return fmt.Errorf("filter cannot have more than one field")
	}

	return nil
}
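Taken together, the validators admit configurations like the following, with a single enabled rule, one action, and a filter with exactly one field (values here are illustrative; ptr is the generic helper defined in the tests below):

cfg := &data.LifecycleConfiguration{
	Rules: []data.LifecycleRule{{
		ID:         "expire-logs",                              // optional, at most 255 characters
		Status:     data.LifecycleStatusEnabled,                // must be Enabled or Disabled
		Expiration: &data.LifecycleExpiration{Days: ptr(30)},   // at least one action per rule
		Filter:     &data.LifecycleRuleFilter{Prefix: "logs/"}, // at most one filter field
	}},
}
// checkLifecycleConfiguration(cfg) returns nil for this input.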
|
457
api/handler/lifecycle_test.go
Normal file
457
api/handler/lifecycle_test.go
Normal file
|
@ -0,0 +1,457 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/md5"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/xml"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"github.com/mr-tron/base58"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPutBucketLifecycleConfiguration(t *testing.T) {
|
||||||
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
|
|
||||||
|
bktName := "bucket-lifecycle"
|
||||||
|
createBucket(hc, bktName)
|
||||||
|
|
||||||
|
for _, tc := range []struct {
|
||||||
|
name string
|
||||||
|
body *data.LifecycleConfiguration
|
||||||
|
error bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "correct configuration",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
XMLName: xml.Name{
|
||||||
|
Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
|
||||||
|
Local: "LifecycleConfiguration",
|
||||||
|
},
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
Date: time.Now().Format("2006-01-02T15:04:05.000Z"),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
And: &data.LifecycleRuleAndOperator{
|
||||||
|
Prefix: "prefix/",
|
||||||
|
Tags: []data.Tag{{Key: "key", Value: "value"}, {Key: "tag", Value: ""}},
|
||||||
|
ObjectSizeGreaterThan: ptr(uint64(100)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
|
||||||
|
DaysAfterInitiation: ptr(14),
|
||||||
|
},
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
ExpiredObjectDeleteMarker: ptr(true),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
ObjectSizeLessThan: ptr(uint64(100)),
|
||||||
|
},
|
||||||
|
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
|
||||||
|
NewerNonCurrentVersions: ptr(1),
|
||||||
|
NonCurrentDays: ptr(21),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "too many rules",
|
||||||
|
body: func() *data.LifecycleConfiguration {
|
||||||
|
lifecycle := new(data.LifecycleConfiguration)
|
||||||
|
for i := 0; i <= maxRules; i++ {
|
||||||
|
lifecycle.Rules = append(lifecycle.Rules, data.LifecycleRule{
|
||||||
|
ID: "Rule" + strconv.Itoa(i),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return lifecycle
|
||||||
|
}(),
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate rule ID",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
ID: "Rule",
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ID: "Rule",
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "too long rule ID",
|
||||||
|
body: func() *data.LifecycleConfiguration {
|
||||||
|
id := make([]byte, maxRuleIDLen+1)
|
||||||
|
_, err := io.ReadFull(rand.Reader, id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
ID: base58.Encode(id)[:maxRuleIDLen+1],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}(),
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid status",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: "invalid",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no actions",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
Prefix: "prefix/",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid days after initiation",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
|
||||||
|
DaysAfterInitiation: ptr(0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid expired object delete marker declaration",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
ExpiredObjectDeleteMarker: ptr(false),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid expiration days",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid expiration date",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Date: "invalid",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "newer noncurrent versions is too small",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
|
||||||
|
NewerNonCurrentVersions: ptr(0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "newer noncurrent versions is too large",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
|
||||||
|
NewerNonCurrentVersions: ptr(101),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid noncurrent days",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
|
||||||
|
NonCurrentDays: ptr(0),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "more than one filter field",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
Prefix: "prefix/",
|
||||||
|
ObjectSizeGreaterThan: ptr(uint64(100)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid tag in filter",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
Tag: &data.Tag{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "abort incomplete multipart upload with tag",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
|
||||||
|
DaysAfterInitiation: ptr(14),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
Tag: &data.Tag{Key: "key", Value: "value"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "expired object delete marker with tag",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
ExpiredObjectDeleteMarker: ptr(true),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
And: &data.LifecycleRuleAndOperator{
|
||||||
|
Tags: []data.Tag{{Key: "key", Value: "value"}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid size range",
|
||||||
|
body: &data.LifecycleConfiguration{
|
||||||
|
Rules: []data.LifecycleRule{
|
||||||
|
{
|
||||||
|
Status: data.LifecycleStatusEnabled,
|
||||||
|
Expiration: &data.LifecycleExpiration{
|
||||||
|
Days: ptr(21),
|
||||||
|
},
|
||||||
|
Filter: &data.LifecycleRuleFilter{
|
||||||
|
And: &data.LifecycleRuleAndOperator{
|
||||||
|
ObjectSizeGreaterThan: ptr(uint64(100)),
|
||||||
|
ObjectSizeLessThan: ptr(uint64(100)),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
error: true,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
if tc.error {
|
||||||
|
putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apiErrors.GetAPIError(apiErrors.ErrMalformedXML))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
putBucketLifecycleConfiguration(hc, bktName, tc.body)
|
||||||
|
|
||||||
|
cfg := getBucketLifecycleConfiguration(hc, bktName)
|
||||||
|
require.Equal(t, *tc.body, *cfg)
|
||||||
|
|
||||||
|
deleteBucketLifecycleConfiguration(hc, bktName)
|
||||||
|
		getBucketLifecycleConfigurationErr(hc, bktName, apiErrors.GetAPIError(apiErrors.ErrNoSuchLifecycleConfiguration))
		})
	}
}

func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-lifecycle-md5"
	createBucket(hc, bktName)

	lifecycle := &data.LifecycleConfiguration{
		Rules: []data.LifecycleRule{
			{
				Status: data.LifecycleStatusEnabled,
				Expiration: &data.LifecycleExpiration{
					Days: ptr(21),
				},
			},
		},
	}

	w, r := prepareTestRequest(hc, bktName, "", lifecycle)
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrMissingContentMD5))

	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
	r.Header.Set(api.ContentMD5, "")
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest))

	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
	r.Header.Set(api.ContentMD5, "some-hash")
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest))
}

func TestPutBucketLifecycleInvalidXML(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-lifecycle-invalid-xml"
	createBucket(hc, bktName)

	w, r := prepareTestRequest(hc, bktName, "", &data.CORSConfiguration{})
	r.Header.Set(api.ContentMD5, "")
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apiErrors.GetAPIError(apiErrors.ErrMalformedXML))
}

func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
	assertStatus(hc.t, w, http.StatusOK)
}

func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apiErrors.Error) {
	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
	assertS3Error(hc.t, w, err)
}

func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", cfg)

	rawBody, err := xml.Marshal(cfg)
	require.NoError(hc.t, err)

	hash := md5.New()
	hash.Write(rawBody)
	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))

	hc.Handler().PutBucketLifecycleHandler(w, r)
	return w
}

func getBucketLifecycleConfiguration(hc *handlerContext, bktName string) *data.LifecycleConfiguration {
	w := getBucketLifecycleConfigurationBase(hc, bktName)
	assertStatus(hc.t, w, http.StatusOK)
	res := &data.LifecycleConfiguration{}
	parseTestResponse(hc.t, w, res)
	return res
}

func getBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, err apiErrors.Error) {
	w := getBucketLifecycleConfigurationBase(hc, bktName)
	assertS3Error(hc.t, w, err)
}

func getBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", nil)
	hc.Handler().GetBucketLifecycleHandler(w, r)
	return w
}

func deleteBucketLifecycleConfiguration(hc *handlerContext, bktName string) {
	w := deleteBucketLifecycleConfigurationBase(hc, bktName)
	assertStatus(hc.t, w, http.StatusNoContent)
}

func deleteBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", nil)
	hc.Handler().DeleteBucketLifecycleHandler(w, r)
	return w
}

func ptr[T any](t T) *T {
	return &t
}
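For reference, the Content-MD5 value that putBucketLifecycleConfigurationBase computes (and that the invalid-MD5 tests deliberately omit or corrupt) is the base64-encoded MD5 digest of the raw request body, not its hex encoding. A minimal, self-contained client-side sketch; the helper name is illustrative, not part of the gateway API:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// computeContentMD5 returns the Content-MD5 header value:
// the base64-encoded (not hex-encoded) MD5 digest of the raw body.
func computeContentMD5(body []byte) string {
	sum := md5.Sum(body)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	fmt.Println(computeContentMD5([]byte("<LifecycleConfiguration/>")))
}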
@ -5,6 +5,7 @@ import (
	"fmt"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"time"

@ -30,6 +31,7 @@ type (
		Bucket   string `xml:"Bucket"`
		Key      string `xml:"Key"`
		ETag     string `xml:"ETag"`
		Location string `xml:"Location"`
	}

	ListMultipartUploadsResponse struct {

@ -54,11 +56,11 @@ type (
		Initiator            Initiator     `xml:"Initiator"`
		IsTruncated          bool          `xml:"IsTruncated"`
		Key                  string        `xml:"Key"`
		MaxParts             int           `xml:"MaxParts,omitempty"`
		MaxParts             int           `xml:"MaxParts"`
		NextPartNumberMarker int           `xml:"NextPartNumberMarker,omitempty"`
		NextPartNumberMarker int           `xml:"NextPartNumberMarker"`
		Owner                Owner         `xml:"Owner"`
		Parts                []*layer.Part `xml:"Part"`
		PartNumberMarker     int           `xml:"PartNumberMarker,omitempty"`
		PartNumberMarker     int           `xml:"PartNumberMarker"`
		StorageClass         string        `xml:"StorageClass"`
		UploadID             string        `xml:"UploadId"`
	}

@ -426,6 +428,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
		Bucket:   objInfo.Bucket,
		Key:      objInfo.Name,
		ETag:     data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
		Location: getObjectLocation(r, reqInfo.BucketName, reqInfo.ObjectName, reqInfo.RequestVHSEnabled),
	}

	if settings.VersioningEnabled() {

@ -437,6 +440,34 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
	}
}

// getURLScheme returns "https" when the request came in over TLS, "http" otherwise.
func getURLScheme(r *http.Request) string {
	if r.TLS != nil {
		return "https"
	}
	return "http"
}

// getObjectLocation gets the fully qualified URL of an object.
func getObjectLocation(r *http.Request, bucket, object string, vhsEnabled bool) string {
	proto := middleware.GetSourceScheme(r)
	if proto == "" {
		proto = getURLScheme(r)
	}
	u := &url.URL{
		Host:   r.Host,
		Path:   path.Join("/", bucket, object),
		Scheme: proto,
	}

	// If VHS is enabled, the bucket is already part of the host (DNS style),
	// so only the object name goes into the path.
	if vhsEnabled {
		u.Path = path.Join("/", object)
	}

	return u.String()
}

func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo) (*data.ObjectInfo, error) {
	ctx := r.Context()
	uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
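The Location value produced above depends on the addressing style. A quick illustration of both modes, assuming access to the handler package (hosts are examples only):

	req := &http.Request{Host: "s3dev.frostfs.devenv"}
	// Path style: the bucket is part of the URL path.
	fmt.Println(getObjectLocation(req, "mybucket", "test/1.txt", false))
	// -> http://s3dev.frostfs.devenv/mybucket/test/1.txt

	vhsReq := &http.Request{Host: "mybucket.s3dev.frostfs.devenv"}
	// Virtual-hosted style: the bucket is already in the Host,
	// so only the object name goes into the path.
	fmt.Println(getObjectLocation(vhsReq, "mybucket", "test/1.txt", true))
	// -> http://mybucket.s3dev.frostfs.devenv/test/1.txt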
@ -17,6 +17,10 @@ import (
	s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
	"github.com/stretchr/testify/require"
)

@ -68,6 +72,19 @@ func TestDeleteMultipartAllParts(t *testing.T) {
	require.Empty(t, hc.tp.Objects())
}

func TestSpecialMultipartName(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)

	bktName, objName := "bucket", "bucket-settings"

	createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	createMultipartUpload(hc, bktName, objName, nil)
	res := getBucketVersioning(hc, bktName)
	require.Equal(t, enabledValue, res.Status)
}

func TestMultipartReUploadPart(t *testing.T) {
	hc := prepareHandlerContext(t)

@ -109,6 +126,108 @@ func TestMultipartReUploadPart(t *testing.T) {
	equalDataSlices(t, append(data1, data2...), data)
}

func TestMultipartRemovePartsSplit(t *testing.T) {
	bktName, objName := "bucket-to-upload-part", "object-multipart"
	partSize := 8

	t.Run("reupload part", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
		require.Len(t, list.Parts, 1)
		require.Equal(t, `"etag"`, list.Parts[0].ETag)

		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
		list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
		require.Len(t, list.Parts, 1)
		require.Equal(t, etag1, list.Parts[0].ETag)

		require.Len(t, hc.tp.Objects(), 1)
	})

	t.Run("abort multipart", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
		require.Empty(t, hc.tp.Objects())
	})

	t.Run("complete multipart", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
		require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
	})
}

func containsOID(objects []*object.Object, objID oid.ID) bool {
	for _, o := range objects {
		oID, _ := o.ID()
		if oID.Equals(objID) {
			return true
		}
	}

	return false
}

func TestListMultipartUploads(t *testing.T) {
	hc := prepareHandlerContext(t)

@ -279,13 +398,19 @@ func TestListParts(t *testing.T) {
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)
	require.Zero(t, list.PartNumberMarker)
	require.Equal(t, 2, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "1", http.StatusOK)
	require.Len(t, list.Parts, 1)
	require.Equal(t, etag2, list.Parts[0].ETag)
	require.Equal(t, 1, list.PartNumberMarker)
	require.Equal(t, 2, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "2", http.StatusOK)
	require.Len(t, list.Parts, 0)
	require.Equal(t, 2, list.PartNumberMarker)
	require.Equal(t, 0, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "7", http.StatusOK)
	require.Len(t, list.Parts, 0)

@ -422,6 +547,80 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
	}
}

func TestMultipartObjectLocation(t *testing.T) {
	for _, tc := range []struct {
		req        *http.Request
		bucket     string
		object     string
		vhsEnabled bool
		expected   string
	}{
		{
			req: &http.Request{
				Host:   "127.0.0.1:8084",
				Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
			},
			bucket:   "testbucket1",
			object:   "test/1.txt",
			expected: "http://127.0.0.1:8084/testbucket1/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "localhost:8084",
				Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
			},
			bucket:   "testbucket1",
			object:   "test/1.txt",
			expected: "https://localhost:8084/testbucket1/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "s3.mybucket.org",
				Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
			},
			bucket:   "mybucket",
			object:   "test/1.txt",
			expected: "http://s3.mybucket.org/mybucket/test/1.txt",
		},
		{
			req:      &http.Request{Host: "mys3.mybucket.org"},
			bucket:   "mybucket",
			object:   "test/1.txt",
			expected: "http://mys3.mybucket.org/mybucket/test/1.txt",
		},
		{
			req:      &http.Request{Host: "s3.bucket.org", TLS: &tls.ConnectionState{}},
			bucket:   "bucket",
			object:   "obj",
			expected: "https://s3.bucket.org/bucket/obj",
		},
		{
			req: &http.Request{
				Host: "mybucket.s3dev.frostfs.devenv",
			},
			bucket:     "mybucket",
			object:     "test/1.txt",
			vhsEnabled: true,
			expected:   "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "mybucket.s3dev.frostfs.devenv",
				Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
			},
			bucket:     "mybucket",
			object:     "test/1.txt",
			vhsEnabled: true,
			expected:   "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
		},
	} {
		t.Run("", func(t *testing.T) {
			location := getObjectLocation(tc.req, tc.bucket, tc.object, tc.vhsEnabled)
			require.Equal(t, tc.expected, location)
		})
	}
}

func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
	return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
}
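The new PartNumberMarker/NextPartNumberMarker assertions encode standard S3 pagination: a client pages through the parts of an upload by feeding each response's NextPartNumberMarker back as the next part-number-marker until no parts remain. A minimal sketch of that loop using the test helpers above (not gateway code; marker handling is the point):

	// Walk all parts of an upload the way the assertions above expect:
	// start at marker "0" and keep following NextPartNumberMarker.
	marker := 0
	for {
		list := listParts(hc, bktName, objName, uploadInfo.UploadID, strconv.Itoa(marker), http.StatusOK)
		if len(list.Parts) == 0 {
			break // NextPartNumberMarker is 0 here, nothing left to fetch
		}
		marker = list.NextPartNumberMarker
	}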
@ -7,10 +7,6 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}

func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
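With the not-supported stub removed, DeleteBucketLifecycle is handled by the real implementation exercised by the lifecycle test helpers above. A minimal round-trip sketch built from those helpers (the test name and bucket name are illustrative):

func TestDeleteLifecycleRoundTrip(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName := "bucket-lifecycle-roundtrip" // hypothetical bucket name
	createBucket(hc, bktName)

	// Deletion returns 204, after which reading the configuration
	// must fail with ErrNoSuchLifecycleConfiguration again.
	deleteBucketLifecycleConfiguration(hc, bktName)
	getBucketLifecycleConfigurationErr(hc, bktName, apiErrors.GetAPIError(apiErrors.ErrNoSuchLifecycleConfiguration))
}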
@ -232,7 +232,7 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
		return
	}

	response := encodeListObjectVersionsToResponse(info, p.BktInfo.Name, h.cfg.MD5Enabled())
	response := encodeListObjectVersionsToResponse(p, info, p.BktInfo.Name, h.cfg.MD5Enabled())
	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}

@ -264,24 +264,28 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
	return &res, nil
}

func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, bucketName string, md5Enabled bool) *ListObjectsVersionsResponse {
func encodeListObjectVersionsToResponse(p *layer.ListObjectVersionsParams, info *layer.ListObjectVersionsInfo, bucketName string, md5Enabled bool) *ListObjectsVersionsResponse {
	res := ListObjectsVersionsResponse{
		Name:                bucketName,
		IsTruncated:         info.IsTruncated,
		KeyMarker:           info.KeyMarker,
		KeyMarker:           s3PathEncode(info.KeyMarker, p.Encode),
		NextKeyMarker:       info.NextKeyMarker,
		NextKeyMarker:       s3PathEncode(info.NextKeyMarker, p.Encode),
		NextVersionIDMarker: info.NextVersionIDMarker,
		VersionIDMarker:     info.VersionIDMarker,
		Prefix:              s3PathEncode(p.Prefix, p.Encode),
		Delimiter:           s3PathEncode(p.Delimiter, p.Encode),
		EncodingType:        p.Encode,
		MaxKeys:             p.MaxKeys,
	}

	for _, prefix := range info.CommonPrefixes {
		res.CommonPrefixes = append(res.CommonPrefixes, CommonPrefix{Prefix: prefix})
		res.CommonPrefixes = append(res.CommonPrefixes, CommonPrefix{Prefix: s3PathEncode(prefix, p.Encode)})
	}

	for _, ver := range info.Version {
		res.Version = append(res.Version, ObjectVersionResponse{
			IsLatest:     ver.IsLatest,
			Key:          ver.NodeVersion.FilePath,
			Key:          s3PathEncode(ver.NodeVersion.FilePath, p.Encode),
			LastModified: ver.NodeVersion.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID: ver.NodeVersion.Owner.String(),

@ -297,7 +301,7 @@ func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, buck
	for _, del := range info.DeleteMarker {
		res.DeleteMarker = append(res.DeleteMarker, DeleteMarkerEntry{
			IsLatest:     del.IsLatest,
			Key:          del.NodeVersion.FilePath,
			Key:          s3PathEncode(del.NodeVersion.FilePath, p.Encode),
			LastModified: del.NodeVersion.Created.UTC().Format(time.RFC3339),
			Owner: Owner{
				ID: del.NodeVersion.Owner.String(),
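The body of s3PathEncode is not part of this diff. Judging by the expectations in the encoding test further down ("auux%20ab/thud", "asdf%2Bb", "foo%28%29"), it most likely percent-encodes each path segment while keeping "/" separators intact, and returns the value unchanged unless encoding-type=url. A self-contained sketch of that assumed behavior (an assumption, not the gateway's actual implementation):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEncode is a guess at s3PathEncode's behavior: percent-encode each
// segment (space as %20, not "+"), keep "/" intact, and pass the value
// through unchanged for any encoding type other than "url".
func pathEncode(s, encodingType string) string {
	if encodingType != "url" {
		return s
	}
	segs := strings.Split(s, "/")
	for i, seg := range segs {
		segs[i] = strings.ReplaceAll(url.QueryEscape(seg), "+", "%20")
	}
	return strings.Join(segs, "/")
}

func main() {
	fmt.Println(pathEncode("auux ab/thud", "url")) // auux%20ab/thud
	fmt.Println(pathEncode("asdf+b", "url"))       // asdf%2Bb
	fmt.Println(pathEncode("foo()", "url"))        // foo%28%29
}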
@ -7,6 +7,7 @@ import (
	"net/url"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

@ -15,8 +16,11 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zaptest"
	"go.uber.org/zap/zaptest/observer"
)

func TestParseContinuationToken(t *testing.T) {

@ -93,12 +97,39 @@ func TestListObjectsWithOldTreeNodes(t *testing.T) {
	checkListVersionsOldNodes(hc, listVers.Version, objInfos)
}

func TestListObjectsVersionsSkipLogTaggingNodesError(t *testing.T) {
	loggerCore, observedLog := observer.New(zap.DebugLevel)
	log := zap.New(loggerCore)

	hcBase, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
	require.NoError(t, err)
	hc := &handlerContext{
		handlerContextBase: hcBase,
		t:                  t,
	}

	bktName, objName := "bucket-versioning-enabled", "versions/object"
	bktInfo := createTestBucket(hc, bktName)

	createTestObject(hc, bktInfo, objName, encryption.Params{})
	createTestObject(hc, bktInfo, objName, encryption.Params{})

	putObjectTagging(hc.t, hc, bktName, objName, map[string]string{"tag1": "val1"})

	listObjectsVersions(hc, bktName, "", "", "", "", -1)

	filtered := observedLog.Filter(func(entry observer.LoggedEntry) bool {
		return strings.Contains(entry.Message, logs.ParseTreeNode)
	})
	require.Empty(t, filtered)
}

func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
	nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", 0, 0)
	nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0, true)
	require.NoError(hc.t, err)

	for _, node := range nodes {
		if node.GetNodeID() == 0 {
		if node.GetNodeID()[0] == 0 {
			continue
		}
		meta := make(map[string]string, len(node.GetMeta()))

@ -108,7 +139,7 @@ func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
			}
		}

		err = hc.treeMock.MoveNode(hc.Context(), bktInfo, "version", node.GetNodeID(), node.GetParentID(), meta)
		err = hc.treeMock.MoveNode(hc.Context(), bktInfo, "version", node.GetNodeID()[0], node.GetParentID()[0], meta)
		require.NoError(hc.t, err)
	}
}

@ -138,11 +169,17 @@ func checkListVersionsOldNodes(hc *handlerContext, list []ObjectVersionResponse,
}

func TestListObjectsContextCanceled(t *testing.T) {
	layerCfg := layer.DefaultCachesConfigs(zaptest.NewLogger(t))
	log := zaptest.NewLogger(t)
	layerCfg := layer.DefaultCachesConfigs(log)
	layerCfg.SessionList.Lifetime = time.Hour
	layerCfg.SessionList.Size = 1

	hc := prepareHandlerContextBase(t, layerCfg)
	hcBase, err := prepareHandlerContextBase(layerCfg)
	require.NoError(t, err)
	hc := &handlerContext{
		handlerContextBase: hcBase,
		t:                  t,
	}

	bktName := "bucket-versioning-enabled"
	bktInfo := createTestBucket(hc, bktName)

@ -675,6 +712,49 @@ func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
	require.Equal(t, page1.NextVersionIDMarker, page2.VersionIDMarker)
}

func TestListObjectVersionsEncoding(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-for-listing-versions-encoding"
	bktInfo := createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	objects := []string{"foo()/bar", "foo()/bar/xyzzy", "auux ab/thud", "asdf+b"}
	for _, objName := range objects {
		createTestObject(hc, bktInfo, objName, encryption.Params{})
	}
	deleteObject(t, hc, bktName, "auux ab/thud", "")

	listResponse := listObjectsVersionsURL(hc, bktName, "foo(", ")", "", "", -1)

	require.Len(t, listResponse.CommonPrefixes, 1)
	require.Equal(t, "foo%28%29", listResponse.CommonPrefixes[0].Prefix)
	require.Len(t, listResponse.Version, 0)
	require.Len(t, listResponse.DeleteMarker, 0)
	require.Equal(t, "foo%28", listResponse.Prefix)
	require.Equal(t, "%29", listResponse.Delimiter)
	require.Equal(t, "url", listResponse.EncodingType)
	require.Equal(t, maxObjectList, listResponse.MaxKeys)

	listResponse = listObjectsVersions(hc, bktName, "", "", "", "", 1)
	require.Empty(t, listResponse.EncodingType)

	listResponse = listObjectsVersionsURL(hc, bktName, "", "", listResponse.NextKeyMarker, listResponse.NextVersionIDMarker, 3)

	require.Len(t, listResponse.CommonPrefixes, 0)
	require.Len(t, listResponse.Version, 2)
	require.Equal(t, "auux%20ab/thud", listResponse.Version[0].Key)
	require.False(t, listResponse.Version[0].IsLatest)
	require.Equal(t, "foo%28%29/bar", listResponse.Version[1].Key)
	require.Len(t, listResponse.DeleteMarker, 1)
	require.Equal(t, "auux%20ab/thud", listResponse.DeleteMarker[0].Key)
	require.True(t, listResponse.DeleteMarker[0].IsLatest)
	require.Equal(t, "asdf%2Bb", listResponse.KeyMarker)
	require.Equal(t, "foo%28%29/bar", listResponse.NextKeyMarker)
	require.Equal(t, "url", listResponse.EncodingType)
	require.Equal(t, 3, listResponse.MaxKeys)
}

func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) {
	for i, v := range versions.Version {
		require.Equal(t, names[i], v.Key)

@ -777,6 +857,14 @@ func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string
}

func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
	return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, false)
}

func listObjectsVersionsURL(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
	return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, true)
}

func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int, encode bool) *ListObjectsVersionsResponse {
	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
	if len(keyMarker) != 0 {
		query.Add("key-marker", keyMarker)

@ -784,6 +872,9 @@ func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMark
	if len(versionIDMarker) != 0 {
		query.Add("version-id-marker", versionIDMarker)
	}
	if encode {
		query.Add("encoding-type", "url")
	}

	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
	hc.Handler().ListBucketObjectVersionsHandler(w, r)
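The two new files below add an S3 PATCH extension to the gateway. The handler requires both a Content-Range header of the form "bytes start-end/*" (everything after "/" is ignored) and a Content-Length equal to the range length. A minimal client-side sketch of such a request, assuming a locally running gateway (endpoint, bucket, and object names are examples, and a real deployment would also need AWS SigV4 auth headers):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("new") // bytes to splice into the object at offset 0

	// PATCH http://<gateway>/<bucket>/<object> — the endpoint is an example.
	req, err := http.NewRequest(http.MethodPatch,
		"http://localhost:8084/testbucket/obj", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	// Content-Range selects the byte span to replace; Content-Length must
	// equal end-start+1 or the gateway answers ErrInvalidRangeLength.
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", 0, len(payload)-1))
	req.ContentLength = int64(len(payload))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}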
|
|
195
api/handler/patch.go
Normal file
195
api/handler/patch.go
Normal file
|
@ -0,0 +1,195 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||||
|
"go.uber.org/zap"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maxPatchSize = 5 * 1024 * 1024 * 1024 // 5GB
|
||||||
|
|
||||||
|
func (h *handler) PatchObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
ctx := r.Context()
|
||||||
|
reqInfo := middleware.GetReqInfo(ctx)
|
||||||
|
|
||||||
|
if _, ok := r.Header[api.ContentRange]; !ok {
|
||||||
|
h.logAndSendError(w, "missing Content-Range", reqInfo, errors.GetAPIError(errors.ErrMissingContentRange))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := r.Header[api.ContentLength]; !ok {
|
||||||
|
h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
conditional, err := parsePatchConditionalHeaders(r.Header)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not parse conditional headers", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
srcObjPrm := &layer.HeadObjectParams{
|
||||||
|
Object: reqInfo.ObjectName,
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
|
}
|
||||||
|
|
||||||
|
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
srcObjInfo := extendedSrcObjInfo.ObjectInfo
|
||||||
|
|
||||||
|
if err = checkPreconditions(srcObjInfo, conditional, h.cfg.MD5Enabled()); err != nil {
|
||||||
|
h.logAndSendError(w, "precondition failed", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
srcSize, err := layer.GetObjectSize(srcObjInfo)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
byteRange, err := parsePatchByteRange(r.Header.Get(api.ContentRange), srcSize)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxPatchSize < byteRange.End-byteRange.Start+1 {
|
||||||
|
h.logAndSendError(w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if uint64(r.ContentLength) != (byteRange.End - byteRange.Start + 1) {
|
||||||
|
h.logAndSendError(w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if byteRange.Start > srcSize {
|
||||||
|
h.logAndSendError(w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
params := &layer.PatchObjectParams{
|
||||||
|
Object: extendedSrcObjInfo,
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
NewBytes: r.Body,
|
||||||
|
Range: byteRange,
|
||||||
|
VersioningEnabled: settings.VersioningEnabled(),
|
||||||
|
}
|
||||||
|
|
||||||
|
params.CopiesNumbers, err = h.pickCopiesNumbers(nil, reqInfo.Namespace, bktInfo.LocationConstraint)
|
||||||
|
if err != nil {
|
||||||
|
h.logAndSendError(w, "invalid copies number", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
extendedObjInfo, err := h.obj.PatchObject(ctx, params)
|
||||||
|
if err != nil {
|
||||||
|
if isErrObjectLocked(err) {
|
||||||
|
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
|
||||||
|
} else {
|
||||||
|
h.logAndSendError(w, "could not patch object", reqInfo, err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if settings.VersioningEnabled() {
|
||||||
|
w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
|
||||||
|
|
||||||
|
resp := PatchObjectResult{
|
||||||
|
Object: PatchObject{
|
||||||
|
LastModified: extendedObjInfo.ObjectInfo.Created.UTC().Format(time.RFC3339),
|
||||||
|
ETag: data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = middleware.EncodeToResponse(w, resp); err != nil {
|
||||||
|
h.logAndSendError(w, "could not encode PatchObjectResult to response", reqInfo, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parsePatchConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
|
||||||
|
var err error
|
||||||
|
args := &conditionalArgs{
|
||||||
|
IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
|
||||||
|
}
|
||||||
|
|
||||||
|
if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return args, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parsePatchByteRange(rangeStr string, objSize uint64) (*layer.RangeParams, error) {
|
||||||
|
const prefix = "bytes "
|
||||||
|
|
||||||
|
if rangeStr == "" {
|
||||||
|
return nil, fmt.Errorf("empty range")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.HasPrefix(rangeStr, prefix) {
|
||||||
|
return nil, fmt.Errorf("unknown unit in range header")
|
||||||
|
}
|
||||||
|
|
||||||
|
rangeStr, _, found := strings.Cut(strings.TrimPrefix(rangeStr, prefix), "/") // value after / is ignored
|
||||||
|
if !found {
|
||||||
|
return nil, fmt.Errorf("invalid range: %s", rangeStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
startStr, endStr, found := strings.Cut(rangeStr, "-")
|
||||||
|
if !found {
|
||||||
|
return nil, fmt.Errorf("invalid range: %s", rangeStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
start, err := strconv.ParseUint(startStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid start byte: %s", startStr)
|
||||||
|
}
|
||||||
|
|
||||||
|
end := objSize - 1
|
||||||
|
if len(endStr) > 0 {
|
||||||
|
end, err = strconv.ParseUint(endStr, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid end byte: %s", endStr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if start > end {
|
||||||
|
return nil, fmt.Errorf("start byte is greater than end byte")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &layer.RangeParams{
|
||||||
|
Start: start,
|
||||||
|
End: end,
|
||||||
|
}, nil
|
||||||
|
}
|
524
api/handler/patch_test.go
Normal file
524
api/handler/patch_test.go
Normal file
|
@ -0,0 +1,524 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/md5"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||||
|
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||||
|
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPatch(t *testing.T) {
|
||||||
|
tc := prepareHandlerContext(t)
|
||||||
|
tc.config.md5Enabled = true
|
||||||
|
|
||||||
|
bktName, objName := "bucket-for-patch", "object-for-patch"
|
||||||
|
createTestBucket(tc, bktName)
|
||||||
|
|
||||||
|
content := []byte("old object content")
|
||||||
|
md5Hash := md5.New()
|
||||||
|
md5Hash.Write(content)
|
||||||
|
etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))
|
||||||
|
|
||||||
|
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
|
||||||
|
created := time.Now()
|
||||||
|
tc.Handler().PutObjectHandler(w, r)
|
||||||
|
require.Equal(t, etag, w.Header().Get(api.ETag))
|
||||||
|
|
||||||
|
patchPayload := []byte("new")
|
||||||
|
sha256Hash := sha256.New()
|
||||||
|
sha256Hash.Write(patchPayload)
|
||||||
|
sha256Hash.Write(content[len(patchPayload):])
|
||||||
|
hash := hex.EncodeToString(sha256Hash.Sum(nil))
|
||||||
|
|
||||||
|
for _, tt := range []struct {
|
||||||
|
name string
|
||||||
|
rng string
|
||||||
|
headers map[string]string
|
||||||
|
code s3errors.ErrorCode
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "success",
|
||||||
|
rng: "bytes 0-2/*",
|
||||||
|
headers: map[string]string{
|
||||||
|
api.IfUnmodifiedSince: created.Format(http.TimeFormat),
|
||||||
|
api.IfMatch: etag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid range syntax",
|
||||||
|
rng: "bytes 0-2",
|
||||||
|
code: s3errors.ErrInvalidRange,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid range length",
|
||||||
|
rng: "bytes 0-5/*",
|
||||||
|
code: s3errors.ErrInvalidRangeLength,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "invalid range start",
|
||||||
|
rng: "bytes 20-22/*",
|
||||||
|
code: s3errors.ErrRangeOutOfBounds,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "range is too long",
|
||||||
|
rng: "bytes 0-5368709120/*",
|
||||||
|
code: s3errors.ErrInvalidRange,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "If-Unmodified-Since precondition are not satisfied",
|
||||||
|
rng: "bytes 0-2/*",
|
||||||
|
headers: map[string]string{
|
||||||
|
api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
|
||||||
|
},
|
||||||
|
code: s3errors.ErrPreconditionFailed,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "If-Match precondition are not satisfied",
|
||||||
|
rng: "bytes 0-2/*",
|
||||||
|
headers: map[string]string{
|
||||||
|
api.IfMatch: "etag",
|
||||||
|
},
|
||||||
|
code: s3errors.ErrPreconditionFailed,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.code == 0 {
|
||||||
|
res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
|
||||||
|
require.Equal(t, data.Quote(hash), res.Object.ETag)
|
||||||
|
} else {
|
||||||
|
patchObjectErr(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatchMultipartObject(t *testing.T) {
|
||||||
|
tc := prepareHandlerContextWithMinCache(t)
|
||||||
|
tc.config.md5Enabled = true
|
||||||
|
|
||||||
|
bktName, objName, partSize := "bucket-for-multipart-patch", "object-for-multipart-patch", 5*1024*1024
|
||||||
|
createTestBucket(tc, bktName)
|
||||||
|
|
||||||
|
t.Run("patch beginning of the first part", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchSize := partSize / 2
|
||||||
|
patchBody := make([]byte, patchSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(patchSize-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{patchBody, data1[patchSize:], data2, data3}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch middle of the first part", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchSize := partSize / 2
|
||||||
|
patchBody := make([]byte, patchSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/4)+"-"+strconv.Itoa(partSize*3/4-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/4], patchBody, data1[partSize*3/4:], data2, data3}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch first and second parts", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchSize := partSize / 2
|
||||||
|
patchBody := make([]byte, patchSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3/4)+"-"+strconv.Itoa(partSize*5/4-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize*3/4], patchBody, data2[partSize/4:], data3}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch all parts", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchSize := partSize * 2
|
||||||
|
patchBody := make([]byte, patchSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2-1)+"-"+strconv.Itoa(partSize/2+patchSize-2)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2-1], patchBody, data3[partSize/2-1:]}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch all parts and append bytes", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchSize := partSize * 3
|
||||||
|
patchBody := make([]byte, patchSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2)+"-"+strconv.Itoa(partSize/2+patchSize-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2], patchBody}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*7/2, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch second part", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize)+"-"+strconv.Itoa(partSize*2-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1, patchBody, data3}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch last part, equal size", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch last part, increase size", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize+1)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3+1, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch last part with offset and append bytes", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2+3)+"-"+strconv.Itoa(partSize*3+2)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3[:3], patchBody}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*3+3, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("append bytes", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||||
|
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||||
|
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3)+"-"+strconv.Itoa(partSize*4-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3, patchBody}, []byte("")), object)
|
||||||
|
require.Equal(t, partSize*4, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("patch empty multipart", func(t *testing.T) {
|
||||||
|
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||||
|
etag, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, 0)
|
||||||
|
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag})
|
||||||
|
|
||||||
|
patchBody := make([]byte, partSize)
|
||||||
|
_, err := rand.Read(patchBody)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(partSize-1)+"/*", patchBody, nil)
|
||||||
|
object, header := getObject(tc, bktName, objName)
|
||||||
|
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||||
|
require.NoError(t, err)
|
||||||
|
equalDataSlices(t, patchBody, object)
|
||||||
|
require.Equal(t, partSize, contentLen)
|
||||||
|
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-1"))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPatchWithVersion(t *testing.T) {
|
||||||
|
hc := prepareHandlerContextWithMinCache(t)
|
||||||
|
bktName, objName := "bucket", "obj"
|
||||||
|
createVersionedBucket(hc, bktName)
|
||||||
|
objHeader := putObjectContent(hc, bktName, objName, "content")
|
||||||
|
|
||||||
|
putObjectContent(hc, bktName, objName, "some content")
|
||||||
|
|
||||||
|
patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))
|
||||||
|
|
||||||
|
res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
|
||||||
|
require.False(t, res.IsTruncated)
|
||||||
|
require.Len(t, res.Version, 3)
|
||||||
|
|
||||||
|
for _, version := range res.Version {
|
||||||
|
content := getObjectVersion(hc, bktName, objName, version.VersionID)
|
||||||
|
if version.IsLatest {
|
||||||
|
require.Equal(t, []byte("content updated"), content)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if version.VersionID == objHeader.Get(api.AmzVersionID) {
|
||||||
|
require.Equal(t, []byte("content"), content)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
require.Equal(t, []byte("some content"), content)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
func TestPatchEncryptedObject(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	patchObjectErr(t, tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, s3errors.ErrInternalError)
}

func TestPatchMissingHeaders(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentRange))

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	r.Header.Set(api.ContentRange, "bytes 0-2/*")
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
}
func TestParsePatchByteRange(t *testing.T) {
	for _, tt := range []struct {
		rng      string
		size     uint64
		expected *layer.RangeParams
		err      bool
	}{
		{
			rng:      "bytes 2-7/*",
			expected: &layer.RangeParams{Start: 2, End: 7},
		},
		{
			rng:      "bytes 2-7/3",
			expected: &layer.RangeParams{Start: 2, End: 7},
		},
		{
			rng:      "bytes 2-/*",
			size:     9,
			expected: &layer.RangeParams{Start: 2, End: 8},
		},
		{
			rng:      "bytes 2-/3",
			size:     9,
			expected: &layer.RangeParams{Start: 2, End: 8},
		},
		{
			rng: "",
			err: true,
		},
		{
			rng: "2-7/*",
			err: true,
		},
		{
			rng: "bytes 7-2/*",
			err: true,
		},
		{
			rng: "bytes 2-7",
			err: true,
		},
		{
			rng: "bytes 2/*",
			err: true,
		},
		{
			rng: "bytes a-7/*",
			err: true,
		},
		{
			rng: "bytes 2-a/*",
			err: true,
		},
	} {
		t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
			rng, err := parsePatchByteRange(tt.rng, tt.size)
			if tt.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected.Start, rng.Start)
				require.Equal(t, tt.expected.End, rng.End)
			}
		})
	}
}
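The parser under test is not itself shown in this diff. The following is a minimal sketch that satisfies exactly the table above; the real parsePatchByteRange may differ in details, and RangeParams here mirrors layer.RangeParams.

package patchexample

import (
	"fmt"
	"strconv"
	"strings"
)

// RangeParams mirrors layer.RangeParams as used by the tests above.
type RangeParams struct {
	Start, End uint64
}

// parsePatchByteRangeSketch accepts "bytes <start>-<end>/<total>", where an
// empty <end> means "through the last byte of an object of the given size".
func parsePatchByteRangeSketch(rng string, size uint64) (*RangeParams, error) {
	const prefix = "bytes "
	if !strings.HasPrefix(rng, prefix) {
		return nil, fmt.Errorf("invalid range: %s", rng)
	}
	rng = strings.TrimPrefix(rng, prefix)

	ind := strings.Index(rng, "/")
	if ind < 0 {
		return nil, fmt.Errorf("invalid range, missing total size part: %s", rng)
	}
	rng = rng[:ind]

	startStr, endStr, found := strings.Cut(rng, "-")
	if !found {
		return nil, fmt.Errorf("invalid range, missing '-': %s", rng)
	}

	start, err := strconv.ParseUint(startStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid start byte: %w", err)
	}

	end := size - 1 // used when <end> is omitted, as in "bytes 2-/*"
	if endStr != "" {
		if end, err = strconv.ParseUint(endStr, 10, 64); err != nil {
			return nil, fmt.Errorf("invalid end byte: %w", err)
		}
	} else if size == 0 {
		return nil, fmt.Errorf("range without end byte requires a known object size")
	}

	if start > end {
		return nil, fmt.Errorf("start byte %d is greater than end byte %d", start, end)
	}

	return &RangeParams{Start: start, End: end}, nil
}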
func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectErr(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code s3errors.ErrorCode) {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertS3Error(t, w, s3errors.GetAPIError(code))
}

func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
	query := make(url.Values)
	if len(version) > 0 {
		query.Add(api.QueryVersionID, version)
	}

	w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
	r.Header.Set(api.ContentRange, rng)
	r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
	for k, v := range headers {
		r.Header.Set(k, v)
	}

	tc.Handler().PatchObjectHandler(w, r)
	return w
}
@@ -9,6 +9,7 @@ import (
 	stderrors "errors"
 	"fmt"
 	"io"
+	"mime/multipart"
 	"net"
 	"net/http"
 	"net/url"
@@ -469,21 +470,47 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	reqInfo.ObjectName = auth.MultipartFormValue(r, "key")
+
 	var contentReader io.Reader
 	var size uint64
+	var filename string
+
 	if content, ok := r.MultipartForm.Value["file"]; ok {
-		contentReader = bytes.NewBufferString(content[0])
-		size = uint64(len(content[0]))
-	} else {
-		file, head, err := r.FormFile("file")
-		if err != nil {
-			h.logAndSendError(w, "could get uploading file", reqInfo, err)
-			return
-		}
-		contentReader = file
-		size = uint64(head.Size)
-
-		reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", head.Filename)
+		fullContent := strings.Join(content, "")
+		contentReader = bytes.NewBufferString(fullContent)
+		size = uint64(len(fullContent))
+
+		if reqInfo.ObjectName == "" || strings.Contains(reqInfo.ObjectName, "${filename}") {
+			_, head, err := r.FormFile("file")
+			if err != nil {
+				h.logAndSendError(w, "could not parse file field", reqInfo, err)
+				return
+			}
+			filename = head.Filename
+		}
+	} else {
+		var head *multipart.FileHeader
+		contentReader, head, err = r.FormFile("file")
+		if err != nil {
+			h.logAndSendError(w, "could not parse file field", reqInfo, err)
+			return
+		}
+		size = uint64(head.Size)
+		filename = head.Filename
+	}
+
+	if reqInfo.ObjectName == "" {
+		reqInfo.ObjectName = filename
+	} else {
+		reqInfo.ObjectName = strings.ReplaceAll(reqInfo.ObjectName, "${filename}", filename)
+	}
+
+	if reqInfo.ObjectName == "" {
+		h.logAndSendError(w, "missing object name", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
+		return
 	}
 
 	if !policy.CheckContentLength(size) {
 		h.logAndSendError(w, "invalid content-length", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
 		return
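The object-name resolution the new branches implement can be stated compactly. This standalone helper is illustrative only — the handler inlines the logic rather than calling such a function:

package postexample

import "strings"

// resolveObjectName mirrors the rule above: an empty key falls back to the
// uploaded file's name, otherwise every "${filename}" placeholder in the key
// is replaced with it.
func resolveObjectName(key, filename string) string {
	if key == "" {
		return filename
	}
	return strings.ReplaceAll(key, "${filename}", filename)
}

// resolveObjectName("user/user1/${filename}", "object") == "user/user1/object"
// resolveObjectName("", "object")                       == "object"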
@@ -599,10 +626,6 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
 		if key == "content-type" {
 			metadata[api.ContentType] = value
 		}
-
-		if key == "key" {
-			reqInfo.ObjectName = value
-		}
 	}
 
 	for _, cond := range policy.Conditions {
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
 	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
@@ -122,6 +123,92 @@ func TestEmptyPostPolicy(t *testing.T) {
 	require.NoError(t, err)
 }
 
+// If the content length is greater than this value,
+// the data will be written to the file part of the form.
+const maxContentSizeForFormData = 10
+
+func TestPostObject(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	ns, bktName := "", "bucket"
+	createTestBucket(hc, bktName)
+
+	for _, tc := range []struct {
+		key      string
+		filename string
+		content  string
+		objName  string
+		err      bool
+	}{
+		{
+			key:      "user/user1/${filename}",
+			filename: "object",
+			content:  "content",
+			objName:  "user/user1/object",
+		},
+		{
+			key:      "user/user1/${filename}",
+			filename: "object",
+			content:  "maxContentSizeForFormData",
+			objName:  "user/user1/object",
+		},
+		{
+			key:      "user/user1/key-object",
+			filename: "object",
+			content:  "",
+			objName:  "user/user1/key-object",
+		},
+		{
+			key:      "user/user1/key-object",
+			filename: "object",
+			content:  "maxContentSizeForFormData",
+			objName:  "user/user1/key-object",
+		},
+		{
+			key:      "",
+			filename: "object",
+			content:  "",
+			objName:  "object",
+		},
+		{
+			key:      "",
+			filename: "object",
+			content:  "maxContentSizeForFormData",
+			objName:  "object",
+		},
+		{
+			// RFC 7578, Section 4.2 requires that if a filename is provided, the
+			// directory path information must not be used.
+			key:      "",
+			filename: "dir/object",
+			content:  "content",
+			objName:  "object",
+		},
+		{
+			key:      "object",
+			filename: "",
+			content:  "content",
+			objName:  "object",
+		},
+		{
+			key:      "",
+			filename: "",
+			err:      true,
+		},
+	} {
+		t.Run(tc.key+";"+tc.filename, func(t *testing.T) {
+			w := postObjectBase(hc, ns, bktName, tc.key, tc.filename, tc.content)
+			if tc.err {
+				assertS3Error(hc.t, w, s3errors.GetAPIError(s3errors.ErrInternalError))
+				return
+			}
+			assertStatus(hc.t, w, http.StatusNoContent)
+			content, _ := getObject(hc, bktName, tc.objName)
+			require.Equal(t, tc.content, string(content))
+		})
+	}
+}
+
 func TestPutObjectOverrideCopiesNumber(t *testing.T) {
 	tc := prepareHandlerContext(t)
@@ -449,3 +536,85 @@ func TestPutObjectWithContentLanguage(t *testing.T) {
 	tc.Handler().HeadObjectHandler(w, r)
 	require.Equal(t, expectedContentLanguage, w.Header().Get(api.ContentLanguage))
 }
+
+func postObjectBase(hc *handlerContext, ns, bktName, key, filename, content string) *httptest.ResponseRecorder {
+	policy := "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXSwKIFsic3RhcnRzLXdpdGgiLCAiJGtleSIsICIiXQpdfQ=="
+
+	timeToSign := time.Now()
+	timeToSignStr := timeToSign.Format("20060102T150405Z")
+	region := "default"
+	service := "s3"
+
+	accessKeyID := "5jizSbYu8hX345aqCKDgRWKCJYHxnzxRS8e6SUYHZ8Fw0HiRkf3KbJAWBn5mRzmiyHQ3UHADGyzVXLusn1BrmAfLn"
+	secretKey := "abf066d77c6744cd956a123a0b9612df587f5c14d3350ecb01b363f182dd7279"
+
+	creds := getCredsStr(accessKeyID, timeToSignStr, region, service)
+	sign := auth.SignStr(secretKey, service, region, timeToSign, policy)
+
+	body, contentType, err := getMultipartFormBody(policy, creds, timeToSignStr, sign, key, filename, content)
+	require.NoError(hc.t, err)
+
+	w, r := prepareTestPostRequest(hc, bktName, body)
+	r.Header.Set(auth.ContentTypeHdr, contentType)
+	r.Header.Set("X-Frostfs-Namespace", ns)
+
+	err = r.ParseMultipartForm(50 * 1024 * 1024)
+	require.NoError(hc.t, err)
+
+	hc.Handler().PostObject(w, r)
+	return w
+}
+
+func getCredsStr(accessKeyID, timeToSign, region, service string) string {
+	return accessKeyID + "/" + timeToSign + "/" + region + "/" + service + "/aws4_request"
+}
+
+func getMultipartFormBody(policy, creds, date, sign, key, filename, content string) (io.Reader, string, error) {
+	body := &bytes.Buffer{}
+	writer := multipart.NewWriter(body)
+	defer writer.Close()
+
+	if err := writer.WriteField("policy", policy); err != nil {
+		return nil, "", err
+	}
+	if err := writer.WriteField("key", key); err != nil {
+		return nil, "", err
+	}
+	if err := writer.WriteField(strings.ToLower(auth.AmzCredential), creds); err != nil {
+		return nil, "", err
+	}
+	if err := writer.WriteField(strings.ToLower(auth.AmzDate), date); err != nil {
+		return nil, "", err
+	}
+	if err := writer.WriteField(strings.ToLower(auth.AmzSignature), sign); err != nil {
+		return nil, "", err
+	}
+
+	file, err := writer.CreateFormFile("file", filename)
+	if err != nil {
+		return nil, "", err
+	}
+
+	if len(content) < maxContentSizeForFormData {
+		if err = writer.WriteField("file", content); err != nil {
+			return nil, "", err
+		}
+	} else {
+		if _, err = file.Write([]byte(content)); err != nil {
+			return nil, "", err
+		}
+	}
+
+	return body, writer.FormDataContentType(), nil
+}
+
+func prepareTestPostRequest(hc *handlerContext, bktName string, payload io.Reader) (*httptest.ResponseRecorder, *http.Request) {
+	w := httptest.NewRecorder()
+	r := httptest.NewRequest(http.MethodPost, defaultURL+bktName, payload)
+
+	reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName}, "")
+	r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
+
+	return w, r
+}
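The base64 policy constant in postObjectBase is an ordinary S3 POST policy document. This standalone snippet decodes it; the JSON in the trailing comment is the verbatim decoding result.

package postexample

import (
	"encoding/base64"
	"fmt"
)

func printPostPolicy() error {
	const policy = "eyJleHBpcmF0aW9uIjogIjIwMjUtMTItMDFUMTI6MDA6MDAuMDAwWiIsImNvbmRpdGlvbnMiOiBbCiBbInN0YXJ0cy13aXRoIiwgIiR4LWFtei1jcmVkZW50aWFsIiwgIiJdLAogWyJzdGFydHMtd2l0aCIsICIkeC1hbXotZGF0ZSIsICIiXSwKIFsic3RhcnRzLXdpdGgiLCAiJGtleSIsICIiXQpdfQ=="
	raw, err := base64.StdEncoding.DecodeString(policy)
	if err != nil {
		return err
	}
	fmt.Println(string(raw))
	// Output:
	// {"expiration": "2025-12-01T12:00:00.000Z","conditions": [
	//  ["starts-with", "$x-amz-credential", ""],
	//  ["starts-with", "$x-amz-date", ""],
	//  ["starts-with", "$key", ""]
	// ]}
	return nil
}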
@@ -176,6 +176,9 @@ type ListObjectsVersionsResponse struct {
 	DeleteMarker   []DeleteMarkerEntry     `xml:"DeleteMarker"`
 	Version        []ObjectVersionResponse `xml:"Version"`
 	CommonPrefixes []CommonPrefix          `xml:"CommonPrefixes"`
+	Prefix         string                  `xml:"Prefix"`
+	Delimiter      string                  `xml:"Delimiter,omitempty"`
+	MaxKeys        int                     `xml:"MaxKeys"`
 }
 
 // VersioningConfiguration contains VersioningConfiguration XML representation.
@@ -192,6 +195,15 @@ type PostResponse struct {
 	ETag string `xml:"Etag"`
 }
 
+type PatchObjectResult struct {
+	Object PatchObject `xml:"Object"`
+}
+
+type PatchObject struct {
+	LastModified string `xml:"LastModified"`
+	ETag         string `xml:"ETag"`
+}
+
 // MarshalXML -- StringMap marshals into XML.
 func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 	tokens := []xml.Token{start}
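For reference, a body that unmarshals into the new types would look like the following; the timestamp and ETag values are made up:

// Illustrative response body for PatchObjectResult (values are examples only).
const examplePatchResponse = `<PatchObjectResult>
	<Object>
		<LastModified>2024-07-01T12:00:00Z</LastModified>
		<ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
	</Object>
</PatchObjectResult>`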
@@ -11,10 +11,6 @@ func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Requ
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
 
-func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
-}
-
 func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
@@ -51,10 +47,6 @@ func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request)
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
 
-func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
-}
-
 func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
@@ -62,6 +62,7 @@ const (
 	AmzMaxParts         = "X-Amz-Max-Parts"
 	AmzPartNumberMarker = "X-Amz-Part-Number-Marker"
 	AmzStorageClass     = "X-Amz-Storage-Class"
+	AmzForceBucketDelete = "X-Amz-Force-Delete-Bucket"
 
 	AmzServerSideEncryptionCustomerAlgorithm = "x-amz-server-side-encryption-customer-algorithm"
 	AmzServerSideEncryptionCustomerKey       = "x-amz-server-side-encryption-customer-key"
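A sketch of client-side use of the new header. Its exact server-side behavior is not shown in this hunk; the link to skipping the bucket-emptiness check is inferred from the SkipCheck field added to DeleteBucketParams later in this diff, so treat the semantics as an assumption. gwURL is hypothetical and SigV4 signing is omitted.

package headerexample

import "net/http"

func forceDeleteBucket(gwURL, bktName string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodDelete, gwURL+"/"+bktName, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("X-Amz-Force-Delete-Bucket", "true") // accepted value is an assumption
	return http.DefaultClient.Do(req)
}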
@@ -233,7 +233,7 @@ func (c *Cache) PutSettings(owner user.ID, bktInfo *data.BucketInfo, settings *d
 }
 
 func (c *Cache) GetCORS(owner user.ID, bkt *data.BucketInfo) *data.CORSConfiguration {
-	key := bkt.Name + bkt.CORSObjectName()
+	key := bkt.CORSObjectName()
 
 	if !c.accessCache.Get(owner, key) {
 		return nil
@@ -243,7 +243,7 @@ func (c *Cache) GetCORS(owner user.ID, bkt *data.BucketInfo) *data.CORSConfigura
 }
 
 func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConfiguration) {
-	key := bkt.Name + bkt.CORSObjectName()
+	key := bkt.CORSObjectName()
 
 	if err := c.systemCache.PutCORS(key, cors); err != nil {
 		c.logger.Warn(logs.CouldntCacheCors, zap.String("bucket", bkt.Name), zap.Error(err))
@@ -255,5 +255,31 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf
 }
 
 func (c *Cache) DeleteCORS(bktInfo *data.BucketInfo) {
-	c.systemCache.Delete(bktInfo.Name + bktInfo.CORSObjectName())
+	c.systemCache.Delete(bktInfo.CORSObjectName())
+}
+
+func (c *Cache) GetLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo) *data.LifecycleConfiguration {
+	key := bkt.LifecycleConfigurationObjectName()
+
+	if !c.accessCache.Get(owner, key) {
+		return nil
+	}
+
+	return c.systemCache.GetLifecycleConfiguration(key)
+}
+
+func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, cfg *data.LifecycleConfiguration) {
+	key := bkt.LifecycleConfigurationObjectName()
+
+	if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil {
+		c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err))
+	}
+
+	if err := c.accessCache.Put(owner, key); err != nil {
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
+	}
+}
+
+func (c *Cache) DeleteLifecycleConfiguration(bktInfo *data.BucketInfo) {
+	c.systemCache.Delete(bktInfo.LifecycleConfigurationObjectName())
 }
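The new lifecycle methods follow the same access-gated pattern as the CORS ones: accessCache records that this owner may read the entry, systemCache holds the payload. A sketch of the intended get-or-load call pattern — loadFromTree is a hypothetical stand-in for the real tree-service lookup, and imports follow the surrounding package:

func cachedLifecycle(ctx context.Context, c *Cache, owner user.ID, bkt *data.BucketInfo,
	loadFromTree func(context.Context, *data.BucketInfo) (*data.LifecycleConfiguration, error),
) (*data.LifecycleConfiguration, error) {
	if cfg := c.GetLifecycleConfiguration(owner, bkt); cfg != nil {
		return cfg, nil // hit: access was already validated through accessCache
	}
	cfg, err := loadFromTree(ctx, bkt) // hypothetical loader
	if err != nil {
		return nil, err
	}
	c.PutLifecycleConfiguration(owner, bkt, cfg)
	return cfg, nil
}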
@@ -64,7 +64,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
 		}
 	}
 
-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)
 	if zone != info.Zone {
 		return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, prm.ContainerID)
 	}
@@ -111,7 +111,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
 	}
 
-	zone, _ := n.features.FormContainerZone(p.Namespace)
+	zone := n.features.FormContainerZone(p.Namespace)
 
 	bktInfo := &data.BucketInfo{
 		Name: p.Name,
@@ -10,6 +10,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"go.uber.org/zap"
 )
 
@@ -37,29 +39,36 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 	}
 
 	prm := PrmObjectCreate{
-		Container:    p.BktInfo.CID,
 		Payload:      &buf,
 		Filepath:     p.BktInfo.CORSObjectName(),
 		CreationTime: TimeNow(ctx),
-		CopiesNumber: p.CopiesNumbers,
 	}
 
-	_, objID, _, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	var corsBkt *data.BucketInfo
+	if n.corsCnrInfo == nil {
+		corsBkt = p.BktInfo
+		prm.CopiesNumber = p.CopiesNumbers
+	} else {
+		corsBkt = n.corsCnrInfo
+		prm.PrmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	prm.Container = corsBkt.CID
+
+	createdObj, err := n.objectPutAndHash(ctx, prm, corsBkt)
 	if err != nil {
-		return fmt.Errorf("put system object: %w", err)
+		return fmt.Errorf("put cors object: %w", err)
 	}
 
-	objIDToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, objID)
-	objIDToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
-	if err != nil && !objIDToDeleteNotFound {
+	objsToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, newAddress(corsBkt.CID, createdObj.ID))
+	objToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
+	if err != nil && !objToDeleteNotFound {
 		return err
 	}
 
-	if !objIDToDeleteNotFound {
-		if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
-			n.reqLogger(ctx).Error(logs.CouldntDeleteCorsObject, zap.Error(err),
-				zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
-				zap.String("objID", objIDToDelete.EncodeToString()))
-		}
+	if !objToDeleteNotFound {
+		for _, addr := range objsToDelete {
+			n.deleteCORSObject(ctx, p.BktInfo, addr)
+		}
 	}
@@ -68,12 +77,25 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 	return nil
 }
 
+// deleteCORSObject removes object and logs in case of error.
+func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
+	var prmAuth PrmAuth
+	corsBkt := bktInfo
+	if !addr.Container().Equals(bktInfo.CID) && !addr.Container().Equals(cid.ID{}) {
+		corsBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	if err := n.objectDeleteWithAuth(ctx, corsBkt, addr.Object(), prmAuth); err != nil {
+		n.reqLogger(ctx).Error(logs.CouldntDeleteCorsObject, zap.Error(err),
+			zap.String("cnrID", corsBkt.CID.EncodeToString()),
+			zap.String("objID", addr.Object().EncodeToString()))
+	}
+}
+
 func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error) {
 	cors, err := n.getCORS(ctx, bktInfo)
 	if err != nil {
-		if errorsStd.Is(err, ErrNodeNotFound) {
-			return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
-		}
 		return nil, err
 	}
@@ -81,14 +103,15 @@ func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*d
 }
 
 func (n *Layer) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error {
-	objID, err := n.treeService.DeleteBucketCORS(ctx, bktInfo)
-	objIDNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
-	if err != nil && !objIDNotFound {
+	objs, err := n.treeService.DeleteBucketCORS(ctx, bktInfo)
+	objNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
+	if err != nil && !objNotFound {
 		return err
 	}
-	if !objIDNotFound {
-		if err = n.objectDelete(ctx, bktInfo, objID); err != nil {
-			return err
+
+	if !objNotFound {
+		for _, addr := range objs {
+			n.deleteCORSObject(ctx, bktInfo, addr)
 		}
 	}
@@ -78,8 +78,32 @@ type PrmAuth struct {
 	PrivateKey *ecdsa.PrivateKey
 }
 
-// PrmObjectRead groups parameters of FrostFS.ReadObject operation.
-type PrmObjectRead struct {
+// PrmObjectHead groups parameters of FrostFS.HeadObject operation.
+type PrmObjectHead struct {
+	// Authentication parameters.
+	PrmAuth
+
+	// Container to read the object header from.
+	Container cid.ID
+
+	// ID of the object for which to read the header.
+	Object oid.ID
+}
+
+// PrmObjectGet groups parameters of FrostFS.GetObject operation.
+type PrmObjectGet struct {
+	// Authentication parameters.
+	PrmAuth
+
+	// Container to read the object header from.
+	Container cid.ID
+
+	// ID of the object for which to read the header.
+	Object oid.ID
+}
+
+// PrmObjectRange groups parameters of FrostFS.RangeObject operation.
+type PrmObjectRange struct {
 	// Authentication parameters.
 	PrmAuth
 
@@ -89,20 +113,14 @@ type PrmObjectRead struct {
 	// ID of the object for which to read the header.
 	Object oid.ID
 
-	// Flag to read object header.
-	WithHeader bool
-
-	// Flag to read object payload. False overlaps payload range.
-	WithPayload bool
-
 	// Offset-length range of the object payload to be read.
 	PayloadRange [2]uint64
 }
 
-// ObjectPart represents partially read FrostFS object.
-type ObjectPart struct {
-	// Object header with optional in-memory payload part.
-	Head *object.Object
+// Object represents full read FrostFS object.
+type Object struct {
+	// Object header (doesn't contain payload).
+	Header object.Object
 
 	// Object payload part encapsulated in io.Reader primitive.
 	// Returns ErrAccessDenied on read access violation.
@@ -148,6 +166,12 @@ type PrmObjectCreate struct {
 	BufferMaxSize uint64
 }
 
+// CreateObjectResult is a result parameter of FrostFS.CreateObject operation.
+type CreateObjectResult struct {
+	ObjectID      oid.ID
+	CreationEpoch uint64
+}
+
 // PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
 type PrmObjectDelete struct {
 	// Authentication parameters.
@@ -176,6 +200,27 @@ type PrmObjectSearch struct {
 	FilePrefix string
 }
 
+// PrmObjectPatch groups parameters of FrostFS.PatchObject operation.
+type PrmObjectPatch struct {
+	// Authentication parameters.
+	PrmAuth
+
+	// Container of the patched object.
+	Container cid.ID
+
+	// Identifier of the patched object.
+	Object oid.ID
+
+	// Object patch payload encapsulated in io.Reader primitive.
+	Payload io.Reader
+
+	// Object range to patch.
+	Offset, Length uint64
+
+	// Size of original object payload.
+	ObjectSize uint64
+}
+
 var (
 	// ErrAccessDenied is returned from FrostFS in case of access violation.
 	ErrAccessDenied = errors.New("access denied")
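To show how the new patch parameters line up with the HTTP layer: a sketch of a call through the interface, where the inclusive start/end come from a parsed Content-Range as in the handler tests earlier in this diff. Function and variable names are illustrative, not the gateway's actual code.

func patchObjectRange(ctx context.Context, ffs FrostFS, cnr cid.ID, obj oid.ID,
	objSize, start, end uint64, patch io.Reader) (oid.ID, error) {
	return ffs.PatchObject(ctx, PrmObjectPatch{
		Container:  cnr,
		Object:     obj,
		Payload:    patch,
		Offset:     start,
		Length:     end - start + 1, // Content-Range bounds are inclusive
		ObjectSize: objSize,
	})
}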
@@ -213,13 +258,15 @@ type FrostFS interface {
 	// It returns any error encountered which prevented the removal request from being sent.
 	DeleteContainer(context.Context, cid.ID, *session.Container) error
 
-	// ReadObject reads a part of the object from the FrostFS container by identifier.
-	// Exact part is returned according to the parameters:
-	//   * with header only: empty payload (both in-mem and reader parts are nil);
-	//   * with payload only: header is nil (zero range means full payload);
-	//   * with header and payload: full in-mem object, payload reader is nil.
+	// HeadObject reads an info of the object from the FrostFS container by identifier.
 	//
-	// WithHeader or WithPayload is true. Range length is positive if offset is positive.
+	// It returns ErrAccessDenied on read access violation.
+	//
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the object header from being read.
+	HeadObject(ctx context.Context, prm PrmObjectHead) (*object.Object, error)
+
+	// GetObject reads an object from the FrostFS container by identifier.
 	//
 	// Payload reader should be closed if it is no longer needed.
 	//
@@ -227,19 +274,29 @@ type FrostFS interface {
 	//
 	// It returns exactly one non-nil value. It returns any error encountered which
 	// prevented the object header from being read.
-	ReadObject(context.Context, PrmObjectRead) (*ObjectPart, error)
+	GetObject(ctx context.Context, prm PrmObjectGet) (*Object, error)
+
+	// RangeObject reads a part of object from the FrostFS container by identifier.
+	//
+	// Payload reader should be closed if it is no longer needed.
+	//
+	// It returns ErrAccessDenied on read access violation.
+	//
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the object header from being read.
+	RangeObject(ctx context.Context, prm PrmObjectRange) (io.ReadCloser, error)
 
 	// CreateObject creates and saves a parameterized object in the FrostFS container.
 	// It sets 'Timestamp' attribute to the current time.
-	// It returns the ID of the saved object.
+	// It returns the ID and creation epoch of the saved object.
 	//
 	// Creation time should be written into the object (UTC).
 	//
 	// It returns ErrAccessDenied on write access violation.
 	//
-	// It returns exactly one non-zero value. It returns any error encountered which
-	// prevented the container from being created.
-	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the object from being created.
+	CreateObject(context.Context, PrmObjectCreate) (*CreateObjectResult, error)
 
 	// DeleteObject marks the object to be removed from the FrostFS container by identifier.
 	// Successful return does not guarantee actual removal.
@@ -258,6 +315,15 @@ type FrostFS interface {
 	// prevented the objects from being selected.
 	SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)
 
+	// PatchObject performs object patch in the FrostFS container.
+	// It returns the ID of the patched object.
+	//
+	// It returns ErrAccessDenied on selection access violation.
+	//
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the objects from being patched.
+	PatchObject(context.Context, PrmObjectPatch) (oid.ID, error)
+
 	// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
 	// Note:
 	//   * future time must be after the now
@@ -265,4 +331,7 @@ type FrostFS interface {
 	//
 	// It returns any error encountered which prevented computing epochs.
 	TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error)
+
+	// NetworkInfo returns parameters of FrostFS network.
+	NetworkInfo(context.Context) (netmap.NetworkInfo, error)
 }
@@ -18,8 +18,10 @@ import (
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@@ -50,12 +52,12 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
 	k.md5Enabled = md5Enabled
 }
 
-func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
+func (k *FeatureSettingsMock) FormContainerZone(ns string) string {
 	if ns == "" {
-		return v2container.SysAttributeZoneDefault, true
+		return v2container.SysAttributeZoneDefault
 	}
 
-	return ns + ".ns", false
+	return ns + ".ns"
 }
 
 type TestFrostFS struct {
@@ -204,10 +206,10 @@ func (t *TestFrostFS) UserContainers(context.Context, PrmUserContainers) ([]cid.
 	return res, nil
 }
 
-func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*ObjectPart, error) {
+func (t *TestFrostFS) retrieveObject(ctx context.Context, cnrID cid.ID, objID oid.ID) (*object.Object, error) {
 	var addr oid.Address
-	addr.SetContainer(prm.Container)
-	addr.SetObject(prm.Object)
+	addr.SetContainer(cnrID)
+	addr.SetObject(objID)
 
 	sAddr := addr.EncodeToString()
 
@@ -217,30 +219,48 @@ func (t *TestFrostFS) ReadObject(ctx context.Context, prm PrmObjectRead) (*Objec
 
 	if obj, ok := t.objects[sAddr]; ok {
 		owner := getBearerOwner(ctx)
-		if !t.checkAccess(prm.Container, owner) {
+		if !t.checkAccess(cnrID, owner) {
 			return nil, ErrAccessDenied
 		}
 
-		payload := obj.Payload()
-
-		if prm.PayloadRange[0]+prm.PayloadRange[1] > 0 {
-			off := prm.PayloadRange[0]
-			payload = payload[off : off+prm.PayloadRange[1]]
-		}
-
-		return &ObjectPart{
-			Head:    obj,
-			Payload: io.NopCloser(bytes.NewReader(payload)),
-		}, nil
+		return obj, nil
 	}
 
	return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
 }
 
-func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
+func (t *TestFrostFS) HeadObject(ctx context.Context, prm PrmObjectHead) (*object.Object, error) {
+	return t.retrieveObject(ctx, prm.Container, prm.Object)
+}
+
+func (t *TestFrostFS) GetObject(ctx context.Context, prm PrmObjectGet) (*Object, error) {
+	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Object{
+		Header:  *obj,
+		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
+	}, nil
+}
+
+func (t *TestFrostFS) RangeObject(ctx context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
+	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
+	if err != nil {
+		return nil, err
+	}
+
+	off := prm.PayloadRange[0]
+	payload := obj.Payload()[off : off+prm.PayloadRange[1]]
+
+	return io.NopCloser(bytes.NewReader(payload)), nil
+}
+
+func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (*CreateObjectResult, error) {
 	b := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, b); err != nil {
-		return oid.ID{}, err
+		return nil, err
 	}
 	var id oid.ID
 	id.SetSHA256(sha256.Sum256(b))
@@ -248,7 +268,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 	attrs := make([]object.Attribute, 0)
 
 	if err := t.objectPutErrors[prm.Filepath]; err != nil {
-		return oid.ID{}, err
+		return nil, err
 	}
 
 	if prm.Filepath != "" {
@@ -293,7 +313,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 	if prm.Payload != nil {
 		all, err := io.ReadAll(prm.Payload)
 		if err != nil {
-			return oid.ID{}, err
+			return nil, err
 		}
 		obj.SetPayload(all)
 		obj.SetPayloadSize(uint64(len(all)))
@@ -307,7 +327,10 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 
 	addr := newAddress(cnrID, objID)
 	t.objects[addr.EncodeToString()] = obj
-	return objID, nil
+	return &CreateObjectResult{
+		ObjectID:      objID,
+		CreationEpoch: t.currentEpoch - 1,
+	}, nil
 }
 
 func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
@@ -386,6 +409,49 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) ([]o
 	return res, nil
 }
 
+func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
+	ni := netmap.NetworkInfo{}
+	ni.SetCurrentEpoch(t.currentEpoch)
+
+	return ni, nil
+}
+
+func (t *TestFrostFS) PatchObject(ctx context.Context, prm PrmObjectPatch) (oid.ID, error) {
+	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	newObj := *obj
+
+	patchBytes, err := io.ReadAll(prm.Payload)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	var newPayload []byte
+	if prm.Offset > 0 {
+		newPayload = append(newPayload, obj.Payload()[:prm.Offset]...)
+	}
+	newPayload = append(newPayload, patchBytes...)
+	if prm.Offset+prm.Length < obj.PayloadSize() {
+		newPayload = append(newPayload, obj.Payload()[prm.Offset+prm.Length:]...)
+	}
+	newObj.SetPayload(newPayload)
+	newObj.SetPayloadSize(uint64(len(newPayload)))
+
+	var hash checksum.Checksum
+	checksum.Calculate(&hash, checksum.SHA256, newPayload)
+	newObj.SetPayloadChecksum(hash)
+
+	newID := oidtest.ID()
+	newObj.SetID(newID)
+
+	t.objects[newAddress(prm.Container, newID).EncodeToString()] = &newObj
+
+	return newID, nil
+}
+
 func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID) bool {
 	cnr, ok := t.containers[cnrID.EncodeToString()]
 	if !ok {
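The mock's PatchObject splices the patch into the payload: it keeps bytes [0, Offset), inserts the patch, then resumes at Offset+Length. A standalone illustration of that rule:

package patchexample

import "fmt"

// splice reproduces the payload arithmetic of the mock's PatchObject.
func splice(payload, patch []byte, offset, length uint64) []byte {
	var out []byte
	if offset > 0 {
		out = append(out, payload[:offset]...)
	}
	out = append(out, patch...)
	if offset+length < uint64(len(payload)) {
		out = append(out, payload[offset+length:]...)
	}
	return out
}

// Example: replacing bytes 2..4 of "abcdefgh" with "XY" yields "abXYfgh".
func exampleSplice() {
	fmt.Println(string(splice([]byte("abcdefgh"), []byte("XY"), 2, 3))) // abXYfgh
}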
@@ -6,9 +6,11 @@ import (
 	"crypto/rand"
 	"encoding/json"
 	"encoding/xml"
+	stderrors "errors"
 	"fmt"
 	"io"
 	"net/url"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -16,6 +18,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
@@ -32,14 +35,14 @@ import (
 
 type (
 	BucketResolver interface {
-		Resolve(ctx context.Context, name string) (cid.ID, error)
+		Resolve(ctx context.Context, zone, name string) (cid.ID, error)
 	}
 
 	FeatureSettings interface {
 		ClientCut() bool
 		BufferMaxSizeForPut() uint64
 		MD5Enabled() bool
-		FormContainerZone(ns string) (zone string, isDefault bool)
+		FormContainerZone(ns string) string
 	}
 
 	Layer struct {
@@ -51,6 +54,9 @@ type (
 		cache       *Cache
 		treeService TreeService
 		features    FeatureSettings
+		gateKey          *keys.PrivateKey
+		corsCnrInfo      *data.BucketInfo
+		lifecycleCnrInfo *data.BucketInfo
 	}
 
 	Config struct {
@@ -61,6 +67,9 @@ type (
 		Resolver    BucketResolver
 		TreeService TreeService
 		Features    FeatureSettings
+		GateKey          *keys.PrivateKey
+		CORSCnrInfo      *data.BucketInfo
+		LifecycleCnrInfo *data.BucketInfo
 	}
 
 	// AnonymousKey contains data for anonymous requests.
@@ -118,6 +127,7 @@ type (
 		BktInfo  *data.BucketInfo
 		Objects  []*VersionedObject
 		Settings *data.BucketSettings
+		NetworkInfo netmap.NetworkInfo
 		IsMultiple bool
 	}
@@ -150,6 +160,7 @@ type (
 		DstEncryption encryption.Params
 		CopiesNumbers []uint32
 	}
 
 	// CreateBucketParams stores bucket create request parameters.
 	CreateBucketParams struct {
 		Name string
@@ -163,6 +174,7 @@ type (
 	DeleteBucketParams struct {
 		BktInfo      *data.BucketInfo
 		SessionToken *session.Container
+		SkipCheck bool
 	}
 
 	// ListObjectVersionsParams stores list objects versions parameters.
@@ -233,6 +245,9 @@ func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) *Layer {
 		cache:       config.Cache,
 		treeService: config.TreeService,
 		features:    config.Features,
+		gateKey:          config.GateKey,
+		corsCnrInfo:      config.CORSCnrInfo,
+		lifecycleCnrInfo: config.LifecycleCnrInfo,
 	}
 }
@@ -285,6 +300,10 @@ func (n *Layer) reqLogger(ctx context.Context) *zap.Logger {
 }
 
 func (n *Layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
+	if prm.BearerToken != nil || prm.PrivateKey != nil {
+		return
+	}
+
 	if bd, err := middleware.GetBoxData(ctx); err == nil && bd.Gate.BearerToken != nil {
 		if bd.Gate.BearerToken.Impersonate() || bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
 			prm.BearerToken = bd.Gate.BearerToken
@@ -303,13 +322,13 @@ func (n *Layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
 	}
 
 	reqInfo := middleware.GetReqInfo(ctx)
-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)
 
 	if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
 		return bktInfo, nil
 	}
 
-	containerID, err := n.ResolveBucket(ctx, name)
+	containerID, err := n.ResolveBucket(ctx, zone, name)
 	if err != nil {
 		if strings.Contains(err.Error(), "not found") {
 			return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
@@ -333,13 +352,13 @@ func (n *Layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {
 	}
 
 	reqInfo := middleware.GetReqInfo(ctx)
-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)
 
 	if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
 		return bktInfo.CID, nil
 	}
 
-	return n.ResolveBucket(ctx, name)
+	return n.ResolveBucket(ctx, zone, name)
 }
 
 // ListBuckets returns all user containers. The name of the bucket is a container
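The zone simplification above drops the isDefault flag, so the mapping reduces to a pure function, as exercised by FeatureSettingsMock earlier in this diff. A standalone sketch — the literal "container" stands in for the SDK's v2container.SysAttributeZoneDefault and the ".ns" suffix is the mock's convention, so both are assumptions rather than the production rule:

package zoneexample

import "fmt"

func formContainerZone(ns string) string {
	if ns == "" {
		return "container" // assumed value of v2container.SysAttributeZoneDefault
	}
	return ns + ".ns"
}

func exampleZones() {
	fmt.Println(formContainerZone(""))        // container
	fmt.Println(formContainerZone("tenant1")) // tenant1.ns
}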
@@ -528,19 +547,29 @@ func getRandomOID() (oid.ID, error) {
 	return objID, nil
 }

-func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject) *VersionedObject {
+func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject,
+	networkInfo netmap.NetworkInfo) *VersionedObject {
 	if len(obj.VersionID) != 0 || settings.Unversioned() {
-		var nodeVersion *data.NodeVersion
-		if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
+		var nodeVersions []*data.NodeVersion
+		if nodeVersions, obj.Error = n.getNodeVersionsToDelete(ctx, bkt, obj); obj.Error != nil {
 			return n.handleNotFoundError(bkt, obj)
 		}

-		if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
-			return n.handleObjectDeleteErrors(ctx, bkt, obj, nodeVersion.ID)
-		}
+		for _, nodeVersion := range nodeVersions {
+			if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
+				if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
+					return obj
+				}
+				n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
+					zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
+			}

-		obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)
-		n.cache.CleanListCacheEntriesContainingObject(obj.Name, bkt.CID)
+			if obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID); obj.Error != nil {
+				return obj
+			}
+		}

+		n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
 		return obj
 	}
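Note: the loop above tolerates "already removed" / "not found" statuses from storage so that stale tree entries are still cleaned up. A self-contained sketch of that classification, with sentinel errors standing in for the SDK's status errors:

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins for the SDK's "already removed" / "not found" statuses.
var (
	errAlreadyRemoved = errors.New("object already removed")
	errNotFound       = errors.New("object not found")
)

// shouldAbort mirrors the check in deleteObject: only unexpected errors stop
// the loop; stale objects are logged and the tree version is still removed.
func shouldAbort(err error) bool {
	return !errors.Is(err, errAlreadyRemoved) && !errors.Is(err, errNotFound)
}

func main() {
	fmt.Println(shouldAbort(errNotFound))           // false: keep deleting
	fmt.Println(shouldAbort(errors.New("timeout"))) // true: abort
}
```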
@@ -553,20 +582,30 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 	if settings.VersioningSuspended() {
 		obj.VersionID = data.UnversionedObjectVersionID

-		var nullVersionToDelete *data.NodeVersion
-		if lastVersion.IsUnversioned {
-			if !lastVersion.IsDeleteMarker {
-				nullVersionToDelete = lastVersion
-			}
-		} else if nullVersionToDelete, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
+		var nodeVersions []*data.NodeVersion
+		if nodeVersions, obj.Error = n.getNodeVersionsToDelete(ctx, bkt, obj); obj.Error != nil {
 			if !isNotFoundError(obj.Error) {
 				return obj
 			}
 		}

-		if nullVersionToDelete != nil {
-			if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nullVersionToDelete, obj); obj.Error != nil {
-				return n.handleObjectDeleteErrors(ctx, bkt, obj, nullVersionToDelete.ID)
+		for _, nodeVersion := range nodeVersions {
+			if nodeVersion.ID == lastVersion.ID && nodeVersion.IsDeleteMarker {
+				continue
+			}
+
+			if !nodeVersion.IsDeleteMarker {
+				if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
+					if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
+						return obj
+					}
+
+					n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
+						zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
+				}
+			}
+
+			if obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID); obj.Error != nil {
+				return obj
 			}
 		}
 	}
@@ -591,6 +630,7 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 			Created:        &now,
 			Owner:          &n.gateOwner,
 			IsDeleteMarker: true,
+			CreationEpoch:  networkInfo.CurrentEpoch(),
 		},
 		IsUnversioned: settings.VersioningSuspended(),
 	}
@@ -614,36 +654,70 @@ func (n *Layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
 	return obj
 }

-func (n *Layer) handleObjectDeleteErrors(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject, nodeID uint64) *VersionedObject {
-	if !client.IsErrObjectAlreadyRemoved(obj.Error) && !client.IsErrObjectNotFound(obj.Error) {
-		return obj
-	}
-
-	n.reqLogger(ctx).Debug(logs.CouldntDeleteObjectFromStorageContinueDeleting,
-		zap.Stringer("cid", bkt.CID), zap.String("oid", obj.VersionID), zap.Error(obj.Error))
-
-	obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeID)
-	if obj.Error == nil {
-		n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)
-	}
-
-	return obj
-}
-
 func isNotFoundError(err error) bool {
 	return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
 		errors.IsS3Error(err, errors.ErrNoSuchVersion)
 }

-func (n *Layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
-	objVersion := &data.ObjectVersion{
-		BktInfo:               bkt,
-		ObjectName:            obj.Name,
-		VersionID:             obj.VersionID,
-		NoErrorOnDeleteMarker: true,
-	}
-
-	return n.getNodeVersion(ctx, objVersion)
+func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) ([]*data.NodeVersion, error) {
+	var versionsToDelete []*data.NodeVersion
+	versions, err := n.treeService.GetVersions(ctx, bkt, obj.Name)
+	if err != nil {
+		if stderrors.Is(err, ErrNodeNotFound) {
+			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		}
+		return nil, err
+	}
+
+	if len(versions) == 0 {
+		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+	}
+
+	sort.Slice(versions, func(i, j int) bool {
+		return versions[i].Timestamp < versions[j].Timestamp
+	})
+
+	var matchFn func(nv *data.NodeVersion) bool
+
+	switch {
+	case obj.VersionID == data.UnversionedObjectVersionID:
+		matchFn = func(nv *data.NodeVersion) bool {
+			return nv.IsUnversioned
+		}
+	case len(obj.VersionID) == 0:
+		latest := versions[len(versions)-1]
+		if latest.IsUnversioned {
+			matchFn = func(nv *data.NodeVersion) bool {
+				return nv.IsUnversioned
+			}
+		} else {
+			matchFn = func(nv *data.NodeVersion) bool {
+				return nv.ID == latest.ID
+			}
+		}
+	default:
+		matchFn = func(nv *data.NodeVersion) bool {
+			return nv.OID.EncodeToString() == obj.VersionID
+		}
+	}
+
+	var oids []string
+	for _, v := range versions {
+		if matchFn(v) {
+			versionsToDelete = append(versionsToDelete, v)
+			if !v.IsDeleteMarker {
+				oids = append(oids, v.OID.EncodeToString())
+			}
+		}
+	}
+
+	if len(versionsToDelete) == 0 {
+		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+	}
+
+	n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids))
+
+	return versionsToDelete, nil
 }

 func (n *Layer) getLastNodeVersion(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
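Note: the `matchFn` switch above encodes three deletion modes. A runnable, simplified walkthrough of the selection rule (types and IDs are stand-ins for `data.NodeVersion` and `data.UnversionedObjectVersionID`):

```go
package main

import "fmt"

// Simplified stand-in for data.NodeVersion, enough to show the selection rule.
type nodeVersion struct {
	ID            uint64
	OID           string
	IsUnversioned bool
	Timestamp     uint64
}

const unversionedID = "null" // stand-in for data.UnversionedObjectVersionID

// selectVersions mirrors the matchFn switch in getNodeVersionsToDelete:
//   - versionID == "null": every unversioned entry is deleted;
//   - versionID == "":     only the latest version (or all unversioned ones,
//     if the latest is itself unversioned);
//   - otherwise:           the entry whose OID equals the requested id.
func selectVersions(versions []nodeVersion, versionID string) []nodeVersion {
	var match func(nv nodeVersion) bool
	switch {
	case versionID == unversionedID:
		match = func(nv nodeVersion) bool { return nv.IsUnversioned }
	case versionID == "":
		latest := versions[len(versions)-1] // versions sorted by Timestamp
		if latest.IsUnversioned {
			match = func(nv nodeVersion) bool { return nv.IsUnversioned }
		} else {
			match = func(nv nodeVersion) bool { return nv.ID == latest.ID }
		}
	default:
		match = func(nv nodeVersion) bool { return nv.OID == versionID }
	}

	var out []nodeVersion
	for _, v := range versions {
		if match(v) {
			out = append(out, v)
		}
	}
	return out
}

func main() {
	vs := []nodeVersion{
		{ID: 1, OID: "a", IsUnversioned: true, Timestamp: 1},
		{ID: 2, OID: "b", IsUnversioned: true, Timestamp: 2},
		{ID: 3, OID: "c", Timestamp: 3},
	}
	fmt.Println(len(selectVersions(vs, "null"))) // 2: both unversioned entries
	fmt.Println(len(selectVersions(vs, "")))     // 1: only the latest (ID 3)
	fmt.Println(len(selectVersions(vs, "b")))    // 1: exact OID match
}
```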
@@ -676,7 +750,7 @@ func (n *Layer) removeCombinedObject(ctx context.Context, bkt *data.BucketInfo,
 	}

 	var parts []*data.PartInfo
-	if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
+	if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
 		return fmt.Errorf("unmarshal combined object parts: %w", err)
 	}
@@ -699,7 +773,7 @@ func (n *Layer) removeCombinedObject(ctx context.Context, bkt *data.BucketInfo,
 // DeleteObjects from the storage.
 func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject {
 	for i, obj := range p.Objects {
-		p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj)
+		p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj, p.NetworkInfo)
 		if p.IsMultiple && p.Objects[i].Error != nil {
 			n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error))
 		}
@@ -724,10 +798,10 @@ func (n *Layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.
 		return nil, errors.GetAPIError(errors.ErrBucketAlreadyExists)
 	}

-func (n *Layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
+func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, error) {
 	var cnrID cid.ID
 	if err := cnrID.DecodeString(name); err != nil {
-		if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
+		if cnrID, err = n.resolver.Resolve(ctx, zone, name); err != nil {
 			return cid.ID{}, err
 		}
@@ -738,6 +812,7 @@ func (n *Layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error)
 }

 func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
+	if !p.SkipCheck {
 		res, _, err := n.getAllObjectsVersions(ctx, commonVersionsListingParams{
 			BktInfo: p.BktInfo,
 			MaxKeys: 1,
@@ -749,7 +824,41 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
 		if len(res) != 0 {
 			return errors.GetAPIError(errors.ErrBucketNotEmpty)
 		}
+	}

 	n.cache.DeleteBucket(p.BktInfo)
-	return n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
+
+	corsObj, err := n.treeService.GetBucketCORS(ctx, p.BktInfo)
+	if err != nil {
+		n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err))
+	}
+
+	lifecycleObj, treeErr := n.treeService.GetBucketLifecycleConfiguration(ctx, p.BktInfo)
+	if treeErr != nil {
+		n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr))
+	}
+
+	err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
+	if err != nil {
+		return fmt.Errorf("delete container: %w", err)
+	}
+
+	if !corsObj.Container().Equals(p.BktInfo.CID) && !corsObj.Container().Equals(cid.ID{}) {
+		n.deleteCORSObject(ctx, p.BktInfo, corsObj)
+	}
+
+	if treeErr == nil && !lifecycleObj.Container().Equals(p.BktInfo.CID) {
+		n.deleteLifecycleObject(ctx, p.BktInfo, lifecycleObj)
+	}
+
+	return nil
+}
+
+func (n *Layer) GetNetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) {
+	networkInfo, err := n.frostFS.NetworkInfo(ctx)
+	if err != nil {
+		return networkInfo, fmt.Errorf("get network info: %w", err)
+	}
+
+	return networkInfo, nil
 }
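Note: `DeleteBucket` now resolves the CORS and lifecycle object addresses before removing the container, then deletes those auxiliary objects only when they live outside the bucket's own container (objects inside it die with the container). `GetNetworkInfo` exists so callers can stamp delete markers with the current epoch. A sketch of wiring it into `DeleteObjects`; this wrapper is hypothetical, only the `DeleteObjectParams` fields come from this diff:

```go
// deleteWithEpoch is a hypothetical helper inside api/layer.
func (n *Layer) deleteWithEpoch(ctx context.Context, bkt *data.BucketInfo,
	settings *data.BucketSettings, objs []*VersionedObject) ([]*VersionedObject, error) {
	networkInfo, err := n.GetNetworkInfo(ctx)
	if err != nil {
		return nil, err
	}

	// NetworkInfo supplies CurrentEpoch() for new delete markers.
	return n.DeleteObjects(ctx, &DeleteObjectParams{
		BktInfo:     bkt,
		Settings:    settings,
		Objects:     objs,
		NetworkInfo: networkInfo,
	}), nil
}
```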
api/layer/lifecycle.go (new file, 148 lines)
@@ -0,0 +1,148 @@
+package layer
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"go.uber.org/zap"
+)
+
+type PutBucketLifecycleParams struct {
+	BktInfo         *data.BucketInfo
+	LifecycleCfg    *data.LifecycleConfiguration
+	LifecycleReader io.Reader
+	CopiesNumbers   []uint32
+	MD5Hash         string
+}
+
+func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
+	prm := PrmObjectCreate{
+		Payload:      p.LifecycleReader,
+		Filepath:     p.BktInfo.LifecycleConfigurationObjectName(),
+		CreationTime: TimeNow(ctx),
+	}
+
+	var lifecycleBkt *data.BucketInfo
+	if n.lifecycleCnrInfo == nil {
+		lifecycleBkt = p.BktInfo
+		prm.CopiesNumber = p.CopiesNumbers
+	} else {
+		lifecycleBkt = n.lifecycleCnrInfo
+		prm.PrmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	prm.Container = lifecycleBkt.CID
+
+	createdObj, err := n.objectPutAndHash(ctx, prm, lifecycleBkt)
+	if err != nil {
+		return fmt.Errorf("put lifecycle object: %w", err)
+	}
+
+	hashBytes, err := base64.StdEncoding.DecodeString(p.MD5Hash)
+	if err != nil {
+		return apiErr.GetAPIError(apiErr.ErrInvalidDigest)
+	}
+
+	if !bytes.Equal(hashBytes, createdObj.MD5Sum) {
+		n.deleteLifecycleObject(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
+
+		return apiErr.GetAPIError(apiErr.ErrInvalidDigest)
+	}
+
+	objsToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
+	objsToDeleteNotFound := errors.Is(err, ErrNoNodeToRemove)
+	if err != nil && !objsToDeleteNotFound {
+		return err
+	}
+
+	if !objsToDeleteNotFound {
+		for _, addr := range objsToDelete {
+			n.deleteLifecycleObject(ctx, p.BktInfo, addr)
+		}
+	}
+
+	n.cache.PutLifecycleConfiguration(n.BearerOwner(ctx), p.BktInfo, p.LifecycleCfg)
+
+	return nil
+}
+
+// deleteLifecycleObject removes object and logs in case of error.
+func (n *Layer) deleteLifecycleObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
+	var prmAuth PrmAuth
+	lifecycleBkt := bktInfo
+	if !addr.Container().Equals(bktInfo.CID) {
+		lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	if err := n.objectDeleteWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth); err != nil {
+		n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err),
+			zap.String("cid", lifecycleBkt.CID.EncodeToString()),
+			zap.String("oid", addr.Object().EncodeToString()))
+	}
+}
+
+func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.LifecycleConfiguration, error) {
+	owner := n.BearerOwner(ctx)
+	if cfg := n.cache.GetLifecycleConfiguration(owner, bktInfo); cfg != nil {
+		return cfg, nil
+	}
+
+	addr, err := n.treeService.GetBucketLifecycleConfiguration(ctx, bktInfo)
+	objNotFound := errors.Is(err, ErrNodeNotFound)
+	if err != nil && !objNotFound {
+		return nil, err
+	}
+
+	if objNotFound {
+		return nil, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), err.Error())
+	}
+
+	var prmAuth PrmAuth
+	lifecycleBkt := bktInfo
+	if !addr.Container().Equals(bktInfo.CID) {
+		lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	obj, err := n.objectGetWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth)
+	if err != nil {
+		return nil, fmt.Errorf("get lifecycle object: %w", err)
+	}
+
+	lifecycleCfg := &data.LifecycleConfiguration{}
+
+	if err = xml.NewDecoder(obj.Payload).Decode(&lifecycleCfg); err != nil {
+		return nil, fmt.Errorf("unmarshal lifecycle configuration: %w", err)
+	}
+
+	n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)
+
+	return lifecycleCfg, nil
+}
+
+func (n *Layer) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) error {
+	objs, err := n.treeService.DeleteBucketLifecycleConfiguration(ctx, bktInfo)
+	objsNotFound := errors.Is(err, ErrNoNodeToRemove)
+	if err != nil && !objsNotFound {
+		return err
+	}
+	if !objsNotFound {
+		for _, addr := range objs {
+			n.deleteLifecycleObject(ctx, bktInfo, addr)
+		}
+	}
+
+	n.cache.DeleteLifecycleConfiguration(bktInfo)
+
+	return nil
+}
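Note: the lifecycle configuration is stored as a FrostFS object (in the bucket's container, or in a dedicated lifecycle container when one is configured), and the caller-supplied base64 MD5 is verified against what was actually written, with rollback on mismatch. A usage sketch, assuming it compiles inside `api/layer`; the test file below exercises the same flow end to end:

```go
func putLifecycle(ctx context.Context, n *Layer, bktInfo *data.BucketInfo,
	cfg *data.LifecycleConfiguration) error {
	raw, err := xml.Marshal(cfg)
	if err != nil {
		return err
	}
	sum := md5.Sum(raw) // must match what the gateway stores, or the put is rolled back

	return n.PutBucketLifecycleConfiguration(ctx, &PutBucketLifecycleParams{
		BktInfo:         bktInfo,
		LifecycleCfg:    cfg,
		LifecycleReader: bytes.NewReader(raw),
		MD5Hash:         base64.StdEncoding.EncodeToString(sum[:]),
	})
}
```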
api/layer/lifecycle_test.go (new file, 65 lines)
@@ -0,0 +1,65 @@
+package layer
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/xml"
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBucketLifecycle(t *testing.T) {
+	tc := prepareContext(t)
+
+	lifecycle := &data.LifecycleConfiguration{
+		XMLName: xml.Name{
+			Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
+			Local: "LifecycleConfiguration",
+		},
+		Rules: []data.LifecycleRule{
+			{
+				Status: data.LifecycleStatusEnabled,
+				Expiration: &data.LifecycleExpiration{
+					Days: ptr(21),
+				},
+			},
+		},
+	}
+	lifecycleBytes, err := xml.Marshal(lifecycle)
+	require.NoError(t, err)
+	hash := md5.New()
+	hash.Write(lifecycleBytes)
+
+	_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.Equal(t, apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), frostfsErrors.UnwrapErr(err))
+
+	err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+
+	err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
+		BktInfo:         tc.bktInfo,
+		LifecycleCfg:    lifecycle,
+		LifecycleReader: bytes.NewReader(lifecycleBytes),
+		MD5Hash:         base64.StdEncoding.EncodeToString(hash.Sum(nil)),
+	})
+	require.NoError(t, err)
+
+	cfg, err := tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+	require.Equal(t, *lifecycle, *cfg)
+
+	err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+
+	_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.Equal(t, apiErr.GetAPIError(apiErr.ErrNoSuchLifecycleConfiguration), frostfsErrors.UnwrapErr(err))
+}
+
+func ptr[T any](t T) *T {
+	return &t
+}
@@ -150,6 +150,11 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
 		metaSize += len(p.Data.TagSet)
 	}

+	networkInfo, err := n.frostFS.NetworkInfo(ctx)
+	if err != nil {
+		return fmt.Errorf("get network info: %w", err)
+	}
+
 	info := &data.MultipartInfo{
 		Key:      p.Info.Key,
 		UploadID: p.Info.UploadID,
@@ -157,6 +162,7 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
 		Created:       TimeNow(ctx),
 		Meta:          make(map[string]string, metaSize),
 		CopiesNumbers: p.CopiesNumbers,
+		CreationEpoch: networkInfo.CurrentEpoch(),
 	}

 	for key, val := range p.Header {
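Note: recording `CreationEpoch` on the multipart upload (and, elsewhere in this diff, on new object versions and delete markers) lets epoch-based machinery such as lifecycle expiration age entries without wall-clock timestamps. A trivial sketch of the arithmetic; the assumption that age is measured as an epoch difference is mine, not stated in the diff:

```go
// epochsOld returns how many epochs have passed since creation.
func epochsOld(currentEpoch, creationEpoch uint64) uint64 {
	if currentEpoch < creationEpoch {
		return 0
	}
	return currentEpoch - creationEpoch
}
```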
@@ -229,7 +235,7 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
 	prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
 	prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)

-	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo)
 	if err != nil {
 		return nil, err
 	}
@@ -238,21 +244,21 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
 		if err != nil {
 			return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
 		}
-		if hex.EncodeToString(hashBytes) != hex.EncodeToString(md5Hash) {
+		if hex.EncodeToString(hashBytes) != hex.EncodeToString(createdObj.MD5Sum) {
 			prm := PrmObjectDelete{
-				Object:    id,
+				Object:    createdObj.ID,
 				Container: bktInfo.CID,
 			}
 			n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
 			err = n.frostFS.DeleteObject(ctx, prm)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
 			return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
 		}
 	}
 	if p.Info.Encryption.Enabled() {
-		size = decSize
+		createdObj.Size = decSize
 	}

 	if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
@@ -260,10 +266,10 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
 		if err != nil {
 			return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
 		}
-		if !bytes.Equal(contentHashBytes, hash) {
-			err = n.objectDelete(ctx, bktInfo, id)
+		if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
+			err = n.objectDelete(ctx, bktInfo, createdObj.ID)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
 			return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
 		}
@@ -271,34 +277,36 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf

 	n.reqLogger(ctx).Debug(logs.UploadPart,
 		zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
-		zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
+		zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))

 	partInfo := &data.PartInfo{
 		Key:      p.Info.Key,
 		UploadID: p.Info.UploadID,
 		Number:   p.PartNumber,
-		OID:      id,
-		Size:     size,
-		ETag:     hex.EncodeToString(hash),
+		OID:      createdObj.ID,
+		Size:     createdObj.Size,
+		ETag:     hex.EncodeToString(createdObj.HashSum),
 		Created:  prm.CreationTime,
-		MD5:      hex.EncodeToString(md5Hash),
+		MD5:      hex.EncodeToString(createdObj.MD5Sum),
 	}

-	oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
+	oldPartIDs, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
 	oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
 	if err != nil && !oldPartIDNotFound {
 		return nil, err
 	}
 	if !oldPartIDNotFound {
-		if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
-			n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
-				zap.String("cid", bktInfo.CID.EncodeToString()),
-				zap.String("oid", oldPartID.EncodeToString()))
+		for _, oldPartID := range oldPartIDs {
+			if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
+				n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
+					zap.String("cid", bktInfo.CID.EncodeToString()),
+					zap.String("oid", oldPartID.EncodeToString()))
+			}
 		}
 	}

 	objInfo := &data.ObjectInfo{
-		ID:  id,
+		ID:  createdObj.ID,
 		CID: bktInfo.CID,

 		Owner: bktInfo.Owner,
|
||||||
|
|
||||||
var multipartObjetSize uint64
|
var multipartObjetSize uint64
|
||||||
var encMultipartObjectSize uint64
|
var encMultipartObjectSize uint64
|
||||||
parts := make([]*data.PartInfo, 0, len(p.Parts))
|
parts := make([]*data.PartInfoExtended, 0, len(p.Parts))
|
||||||
|
|
||||||
var completedPartsHeader strings.Builder
|
var completedPartsHeader strings.Builder
|
||||||
md5Hash := md5.New()
|
md5Hash := md5.New()
|
||||||
for i, part := range p.Parts {
|
for i, part := range p.Parts {
|
||||||
partInfo := partsInfo[part.PartNumber]
|
partInfo := partsInfo.Extract(part.PartNumber, data.UnQuote(part.ETag), n.features.MD5Enabled())
|
||||||
if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
|
if partInfo == nil {
|
||||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
||||||
}
|
}
|
||||||
delete(partsInfo, part.PartNumber)
|
|
||||||
|
|
||||||
// for the last part we have no minimum size limit
|
// for the last part we have no minimum size limit
|
||||||
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
|
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
|
||||||
|
@ -469,7 +476,8 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
||||||
|
|
||||||
var addr oid.Address
|
var addr oid.Address
|
||||||
addr.SetContainer(p.Info.Bkt.CID)
|
addr.SetContainer(p.Info.Bkt.CID)
|
||||||
for _, partInfo := range partsInfo {
|
for _, prts := range partsInfo {
|
||||||
|
for _, partInfo := range prts {
|
||||||
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
|
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
|
||||||
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
|
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
|
||||||
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
|
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
|
||||||
|
@ -478,6 +486,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
||||||
addr.SetObject(partInfo.OID)
|
addr.SetObject(partInfo.OID)
|
||||||
n.cache.DeleteObject(addr)
|
n.cache.DeleteObject(addr)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
|
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
|
||||||
}
|
}
|
||||||
|
@@ -548,12 +557,14 @@ func (n *Layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
 		return err
 	}

-	for _, info := range parts {
+	for _, infos := range parts {
+		for _, info := range infos {
 			if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
 				n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
 					zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
 			}
 		}
+	}

 	return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo)
 }
@@ -575,7 +586,12 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn

 	parts := make([]*Part, 0, len(partsInfo))

-	for _, partInfo := range partsInfo {
+	for _, infos := range partsInfo {
+		sort.Slice(infos, func(i, j int) bool {
+			return infos[i].Timestamp < infos[j].Timestamp
+		})
+
+		partInfo := infos[len(infos)-1]
 		parts = append(parts, &Part{
 			ETag:         data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
 			LastModified: partInfo.Created.UTC().Format(time.RFC3339),
@@ -603,16 +619,31 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn

 	if len(parts) > p.MaxParts {
 		res.IsTruncated = true
-		res.NextPartNumberMarker = parts[p.MaxParts-1].PartNumber
 		parts = parts[:p.MaxParts]
 	}

+	res.NextPartNumberMarker = parts[len(parts)-1].PartNumber
 	res.Parts = parts

 	return &res, nil
 }

-func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
+type PartsInfo map[int][]*data.PartInfoExtended
+
+func (p PartsInfo) Extract(part int, etag string, md5Enabled bool) *data.PartInfoExtended {
+	parts := p[part]
+
+	for i, info := range parts {
+		if info.GetETag(md5Enabled) == etag {
+			p[part] = append(parts[:i], parts[i+1:]...)
+			return info
+		}
+	}
+
+	return nil
+}
+
+func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, PartsInfo, error) {
 	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
 	if err != nil {
 		if errors.Is(err, ErrNodeNotFound) {
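Note: a part number may now map to several stored objects (re-uploads of the same part), so `PartsInfo.Extract` picks the entry whose ETag matches and removes it from the map; whatever remains after assembly is garbage for the cleanup loop in `CompleteMultipartUpload`. A worked example with simplified types:

```go
package main

import "fmt"

type partInfo struct {
	OID  string
	ETag string
}

type partsInfo map[int][]*partInfo

// extract mirrors PartsInfo.Extract: match by ETag, remove from the map.
func (p partsInfo) extract(part int, etag string) *partInfo {
	parts := p[part]
	for i, info := range parts {
		if info.ETag == etag {
			p[part] = append(parts[:i], parts[i+1:]...)
			return info
		}
	}
	return nil
}

func main() {
	pi := partsInfo{1: {{OID: "old", ETag: "aaa"}, {OID: "new", ETag: "bbb"}}}

	got := pi.extract(1, "bbb") // the client completed with the second upload
	fmt.Println(got.OID)        // new
	fmt.Println(len(pi[1]))     // 1: the stale "old" object remains for cleanup
}
```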
@@ -626,11 +657,11 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
 		return nil, nil, err
 	}

-	res := make(map[int]*data.PartInfo, len(parts))
+	res := make(map[int][]*data.PartInfoExtended, len(parts))
 	partsNumbers := make([]int, len(parts))
 	oids := make([]string, len(parts))
 	for i, part := range parts {
-		res[part.Number] = part
+		res[part.Number] = append(res[part.Number], part)
 		partsNumbers[i] = part.Number
 		oids[i] = part.OID.EncodeToString()
 	}
@@ -68,20 +68,14 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {

 // objectHead returns all object's headers.
 func (n *Layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
-	prm := PrmObjectRead{
-		Container:  bktInfo.CID,
-		Object:     idObj,
-		WithHeader: true,
+	prm := PrmObjectHead{
+		Container: bktInfo.CID,
+		Object:    idObj,
 	}

 	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

-	res, err := n.frostFS.ReadObject(ctx, prm)
-	if err != nil {
-		return nil, err
-	}
-
-	return res.Head, nil
+	return n.frostFS.HeadObject(ctx, prm)
 }

 func (n *Layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
@@ -100,7 +94,7 @@ func (n *Layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Re
 	}

 	var parts []*data.PartInfo
-	if err = json.Unmarshal(combinedObj.Payload(), &parts); err != nil {
+	if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
 		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
 	}
@@ -132,16 +126,27 @@ func (n *Layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Re
 // initializes payload reader of the FrostFS object.
 // Zero range corresponds to full payload (panics if only offset is set).
 func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
-	prm := PrmObjectRead{
-		Container:    p.bktInfo.CID,
-		Object:       p.oid,
-		WithPayload:  true,
-		PayloadRange: [2]uint64{p.off, p.ln},
-	}
-
-	n.prepareAuthParameters(ctx, &prm.PrmAuth, p.bktInfo.Owner)
-
-	res, err := n.frostFS.ReadObject(ctx, prm)
+	var prmAuth PrmAuth
+	n.prepareAuthParameters(ctx, &prmAuth, p.bktInfo.Owner)
+
+	if p.off+p.ln != 0 {
+		prm := PrmObjectRange{
+			PrmAuth:      prmAuth,
+			Container:    p.bktInfo.CID,
+			Object:       p.oid,
+			PayloadRange: [2]uint64{p.off, p.ln},
+		}
+
+		return n.frostFS.RangeObject(ctx, prm)
+	}
+
+	prm := PrmObjectGet{
+		PrmAuth:   prmAuth,
+		Container: p.bktInfo.CID,
+		Object:    p.oid,
+	}
+
+	res, err := n.frostFS.GetObject(ctx, prm)
 	if err != nil {
 		return nil, err
 	}
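Note: the monolithic `ReadObject` is split into `RangeObject` and `GetObject`, dispatched on the range: `p.off+p.ln != 0` selects a ranged read, while the zero range keeps its documented meaning of "full payload". The branch condition in miniature:

```go
package main

import "fmt"

// wantsRange mirrors the branch in initFrostFSObjectPayloadReader: the zero
// range is reserved for reading the whole payload.
func wantsRange(off, ln uint64) bool { return off+ln != 0 }

func main() {
	fmt.Println(wantsRange(0, 0))  // false: full-object GET
	fmt.Println(wantsRange(0, 10)) // true: ranged read of the first 10 bytes
}
```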
@@ -150,22 +155,25 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
 }

 // objectGet returns an object with payload in the object.
-func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*object.Object, error) {
-	prm := PrmObjectRead{
-		Container:   bktInfo.CID,
-		Object:      objID,
-		WithHeader:  true,
-		WithPayload: true,
-	}
-
-	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
-
-	res, err := n.frostFS.ReadObject(ctx, prm)
-	if err != nil {
-		return nil, err
-	}
-
-	return res.Head, nil
+func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*Object, error) {
+	return n.objectGetBase(ctx, bktInfo, objID, PrmAuth{})
+}
+
+// objectGetWithAuth returns an object with payload in the object. Uses provided PrmAuth.
+func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
+	return n.objectGetBase(ctx, bktInfo, objID, auth)
+}
+
+func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
+	prm := PrmObjectGet{
+		PrmAuth:   auth,
+		Container: bktInfo.CID,
+		Object:    objID,
+	}
+
+	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
+
+	return n.frostFS.GetObject(ctx, prm)
 }

 // MimeByFilePath detect mime type by file path extension.
@@ -263,7 +271,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		prm.Attributes = append(prm.Attributes, [2]string{k, v})
 	}

-	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return nil, err
 	}
@@ -272,10 +280,10 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		if err != nil {
 			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
 		}
-		if !bytes.Equal(headerMd5Hash, md5Hash) {
-			err = n.objectDelete(ctx, p.BktInfo, id)
+		if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) {
+			err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
 			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
 		}
@@ -286,25 +294,26 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		if err != nil {
 			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
 		}
-		if !bytes.Equal(contentHashBytes, hash) {
-			err = n.objectDelete(ctx, p.BktInfo, id)
+		if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
+			err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
 			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
 		}
 	}

-	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 	now := TimeNow(ctx)
 	newVersion := &data.NodeVersion{
 		BaseNodeVersion: data.BaseNodeVersion{
-			OID:      id,
-			ETag:     hex.EncodeToString(hash),
+			OID:      createdObj.ID,
+			ETag:     hex.EncodeToString(createdObj.HashSum),
 			FilePath: p.Object,
 			Size:     p.Size,
 			Created:  &now,
 			Owner:    &n.gateOwner,
+			CreationEpoch: createdObj.CreationEpoch,
 		},
 		IsUnversioned: !bktSettings.VersioningEnabled(),
 		IsCombined:    p.Header[MultipartObjectSize] != "",
@@ -312,7 +321,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 	if len(p.CompleteMD5Hash) > 0 {
 		newVersion.MD5 = p.CompleteMD5Hash
 	} else {
-		newVersion.MD5 = hex.EncodeToString(md5Hash)
+		newVersion.MD5 = hex.EncodeToString(createdObj.MD5Sum)
 	}

 	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
@@ -324,7 +333,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		ObjVersion: &data.ObjectVersion{
 			BktInfo:    p.BktInfo,
 			ObjectName: p.Object,
-			VersionID:  id.EncodeToString(),
+			VersionID:  createdObj.ID.EncodeToString(),
 		},
 		NewLock:       p.Lock,
 		CopiesNumbers: p.CopiesNumbers,
@@ -339,13 +348,13 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 	n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

 	objInfo := &data.ObjectInfo{
-		ID:  id,
+		ID:  createdObj.ID,
 		CID: p.BktInfo.CID,

 		Owner:       n.gateOwner,
 		Bucket:      p.BktInfo.Name,
 		Name:        p.Object,
-		Size:        size,
+		Size:        createdObj.Size,
 		Created:     prm.CreationTime,
 		Headers:     p.Header,
 		ContentType: p.Header[api.ContentType],
@@ -460,7 +469,17 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb

 // objectDelete puts tombstone object into frostfs.
 func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
+	return n.objectDeleteBase(ctx, bktInfo, idObj, PrmAuth{})
+}
+
+// objectDeleteWithAuth puts tombstone object into frostfs. Uses provided PrmAuth.
+func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
+	return n.objectDeleteBase(ctx, bktInfo, idObj, auth)
+}
+
+func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
 	prm := PrmObjectDelete{
+		PrmAuth:   auth,
 		Container: bktInfo.CID,
 		Object:    idObj,
 	}
@@ -473,8 +492,7 @@ func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb
 }

 // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
-// Returns object ID and payload sha256 hash.
-func (n *Layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
+func (n *Layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (*data.CreatedObjectInfo, error) {
 	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
 	prm.ClientCut = n.features.ClientCut()
 	prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
|
||||||
hash.Write(buf)
|
hash.Write(buf)
|
||||||
md5Hash.Write(buf)
|
md5Hash.Write(buf)
|
||||||
})
|
})
|
||||||
id, err := n.frostFS.CreateObject(ctx, prm)
|
res, err := n.frostFS.CreateObject(ctx, prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
|
if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
|
||||||
n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
|
n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0, oid.ID{}, nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
|
return &data.CreatedObjectInfo{
|
||||||
|
ID: res.ObjectID,
|
||||||
|
Size: size,
|
||||||
|
HashSum: hash.Sum(nil),
|
||||||
|
MD5Sum: md5Hash.Sum(nil),
|
||||||
|
CreationEpoch: res.CreationEpoch,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type logWrapper struct {
|
type logWrapper struct {
|
||||||
|
|
|
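Note: `objectPutAndHash` now returns a single `*data.CreatedObjectInfo` instead of four loose values, which is why every caller in this diff switches to `createdObj.ID`, `createdObj.HashSum`, and so on. A fragment showing the consuming pattern, assuming it runs inside `api/layer`:

```go
createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo)
if err != nil {
	return err // on failure the payload is already drained to avoid goroutine leaks
}

etag := hex.EncodeToString(createdObj.HashSum) // SHA-256, used as the ETag
md5sum := hex.EncodeToString(createdObj.MD5Sum)
_ = createdObj.CreationEpoch // stamped into tree versions
_, _ = etag, md5sum
```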
@@ -44,7 +44,7 @@ func TestGoroutinesDontLeakInPutAndHash(t *testing.T) {

 	expErr := errors.New("some error")
 	tc.testFrostFS.SetObjectPutError(tc.obj, expErr)
-	_, _, _, _, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
+	_, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
 	require.ErrorIs(t, err, expErr)
 	require.Empty(t, payload.Len(), "body must be read out otherwise goroutines can leak in wrapReader")
 }
api/layer/patch.go (new file, 264 lines)
@@ -0,0 +1,264 @@
+package layer
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+)
+
+type PatchObjectParams struct {
+	Object            *data.ExtendedObjectInfo
+	BktInfo           *data.BucketInfo
+	NewBytes          io.Reader
+	Range             *RangeParams
+	VersioningEnabled bool
+	CopiesNumbers     []uint32
+}
+
+func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
+	if p.Object.ObjectInfo.Headers[AttributeDecryptedSize] != "" {
+		return nil, fmt.Errorf("patch encrypted object")
+	}
+
+	if p.Object.ObjectInfo.Headers[MultipartObjectSize] != "" {
+		return n.patchMultipartObject(ctx, p)
+	}
+
+	prmPatch := PrmObjectPatch{
+		Container:  p.BktInfo.CID,
+		Object:     p.Object.ObjectInfo.ID,
+		Payload:    p.NewBytes,
+		Offset:     p.Range.Start,
+		Length:     p.Range.End - p.Range.Start + 1,
+		ObjectSize: p.Object.ObjectInfo.Size,
+	}
+	n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
+
+	createdObj, err := n.patchObject(ctx, prmPatch)
+	if err != nil {
+		return nil, fmt.Errorf("patch object: %w", err)
+	}
+
+	newVersion := &data.NodeVersion{
+		BaseNodeVersion: data.BaseNodeVersion{
+			OID:           createdObj.ID,
+			ETag:          hex.EncodeToString(createdObj.HashSum),
+			FilePath:      p.Object.ObjectInfo.Name,
+			Size:          createdObj.Size,
+			Created:       &p.Object.ObjectInfo.Created,
+			Owner:         &n.gateOwner,
+			CreationEpoch: p.Object.NodeVersion.CreationEpoch,
+		},
+		IsUnversioned: !p.VersioningEnabled,
+		IsCombined:    p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
+	}
+
+	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
+		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
+	}
+
+	p.Object.ObjectInfo.ID = createdObj.ID
+	p.Object.ObjectInfo.Size = createdObj.Size
+	p.Object.ObjectInfo.MD5Sum = ""
+	p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
+	p.Object.NodeVersion = newVersion
+
+	return p.Object, nil
+}
+
+func (n *Layer) patchObject(ctx context.Context, p PrmObjectPatch) (*data.CreatedObjectInfo, error) {
+	objID, err := n.frostFS.PatchObject(ctx, p)
+	if err != nil {
+		return nil, fmt.Errorf("patch object: %w", err)
+	}
+
+	prmHead := PrmObjectHead{
+		PrmAuth:   p.PrmAuth,
+		Container: p.Container,
+		Object:    objID,
+	}
+	obj, err := n.frostFS.HeadObject(ctx, prmHead)
+	if err != nil {
+		return nil, fmt.Errorf("head object: %w", err)
+	}
+
+	payloadChecksum, _ := obj.PayloadChecksum()
+
+	return &data.CreatedObjectInfo{
+		ID:      objID,
+		Size:    obj.PayloadSize(),
+		HashSum: payloadChecksum.Value(),
+	}, nil
+}
+
+func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
+	combinedObj, err := n.objectGet(ctx, p.BktInfo, p.Object.ObjectInfo.ID)
+	if err != nil {
+		return nil, fmt.Errorf("get combined object '%s': %w", p.Object.ObjectInfo.ID.EncodeToString(), err)
+	}
+
+	var parts []*data.PartInfo
+	if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
+		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
+	}
+
+	prmPatch := PrmObjectPatch{
+		Container: p.BktInfo.CID,
+	}
+	n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
+
+	off, ln := p.Range.Start, p.Range.End-p.Range.Start+1
+	var multipartObjectSize uint64
+	for i, part := range parts {
+		if off > part.Size || (off == part.Size && i != len(parts)-1) || ln == 0 {
+			multipartObjectSize += part.Size
+			if ln != 0 {
+				off -= part.Size
+			}
+			continue
+		}
+
+		var createdObj *data.CreatedObjectInfo
+		createdObj, off, ln, err = n.patchPart(ctx, part, p, &prmPatch, off, ln, i == len(parts)-1)
+		if err != nil {
+			return nil, fmt.Errorf("patch part: %w", err)
+		}
+
+		parts[i].OID = createdObj.ID
+		parts[i].Size = createdObj.Size
+		parts[i].MD5 = ""
+		parts[i].ETag = hex.EncodeToString(createdObj.HashSum)
+
+		multipartObjectSize += createdObj.Size
+	}
+
+	return n.updateCombinedObject(ctx, parts, multipartObjectSize, p)
+}
+
+// Returns patched part info, updated offset and length.
+func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObjectParams, prmPatch *PrmObjectPatch, off, ln uint64, lastPart bool) (*data.CreatedObjectInfo, uint64, uint64, error) {
+	if off == 0 && ln >= part.Size {
+		curLen := part.Size
+		if lastPart {
+			curLen = ln
+		}
+		prm := PrmObjectCreate{
+			Container:    p.BktInfo.CID,
+			Payload:      io.LimitReader(p.NewBytes, int64(curLen)),
+			CreationTime: part.Created,
+			CopiesNumber: p.CopiesNumbers,
+		}
+
+		createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+		if err != nil {
+			return nil, 0, 0, fmt.Errorf("put new part object '%s': %w", part.OID.EncodeToString(), err)
+		}
+
+		ln -= curLen
+
+		return createdObj, off, ln, err
+	}
+
+	curLen := ln
+	if off+curLen > part.Size && !lastPart {
+		curLen = part.Size - off
+	}
+	prmPatch.Object = part.OID
+	prmPatch.ObjectSize = part.Size
+	prmPatch.Offset = off
+	prmPatch.Length = curLen
+
+	prmPatch.Payload = io.LimitReader(p.NewBytes, int64(prmPatch.Length))
+
+	createdObj, err := n.patchObject(ctx, *prmPatch)
+	if err != nil {
+		return nil, 0, 0, fmt.Errorf("patch part object '%s': %w", part.OID.EncodeToString(), err)
+	}
+
+	ln -= curLen
+	off = 0
+
+	return createdObj, off, ln, nil
+}
+
+func (n *Layer) updateCombinedObject(ctx context.Context, parts []*data.PartInfo, fullObjSize uint64, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
+	newParts, err := json.Marshal(parts)
+	if err != nil {
+		return nil, fmt.Errorf("marshal parts for combined object: %w", err)
+	}
+
+	var headerParts strings.Builder
+	for i, part := range parts {
+		headerPart := part.ToHeaderString()
+		if i != len(parts)-1 {
+			headerPart += ","
+		}
+		headerParts.WriteString(headerPart)
+	}
+
+	prm := PrmObjectCreate{
+		Container:    p.BktInfo.CID,
+		PayloadSize:  fullObjSize,
+		Filepath:     p.Object.ObjectInfo.Name,
+		Payload:      bytes.NewReader(newParts),
+		CreationTime: p.Object.ObjectInfo.Created,
+		CopiesNumber: p.CopiesNumbers,
+	}
+
+	prm.Attributes = make([][2]string, 0, len(p.Object.ObjectInfo.Headers)+1)
+
+	for k, v := range p.Object.ObjectInfo.Headers {
+		switch k {
+		case MultipartObjectSize:
+			prm.Attributes = append(prm.Attributes, [2]string{MultipartObjectSize, strconv.FormatUint(fullObjSize, 10)})
+		case UploadCompletedParts:
+			prm.Attributes = append(prm.Attributes, [2]string{UploadCompletedParts, headerParts.String()})
+		case api.ContentType:
+		default:
+			prm.Attributes = append(prm.Attributes, [2]string{k, v})
+		}
+	}
+	prm.Attributes = append(prm.Attributes, [2]string{api.ContentType, p.Object.ObjectInfo.ContentType})
+
+	createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	if err != nil {
+		return nil, fmt.Errorf("put new combined object: %w", err)
+	}
+
+	newVersion := &data.NodeVersion{
+		BaseNodeVersion: data.BaseNodeVersion{
+			OID:           createdObj.ID,
+			ETag:          hex.EncodeToString(createdObj.HashSum),
+			MD5:           hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts)),
+			FilePath:      p.Object.ObjectInfo.Name,
+			Size:          fullObjSize,
+			Created:       &p.Object.ObjectInfo.Created,
+			Owner:         &n.gateOwner,
+			CreationEpoch: p.Object.NodeVersion.CreationEpoch,
+		},
+		IsUnversioned: !p.VersioningEnabled,
+		IsCombined:    p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
+	}
+
+	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
+		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
+	}
+
+	p.Object.ObjectInfo.ID = createdObj.ID
+	p.Object.ObjectInfo.Size = createdObj.Size
+	p.Object.ObjectInfo.MD5Sum = hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts))
+	p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
+	p.Object.ObjectInfo.Headers[MultipartObjectSize] = strconv.FormatUint(fullObjSize, 10)
+	p.Object.ObjectInfo.Headers[UploadCompletedParts] = headerParts.String()
+	p.Object.NodeVersion = newVersion
+
+	return p.Object, nil
+}
|
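Editor's note: the range bookkeeping in patchMultipartObject and patchPart is the subtle piece above. Below is a minimal, self-contained sketch of how off and ln travel across parts; it folds the two patchPart branches into one inline computation and uses made-up part sizes, so treat it as an illustration, not the gateway's code:

package main

import "fmt"

// walkParts mirrors the loop in patchMultipartObject: skip whole parts that
// lie before the byte range, consume the range part by part, and stop once
// ln reaches zero.
func walkParts(sizes []uint64, start, end uint64) {
	off, ln := start, end-start+1
	for i, size := range sizes {
		if off > size || (off == size && i != len(sizes)-1) || ln == 0 {
			if ln != 0 {
				off -= size // the range starts after this part
			}
			continue
		}
		cur := ln
		if off+cur > size && i != len(sizes)-1 {
			cur = size - off // only the tail of this part is patched
		}
		fmt.Printf("part %d: patch offset=%d length=%d\n", i, off, cur)
		ln -= cur
		off = 0 // subsequent parts are patched from their beginning
	}
}

func main() {
	walkParts([]uint64{5, 5, 5}, 3, 11) // bytes 3..11 across three 5-byte parts
	// part 0: patch offset=3 length=2
	// part 1: patch offset=0 length=5
	// part 2: patch offset=0 length=2
}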
@@ -12,6 +12,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

@@ -125,8 +126,12 @@ func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
 		return oid.ID{}, err
 	}

-	_, id, _, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
-	return id, err
+	createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	return createdObj.ID, nil
 }

 func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion) (*data.LockInfo, error) {

@@ -159,24 +164,30 @@ func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
 		return cors, nil
 	}

-	objID, err := n.treeService.GetBucketCORS(ctx, bkt)
-	objIDNotFound := errorsStd.Is(err, ErrNodeNotFound)
-	if err != nil && !objIDNotFound {
+	addr, err := n.treeService.GetBucketCORS(ctx, bkt)
+	objNotFound := errorsStd.Is(err, ErrNodeNotFound)
+	if err != nil && !objNotFound {
 		return nil, err
 	}

-	if objIDNotFound {
+	if objNotFound {
 		return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
 	}

-	obj, err := n.objectGet(ctx, bkt, objID)
+	var prmAuth PrmAuth
+	corsBkt := bkt
+	if !addr.Container().Equals(bkt.CID) && !addr.Container().Equals(cid.ID{}) {
+		corsBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	obj, err := n.objectGetWithAuth(ctx, corsBkt, addr.Object(), prmAuth)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("get cors object: %w", err)
 	}

 	cors := &data.CORSConfiguration{}
-	if err = xml.NewDecoder(obj.Payload).Decode(&cors); err != nil {
+	if err = xml.Unmarshal(obj.Payload(), &cors); err != nil {
 		return nil, fmt.Errorf("unmarshal cors: %w", err)
 	}
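Editor's note on the shape change above: GetBucketCORS now returns an oid.Address instead of a bare oid.ID, so a tree node can point at a CORS object stored in a container other than the bucket's own; a zero container ID in the address is treated as "same container as the bucket". A minimal sketch of the address type as used above (frostfs-sdk-go API, calls taken from this diff):

	var addr oid.Address
	addr.SetContainer(bktInfo.CID) // cid.ID{} (zero) means the bucket's own container
	addr.SetObject(objID)

	sameContainer := addr.Container().Equals(bktInfo.CID)
	_ = addr.Object() // the object-ID half of the address
	_ = sameContainer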
@@ -6,6 +6,7 @@ import (
 	"io"
 	"sort"
 	"strings"
+	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -33,7 +34,7 @@ type TreeServiceMock struct {
 	locks      map[string]map[uint64]*data.LockInfo
 	tags       map[string]map[uint64]map[string]string
 	multiparts map[string]map[string][]*data.MultipartInfo
-	parts      map[string]map[int]*data.PartInfo
+	parts      map[string]map[int]*data.PartInfoExtended
 }

 func (t *TreeServiceMock) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {

@@ -92,7 +93,7 @@ func NewTreeService() *TreeServiceMock {
 		locks:      make(map[string]map[uint64]*data.LockInfo),
 		tags:       make(map[string]map[uint64]map[string]string),
 		multiparts: make(map[string]map[string][]*data.MultipartInfo),
-		parts:      make(map[string]map[int]*data.PartInfo),
+		parts:      make(map[string]map[int]*data.PartInfoExtended),
 	}
 }

@@ -110,36 +111,39 @@ func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.Bucke
 	return settings, nil
 }

-func (t *TreeServiceMock) GetBucketCORS(_ context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) GetBucketCORS(_ context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
 	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
 	if !ok {
-		return oid.ID{}, nil
+		return oid.Address{}, nil
 	}

 	node, ok := systemMap["cors"]
 	if !ok {
-		return oid.ID{}, nil
+		return oid.Address{}, nil
 	}

-	return node.OID, nil
+	var addr oid.Address
+	addr.SetContainer(bktInfo.CID)
+	addr.SetObject(node.OID)
+	return addr, nil
 }

-func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
+func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
 	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
 	if !ok {
 		systemMap = make(map[string]*data.BaseNodeVersion)
 	}

 	systemMap["cors"] = &data.BaseNodeVersion{
-		OID: objID,
+		OID: addr.Object(),
 	}

 	t.system[bktInfo.CID.EncodeToString()] = systemMap

-	return oid.ID{}, ErrNoNodeToRemove
+	return nil, ErrNoNodeToRemove
 }

-func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) (oid.ID, error) {
+func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([]oid.Address, error) {
 	panic("implement me")
 }

@@ -343,28 +347,31 @@ func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.Bu
 	return nil, ErrNodeNotFound
 }

-func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
+func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) {
 	multipartInfo, err := t.GetMultipartUpload(ctx, bktInfo, info.Key, info.UploadID)
 	if err != nil {
-		return oid.ID{}, err
+		return nil, err
 	}

 	if multipartInfo.ID != multipartNodeID {
-		return oid.ID{}, fmt.Errorf("invalid multipart info id")
+		return nil, fmt.Errorf("invalid multipart info id")
 	}

 	partsMap, ok := t.parts[info.UploadID]
 	if !ok {
-		partsMap = make(map[int]*data.PartInfo)
+		partsMap = make(map[int]*data.PartInfoExtended)
 	}

-	partsMap[info.Number] = info
+	partsMap[info.Number] = &data.PartInfoExtended{
+		PartInfo:  *info,
+		Timestamp: uint64(time.Now().UnixMicro()),
+	}

 	t.parts[info.UploadID] = partsMap
-	return oid.ID{}, nil
+	return nil, nil
 }

-func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
+func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) {
 	cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]

 	var foundMultipart *data.MultipartInfo
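Editor's note: the new Timestamp field on data.PartInfoExtended lets callers tell apart re-uploads of the same part number. A hedged sketch of how a caller might keep only the newest copy of each part (illustrative, not the gateway's code):

	latest := make(map[int]*data.PartInfoExtended)
	for _, p := range parts {
		if cur, ok := latest[p.Number]; !ok || p.Timestamp > cur.Timestamp {
			latest[p.Number] = p // newer upload of the same part number wins
		}
	}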
@@ -384,7 +391,7 @@ LOOP:
 	}

 	partsMap := t.parts[foundMultipart.UploadID]
-	result := make([]*data.PartInfo, 0, len(partsMap))
+	result := make([]*data.PartInfoExtended, 0, len(partsMap))
 	for _, part := range partsMap {
 		result = append(result, part)
 	}

@@ -392,6 +399,51 @@ LOOP:
 	return result, nil
 }

+func (t *TreeServiceMock) PutBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
+	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
+	if !ok {
+		systemMap = make(map[string]*data.BaseNodeVersion)
+	}
+
+	systemMap["lifecycle"] = &data.BaseNodeVersion{
+		OID: addr.Object(),
+	}
+
+	t.system[bktInfo.CID.EncodeToString()] = systemMap
+
+	return nil, ErrNoNodeToRemove
+}
+
+func (t *TreeServiceMock) GetBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
+	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
+	if !ok {
+		return oid.Address{}, ErrNodeNotFound
+	}
+
+	node, ok := systemMap["lifecycle"]
+	if !ok {
+		return oid.Address{}, ErrNodeNotFound
+	}
+
+	return newAddress(bktInfo.CID, node.OID), nil
+}
+
+func (t *TreeServiceMock) DeleteBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error) {
+	systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
+	if !ok {
+		return nil, ErrNoNodeToRemove
+	}
+
+	node, ok := systemMap["lifecycle"]
+	if !ok {
+		return nil, ErrNoNodeToRemove
+	}
+
+	delete(systemMap, "lifecycle")
+
+	return []oid.Address{newAddress(bktInfo.CID, node.OID)}, nil
+}
+
 func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
 	cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
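Editor's note: the mock methods above call a newAddress helper defined elsewhere in the package. A plausible definition, assuming only the frostfs-sdk-go setters already used in this diff, would be:

	func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
		var addr oid.Address
		addr.SetContainer(cnr)
		addr.SetObject(obj)
		return addr
	}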
@@ -21,17 +21,17 @@ type TreeService interface {
 	// GetBucketCORS gets an object id that corresponds to object with bucket CORS.
 	//
 	// If object id is not found returns ErrNodeNotFound error.
-	GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)
+	GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)

 	// PutBucketCORS puts a node to a system tree and returns objectID of a previous cors config which must be deleted in FrostFS.
 	//
-	// If object id to remove is not found returns ErrNoNodeToRemove error.
+	// If object ids to remove are not found returns ErrNoNodeToRemove error.
-	PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error)
+	PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error)

 	// DeleteBucketCORS removes a node from a system tree and returns objID which must be deleted in FrostFS.
 	//
-	// If object id to remove is not found returns ErrNoNodeToRemove error.
+	// If object ids to remove are not found returns ErrNoNodeToRemove error.
-	DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error)
+	DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error)

 	GetObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, error)
 	PutObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion, tagSet map[string]string) error

@@ -57,11 +57,15 @@ type TreeService interface {
 	GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)

 	// AddPart puts a node to a system tree as a child of appropriate multipart upload
-	// and returns objectID of a previous part which must be deleted in FrostFS.
+	// and returns objectIDs of previous parts which must be deleted in FrostFS.
 	//
-	// If object id to remove is not found returns ErrNoNodeToRemove error.
+	// If object ids to remove are not found returns ErrNoNodeToRemove error.
-	AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
+	AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error)
-	GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error)
+	GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error)

+	PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error)
+	GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)
+	DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error)
+
 	// Compound methods for optimizations
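Editor's note: a hedged usage sketch of the new lifecycle contract (names are illustrative). The Put call replaces the tree node and hands back addresses of the superseded configuration objects, which the caller is then expected to remove from FrostFS; ErrNoNodeToRemove simply means there was nothing to supersede:

	addrsToDelete, err := treeService.PutBucketLifecycleConfiguration(ctx, bktInfo, newCfgAddr)
	if err != nil && !errorsStd.Is(err, ErrNoNodeToRemove) {
		return err
	}
	for _, addr := range addrsToDelete {
		// best-effort cleanup of the superseded configuration object
		deleteObject(ctx, addr) // hypothetical helper
	}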
api/middleware/address_style.go (new file, 148 lines)
@@ -0,0 +1,148 @@
package middleware

import (
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

const wildcardPlaceholder = "<wildcard>"

type VHSSettings interface {
	Domains() []string
	GlobalVHS() bool
	VHSHeader() string
	ServernameHeader() string
	VHSNamespacesEnabled() map[string]bool
}

func PrepareAddressStyle(settings VHSSettings, log *zap.Logger) Func {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			reqInfo := GetReqInfo(ctx)
			reqLogger := reqLogOrDefault(ctx, log)
			headerVHSEnabled := r.Header.Get(settings.VHSHeader())

			if isVHSAddress(headerVHSEnabled, settings.GlobalVHS(), settings.VHSNamespacesEnabled(), reqInfo.Namespace) {
				prepareVHSAddress(reqInfo, r, settings)
			} else {
				preparePathStyleAddress(reqInfo, r, reqLogger)
			}

			h.ServeHTTP(w, r)
		})
	}
}

func isVHSAddress(headerVHSEnabled string, enabledFlag bool, vhsNamespaces map[string]bool, namespace string) bool {
	if result, err := strconv.ParseBool(headerVHSEnabled); err == nil {
		return result
	}

	result := enabledFlag
	if v, ok := vhsNamespaces[namespace]; ok {
		result = v
	}

	return result
}

func prepareVHSAddress(reqInfo *ReqInfo, r *http.Request, settings VHSSettings) {
	reqInfo.RequestVHSEnabled = true
	bktName, match := checkDomain(r.Host, getDomains(r, settings))
	if match {
		if bktName == "" {
			reqInfo.RequestType = noneType
		} else {
			if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
				reqInfo.RequestType = objectType
				reqInfo.ObjectName = objName
				reqInfo.BucketName = bktName
			} else {
				reqInfo.RequestType = bucketType
				reqInfo.BucketName = bktName
			}
		}
	} else {
		parts := strings.Split(r.Host, ".")
		reqInfo.BucketName = parts[0]

		if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
			reqInfo.RequestType = objectType
			reqInfo.ObjectName = objName
		} else {
			reqInfo.RequestType = bucketType
		}
	}
}

func getDomains(r *http.Request, settings VHSSettings) []string {
	if headerServername := r.Header.Get(settings.ServernameHeader()); headerServername != "" {
		return []string{headerServername}
	}

	return settings.Domains()
}

func preparePathStyleAddress(reqInfo *ReqInfo, r *http.Request, reqLogger *zap.Logger) {
	bktObj := strings.TrimPrefix(r.URL.Path, "/")
	if bktObj == "" {
		reqInfo.RequestType = noneType
	} else if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
		reqInfo.RequestType = objectType
		reqInfo.BucketName = bktObj[:ind]
		reqInfo.ObjectName = bktObj[ind+1:]

		if r.URL.RawPath != "" {
			// we have to do this because of
			// https://github.com/go-chi/chi/issues/641
			// https://github.com/go-chi/chi/issues/642
			if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
				reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
			} else {
				reqInfo.ObjectName = obj
			}
		}
	} else {
		reqInfo.RequestType = bucketType
		reqInfo.BucketName = strings.TrimSuffix(bktObj, "/")
	}
}

func checkDomain(host string, domains []string) (bktName string, match bool) {
	partsHost := strings.Split(host, ".")
	for _, pattern := range domains {
		partsPattern := strings.Split(pattern, ".")
		bktName, match = compareMatch(partsHost, partsPattern)
		if match {
			break
		}
	}
	return
}

func compareMatch(host, pattern []string) (bktName string, match bool) {
	if len(host) < len(pattern) {
		return "", false
	}

	i, j := len(host)-1, len(pattern)-1
	for j >= 0 && (pattern[j] == wildcardPlaceholder || host[i] == pattern[j]) {
		i--
		j--
	}

	switch {
	case i == -1:
		return "", true
	case i == 0 && (j != 0 || host[i] == pattern[j]):
		return host[0], true
	default:
		return "", false
	}
}
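Editor's note: a quick worked example of the matcher above. compareMatch walks domain labels right to left, so for pattern "s3.<wildcard>.domain.com" the four rightmost host labels are consumed by the pattern and a single leftover label becomes the bucket name:

	bkt, ok := checkDomain("bktA.s3.kapusta.domain.com", []string{"s3.<wildcard>.domain.com"})
	// bkt == "bktA", ok == true

	_, ok = checkDomain("bktA.bktB.s3.kapusta.domain.com", []string{"s3.<wildcard>.domain.com"})
	// ok == false: two leftover labels cannot form a bucket name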
api/middleware/address_style_test.go (new file, 443 lines)
@@ -0,0 +1,443 @@
package middleware

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

const (
	FrostfsVHSHeader        = "X-Frostfs-S3-VHS"
	FrostfsServernameHeader = "X-Frostfs-Servername"
)

type VHSSettingsMock struct {
	domains []string
}

func (v *VHSSettingsMock) Domains() []string {
	return v.domains
}

func (v *VHSSettingsMock) GlobalVHS() bool {
	return false
}

func (v *VHSSettingsMock) VHSHeader() string {
	return FrostfsVHSHeader
}

func (v *VHSSettingsMock) ServernameHeader() string {
	return FrostfsServernameHeader
}

func (v *VHSSettingsMock) VHSNamespacesEnabled() map[string]bool {
	return make(map[string]bool)
}

func TestIsVHSAddress(t *testing.T) {
	for _, tc := range []struct {
		name             string
		headerVHSEnabled string
		vhsEnabledFlag   bool
		vhsNamespaced    map[string]bool
		namespace        string
		expected         bool
	}{
		{
			name:     "vhs disabled",
			expected: false,
		},
		{
			name:           "vhs disabled for namespace",
			vhsEnabledFlag: true,
			vhsNamespaced: map[string]bool{
				"kapusta": false,
			},
			namespace: "kapusta",
			expected:  false,
		},
		{
			name:           "vhs enabled (global vhs flag)",
			vhsEnabledFlag: true,
			expected:       true,
		},
		{
			name: "vhs enabled for namespace",
			vhsNamespaced: map[string]bool{
				"kapusta": true,
			},
			namespace: "kapusta",
			expected:  true,
		},
		{
			name:             "vhs enabled (header)",
			headerVHSEnabled: "true",
			vhsEnabledFlag:   false,
			vhsNamespaced: map[string]bool{
				"kapusta": false,
			},
			namespace: "kapusta",
			expected:  true,
		},
		{
			name:             "vhs disabled (header)",
			headerVHSEnabled: "false",
			vhsEnabledFlag:   true,
			vhsNamespaced: map[string]bool{
				"kapusta": true,
			},
			namespace: "kapusta",
			expected:  false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			actual := isVHSAddress(tc.headerVHSEnabled, tc.vhsEnabledFlag, tc.vhsNamespaced, tc.namespace)
			require.Equal(t, tc.expected, actual)
		})
	}
}

func TestPreparePathStyleAddress(t *testing.T) {
	bkt, obj := "test-bucket", "test-object"

	for _, tc := range []struct {
		name            string
		urlParams       string
		expectedReqType ReqType
		expectedBktName string
		expectedObjName string
	}{
		{
			name:            "bucket request",
			urlParams:       "/" + bkt,
			expectedReqType: bucketType,
			expectedBktName: bkt,
		},
		{
			name:            "bucket request with slash",
			urlParams:       "/" + bkt + "/",
			expectedReqType: bucketType,
			expectedBktName: bkt,
		},
		{
			name:            "object request",
			urlParams:       "/" + bkt + "/" + obj,
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj,
		},
		{
			name:            "object request with slash",
			urlParams:       "/" + bkt + "/" + obj + "/",
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj + "/",
		},
		{
			name:            "none type request",
			urlParams:       "/",
			expectedReqType: noneType,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			reqInfo := &ReqInfo{}
			r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)

			preparePathStyleAddress(reqInfo, r, reqLogOrDefault(r.Context(), zaptest.NewLogger(t)))
			require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
			require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
			require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
		})
	}
}

func TestPrepareVHSAddress(t *testing.T) {
	bkt, obj, domain := "test-bucket", "test-object", "domain.com"

	for _, tc := range []struct {
		name            string
		domains         []string
		host            string
		urlParams       string
		expectedReqType ReqType
		expectedBktName string
		expectedObjName string
	}{
		{
			name:            "bucket request, the domain matched",
			domains:         []string{domain},
			host:            bkt + "." + domain,
			urlParams:       "/",
			expectedReqType: bucketType,
			expectedBktName: bkt,
		},
		{
			name:            "object request, the domain matched",
			domains:         []string{domain},
			host:            bkt + "." + domain,
			urlParams:       "/" + obj,
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj,
		},
		{
			name:            "object request with slash, the domain matched",
			domains:         []string{domain},
			host:            bkt + "." + domain,
			urlParams:       "/" + obj + "/",
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj + "/",
		},
		{
			name:            "list-buckets request, the domain matched",
			domains:         []string{domain},
			host:            domain,
			urlParams:       "/",
			expectedReqType: noneType,
		},
		{
			name:            "bucket request, the domain doesn't match",
			host:            bkt + "." + domain,
			urlParams:       "/",
			expectedReqType: bucketType,
			expectedBktName: bkt,
		},
		{
			name:            "object request, the domain doesn't match",
			host:            bkt + "." + domain,
			urlParams:       "/" + obj,
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj,
		},
		{
			name:            "object request with slash, the domain doesn't match",
			host:            bkt + "." + domain,
			urlParams:       "/" + obj + "/",
			expectedReqType: objectType,
			expectedBktName: bkt,
			expectedObjName: obj + "/",
		},
		{
			name:            "list-buckets request, the domain doesn't match (list-buckets isn't supported if the domains don't match)",
			host:            domain,
			urlParams:       "/",
			expectedReqType: bucketType,
			expectedBktName: strings.Split(domain, ".")[0],
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			reqInfo := &ReqInfo{}
			vhsSettings := &VHSSettingsMock{domains: tc.domains}
			r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)
			r.Host = tc.host

			prepareVHSAddress(reqInfo, r, vhsSettings)
			require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
			require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
			require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
		})
	}
}

func TestCheckDomains(t *testing.T) {
	for _, tc := range []struct {
		name            string
		domains         []string
		requestURL      string
		expectedBktName string
		expectedMatch   bool
	}{
		{
			name:            "valid url with bktName and namespace (wildcard after protocol infix)",
			domains:         []string{"s3.<wildcard>.domain.com"},
			requestURL:      "bktA.s3.kapusta.domain.com",
			expectedBktName: "bktA",
			expectedMatch:   true,
		},
		{
			name:            "valid url without bktName and namespace (wildcard after protocol infix)",
			domains:         []string{"s3.<wildcard>.domain.com"},
			requestURL:      "s3.kapusta.domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:          "invalid url with invalid bktName (wildcard after protocol infix)",
			domains:       []string{"s3.<wildcard>.domain.com"},
			requestURL:    "bktA.bktB.s3.kapusta.domain.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url without namespace (wildcard after protocol infix)",
			domains:       []string{"s3.<wildcard>.domain.com"},
			requestURL:    "bktA.s3.domain.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url with invalid infix (wildcard after protocol infix)",
			domains:       []string{"s3.<wildcard>.domain.com"},
			requestURL:    "bktA.s4.kapusta.domain.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url with invalid postfix (wildcard after protocol infix)",
			domains:       []string{"s3.<wildcard>.domain.com"},
			requestURL:    "bktA.s3.kapusta.dom.su",
			expectedMatch: false,
		},
		{
			name:            "valid url with bktName and namespace (wildcard at the beginning of the domain)",
			domains:         []string{"<wildcard>.domain.com"},
			requestURL:      "bktA.kapusta.domain.com",
			expectedBktName: "bktA",
			expectedMatch:   true,
		},
		{
			name:            "valid url without bktName and namespace (wildcard at the beginning of the domain)",
			domains:         []string{"<wildcard>.domain.com"},
			requestURL:      "kapusta.domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:          "invalid url with invalid bktName (wildcard at the beginning of the domain)",
			domains:       []string{"<wildcard>.domain.com"},
			requestURL:    "bktA.bktB.kapusta.domain.com",
			expectedMatch: false,
		},
		{
			name:          "collision test - true, because we cannot clearly distinguish a namespace from a bucket (wildcard at the beginning of the domain)",
			domains:       []string{"<wildcard>.domain.com"},
			requestURL:    "bktA.domain.com",
			expectedMatch: true,
		},
		{
			name:          "invalid url (fewer hosts)",
			domains:       []string{"<wildcard>.domain.com"},
			requestURL:    "domain.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url with invalid postfix (wildcard at the beginning of the domain)",
			domains:       []string{"<wildcard>.domain.com"},
			requestURL:    "bktA.kapusta.dom.su",
			expectedMatch: false,
		},
		{
			name:            "valid url with bktName and without wildcard (root namespace)",
			domains:         []string{"domain.com"},
			requestURL:      "bktA.domain.com",
			expectedBktName: "bktA",
			expectedMatch:   true,
		},
		{
			name:            "valid url without bktName and without wildcard (root namespace)",
			domains:         []string{"domain.com"},
			requestURL:      "domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:          "invalid url with bktName without wildcard (root namespace)",
			domains:       []string{"domain.com"},
			requestURL:    "bktA.dom.su",
			expectedMatch: false,
		},
		{
			name:          "invalid url without wildcard (root namespace)",
			domains:       []string{"domain.com"},
			requestURL:    "dom.su",
			expectedMatch: false,
		},
		{
			name:            "valid url, with a sorted list of domains",
			domains:         []string{"s3.<wildcard>.domain.com", "<wildcard>.domain.com", "domain.com"},
			requestURL:      "s3.kapusta.domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:            "valid url with bktName, multiple wildcards (wildcards at the beginning of the domain)",
			domains:         []string{"<wildcard>.<wildcard>.domain.com"},
			requestURL:      "bktA.s3.kapusta.domain.com",
			expectedBktName: "bktA",
			expectedMatch:   true,
		},
		{
			name:            "valid url without bktName, multiple wildcards (wildcards at the beginning of the domain)",
			domains:         []string{"<wildcard>.<wildcard>.domain.com"},
			requestURL:      "s3.kapusta.domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:            "valid url with bktName, multiple wildcards",
			domains:         []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
			requestURL:      "bktA.s3.kapusta.subdomain.domain.com",
			expectedBktName: "bktA",
			expectedMatch:   true,
		},
		{
			name:            "valid url without bktName, multiple wildcards",
			domains:         []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
			requestURL:      "s3.kapusta.subdomain.domain.com",
			expectedBktName: "",
			expectedMatch:   true,
		},
		{
			name:          "invalid url without one wildcard",
			domains:       []string{"<wildcard>.<wildcard>.domain.com"},
			requestURL:    "kapusta.domain.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url, multiple wildcards",
			domains:       []string{"<wildcard>.<wildcard>.domain.com"},
			requestURL:    "s3.kapusta.dom.com",
			expectedMatch: false,
		},
		{
			name:          "invalid url with invalid bktName, multiple wildcards",
			domains:       []string{"<wildcard>.<wildcard>.domain.com"},
			requestURL:    "bktA.bktB.s3.kapusta.domain.com",
			expectedMatch: false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			bktName, match := checkDomain(tc.requestURL, tc.domains)
			require.Equal(t, tc.expectedBktName, bktName)
			require.Equal(t, tc.expectedMatch, match)
		})
	}
}

func TestGetDomains(t *testing.T) {
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	settings := &VHSSettingsMock{
		domains: []string{
			"s3.domain.com",
			"s3.<wildcard>.domain.com",
			"domain.com",
		},
	}

	t.Run("the request does not contain the X-Frostfs-Servername header", func(t *testing.T) {
		actualDomains := getDomains(req, settings)
		require.Equal(t, settings.domains, actualDomains)
	})

	serverName := "domain.com"
	req.Header.Set(settings.ServernameHeader(), serverName)

	t.Run("the request contains the X-Frostfs-Servername header", func(t *testing.T) {
		actualDomains := getDomains(req, settings)
		require.Equal(t, []string{serverName}, actualDomains)
	})
}
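Editor's note: the table-driven cases above double as executable documentation for the matcher; to run just this group, something like the following should work from the repository root:

	go test ./api/middleware/ -run 'TestCheckDomains|TestPrepareVHSAddress'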
@@ -74,6 +74,7 @@ const (
 	AbortMultipartUploadOperation = "AbortMultipartUpload"
 	DeleteObjectTaggingOperation  = "DeleteObjectTagging"
 	DeleteObjectOperation         = "DeleteObject"
+	PatchObjectOperation          = "PatchObject"
 )

 const (

@@ -106,9 +107,3 @@ const (
 	PartNumberQuery = "partNumber"
 	LegalHoldQuery  = "legal-hold"
 )
-
-const (
-	StdoutPath = "stdout"
-	StderrPath = "stderr"
-	SinkName   = "lumberjack"
-)
@@ -1,171 +0,0 @@ (file removed)
//go:build loghttp

package middleware

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/url"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"gopkg.in/natefinch/lumberjack.v2"
)

type (
	LogHTTPConfig struct {
		Enabled    bool
		MaxBody    int64
		MaxLogSize int
		OutputPath string
		UseGzip    bool
	}

	fileLogger struct {
		*zap.Logger
		logRoller *lumberjack.Logger
	}

	// Implementation of zap.Sink for using lumberjack.
	lumberjackSink struct {
		*lumberjack.Logger
	}
)

var filelog = fileLogger{}

// newFileLogger registers a zap sink and returns a new fileLogger.
func newFileLogger(conf *LogHTTPConfig) (fileLogger, error) {
	var flog = fileLogger{}
	c := flog.newLoggerConfig()
	c.OutputPaths = []string{conf.OutputPath}
	if conf.OutputPath != StdoutPath && conf.OutputPath != StderrPath && conf.OutputPath != "" {
		err := flog.registerOutputSink(conf)
		if err != nil {
			return flog, err
		}
		c.OutputPaths[0] = SinkName + ":" + conf.OutputPath
	}
	log, err := c.Build()
	if err != nil {
		return flog, err
	}
	flog.Logger = log

	return flog, nil
}

// registerOutputSink creates and registers a sink for logger file output.
func (f *fileLogger) registerOutputSink(conf *LogHTTPConfig) error {
	f.logRoller = &lumberjack.Logger{
		Filename: conf.OutputPath,
		MaxSize:  conf.MaxLogSize,
		Compress: conf.UseGzip,
	}
	err := zap.RegisterSink(SinkName, func(_ *url.URL) (zap.Sink, error) {
		return lumberjackSink{
			Logger: f.logRoller,
		}, nil
	})
	if err != nil {
		return err
	}

	return nil
}

func (lumberjackSink) Sync() error {
	return nil
}

func ReloadFileLogger(conf *LogHTTPConfig) error {
	if filelog.logRoller == nil {
		return nil
	}
	filelog.logRoller.MaxSize = conf.MaxLogSize
	filelog.logRoller.Compress = conf.UseGzip

	if filelog.logRoller.Filename != conf.OutputPath {
		filelog.logRoller.Filename = conf.OutputPath
		if err := filelog.logRoller.Rotate(); err != nil {
			return err
		}
	}

	return nil
}

// LogHTTP logs http parameters from s3 request.
func LogHTTP(l *zap.Logger, config *LogHTTPConfig) Func {
	var err error
	filelog, err = newFileLogger(config)
	if err != nil {
		l.Warn(logs.FailedToInitializeHTTPLogger)
		return func(h http.Handler) http.Handler {
			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				h.ServeHTTP(w, r)
			})
		}
	}
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if !config.Enabled {
				h.ServeHTTP(w, r)
				return
			}
			var httplog = filelog.With(
				zap.String("from", r.RemoteAddr),
				zap.String("URI", r.RequestURI),
				zap.String("method", r.Method),
			)

			httplog = withFieldIfExist(httplog, "query", r.URL.Query())
			httplog = withFieldIfExist(httplog, "headers", r.Header)
			if r.ContentLength > 0 && r.ContentLength <= config.MaxBody {
				httplog, err = withBody(httplog, r)
				if err != nil {
					l.Warn(logs.FailedToGetRequestBody, zap.Error(err))
				}
			}
			httplog.Info(logs.RequestHTTP)
			h.ServeHTTP(w, r)
		})
	}
}

// newLoggerConfig creates a new zap.Config with disabled base fields.
func (*fileLogger) newLoggerConfig() zap.Config {
	c := zap.NewProductionConfig()
	c.DisableCaller = true
	c.DisableStacktrace = true
	c.EncoderConfig.MessageKey = zapcore.OmitKey
	c.EncoderConfig.LevelKey = zapcore.OmitKey
	c.EncoderConfig.TimeKey = zapcore.OmitKey
	c.EncoderConfig.FunctionKey = zapcore.OmitKey

	return c
}

// withBody reads the body and attaches it to the log output.
func withBody(httplog *zap.Logger, r *http.Request) (*zap.Logger, error) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("read body error: %w", err)
	}
	defer r.Body.Close()
	r.Body = io.NopCloser(bytes.NewBuffer(body))

	httplog = httplog.With(zap.String("body", string(body)))

	return httplog, nil
}

// withFieldIfExist checks whether data is not empty and attaches it to the log output.
func withFieldIfExist(log *zap.Logger, label string, data map[string][]string) *zap.Logger {
	if len(data) != 0 {
		log = log.With(zap.Any(label, data))
	}
	return log
}
@@ -1,31 +0,0 @@ (file removed)
//go:build !loghttp

package middleware

import (
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

type LogHTTPConfig struct {
	Enabled    bool
	MaxBody    int64
	MaxLogSize int
	OutputPath string
	UseGzip    bool
}

func LogHTTP(l *zap.Logger, _ *LogHTTPConfig) Func {
	l.Warn(logs.LogHTTPDisabledInThisBuild)
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			h.ServeHTTP(w, r)
		})
	}
}

func ReloadFileLogger(conf *LogHTTPConfig) error {
	return nil
}
@@ -73,7 +73,6 @@ type PolicyConfig struct {
 	Storage        engine.ChainRouter
 	FrostfsID      FrostFSIDInformer
 	Settings       PolicySettings
-	Domains        []string
 	Log            *zap.Logger
 	BucketResolver BucketResolveFunc
 	Decoder        XMLDecoder

@@ -99,21 +98,21 @@ func PolicyCheck(cfg PolicyConfig) Func {
 	}
 }

 func policyCheck(r *http.Request, cfg PolicyConfig) error {
-	reqType, bktName, objName := getBucketObject(r, cfg.Domains)
-	req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqType, bktName, objName)
+	reqInfo := GetReqInfo(r.Context())
+	req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqInfo.RequestType, reqInfo.BucketName, reqInfo.ObjectName)
 	if err != nil {
 		return err
 	}

 	var bktInfo *data.BucketInfo
-	if reqType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
-		bktInfo, err = cfg.BucketResolver(r.Context(), bktName)
+	if reqInfo.RequestType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
+		bktInfo, err = cfg.BucketResolver(r.Context(), reqInfo.BucketName)
 		if err != nil {
 			return err
 		}
 	}

-	reqInfo := GetReqInfo(r.Context())
 	target := engine.NewRequestTargetWithNamespace(reqInfo.Namespace)
 	if bktInfo != nil {
 		cnrTarget := engine.ContainerTarget(bktInfo.CID.EncodeToString())
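Editor's note: with getBucketObject gone (removed just below), policyCheck relies on ReqInfo having been populated earlier in the chain. A hedged wiring sketch (handler names are illustrative; the real router setup lives elsewhere in the gateway):

	router.Use(
		middleware.Request(log, settings),                // creates ReqInfo in the context
		middleware.PrepareAddressStyle(vhsSettings, log), // fills RequestType/BucketName/ObjectName
		middleware.PolicyCheck(policyCfg),                // now reads them from ReqInfo
	)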
@@ -208,33 +207,6 @@ const (
 	objectType
 )

-func getBucketObject(r *http.Request, domains []string) (reqType ReqType, bktName string, objName string) {
-	for _, domain := range domains {
-		ind := strings.Index(r.Host, "."+domain)
-		if ind == -1 {
-			continue
-		}
-
-		bkt := r.Host[:ind]
-		if obj := strings.TrimPrefix(r.URL.Path, "/"); obj != "" {
-			return objectType, bkt, obj
-		}
-
-		return bucketType, bkt, ""
-	}
-
-	bktObj := strings.TrimPrefix(r.URL.Path, "/")
-	if bktObj == "" {
-		return noneType, "", ""
-	}
-
-	if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
-		return objectType, bktObj[:ind], bktObj[ind+1:]
-	}
-
-	return bucketType, strings.TrimSuffix(bktObj, "/"), ""
-}
-
 func determineOperation(r *http.Request, reqType ReqType) (operation string) {
 	switch reqType {
 	case objectType:

@@ -357,6 +329,8 @@ func determineObjectOperation(r *http.Request) string {
 	switch r.Method {
 	case http.MethodOptions:
 		return OptionsObjectOperation
+	case http.MethodPatch:
+		return PatchObjectOperation
 	case http.MethodHead:
 		return HeadObjectOperation
 	case http.MethodGet:
@@ -8,79 +8,6 @@ import (
 	"github.com/stretchr/testify/require"
 )

-func TestReqTypeDetermination(t *testing.T) {
-	bkt, obj, domain := "test-bucket", "test-object", "domain"
-
-	for _, tc := range []struct {
-		name            string
-		target          string
-		host            string
-		domains         []string
-		expectedType    ReqType
-		expectedBktName string
-		expectedObjName string
-	}{
-		{
-			name:            "bucket request, path-style",
-			target:          "/" + bkt,
-			expectedType:    bucketType,
-			expectedBktName: bkt,
-		},
-		{
-			name:            "bucket request with slash, path-style",
-			target:          "/" + bkt + "/",
-			expectedType:    bucketType,
-			expectedBktName: bkt,
-		},
-		{
-			name:            "object request, path-style",
-			target:          "/" + bkt + "/" + obj,
-			expectedType:    objectType,
-			expectedBktName: bkt,
-			expectedObjName: obj,
-		},
-		{
-			name:            "object request with slash, path-style",
-			target:          "/" + bkt + "/" + obj + "/",
-			expectedType:    objectType,
-			expectedBktName: bkt,
-			expectedObjName: obj + "/",
-		},
-		{
-			name:         "none type request",
-			target:       "/",
-			expectedType: noneType,
-		},
-		{
-			name:            "bucket request, virtual-hosted style",
-			target:          "/",
-			host:            bkt + "." + domain,
-			domains:         []string{"some-domain", domain},
-			expectedType:    bucketType,
-			expectedBktName: bkt,
-		},
-		{
-			name:            "object request, virtual-hosted style",
-			target:          "/" + obj,
-			host:            bkt + "." + domain,
-			domains:         []string{"some-domain", domain},
-			expectedType:    objectType,
-			expectedBktName: bkt,
-			expectedObjName: obj,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			r := httptest.NewRequest(http.MethodPut, tc.target, nil)
-			r.Host = tc.host
-
-			reqType, bktName, objName := getBucketObject(r, tc.domains)
-			require.Equal(t, tc.expectedType, reqType)
-			require.Equal(t, tc.expectedBktName, bktName)
-			require.Equal(t, tc.expectedObjName, objName)
-		})
-	}
-}
-
 func TestDetermineBucketOperation(t *testing.T) {
 	const defaultValue = "value"
@@ -12,7 +12,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
-	"github.com/go-chi/chi/v5"
 	"github.com/google/uuid"
 	"go.uber.org/zap"
 	"google.golang.org/grpc/metadata"

@@ -41,6 +40,8 @@ type (
 		Namespace string
 		User      string // User owner id
 		Tagging   *data.Tagging
+		RequestVHSEnabled bool
+		RequestType       ReqType
 	}

 	// ObjectRequest represents object request data.

@@ -61,16 +62,14 @@ const (

 const HdrAmzRequestID = "x-amz-request-id"

-const (
-	BucketURLPrm = "bucket"
-)
-
 var deploymentID = uuid.Must(uuid.NewRandom())

 var (
 	// De-facto standard header keys.
 	xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
 	xRealIP       = http.CanonicalHeaderKey("X-Real-IP")
+	xForwardedProto  = http.CanonicalHeaderKey("X-Forwarded-Proto")
+	xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")

 	// RFC7239 defines a new "Forwarded: " header designed to replace the
 	// existing use of X-Forwarded-* headers.

@@ -79,6 +78,9 @@ var (
 	// Allows for a sub-match of the first value after 'for=' to the next
 	// comma, semi-colon or space. The match is case-insensitive.
 	forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|, )]+)(.*)`)
+	// Allows for a sub-match for the first instance of scheme (http|https)
+	// prefixed by 'proto='. The match is case-insensitive.
+	protoRegex = regexp.MustCompile(`(?i)^(;|,| )+(?:proto=)(https|http)`)
 )

 // NewReqInfo returns new ReqInfo based on parameters.
@ -197,57 +199,6 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddBucketName adds bucket name to ReqInfo from context.
|
|
||||||
func AddBucketName(l *zap.Logger) Func {
|
|
||||||
return func(h http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
|
|
||||||
reqInfo := GetReqInfo(ctx)
|
|
||||||
reqInfo.BucketName = chi.URLParam(r, BucketURLPrm)
|
|
||||||
|
|
||||||
if reqInfo.BucketName != "" {
|
|
||||||
reqLogger := reqLogOrDefault(ctx, l)
|
|
||||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
|
|
||||||
}
|
|
||||||
|
|
||||||
h.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddObjectName adds objects name to ReqInfo from context.
|
|
||||||
func AddObjectName(l *zap.Logger) Func {
|
|
||||||
return func(h http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
ctx := r.Context()
|
|
||||||
reqInfo := GetReqInfo(ctx)
|
|
||||||
reqLogger := reqLogOrDefault(ctx, l)
|
|
||||||
|
|
||||||
rctx := chi.RouteContext(ctx)
|
|
||||||
// trim leading slash (always present)
|
|
||||||
reqInfo.ObjectName = rctx.RoutePath[1:]
|
|
||||||
|
|
||||||
if r.URL.RawPath != "" {
|
|
||||||
// we have to do this because of
|
|
||||||
// https://github.com/go-chi/chi/issues/641
|
|
||||||
// https://github.com/go-chi/chi/issues/642
|
|
||||||
if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
|
|
||||||
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
|
|
||||||
} else {
|
|
||||||
reqInfo.ObjectName = obj
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if reqInfo.ObjectName != "" {
|
|
||||||
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
|
|
||||||
}
|
|
||||||
|
|
||||||
h.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
|
// getSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
|
||||||
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
|
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
|
||||||
// else fails.
|
// else fails.
|
||||||
|
@ -291,3 +242,31 @@ func getSourceIP(r *http.Request) string {
|
||||||
}
|
}
|
||||||
return raddr
|
return raddr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetSourceScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
|
||||||
|
// Forwarded headers (in that order).
|
||||||
|
func GetSourceScheme(r *http.Request) string {
|
||||||
|
var scheme string
|
||||||
|
|
||||||
|
// Retrieve the scheme from X-Forwarded-Proto.
|
||||||
|
if proto := r.Header.Get(xForwardedProto); proto != "" {
|
||||||
|
scheme = strings.ToLower(proto)
|
||||||
|
} else if proto = r.Header.Get(xForwardedScheme); proto != "" {
|
||||||
|
scheme = strings.ToLower(proto)
|
||||||
|
} else if proto := r.Header.Get(forwarded); proto != "" {
|
||||||
|
// match should contain at least two elements if the protocol was
|
||||||
|
// specified in the Forwarded header. The first element will always be
|
||||||
|
// the 'for=', which we ignore, subsequently we proceed to look for
|
||||||
|
// 'proto=' which should precede right after `for=` if not
|
||||||
|
// we simply ignore the values and return empty. This is in line
|
||||||
|
// with the approach we took for returning first ip from multiple
|
||||||
|
// params.
|
||||||
|
if match := forRegex.FindStringSubmatch(proto); len(match) > 1 {
|
||||||
|
if match = protoRegex.FindStringSubmatch(match[2]); len(match) > 1 {
|
||||||
|
scheme = strings.ToLower(match[2])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return scheme
|
||||||
|
}
|
||||||
|
|
|
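A usage sketch for the GetSourceScheme helper added above: X-Forwarded-Proto takes precedence over X-Forwarded-Scheme and the RFC 7239 Forwarded header, mirroring the header order getSourceIP already uses. The header values are illustrative; the snippet assumes this repository's api/middleware import path.

package main

import (
	"fmt"
	"net/http/httptest"

	middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)

func main() {
	r := httptest.NewRequest("GET", "http://example.com/", nil)
	r.Header.Set("Forwarded", "for=192.0.2.60;proto=http")
	r.Header.Set("X-Forwarded-Proto", "HTTPS")

	// X-Forwarded-Proto wins over Forwarded, and the result is lower-cased.
	fmt.Println(middleware.GetSourceScheme(r)) // https
}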
@@ -6,7 +6,7 @@ import (
     "fmt"
     "sync"

-    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+    v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
     cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@@ -29,20 +29,14 @@ type FrostFS interface {
     SystemDNS(context.Context) (string, error)
 }

-type Settings interface {
-    FormContainerZone(ns string) (zone string, isDefault bool)
-}
-
 type Config struct {
     FrostFS    FrostFS
     RPCAddress string
-    Settings   Settings
 }

 type BucketResolver struct {
     rpcAddress string
     frostfs    FrostFS
-    settings   Settings

     mu        sync.RWMutex
     resolvers []*Resolver
@@ -50,15 +44,15 @@ type BucketResolver struct {

 type Resolver struct {
     Name    string
-    resolve func(context.Context, string) (cid.ID, error)
+    resolve func(context.Context, string, string) (cid.ID, error)
 }

-func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (cid.ID, error)) {
+func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (cid.ID, error)) {
     r.resolve = fn
 }

-func (r *Resolver) Resolve(ctx context.Context, name string) (cid.ID, error) {
-    return r.resolve(ctx, name)
+func (r *Resolver) Resolve(ctx context.Context, zone, name string) (cid.ID, error) {
+    return r.resolve(ctx, zone, name)
 }

 func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, error) {
@@ -87,12 +81,12 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
     return resolvers, nil
 }

-func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid.ID, err error) {
+func (r *BucketResolver) Resolve(ctx context.Context, zone, bktName string) (cnrID cid.ID, err error) {
     r.mu.RLock()
     defer r.mu.RUnlock()

     for _, resolver := range r.resolvers {
-        cnrID, resolverErr := resolver.Resolve(ctx, bktName)
+        cnrID, resolverErr := resolver.Resolve(ctx, zone, bktName)
         if resolverErr != nil {
             resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
             if err == nil {
@@ -123,7 +117,6 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
     cfg := &Config{
         FrostFS:    r.frostfs,
         RPCAddress: r.rpcAddress,
-        Settings:   r.settings,
     }

     resolvers, err := createResolvers(resolverNames, cfg)
@@ -152,30 +145,25 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
 func newResolver(name string, cfg *Config) (*Resolver, error) {
     switch name {
     case DNSResolver:
-        return NewDNSResolver(cfg.FrostFS, cfg.Settings)
+        return NewDNSResolver(cfg.FrostFS)
     case NNSResolver:
-        return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
+        return NewNNSResolver(cfg.RPCAddress)
     default:
         return nil, fmt.Errorf("unknown resolver: %s", name)
     }
 }

-func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
+func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
     if frostFS == nil {
         return nil, fmt.Errorf("pool must not be nil for DNS resolver")
     }
-    if settings == nil {
-        return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
-    }

     var dns ns.DNS

-    resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
+    resolveFunc := func(ctx context.Context, zone, name string) (cid.ID, error) {
         var err error
-        reqInfo := middleware.GetReqInfo(ctx)

-        zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
-        if isDefault {
+        if zone == v2container.SysAttributeZoneDefault {
             zone, err = frostFS.SystemDNS(ctx)
             if err != nil {
                 return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@@ -196,13 +184,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
     }, nil
 }

-func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
+func NewNNSResolver(address string) (*Resolver, error) {
     if address == "" {
         return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
     }
-    if settings == nil {
-        return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
-    }

     var nns ns.NNS

@@ -210,12 +195,9 @@ func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
         return nil, fmt.Errorf("dial %s: %w", address, err)
     }

-    resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
+    resolveFunc := func(_ context.Context, zone, name string) (cid.ID, error) {
         var d container.Domain
         d.SetName(name)

-        reqInfo := middleware.GetReqInfo(ctx)
-        zone, _ := settings.FormContainerZone(reqInfo.Namespace)
         d.SetZone(zone)

         cnrID, err := nns.ResolveContainerDomain(d)
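With the Settings dependency removed, callers of the resolver now pass the container zone explicitly, and the DNS resolver only consults SystemDNS when it receives the default-zone sentinel. A call-site sketch under those assumptions; the zone derivation mirrors FormContainerZone from cmd/s3-gw, and the api/resolver import path is assumed:

package resolverexample

import (
	"context"

	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
)

// containerZone mirrors FormContainerZone from cmd/s3-gw: an empty namespace
// maps to the default-zone sentinel, which makes the DNS resolver fall back
// to the network's SystemDNS value.
func containerZone(ns string) string {
	if ns == "" {
		return v2container.SysAttributeZoneDefault
	}
	return ns + ".ns"
}

// resolveBucket shows the new zone-aware Resolve signature in use;
// construction of the BucketResolver is elided.
func resolveBucket(ctx context.Context, r *resolver.BucketResolver, ns, bkt string) (cid.ID, error) {
	return r.Resolve(ctx, containerZone(ns), bkt)
}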
@@ -87,6 +87,7 @@ type (
         AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
         ListPartsHandler(w http.ResponseWriter, r *http.Request)
         ListMultipartUploadsHandler(http.ResponseWriter, *http.Request)
+        PatchObjectHandler(http.ResponseWriter, *http.Request)

         ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
         ResolveCID(ctx context.Context, bucket string) (cid.ID, error)
@@ -97,6 +98,7 @@ type Settings interface {
     s3middleware.RequestSettings
     s3middleware.PolicySettings
     s3middleware.MetricsSettings
+    s3middleware.VHSSettings
 }

 type FrostFSID interface {
@@ -109,14 +111,10 @@ type Config struct {
     Handler Handler
     Center  s3middleware.Center
     Log     *zap.Logger
-    LogHTTP *s3middleware.LogHTTPConfig
     Metrics *metrics.AppMetrics

     MiddlewareSettings Settings

-    // Domains optional. If empty no virtual hosted domains will be attached.
-    Domains []string
-
     FrostfsID FrostFSID

     FrostFSIDValidation bool
@@ -129,11 +127,6 @@
 func NewRouter(cfg Config) *chi.Mux {
     api := chi.NewRouter()

-    if cfg.LogHTTP.Enabled {
-        api.Use(s3middleware.LogHTTP(cfg.Log, cfg.LogHTTP))
-    }
-
     api.Use(
         s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
         middleware.ThrottleWithOpts(cfg.Throttle),
@@ -148,11 +141,11 @@ func NewRouter(cfg Config) *chi.Mux {
         api.Use(s3middleware.FrostfsIDValidation(cfg.FrostfsID, cfg.Log))
     }

+    api.Use(s3middleware.PrepareAddressStyle(cfg.MiddlewareSettings, cfg.Log))
     api.Use(s3middleware.PolicyCheck(s3middleware.PolicyConfig{
         Storage:   cfg.PolicyChecker,
         FrostfsID: cfg.FrostfsID,
         Settings:  cfg.MiddlewareSettings,
-        Domains:   cfg.Domains,
         Log:       cfg.Log,
         BucketResolver: cfg.Handler.ResolveBucket,
         Decoder:        cfg.XMLDecoder,
@@ -160,22 +153,41 @@ func NewRouter(cfg Config) *chi.Mux {
     }))

     defaultRouter := chi.NewRouter()
-    defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(cfg.Handler, cfg.Log))
-    defaultRouter.Get("/", named("ListBuckets", cfg.Handler.ListBucketsHandler))
+    defaultRouter.Mount("/{bucket}", bucketRouter(cfg.Handler))
+    defaultRouter.Get("/", named(s3middleware.ListBucketsOperation, cfg.Handler.ListBucketsHandler))
     attachErrorHandler(defaultRouter)

-    hr := NewHostBucketRouter("bucket")
-    hr.Default(defaultRouter)
-    for _, domain := range cfg.Domains {
-        hr.Map(domain, bucketRouter(cfg.Handler, cfg.Log))
-    }
-    api.Mount("/", hr)
+    vhsRouter := bucketRouter(cfg.Handler)
+    router := newGlobalRouter(defaultRouter, vhsRouter)
+
+    api.Mount("/", router)

     attachErrorHandler(api)

     return api
 }

+type globalRouter struct {
+    pathStyleRouter chi.Router
+    vhsRouter       chi.Router
+}
+
+func newGlobalRouter(pathStyleRouter, vhsRouter chi.Router) *globalRouter {
+    return &globalRouter{
+        pathStyleRouter: pathStyleRouter,
+        vhsRouter:       vhsRouter,
+    }
+}
+
+func (g *globalRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+    router := g.pathStyleRouter
+    if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.RequestVHSEnabled {
+        router = g.vhsRouter
+    }
+
+    router.ServeHTTP(w, r)
+}
+
 func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) {
         reqInfo := s3middleware.GetReqInfo(r.Context())
@@ -220,14 +232,13 @@ func attachErrorHandler(api *chi.Mux) {
     api.MethodNotAllowed(named("MethodNotAllowed", errorHandler))
 }

-func bucketRouter(h Handler, log *zap.Logger) chi.Router {
+func bucketRouter(h Handler) chi.Router {
     bktRouter := chi.NewRouter()
     bktRouter.Use(
-        s3middleware.AddBucketName(log),
         s3middleware.WrapHandler(h.AppendCORSHeaders),
     )

-    bktRouter.Mount("/", objectRouter(h, log))
+    bktRouter.Mount("/", objectRouter(h))

     bktRouter.Options("/", named(s3middleware.OptionsBucketOperation, h.Preflight))

@@ -299,7 +310,7 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
             Add(NewFilter().
                 Queries(s3middleware.VersionsQuery).
                 Handler(named(s3middleware.ListBucketObjectVersionsOperation, h.ListBucketObjectVersionsHandler))).
-            DefaultHandler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler)))
+            DefaultHandler(listWrapper(h)))
     })

     // PUT method handlers
@@ -362,7 +373,7 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
                 Handler(named(s3middleware.DeleteBucketPolicyOperation, h.DeleteBucketPolicyHandler))).
             Add(NewFilter().
                 Queries(s3middleware.LifecycleQuery).
-                Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
+                Handler(named(s3middleware.DeleteBucketLifecycleOperation, h.DeleteBucketLifecycleHandler))).
             Add(NewFilter().
                 Queries(s3middleware.EncryptionQuery).
                 Handler(named(s3middleware.DeleteBucketEncryptionOperation, h.DeleteBucketEncryptionHandler))).
@@ -374,14 +385,27 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
     return bktRouter
 }

-func objectRouter(h Handler, l *zap.Logger) chi.Router {
+func listWrapper(h Handler) http.HandlerFunc {
+    return func(w http.ResponseWriter, r *http.Request) {
+        if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.BucketName == "" {
+            reqInfo.API = s3middleware.ListBucketsOperation
+            h.ListBucketsHandler(w, r)
+        } else {
+            reqInfo.API = s3middleware.ListObjectsV1Operation
+            h.ListObjectsV1Handler(w, r)
+        }
+    }
+}
+
+func objectRouter(h Handler) chi.Router {
     objRouter := chi.NewRouter()
-    objRouter.Use(s3middleware.AddObjectName(l))

     objRouter.Options("/*", named(s3middleware.OptionsObjectOperation, h.Preflight))

     objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))

+    objRouter.Patch("/*", named(s3middleware.PatchObjectOperation, h.PatchObjectHandler))
+
     // GET method handlers
     objRouter.Group(func(r chi.Router) {
         r.Method(http.MethodGet, "/*", NewHandlerFilter().
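The new globalRouter serves the same URL space through two routers and picks one per request, based on a flag that earlier middleware stored in the request context. A simplified stand-in of that dispatch with plain net/http, not the gateway's actual types:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type vhsKey struct{}

// chooseRouter mirrors the idea of globalRouter.ServeHTTP: default to the
// path-style handler and switch to the VHS handler when a per-request
// context flag is set.
func chooseRouter(pathStyle, vhs http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := pathStyle
		if enabled, _ := r.Context().Value(vhsKey{}).(bool); enabled {
			h = vhs
		}
		h.ServeHTTP(w, r)
	})
}

func main() {
	pathStyle := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "path") })
	vhs := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { fmt.Fprint(w, "vhs") })
	h := chooseRouter(pathStyle, vhs)

	r := httptest.NewRequest("GET", "/obj", nil)
	r = r.WithContext(context.WithValue(r.Context(), vhsKey{}, true))
	w := httptest.NewRecorder()
	h.ServeHTTP(w, r)
	fmt.Println(w.Body.String()) // vhs
}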
@@ -23,7 +23,11 @@ import (
     "github.com/stretchr/testify/require"
 )

-const FrostfsNamespaceHeader = "X-Frostfs-Namespace"
+const (
+    FrostfsNamespaceHeader  = "X-Frostfs-Namespace"
+    FrostfsVHSHeader        = "X-Frostfs-S3-VHS"
+    FrostfsServernameHeader = "X-Frostfs-Servername"
+)

 type poolStatisticMock struct {
 }
@@ -73,6 +77,9 @@ func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
 type middlewareSettingsMock struct {
     denyByDefault  bool
     sourceIPHeader string
+    domains              []string
+    vhsEnabled           bool
+    vhsNamespacesEnabled map[string]bool
 }

 func (r *middlewareSettingsMock) SourceIPHeader() string {
@@ -91,6 +98,26 @@ func (r *middlewareSettingsMock) PolicyDenyByDefault() bool {
     return r.denyByDefault
 }

+func (r *middlewareSettingsMock) Domains() []string {
+    return r.domains
+}
+
+func (r *middlewareSettingsMock) GlobalVHS() bool {
+    return r.vhsEnabled
+}
+
+func (r *middlewareSettingsMock) VHSHeader() string {
+    return FrostfsVHSHeader
+}
+
+func (r *middlewareSettingsMock) ServernameHeader() string {
+    return FrostfsServernameHeader
+}
+
+func (r *middlewareSettingsMock) VHSNamespacesEnabled() map[string]bool {
+    return r.vhsNamespacesEnabled
+}
+
 type frostFSIDMock struct {
     tags          map[string]string
     validateError bool
@@ -534,6 +561,10 @@ func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
     h.writeResponse(w, res)
 }

+func (h *handlerMock) PatchObjectHandler(http.ResponseWriter, *http.Request) {
+    panic("implement me")
+}
+
 func (h *handlerMock) ResolveBucket(ctx context.Context, name string) (*data.BucketInfo, error) {
     reqInfo := middleware.GetReqInfo(ctx)
     bktInfo, ok := h.buckets[reqInfo.Namespace+name]
|
||||||
Handler: handlerTestMock,
|
Handler: handlerTestMock,
|
||||||
Center: ¢erMock{t: t},
|
Center: ¢erMock{t: t},
|
||||||
Log: logger,
|
Log: logger,
|
||||||
LogHTTP: new(s3middleware.LogHTTPConfig),
|
|
||||||
Metrics: metrics.NewAppMetrics(metricsConfig),
|
Metrics: metrics.NewAppMetrics(metricsConfig),
|
||||||
MiddlewareSettings: middlewareSettings,
|
MiddlewareSettings: middlewareSettings,
|
||||||
PolicyChecker: policyChecker,
|
PolicyChecker: policyChecker,
|
||||||
Domains: []string{"domain1", "domain2"},
|
|
||||||
FrostfsID: &frostFSIDMock{},
|
FrostfsID: &frostFSIDMock{},
|
||||||
XMLDecoder: &xmlMock{},
|
XMLDecoder: &xmlMock{},
|
||||||
Tagging: &resourceTaggingMock{},
|
Tagging: &resourceTaggingMock{},
|
||||||
|
@ -848,6 +846,31 @@ func TestFrostFSIDValidation(t *testing.T) {
|
||||||
createBucketErr(chiRouter, "", "bkt-3", nil, apiErrors.ErrInternalError)
|
createBucketErr(chiRouter, "", "bkt-3", nil, apiErrors.ErrInternalError)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRouterListObjectsV2Domains(t *testing.T) {
|
||||||
|
chiRouter := prepareRouter(t, enableVHSDomains("domain.com"))
|
||||||
|
|
||||||
|
chiRouter.handler.buckets["bucket"] = &data.BucketInfo{}
|
||||||
|
|
||||||
|
w := httptest.NewRecorder()
|
||||||
|
r := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||||
|
r.Host = "bucket.domain.com"
|
||||||
|
query := make(url.Values)
|
||||||
|
query.Set(s3middleware.ListTypeQuery, "2")
|
||||||
|
r.URL.RawQuery = query.Encode()
|
||||||
|
|
||||||
|
chiRouter.ServeHTTP(w, r)
|
||||||
|
resp := readResponse(t, w)
|
||||||
|
require.Equal(t, s3middleware.ListObjectsV2Operation, resp.Method)
|
||||||
|
}
|
||||||
|
|
||||||
|
func enableVHSDomains(domains ...string) option {
|
||||||
|
return func(cfg *Config) {
|
||||||
|
setting := cfg.MiddlewareSettings.(*middlewareSettingsMock)
|
||||||
|
setting.vhsEnabled = true
|
||||||
|
setting.domains = domains
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
|
||||||
var res handlerResult
|
var res handlerResult
|
||||||
|
|
||||||
|
|
|
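enableVHSDomains follows the functional-option pattern that the test helper prepareRouter already accepts through its opts parameter. A generic sketch of the mechanism, with hypothetical names:

package main

import "fmt"

type config struct{ domains []string }

type option func(*config)

// withDomains is an option that mutates the config in place, just as
// enableVHSDomains adjusts the middleware settings mock.
func withDomains(domains ...string) option {
	return func(c *config) { c.domains = domains }
}

func prepare(opts ...option) *config {
	cfg := &config{}
	for _, o := range opts {
		o(cfg)
	}
	return cfg
}

func main() {
	fmt.Println(prepare(withDomains("domain.com")).domains) // [domain.com]
}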
@@ -50,7 +50,7 @@ func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (*frost
         return nil, fmt.Errorf("dial pool: %w", err)
     }

-    return frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(p, cfg.Key)), nil
+    return frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(p, cfg.Key), log), nil
 }

 func parsePolicies(val string) (authmate.ContainerPolicies, error) {
cmd/s3-gw/app.go: 254 lines changed

@@ -11,6 +11,7 @@ import (
     "os"
     "os/signal"
     "runtime/debug"
+    "strings"
     "sync"
     "syscall"
     "time"
@@ -21,6 +22,7 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/handler"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
     s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
@@ -37,6 +39,8 @@ import (
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
     "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
     "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
     treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
@@ -83,7 +87,6 @@ type (

     appSettings struct {
         logLevel    zap.AtomicLevel
-        httpLogging *s3middleware.LogHTTPConfig
         maxClient   maxClientsConfig
         defaultMaxAge     int
         reconnectInterval time.Duration
@@ -103,6 +106,11 @@ type (
         policyDenyByDefault bool
         sourceIPHeader      string
         retryMaxAttempts    int
+        domains              []string
+        vhsEnabled           bool
+        vhsHeader            string
+        servernameHeader     string
+        vhsNamespacesEnabled map[string]bool
         retryMaxBackoff     time.Duration
         retryStrategy       handler.RetryStrategy
     }
@@ -122,7 +130,7 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
     objPool, treePool, key := getPools(ctx, log.logger, v)

     cfg := tokens.Config{
-        FrostFS:     frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(objPool, key)),
+        FrostFS:     frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(objPool, key), log.logger),
         Key:         key,
         CacheConfig: getAccessBoxCacheConfig(v, log.logger),
         RemovingCheckAfterDurations: fetchRemovingCheckInterval(v, log.logger),
@@ -154,13 +162,13 @@ func (a *App) init(ctx context.Context) {
     a.setRuntimeParameters()
     a.initFrostfsID(ctx)
     a.initPolicyStorage(ctx)
-    a.initAPI()
+    a.initAPI(ctx)
     a.initMetrics()
     a.initServers(ctx)
     a.initTracing(ctx)
 }

-func (a *App) initLayer() {
+func (a *App) initLayer(ctx context.Context) {
     a.initResolver()

     // prepare random key for anonymous requests
@@ -172,6 +180,22 @@ func (a *App) initLayer() {
     var gateOwner user.ID
     user.IDFromKey(&gateOwner, a.key.PrivateKey.PublicKey)

+    var corsCnrInfo *data.BucketInfo
+    if a.cfg.IsSet(cfgContainersCORS) {
+        corsCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersCORS)
+        if err != nil {
+            a.log.Fatal(logs.CouldNotFetchCORSContainerInfo, zap.Error(err))
+        }
+    }
+
+    var lifecycleCnrInfo *data.BucketInfo
+    if a.cfg.IsSet(cfgContainersLifecycle) {
+        lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
+        if err != nil {
+            a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))
+        }
+    }
+
     layerCfg := &layer.Config{
         Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
         AnonKey: layer.AnonymousKey{
@@ -181,6 +205,9 @@ func (a *App) initLayer() {
         Resolver:    a.bucketResolver,
         TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
         Features:    a.settings,
+        GateKey:          a.key,
+        CORSCnrInfo:      corsCnrInfo,
+        LifecycleCnrInfo: lifecycleCnrInfo,
     }

     // prepare object layer
@@ -190,7 +217,6 @@ func (a *App) initLayer() {
 func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
     settings := &appSettings{
         logLevel:    log.lvl,
-        httpLogging: new(s3middleware.LogHTTPConfig),
         maxClient:   newMaxClients(v),
         defaultMaxAge:     fetchDefaultMaxAge(v, log.logger),
         reconnectInterval: fetchReconnectInterval(v),
@@ -209,30 +235,85 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
 }

 func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
-    s.updateNamespacesSettings(v, log)
-    s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
-    s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
-    s.setClientCut(v.GetBool(cfgClientCut))
-    s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
-    s.setMD5Enabled(v.GetBool(cfgMD5Enabled))
-    s.setPolicyDenyByDefault(v.GetBool(cfgPolicyDenyByDefault))
-    s.setSourceIPHeader(v.GetString(cfgSourceIPHeader))
-    s.setRetryMaxAttempts(fetchRetryMaxAttempts(v))
-    s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
-    s.setRetryStrategy(fetchRetryStrategy(v))
-    s.updateHTTPLoggingSettings(v, log)
-}
-
-func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
-    nsHeader := v.GetString(cfgResolveNamespaceHeader)
+    namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
     nsConfig, defaultNamespaces := fetchNamespacesConfig(log, v)
+    defaultXMLNS := v.GetBool(cfgKludgeUseDefaultXMLNS)
+    bypassContentEncodingInChunks := v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks)
+    clientCut := v.GetBool(cfgClientCut)
+    maxBufferSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
+    md5Enabled := v.GetBool(cfgMD5Enabled)
+    policyDenyByDefault := v.GetBool(cfgPolicyDenyByDefault)
+    sourceIPHeader := v.GetString(cfgSourceIPHeader)
+    retryMaxAttempts := fetchRetryMaxAttempts(v)
+    retryMaxBackoff := fetchRetryMaxBackoff(v)
+    retryStrategy := fetchRetryStrategy(v)
+    domains := fetchDomains(v, log)
+    vhsEnabled := v.GetBool(cfgVHSEnabled)
+    vhsHeader := v.GetString(cfgVHSHeader)
+    servernameHeader := v.GetString(cfgServernameHeader)
+    vhsNamespacesEnabled := s.prepareVHSNamespaces(v, log)

     s.mu.Lock()
     defer s.mu.Unlock()

-    s.namespaceHeader = nsHeader
+    s.namespaceHeader = namespaceHeader
     s.defaultNamespaces = defaultNamespaces
     s.namespaces = nsConfig.Namespaces
+    s.defaultXMLNS = defaultXMLNS
+    s.bypassContentEncodingInChunks = bypassContentEncodingInChunks
+    s.clientCut = clientCut
+    s.maxBufferSizeForPut = maxBufferSizeForPut
+    s.md5Enabled = md5Enabled
+    s.policyDenyByDefault = policyDenyByDefault
+    s.sourceIPHeader = sourceIPHeader
+    s.retryMaxAttempts = retryMaxAttempts
+    s.retryMaxBackoff = retryMaxBackoff
+    s.retryStrategy = retryStrategy
+    s.domains = domains
+    s.vhsEnabled = vhsEnabled
+    s.vhsHeader = vhsHeader
+    s.servernameHeader = servernameHeader
+    s.vhsNamespacesEnabled = vhsNamespacesEnabled
 }

+func (s *appSettings) prepareVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
+    nsMap := fetchVHSNamespaces(v, log)
+    vhsNamespaces := make(map[string]bool, len(nsMap))
+    for ns, flag := range nsMap {
+        vhsNamespaces[s.ResolveNamespaceAlias(ns)] = flag
+    }
+
+    return vhsNamespaces
+}
+
+func (s *appSettings) Domains() []string {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    return s.domains
+}
+
+func (s *appSettings) GlobalVHS() bool {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    return s.vhsEnabled
+}
+
+func (s *appSettings) VHSHeader() string {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    return s.vhsHeader
+}
+
+func (s *appSettings) ServernameHeader() string {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    return s.servernameHeader
+}
+
+func (s *appSettings) VHSNamespacesEnabled() map[string]bool {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    return s.vhsNamespacesEnabled
+}
+
 func (s *appSettings) BypassContentEncodingInChunks() bool {
@@ -241,36 +322,18 @@ func (s *appSettings) BypassContentEncodingInChunks() bool {
     return s.bypassContentEncodingInChunks
 }

-func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
-    s.mu.Lock()
-    s.bypassContentEncodingInChunks = bypass
-    s.mu.Unlock()
-}
-
 func (s *appSettings) ClientCut() bool {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.clientCut
 }

-func (s *appSettings) setClientCut(clientCut bool) {
-    s.mu.Lock()
-    s.clientCut = clientCut
-    s.mu.Unlock()
-}
-
 func (s *appSettings) BufferMaxSizeForPut() uint64 {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.maxBufferSizeForPut
 }

-func (s *appSettings) setBufferMaxSizeForPut(size uint64) {
-    s.mu.Lock()
-    s.maxBufferSizeForPut = size
-    s.mu.Unlock()
-}
-
 func (s *appSettings) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
     s.mu.RLock()
     defer s.mu.RUnlock()
@@ -318,12 +381,6 @@ func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
     return dec
 }

-func (s *appSettings) useDefaultXMLNamespace(useDefaultNamespace bool) {
-    s.mu.Lock()
-    s.defaultXMLNS = useDefaultNamespace
-    s.mu.Unlock()
-}
-
 func (s *appSettings) DefaultMaxAge() int {
     return s.defaultMaxAge
 }
@@ -342,24 +399,18 @@ func (s *appSettings) MD5Enabled() bool {
     return s.md5Enabled
 }

-func (s *appSettings) setMD5Enabled(md5Enabled bool) {
-    s.mu.Lock()
-    s.md5Enabled = md5Enabled
-    s.mu.Unlock()
-}
-
 func (s *appSettings) NamespaceHeader() string {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.namespaceHeader
 }

-func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
+func (s *appSettings) FormContainerZone(ns string) string {
     if len(ns) == 0 {
-        return v2container.SysAttributeZoneDefault, true
+        return v2container.SysAttributeZoneDefault
     }

-    return ns + ".ns", false
+    return ns + ".ns"
 }

 func (s *appSettings) isDefaultNamespace(ns string) bool {
@@ -383,62 +434,32 @@ func (s *appSettings) PolicyDenyByDefault() bool {
     return s.policyDenyByDefault
 }

-func (s *appSettings) setPolicyDenyByDefault(policyDenyByDefault bool) {
-    s.mu.Lock()
-    s.policyDenyByDefault = policyDenyByDefault
-    s.mu.Unlock()
-}
-
-func (s *appSettings) setSourceIPHeader(header string) {
-    s.mu.Lock()
-    s.sourceIPHeader = header
-    s.mu.Unlock()
-}
-
 func (s *appSettings) SourceIPHeader() string {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.sourceIPHeader
 }

-func (s *appSettings) setRetryMaxAttempts(maxAttempts int) {
-    s.mu.Lock()
-    s.retryMaxAttempts = maxAttempts
-    s.mu.Unlock()
-}
-
 func (s *appSettings) RetryMaxAttempts() int {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.retryMaxAttempts
 }

-func (s *appSettings) setRetryMaxBackoff(maxBackoff time.Duration) {
-    s.mu.Lock()
-    s.retryMaxBackoff = maxBackoff
-    s.mu.Unlock()
-}
-
 func (s *appSettings) RetryMaxBackoff() time.Duration {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.retryMaxBackoff
 }

-func (s *appSettings) setRetryStrategy(strategy handler.RetryStrategy) {
-    s.mu.Lock()
-    s.retryStrategy = strategy
-    s.mu.Unlock()
-}
-
 func (s *appSettings) RetryStrategy() handler.RetryStrategy {
     s.mu.RLock()
     defer s.mu.RUnlock()
     return s.retryStrategy
 }

-func (a *App) initAPI() {
-    a.initLayer()
+func (a *App) initAPI(ctx context.Context) {
+    a.initLayer(ctx)
     a.initHandler()
 }

@@ -446,6 +467,7 @@ func (a *App) initMetrics() {
     cfg := metrics.AppMetricsConfig{
         Logger:         a.log,
         PoolStatistics: frostfs.NewPoolStatistic(a.pool),
+        TreeStatistic:  a.treePool,
         Enabled:        a.cfg.GetBool(cfgPrometheusEnabled),
     }

@@ -504,7 +526,6 @@ func (a *App) getResolverConfig() *resolver.Config {
     return &resolver.Config{
         FrostFS:    frostfs.NewResolverFrostFS(a.pool),
         RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
-        Settings:   a.settings,
     }
 }

@@ -564,20 +585,6 @@ func newMaxClients(cfg *viper.Viper) maxClientsConfig {
     return config
 }

-func (s *appSettings) updateHTTPLoggingSettings(cfg *viper.Viper, log *zap.Logger) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-
-    s.httpLogging.Enabled = cfg.GetBool(cfgHTTPLoggingEnabled)
-    s.httpLogging.MaxBody = cfg.GetInt64(cfgHTTPLoggingMaxBody)
-    s.httpLogging.MaxLogSize = cfg.GetInt(cfgHTTPLoggingMaxLogSize)
-    s.httpLogging.OutputPath = cfg.GetString(cfgHTTPLoggingDestination)
-    s.httpLogging.UseGzip = cfg.GetBool(cfgHTTPLoggingGzip)
-    if err := s3middleware.ReloadFileLogger(s.httpLogging); err != nil {
-        log.Error(logs.FailedToReloadHTTPFileLogger, zap.Error(err))
-    }
-}
-
 func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.Pool, *treepool.Pool, *keys.PrivateKey) {
     var prm pool.InitParameters
     var prmTree treepool.InitParameters
@@ -615,6 +622,9 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.

     errorThreshold := fetchErrorThreshold(cfg)
     prm.SetErrorThreshold(errorThreshold)
+
+    prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(cfg))
+
     prm.SetLogger(logger)
     prmTree.SetLogger(logger)

@@ -687,10 +697,6 @@ func (a *App) setHealthStatus() {

 // Serve runs HTTP server to handle S3 API requests.
 func (a *App) Serve(ctx context.Context) {
-    // Attach S3 API:
-    domains := a.cfg.GetStringSlice(cfgListenDomains)
-    a.log.Info(logs.FetchDomainsPrepareToUseAPI, zap.Strings("domains", domains))
-
     cfg := api.Config{
         Throttle: middleware.ThrottleOpts{
             Limit: a.settings.maxClient.count,
@@ -699,9 +705,7 @@ func (a *App) Serve(ctx context.Context) {
         Handler: a.api,
         Center:  a.ctr,
         Log:     a.log,
-        LogHTTP: a.settings.httpLogging,
         Metrics: a.metrics,
-        Domains: domains,

         MiddlewareSettings: a.settings,
         PolicyChecker:      a.policyStorage,
@@ -1052,3 +1056,37 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {

     return len(a.unbindServers) == 0
 }
+
+func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data.BucketInfo, err error) {
+    containerString := a.cfg.GetString(cfgKey)
+
+    var id cid.ID
+    if err = id.DecodeString(containerString); err != nil {
+        i := strings.Index(containerString, ".")
+        if i < 0 {
+            return nil, fmt.Errorf("invalid container address: %s", containerString)
+        }
+
+        if id, err = a.bucketResolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
+            return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
+        }
+    }
+
+    return getContainerInfo(ctx, id, a.pool)
+}
+
+func getContainerInfo(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) (*data.BucketInfo, error) {
+    prm := pool.PrmContainerGet{
+        ContainerID: id,
+    }
+
+    res, err := frostFSPool.GetContainer(ctx, prm)
+    if err != nil {
+        return nil, err
+    }
+
+    return &data.BucketInfo{
+        CID:                     id,
+        HomomorphicHashDisabled: container.IsHomomorphicHashingDisabled(res),
+    }, nil
+}
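fetchContainerInfo above accepts either a raw container ID or a "<name>.<zone>" address that goes through the bucket resolver. A small sketch of that split, with demo values rather than gateway code:

package main

import (
	"fmt"
	"strings"
)

// splitContainerAddress separates "<name>.<zone>" at the first dot, the same
// convention fetchContainerInfo uses before calling Resolve(ctx, zone, name).
func splitContainerAddress(s string) (name, zone string, ok bool) {
	i := strings.Index(s, ".")
	if i < 0 {
		return "", "", false // treat as a raw container ID instead
	}
	return s[:i], s[i+1:], true
}

func main() {
	name, zone, ok := splitContainerAddress("lifecycle.container.zone")
	fmt.Println(name, zone, ok) // lifecycle container.zone true
}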
@@ -30,6 +30,8 @@ import (
 const (
     destinationStdout   = "stdout"
     destinationJournald = "journald"
+
+    wildcardPlaceholder = "<wildcard>"
 )

 const (
@@ -39,6 +41,8 @@ const (
     defaultStreamTimeout   = 10 * time.Second
     defaultShutdownTimeout = 15 * time.Second

+    defaultGracefulCloseOnSwitchTimeout = 10 * time.Second
+
     defaultPoolErrorThreshold uint32 = 100
     defaultPlacementPolicy           = "REP 3"
@@ -53,6 +57,8 @@ const (
     defaultAccessBoxCacheRemovingCheckInterval = 5 * time.Minute

     defaultNamespaceHeader  = "X-Frostfs-Namespace"
+    defaultVHSHeader        = "X-Frostfs-S3-VHS"
+    defaultServernameHeader = "X-Frostfs-Servername"

     defaultConstraintName = "default"

@@ -75,13 +81,6 @@ const ( // Settings.
     cfgLoggerLevel       = "logger.level"
     cfgLoggerDestination = "logger.destination"

-    // HttpLogging.
-    cfgHTTPLoggingEnabled     = "http_logging.enabled"
-    cfgHTTPLoggingMaxBody     = "http_logging.max_body"
-    cfgHTTPLoggingMaxLogSize  = "http_logging.max_log_size"
-    cfgHTTPLoggingDestination = "http_logging.destination"
-    cfgHTTPLoggingGzip        = "http_logging.gzip"
-
     // Wallet.
     cfgWalletPath    = "wallet.path"
     cfgWalletAddress = "wallet.address"
@@ -151,6 +150,12 @@ const ( // Settings.

     cfgListenDomains = "listen_domains"

+    // VHS.
+    cfgVHSEnabled       = "vhs.enabled"
+    cfgVHSHeader        = "vhs.vhs_header"
+    cfgServernameHeader = "vhs.servername_header"
+    cfgVHSNamespaces    = "vhs.namespaces"
+
     // Peers.
     cfgPeers = "peers"

@@ -183,6 +188,10 @@ const ( // Settings.

     cfgSourceIPHeader = "source_ip_header"

+    // Containers.
+    cfgContainersCORS      = "containers.cors"
+    cfgContainersLifecycle = "containers.lifecycle"
+
     // Command line args.
     cmdHelp    = "help"
     cmdVersion = "version"
@@ -203,6 +212,10 @@ const ( // Settings.
     // Sets max attempt to make successful tree request.
     cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"

+    // Specifies the timeout after which unhealthy client be closed during rebalancing
+    // if it will become healthy back.
+    cfgGracefulCloseOnSwitchTimeout = "frostfs.graceful_close_on_switch_timeout"
+
     // List of allowed AccessKeyID prefixes.
     cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"

@@ -289,6 +302,15 @@ func fetchRebalanceInterval(cfg *viper.Viper) time.Duration {
     return rebalanceInterval
 }

+func fetchSetGracefulCloseOnSwitchTimeout(cfg *viper.Viper) time.Duration {
+    val := cfg.GetDuration(cfgGracefulCloseOnSwitchTimeout)
+    if val <= 0 {
+        val = defaultGracefulCloseOnSwitchTimeout
+    }
+
+    return val
+}
+
 func fetchErrorThreshold(cfg *viper.Viper) uint32 {
     errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
     if errorThreshold <= 0 {
@@ -671,6 +693,41 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
     return servers
 }

+func fetchDomains(v *viper.Viper, log *zap.Logger) []string {
+    domains := validateDomains(v.GetStringSlice(cfgListenDomains), log)
+
+    countParts := func(domain string) int {
+        return strings.Count(domain, ".")
+    }
+
+    sort.Slice(domains, func(i, j int) bool {
+        return countParts(domains[i]) > countParts(domains[j])
+    })
+
+    return domains
+}
+
+func fetchVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
+    vhsNamespacesEnabled := make(map[string]bool)
+    nsMap := v.GetStringMap(cfgVHSNamespaces)
+    for ns, val := range nsMap {
+        if _, ok := vhsNamespacesEnabled[ns]; ok {
+            log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns))
+            continue
+        }
+
+        enabledFlag, ok := val.(bool)
+        if !ok {
+            log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns))
+            continue
+        }
+
+        vhsNamespacesEnabled[ns] = enabledFlag
+    }
+
+    return vhsNamespacesEnabled
+}
+
 func newSettings() *viper.Viper {
     v := viper.New()

@@ -722,13 +779,6 @@ func newSettings() *viper.Viper {
     v.SetDefault(cfgLoggerLevel, "debug")
     v.SetDefault(cfgLoggerDestination, "stdout")

-    // http logger
-    v.SetDefault(cfgHTTPLoggingEnabled, false)
-    v.SetDefault(cfgHTTPLoggingMaxBody, 1024)
-    v.SetDefault(cfgHTTPLoggingMaxLogSize, 50)
-    v.SetDefault(cfgHTTPLoggingDestination, "stdout")
-    v.SetDefault(cfgHTTPLoggingGzip, false)
-
     // pool:
     v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
     v.SetDefault(cfgStreamTimeout, defaultStreamTimeout)
@@ -764,6 +814,10 @@ func newSettings() *viper.Viper {
     v.SetDefault(cfgRetryMaxAttempts, defaultRetryMaxAttempts)
     v.SetDefault(cfgRetryMaxBackoff, defaultRetryMaxBackoff)

+    // vhs
+    v.SetDefault(cfgVHSHeader, defaultVHSHeader)
+    v.SetDefault(cfgServernameHeader, defaultServernameHeader)
+
     // Bind flags
     if err := bindFlags(v, flags); err != nil {
         panic(fmt.Errorf("bind flags: %w", err))
@@ -1039,3 +1093,19 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
     }
     return lvl, nil
 }
+
+func validateDomains(domains []string, log *zap.Logger) []string {
+    validDomains := make([]string, 0, len(domains))
+LOOP:
+    for _, domain := range domains {
+        domainParts := strings.Split(domain, ".")
+        for _, part := range domainParts {
+            if strings.ContainsAny(part, "<>") && part != wildcardPlaceholder {
+                log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain))
+                continue LOOP
+            }
+        }
+        validDomains = append(validDomains, domain)
+    }
+    return validDomains
+}
|
|
cmd/s3-gw/validate_test.go (new file, 34 lines)

@@ -0,0 +1,34 @@
+package main
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zaptest"
+)
+
+func TestValidateDomains(t *testing.T) {
+	inputDomains := []string{
+		"s3dev.frostfs.devenv",
+		"s3dev.<invalid>.frostfs.devenv",
+		"s3dev.<wildcard>.frostfs.devenv",
+		"s3dev.<wildcard.frostfs.devenv",
+		"s3dev.wildcard>.frostfs.devenv",
+		"s3dev.<wild.card>.frostfs.devenv",
+		"<invalid>.frostfs.devenv",
+		"<wildcard>.frostfs.devenv>",
+		"<wildcard>.frostfs.devenv",
+		"s3dev.fro<stfs.devenv",
+		"<wildcard>.dev.<wildcard>.frostfs.devenv",
+		"<wildcard>.dev.<wildc>ard>.frostfs.devenv",
+	}
+	expectedDomains := []string{
+		"s3dev.frostfs.devenv",
+		"s3dev.<wildcard>.frostfs.devenv",
+		"<wildcard>.frostfs.devenv",
+		"<wildcard>.dev.<wildcard>.frostfs.devenv",
+	}
+
+	actualDomains := validateDomains(inputDomains, zaptest.NewLogger(t))
+	require.Equal(t, expectedDomains, actualDomains)
+}
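The test shows which placeholder spellings survive validation, but not how a `<wildcard>` domain is matched at request time. One plausible, purely hypothetical illustration (not the gateway's actual matching code) is to treat `<wildcard>` as matching exactly one DNS label:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// hostPattern is a hypothetical helper: each <wildcard> placeholder
// matches a single DNS label, every other part is taken literally.
func hostPattern(domain string) *regexp.Regexp {
	parts := strings.Split(domain, ".")
	for i, p := range parts {
		if p == "<wildcard>" {
			parts[i] = `[^.]+`
		} else {
			parts[i] = regexp.QuoteMeta(p)
		}
	}
	return regexp.MustCompile(`^` + strings.Join(parts, `\.`) + `$`)
}

func main() {
	re := hostPattern("<wildcard>.frostfs.devenv")
	fmt.Println(re.MatchString("ns1.frostfs.devenv")) // true
	fmt.Println(re.MatchString("a.b.frostfs.devenv")) // false: one label only
}
```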
@@ -36,8 +36,15 @@ S3_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
 # How often to reconnect to the servers
 S3_GW_RECONNECT_INTERVAL: 1m

-# Domains to be able to use virtual-hosted-style access to bucket.
-S3_GW_LISTEN_DOMAINS=s3dev.frostfs.devenv
+# Domains to be able to use virtual-hosted-style access to bucket
+S3_GW_LISTEN_DOMAINS="domain.com <wildcard>.domain.com"
+
+# VHS enabled flag
+S3_GW_VHS_ENABLED=false
+# Header for determining whether VHS is enabled for the request
+S3_GW_VHS_VHS_HEADER=X-Frostfs-S3-VHS
+# Header for determining servername
+S3_GW_VHS_SERVERNAME_HEADER=X-Frostfs-Servername

 # Config file
 S3_GW_CONFIG=/path/to/config/yaml

@@ -45,17 +52,6 @@ S3_GW_CONFIG=/path/to/config/yaml
 # Logger
 S3_GW_LOGGER_LEVEL=debug

-# HTTP logger
-S3_GW_HTTP_LOGGING_ENABLED=false
-# max body size to log
-S3_GW_HTTP_LOGGING_MAX_BODY=1024
-# max log size in Mb
-S3_GW_HTTP_LOGGING_MAX_LOG_SIZE: 20
-# use log compression
-S3_GW_HTTP_LOGGING_GZIP=true
-# possible destination output values: filesystem path, url, "stdout", "stderr"
-S3_GW_HTTP_LOGGING_DESTINATION=stdout
-
 # RPC endpoint and order of resolving of bucket names
 S3_GW_RPC_ENDPOINT=http://morph-chain.frostfs.devenv:30333/
 S3_GW_RESOLVE_ORDER="nns dns"

@@ -148,6 +144,8 @@ S3_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
 # max attempt to make successful tree request.
 # default value is 0 that means the number of attempts equals to number of nodes in pool.
 S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
+# Specifies the timeout after which an unhealthy client is closed during rebalancing if it becomes healthy again.
+S3_GW_FROSTFS_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT=10s

 # List of allowed AccessKeyID prefixes
 # If not set, S3 GW will accept all AccessKeyIDs

@@ -227,3 +225,6 @@ S3_GW_RETRY_MAX_BACKOFF=30s
 # Backoff strategy. `exponential` and `constant` are allowed.
 S3_GW_RETRY_STRATEGY=exponential
+
+# Containers properties
+S3_GW_CONTAINERS_CORS=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
+S3_GW_CONTAINERS_LIFECYCLE=AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
@@ -42,23 +42,20 @@ server:
 # Domains to be able to use virtual-hosted-style access to bucket.
 listen_domains:
   - s3dev.frostfs.devenv
+  - s3dev.<wildcard>.frostfs.devenv
+
+vhs:
+  enabled: false
+  vhs_header: X-Frostfs-S3-VHS
+  servername_header: X-Frostfs-Servername
+  namespaces:
+    "ns1": false
+    "ns2": true

 logger:
   level: debug
   destination: stdout

-  # log http request data (URI, headers, query, etc)
-  http_logging:
-    enabled: false
-    # max body size to log
-    max_body: 1024
-    # max log size in Mb
-    max_log_size: 20
-    # use log compression
-    gzip: true
-    # possible output values: filesystem path, url, "stdout", "stderr"
-    destination: stdout
-
 # RPC endpoint and order of resolving of bucket names
 rpc_endpoint: http://morph-chain.frostfs.devenv:30333
 resolve_order:

@@ -174,6 +171,8 @@ frostfs:
   client_cut: false
   # Sets max buffer size for read payload in put operations.
   buffer_max_size_for_put: 1048576
+  # Specifies the timeout after which an unhealthy client is closed during rebalancing if it becomes healthy again.
+  graceful_close_on_switch_timeout: 10s

 # List of allowed AccessKeyID prefixes
 # If the parameter is omitted, S3 GW will accept all AccessKeyIDs

@@ -264,3 +263,8 @@ retry:
   max_backoff: 30s
   # Backoff strategy. `exponential` and `constant` are allowed.
   strategy: exponential
+
+# Containers properties
+containers:
+  cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
+  lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
@@ -92,6 +92,7 @@ type FrostFS interface {
 	//
 	// It returns exactly one non-nil value. It returns any error encountered which
 	// prevented the object payload from being read.
+	// Object must contain full payload.
 	GetCredsObject(context.Context, oid.Address) (*object.Object, error)
 }
@@ -6,23 +6,23 @@ This document describes s3-gw authentication and authorization mechanism.

 Basic provisions:

-* A request to s3-gw can be signed or not (request that isn't signed we will cal anonymous or just anon)
+* A request to s3-gw can be signed or not (a request that isn't signed we will call anonymous, or just anon)
 * To manage resources (buckets/objects) using s3-gw you must have appropriate access rights

 Each request must be authenticated (at least as anonymous) and authorized. The following scheme shows components that
-are involved to this
+are involved in this
 process.

 <a>
  <img src="images/authentication/auth-overview.svg" alt="Auth general overview"/>
 </a>

-There are several participants of this process:
+There are several participants in this process:

 1. User that make a request
 2. S3-GW that accepts a request
 3. FrostFS Storage that stores AccessObjects (objects are needed for authentication)
-4. Blockchain smart contracts (`frostfsid`, `policy`) that stores user info and access rules.
+4. Blockchain smart contracts (`frostfsid`, `policy`) that store user info and access rules.

 ## Data auth process

@@ -32,23 +32,23 @@ Let's look at the process in more detail:
  <img src="images/authentication/auth-sequence.svg" alt="Auth sequence diagram"/>
 </a>

-* First of all, someone make a request. If request is signed we will check its signature (`Authentication`) after that
-we will check access rights using policies (`Auhorization`). For anonymous requests only authorization be performed.
+* First of all, someone makes a request. If the request is signed we will check its signature (`Authentication`); after that
+we will check access rights using policies (`Authorization`). For anonymous requests only authorization is performed.

 * **Authentication steps**:
   * Each signed request is provided with `AccessKeyId` and signature. So if request is signed we must check its
-    signature. To do this we must know the `AccessKeyId`/`SecretAccessKey` pair (How the signature is calculated
-    using this pair see [signing](#aws-signing). Client and server (s3-gw) use the same credentials and algorithm to
-    compute signature). The `AccessKeyId` is a public part of credentials, and it's passed to gate in request. The
+    signature. To do this we must know the `AccessKeyId`/`SecretAccessKey` pair (for how the signature is calculated
+    using this pair, see [signing](#aws-signing); client and server (s3-gw) use the same credentials and algorithm to
+    compute the signature). The `AccessKeyId` is a public part of credentials, and it's passed to the gate in the request. The
     private part of credentials is `SecretAccessKey` and it's encrypted and stored in [AccessBox](#accessbox). So on
-    this step we must find appropriate `AccessBox` in FrostFS storage node (How to find appropriate `AccessBox`
-    knowing `AccessKeyId` see [search algorithm](#search-algorithm)). On this stage we can get `AccessDenied` from
+    this step we must find the appropriate `AccessBox` in a FrostFS storage node (for how to find the appropriate `AccessBox`
+    knowing `AccessKeyId`, see [search algorithm](#search-algorithm)). At this stage we can get `AccessDenied` from
     FrostFS storage node if the s3-gw doesn't have permission to read this `AccessBox` object.

-  * After successful retrieving object we must extract `SecretAccessKey` from it. Since it's encrypted the s3-gw must
-    decrypt (see [encryption](#encryption)) this object using own private key and `SeedKey` from `AccessBox`
-    (see [AccessBox inner structure](#accessbox)). After s3-gw have got the `AccessKeyId`/`SecretAccessKey` pair it
-    [calculate signature](#aws-signing) and compare got signature with provided withing request. If signature doesn't
-    match the `AccessDenied` is returned.
+  * After successfully retrieving the object we must extract `SecretAccessKey` from it. Since it's encrypted, the s3-gw must
+    decrypt (see [encryption](#encryption)) this object using its own private key and `SeedKey` from `AccessBox`
+    (see [AccessBox inner structure](#accessbox)). After the s3-gw has got the `AccessKeyId`/`SecretAccessKey` pair it
+    [calculates the signature](#aws-signing) and compares it with the one provided in the request. If the signatures don't
+    match, `AccessDenied` is returned.

 * `AccessBox` also contains `OwnerID` that is related to `AccessKeyId` that was provided. So we have to check if

@@ -63,7 +63,7 @@ Let's look at the process in more detail:

 * After successful authentication and authorization the request will be processed by s3-gw business logic and finally be
   propagated to FrostFS storage node which also performs some auth checks and can return `AccessDenied`. If this happens
-  s3-gw also returns `AccessDenied` as response.
+  s3-gw also returns `AccessDenied` as a response.

 ### AWS Signing

@@ -77,7 +77,7 @@ authentication with the AWS Signature Version 4 algorithm. More info in AWS docu

 You can express authentication information by using one of the following methods:

-* **HTTP Authorization header** - Using the HTTP Authorization header is the most common method of authenticating an
+* **HTTP Authorization header** - Using the HTTP Authorization header is the most common method of authenticating a
   FrostFS S3 request. All the FrostFS S3 REST operations (except for browser-based uploads using POST requests) require
   this header. For more information about the Authorization header value, and how to calculate signature and related
   options,

@@ -114,7 +114,7 @@ parameters for authentication, you use a varying combination of request elements
 HTTP POST request, the POST policy in the request is the string you sign. For more information about computing string to
 sign, follow links provided at the end of this section.

-For signing key, the diagram shows series of calculations, where result of each step you feed into the next step. The
+For the signing key, the diagram shows a series of calculations, where the result of each step is fed into the next step. The
 final step is the signing key.

 Upon receiving an authenticated request, FrostFS S3 servers re-create the signature by using the authentication

@@ -139,7 +139,7 @@ See detains in [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/A

 #### s3-gw

-s3-gw support the following ways to provide the singed request:
+s3-gw supports the following ways to provide the signed request:

 * [HTTP Authorization header](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html)
 * [Query string parameters](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
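For reference, the signing key described above is derived by a chain of HMAC-SHA256 steps, where each step's output keys the next. A minimal self-contained sketch of the standard SigV4 derivation (the credential values are illustrative, not from the gateway's code):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

// signingKey chains HMAC-SHA256 over date, region, and service,
// exactly as the SigV4 diagram describes.
func signingKey(secret, date, region, service string) []byte {
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	return hmacSHA256(kService, []byte("aws4_request"))
}

func main() {
	key := signingKey("wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", "20240301", "us-east-1", "s3")
	// The final signature is an HMAC of the string-to-sign under this key.
	fmt.Println(hex.EncodeToString(key))
}
```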
@@ -153,10 +153,10 @@ if they don't match the access denied is returned.
 ### AccessBox

 `AccessBox` is an ordinary object in FrostFS storage. It contains all information that can be used by s3-gw to
-successfully authenticate request. Also, it contains data that is required to successful authentication in FrostFS
+successfully authenticate a request. Also, it contains data that is required for successful authentication in FrostFS
 storage node.

-Based on this object s3 credentials are formed:
+S3 credentials are formed based on this object:

 * `AccessKeyId` - is concatenated container id and object id (`<cid>0<oid>`) of `AccessBox` (
   e.g. `2XGRML5EW3LMHdf64W2DkBy1Nkuu4y4wGhUj44QjbXBi05ZNvs8WVwy1XTmSEkcVkydPKzCgtmR7U3zyLYTj3Snxf`)
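Because base58 encoding never produces the character `0`, the first `0` in an `AccessKeyId` unambiguously separates the container id from the object id. A sketch of that decoding (the helper name is illustrative, not the gateway's actual API):

```go
package main

import (
	"fmt"
	"strings"
)

// splitAccessKeyID is a hypothetical helper: '0' is outside the
// base58 alphabet, so it can only be the <cid>0<oid> separator.
func splitAccessKeyID(accessKeyID string) (cid, oid string, err error) {
	i := strings.Index(accessKeyID, "0")
	if i < 0 {
		return "", "", fmt.Errorf("no '0' separator in %q", accessKeyID)
	}
	return accessKeyID[:i], accessKeyID[i+1:], nil
}

func main() {
	cid, oid, err := splitAccessKeyID("2XGRML5EW3LMHdf64W2DkBy1Nkuu4y4wGhUj44QjbXBi05ZNvs8WVwy1XTmSEkcVkydPKzCgtmR7U3zyLYTj3Snxf")
	if err != nil {
		panic(err)
	}
	fmt.Println("cid:", cid)
	fmt.Println("oid:", oid)
}
```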
@@ -173,9 +173,9 @@ S3 credentials are formed based on this object:

 **Headers:**

-`AccessBox` object has the following attributes (at least them, it also can contain custom one):
+`AccessBox` object has the following attributes (at least these; it can also contain custom ones):

-* `Timestamp` - unix timestamp when object was created
+* `Timestamp` - unix timestamp indicating when the object was created
 * `__SYSTEM__EXPIRATION_EPOCH` - epoch after which the object isn't available anymore
 * `S3-CRDT-Versions-Add` - comma separated list of previous versions of `AccessBox` (
   see [AccessBox versions](#accessbox-versions))

@@ -190,7 +190,7 @@ It contains:

 * Seed key - hex-encoded public seed key to compute shared secret using ECDH (see [encryption](#encryption))
 * List of gate data:
-  * Gate public key (so that gate (when it will decrypt data later) know which one item from list it should process)
+  * Gate public key (so that the gate, when it decrypts the data later, knows which item from the list it should process)
   * Encrypted tokens:
     * `SecretAccessKey` - hex-encoded random generated 32 bytes
     * Marshaled bearer token - more detail

@@ -207,16 +207,16 @@ It contains:

 Imagine the following scenario:

-* There is a system where only one s3-gw exist
-* There is a `AccessBox` that can be used by this s3-gw
-* User has s3 credentials (`AccessKeyId`/`SecretAccessKey`) related to corresponded `AccessBox` and can successfully
+* There is a system where only one s3-gw exists
+* There is an `AccessBox` that can be used by this s3-gw
+* User has s3 credentials (`AccessKeyId`/`SecretAccessKey`) related to the corresponding `AccessBox` and can successfully
   make request to s3-gw
-* The system is expanded and new one s3-gw is added
-* User must be able to use the credentials (that he has already had) to make request to new one s3-gw
+* The system is expanded and a new s3-gw is added
+* User must be able to use the credentials (that they already have) to make requests to the new s3-gw

 Since `AccessBox` object is immutable and `SecretAccessKey` is encrypted only for restricted list of keys (can be used
-(decrypted) only by limited number of s3-gw) we have to create new `AccessBox` that has encrypted secrets for new list
-of s3-gw and be related to initial s3 credentials (`AccessKeyId`/`SecretAccessKey`). Such relationship is done
+(decrypted) only by a limited number of s3-gw) we have to create a new `AccessBox` that has encrypted secrets for a new list
+of s3-gw and is related to the initial s3 credentials (`AccessKeyId`/`SecretAccessKey`). Such relation is done
 by `S3-Access-Box-CRDT-Name`.

 ##### Search algorithm

@@ -285,10 +285,10 @@ is performed the following algorithm is applied:
 * If no rules were matched return `deny` status.

-To local and contract policies `deny first` scheme is applied. This means that if several rules were matched for
-reqeust (with both statuses `allow` and `deny`) the resulting status be `deny`.
+To local and contract policies the `deny first` scheme is applied. This means that if several rules were matched for
+a request (with both statuses `allow` and `deny`) the resulting status is `deny`.

 Policy rules validate if specified request can be performed on the specific resource. Request and resource can contain
-some properties and rules can contain conditions on some such properties.
+some properties, and rules can contain conditions on some of these properties.

 In s3-gw resource is `/bucket/object`, `/bucket` or just `/` (if request is trying to list buckets).
 Currently, request that is checked contains the following properties (so policy rule can contain conditions on them):
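The `deny first` resolution described above can be summarized in a few lines. A schematic sketch (types and names are illustrative, not the policy engine's real API):

```go
package main

import "fmt"

type Status int

const (
	NoMatch Status = iota
	Allow
	Deny
)

// resolve applies the deny-first scheme: any matched deny rule wins,
// an allow stands only if nothing denied, and no matches mean deny.
func resolve(matched []Status) Status {
	result := NoMatch
	for _, s := range matched {
		if s == Deny {
			return Deny
		}
		if s == Allow {
			result = Allow
		}
	}
	if result == NoMatch {
		return Deny // no rules matched: default deny
	}
	return result
}

func main() {
	fmt.Println(resolve([]Status{Allow, Deny}) == Deny) // true
	fmt.Println(resolve([]Status{Allow}) == Allow)      // true
	fmt.Println(resolve(nil) == Deny)                   // true
}
```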
@@ -176,7 +176,6 @@ There are some custom types used for brevity:
 | `placement_policy` | [Placement policy configuration](#placement_policy-section) |
 | `server` | [Server configuration](#server-section) |
 | `logger` | [Logger configuration](#logger-section) |
-| `http_logging` | [HTTP Request logger configuration](#http_logging-section) |
 | `cache` | [Cache configuration](#cache-section) |
 | `cors` | [CORS configuration](#cors-section) |
 | `pprof` | [Pprof configuration](#pprof-section) |

@@ -193,12 +192,15 @@ There are some custom types used for brevity:
 | `proxy` | [Proxy contract configuration](#proxy-section) |
 | `namespaces` | [Namespaces configuration](#namespaces-section) |
 | `retry` | [Retry configuration](#retry-section) |
+| `containers` | [Containers configuration](#containers-section) |
+| `vhs` | [VHS configuration](#vhs-section) |

 ### General section

 ```yaml
 listen_domains:
   - s3dev.frostfs.devenv
+  - s3dev.<wildcard>.frostfs.devenv
   - s3dev2.frostfs.devenv

 rpc_endpoint: http://morph-chain.frostfs.devenv:30333

@@ -226,7 +228,7 @@ source_ip_header: "Source-Ip"
 | Parameter | Type | SIGHUP reload | Default value | Description |
 |-----------|------|---------------|---------------|-------------|
-| `listen_domains` | `[]string` | no | | Domains to be able to use virtual-hosted-style access to bucket. |
+| `listen_domains` | `[]string` | yes | | Domains to be able to use virtual-hosted-style access to bucket. The presence of placeholders of the `<wildcard>` type is supported. |
 | `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
 | `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
 | `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |

@@ -374,27 +376,6 @@ logger:
 | `level` | `string` | yes | `debug` | Logging level.<br/>Possible values: `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. |
 | `destination` | `string` | no | `stdout` | Destination for logger: `stdout` or `journald` |

-### `http_logging` section
-
-```yaml
-http_logging:
-  enabled: false
-  max_body: 1024
-  max_log_size: 20
-  gzip: true
-  destination: stdout
-```
-
-| Parameter | Type | SIGHUP reload | Default value | Description |
-|----------------|--------|---------------|---------------|-------------|
-| `enabled` | bool | yes | false | Flag to enable the logger. |
-| `max_body` | int | yes | 1024 | Max body size for log output in bytes. |
-| `max_log_size` | int | yes | 50 | Log file size threshold (in megabytes) to be moved in backup file. After reaching threshold, initial filename is appended with timestamp. And new empty file with initial name is created. |
-| `gzip` | bool | yes | false | Whether to enable Gzip compression for backup log files. |
-| `destination` | string | yes | stdout | Specify path for log output. Accepts log file path, or "stdout" and "stderr" reserved words to print in output streams. |

 ### `cache` section

 ```yaml

@@ -544,14 +525,16 @@ frostfs:
   client_cut: false
   buffer_max_size_for_put: 1048576 # 1mb
   tree_pool_max_attempts: 0
+  graceful_close_on_switch_timeout: 10s
 ```

 | Parameter | Type | SIGHUP reload | Default value | Description |
 |-----------|------|---------------|---------------|-------------|
 | `set_copies_number` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
 | `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
 | `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
 | `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
+| `graceful_close_on_switch_timeout` | `duration` | no | `10s` | Specifies the timeout after which an unhealthy client is closed during rebalancing if it becomes healthy again. |

 # `resolve_bucket` section

@@ -730,3 +713,38 @@ retry:
 | `max_backoff` | `duration` | yes | `30s` | Max delay before next attempt. |
 | `strategy` | `string` | yes | `exponential` | Backoff strategy. `exponential` and `constant` are allowed. |
+
+# `containers` section
+
+Section for well-known containers to store s3-related data and settings.
+
+```yaml
+containers:
+  cors: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
+  lifecycle: AZjLTXfK4vs4ovxMic2xEJKSymMNLqdwq9JT64ASFCRj
+```
+
+| Parameter | Type | SIGHUP reload | Default value | Description |
+|-------------|----------|---------------|---------------|-------------|
+| `cors` | `string` | no | | Container name for CORS configurations. If not set, container of the bucket is used. |
+| `lifecycle` | `string` | no | | Container name for lifecycle configurations. If not set, container of the bucket is used. |
+
+# `vhs` section
+
+Configuration of virtual hosted addressing style.
+
+```yaml
+vhs:
+  enabled: false
+  vhs_header: X-Frostfs-S3-VHS
+  servername_header: X-Frostfs-Servername
+  namespaces:
+    "ns1": false
+    "ns2": true
+```
+
+| Parameter | Type | SIGHUP reload | Default value | Description |
+|---------------------|-------------------|---------------|------------------------|-------------|
+| `enabled` | `bool` | yes | `false` | Enables the use of virtual host addressing for buckets at the application level. |
+| `vhs_header` | `string` | yes | `X-Frostfs-S3-VHS` | Header for determining whether VHS is enabled for the request. |
+| `servername_header` | `string` | yes | `X-Frostfs-Servername` | Header for determining servername. |
+| `namespaces` | `map[string]bool` | yes | | A map in which the keys are namespace names and the values are flags enabling VHS for the given namespace. Overrides the global `enabled` setting even when it is disabled. |
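Per the table above, a namespace entry overrides the global flag in both directions. A schematic sketch of that lookup (function and variable names are illustrative, not the gateway's code):

```go
package main

import "fmt"

// vhsEnabled illustrates the precedence described above:
// a per-namespace entry wins over the global flag.
func vhsEnabled(global bool, namespaces map[string]bool, ns string) bool {
	if enabled, ok := namespaces[ns]; ok {
		return enabled
	}
	return global
}

func main() {
	namespaces := map[string]bool{"ns1": false, "ns2": true}
	fmt.Println(vhsEnabled(false, namespaces, "ns2")) // true: override wins
	fmt.Println(vhsEnabled(true, namespaces, "ns1"))  // false: override wins
	fmt.Println(vhsEnabled(false, namespaces, "ns3")) // false: global default
}
```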
docs/extensions.md (new file, 221 lines)

@@ -0,0 +1,221 @@
+# S3 API Extension
+
+## Bucket operations management
+
+### Action to delete bucket (DeleteBucket)
+
+Deletes bucket with all objects in it.
+
+#### Request Parameters
+
+- **Bucket**
+
+  Specifies the bucket being deleted.
+
+#### Errors
+
+- **NoSuchEntity**
+
+  The request was rejected because it referenced a resource entity that does not exist.
+
+  HTTP Status Code: 404
+
+- **ServiceFailure**
+
+  The request processing has failed because of an unknown error, exception or failure.
+
+  HTTP Status Code: 500
+
+#### Example
+
+Sample Request
+
+```text
+DELETE / HTTP/1.1
+X-Amz-Force-Delete-Bucket: true
+Host: data.s3.<Region>.frostfs-s3-gw.com
+Date: Wed, 01 Mar 2024 12:00:00 GMT
+Authorization: authorization string
+```
+
+Sample Response
+
+```text
+HTTP/1.1 204 No Content
+x-amz-id-2: JuKZqmXuiwFeDQxhD7M8KtsKobSzWA1QEjLbTMTagkKdBX2z7Il/jGhDeJ3j6s80
+x-amz-request-id: 32FE2CEB32F5EE25
+Date: Wed, 01 Mar 2006 12:00:00 GMT
+Connection: close
+Server: AmazonS3
+```
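A minimal client-side sketch of the request above using plain net/http (unsigned, with an illustrative endpoint; a real call would add AWS SigV4 authentication, e.g. via an S3 SDK):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Force-delete a bucket together with its objects, as described above.
	req, err := http.NewRequest(http.MethodDelete, "https://data.s3.example.frostfs-s3-gw.com/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Amz-Force-Delete-Bucket", "true")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}
```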
+## Object operations management
+
+### Action to patch object (PatchObject)
+
+Allows partially changing and adding data to an existing object.
+
+> **Note**: patch is not supported for objects that were uploaded using SSE.
+
+#### Path parameters
+
+- **Bucket**
+
+  Bucket name.
+
+  _Required: Yes_
+
+- **Key**
+
+  Object name.
+
+  _Required: Yes_
+
+#### Query parameters
+
+- **versionId**
+
+  Version of the original object to patch.
+
+  _Required: No_
+
+#### Request headers
+
+- **Content-Range**
+
+  The byte range of the object (or its version) to patch.
+
+  The value is formed as follows: `bytes {start byte}-{end byte}/*`.
+
+  Range boundaries are included.
+
+  The maximum range length is 5GB.
+
+  To write additional data to the object, the end byte must be greater than the object size.
+
+  The start byte cannot be greater than the object size.
+
+  The range length must be equal to the value of the **_Content-Length_** header.
+
+  The format corresponds to the [RFC 9110](https://www.rfc-editor.org/rfc/rfc9110#name-content-range) specification with the following
+  exceptions:
+
+  - **_complete-length_** parameter is ignored.
+  - **_last-pos_** parameter is optional (if not specified, the value is assumed to be equal to the end byte of the object).
+
+  _Required: Yes_
+
+- **Content-Length**
+
+  Number of bytes sent in the request body.
+
+  _Required: Yes_
+
+- **If-Match**
+
+  Patch is performed if the ETag of the object (or its version) is equal to the one specified in the header.
+
+  _Required: No_
+
+- **If-Unmodified-Since**
+
+  Patch is performed if the object (or its version) has not changed since the time specified in the header.
+
+  _Required: No_
+
+- **x-amz-expected-bucket-owner**
+
+  ID of the intended owner of the bucket.
+
+  _Required: No_
+
+#### Request body
+
+Contains new data for the passed byte range of the object.
+
+#### Response
+
+The request returns the following data in XML format.
+
+- **PatchObjectResult**
+
+  Root level tag for parameters.
+
+- **Object**
+
+  Parent tag for patch results.
+
+- **LastModified**
+
+  Time when the object was last modified. Applying a patch does not change this value.
+
+- **ETag**
+
+  Patched object tag. For regular objects always in SHA-256 format.
+
+If the bucket is versioned, the **_x-amz-version-id_** header is returned with the version of the created object.
+
+#### Errors
+
+- **MissingContentRange**
+
+  The required **_Content-Range_** header was not sent.
+
+  HTTP Status Code: 400
+
+- **NoSuchBucket**
+
+  The specified bucket does not exist.
+
+  HTTP Status Code: 404
+
+- **NoSuchKey**
+
+  The specified object does not exist.
+
+  HTTP Status Code: 404
+
+- **MissingContentLength**
+
+  The required **_Content-Length_** header was not sent.
+
+  HTTP Status Code: 411
+
+- **PreconditionFailed**
+
+  At least one of the preconditions is not satisfied.
+
+  HTTP Status Code: 412
+
+- **InvalidRange**
+
+  Incorrect value in **_Content-Range_** header.
+
+  HTTP Status Code: 416
+
+#### Example
+
+Sample Request
+
+```text
+PATCH /example-bucket/example-key HTTP/1.1
+Host: data.s3.<Region>.frostfs-s3-gw.com
+Content-Range: bytes 0-3/*
+Content-Length: 4
+&AUTHPARAMS
+
+Body
+```
+
+Sample Response
+
+```xml
+<PatchObjectResult>
+    <Object>
+        <LastModified>2024-07-24T14:54:54Z</LastModified>
+        <ETag>"e8b53b75afaf3ce898f048c663b11cf4c71f5f13456673dd5b422a247c9e627f"</ETag>
+    </Object>
+</PatchObjectResult>
+```
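A client-side sketch of the PATCH call above with net/http (again unsigned and with an illustrative endpoint). The Content-Range arithmetic follows the `bytes {start}-{end}/*` rule, where the inclusive range length must equal Content-Length:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("data") // 4 bytes to write at offset 0
	start, end := 0, len(payload)-1

	url := "https://data.s3.example.frostfs-s3-gw.com/example-bucket/example-key"
	req, err := http.NewRequest(http.MethodPatch, url, bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// Boundaries are inclusive, so end-start+1 == Content-Length
	// (net/http derives Content-Length from the bytes.Reader).
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", start, end))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // a PatchObjectResult XML body is returned on success
}
```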
go.mod (58 lines changed)

@@ -1,40 +1,41 @@
 module git.frostfs.info/TrueCloudLab/frostfs-s3-gw

-go 1.20
+go 1.22

 require (
-	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3
+	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
-	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240417080107-db361318009c
+	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240705093617-560cbbd1f1e4
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1
-	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a
+	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
 	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
 	github.com/aws/aws-sdk-go v1.44.6
 	github.com/aws/aws-sdk-go-v2 v1.18.1
 	github.com/bluele/gcache v0.0.2
 	github.com/go-chi/chi/v5 v5.0.8
-	github.com/google/uuid v1.3.1
+	github.com/google/uuid v1.6.0
 	github.com/minio/sio v0.3.0
-	github.com/nspcc-dev/neo-go v0.105.0
+	github.com/mr-tron/base58 v1.2.0
+	github.com/nspcc-dev/neo-go v0.106.2
 	github.com/panjf2000/ants/v2 v2.5.0
-	github.com/prometheus/client_golang v1.15.1
+	github.com/prometheus/client_golang v1.19.0
-	github.com/prometheus/client_model v0.3.0
+	github.com/prometheus/client_model v0.5.0
 	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.15.0
 	github.com/ssgreg/journald v1.0.0
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
+	github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
 	github.com/urfave/cli/v2 v2.3.0
 	go.opentelemetry.io/otel v1.16.0
 	go.opentelemetry.io/otel/trace v1.16.0
-	go.uber.org/zap v1.26.0
+	go.uber.org/zap v1.27.0
 	golang.org/x/crypto v0.21.0
-	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 	golang.org/x/net v0.23.0
 	golang.org/x/text v0.14.0
-	google.golang.org/grpc v1.59.0
+	google.golang.org/grpc v1.63.2
 	google.golang.org/protobuf v1.33.0
-	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 )

@@ -42,6 +43,7 @@ require (
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
 	git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
+	github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
 	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect

@@ -53,28 +55,24 @@ require (
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/go-logr/logr v1.2.4 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/golang/snappy v0.0.1 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/gorilla/websocket v1.5.1 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
-	github.com/hashicorp/golang-lru v0.6.0 // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/mr-tron/base58 v1.2.0 // indirect
-	github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c // indirect
-	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0 // indirect
-	github.com/nspcc-dev/rfc6979 v0.2.0 // indirect
+	github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 // indirect
+	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d // indirect
+	github.com/nspcc-dev/rfc6979 v0.2.1 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/spf13/afero v1.9.3 // indirect
 	github.com/spf13/cast v1.5.0 // indirect

@@ -83,7 +81,7 @@ require (
 	github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 // indirect
 	github.com/twmb/murmur3 v1.1.8 // indirect
 	github.com/urfave/cli v1.22.5 // indirect
-	go.etcd.io/bbolt v1.3.8 // indirect
+	go.etcd.io/bbolt v1.3.9 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect

@@ -92,12 +90,12 @@ require (
 	go.opentelemetry.io/otel/sdk v1.16.0 // indirect
 	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/sys v0.18.0 // indirect
 	golang.org/x/term v0.18.0 // indirect
-	google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f // indirect
+	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum (141 lines changed)

@@ -36,20 +36,20 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3 h1:H5GvrVlowIMWfzqQkhY0p0myooJxQ1sMRVSFfXawwWg=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
-git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
+git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240417080107-db361318009c h1:V58j1eg12wxbl4fUbjWtBOexl3zFt4w0EGHpCPkWJhQ=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
-git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240417080107-db361318009c/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
+git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240705093617-560cbbd1f1e4 h1:izmHYpkz7cPr2Zpudxxh0wvrtAIxYywEG+uraghVSlo=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1 h1:+Z55WxE1ad/LBzRX1dqgaWlXAQ/NDjUsBlwEIZ4rn6k=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240705093617-560cbbd1f1e4/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1/go.mod h1:Pl77loECndbgIC0Kljj1MFmGJKQ9gotaFINyveW1T8I=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a h1:Bk1fB4cQASPKgAVGCdlBOEp5ohZfDxqK6fZM8eP+Emo=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
-git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
+git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
 git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
 git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
 git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=

@@ -59,6 +59,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
+github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=

@@ -71,6 +73,7 @@ github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c=
+github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
 github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
@ -93,7 +96,9 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
||||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||||
|
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||||
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb h1:f0BMgIjhZy4lSRHCXFbQst85f5agZAjtDMixQqBWNpc=
|
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb h1:f0BMgIjhZy4lSRHCXFbQst85f5agZAjtDMixQqBWNpc=
|
||||||
|
github.com/consensys/gnark-crypto v0.12.2-0.20231013160410-1f65e75b6dfb/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
@ -111,6 +116,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
|
||||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||||
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
|
||||||
|
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||||
|
@ -128,7 +134,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||||
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
|
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||||
|
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
@ -155,8 +162,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
|
|
||||||
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
|
||||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
@ -173,7 +178,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||||
|
@ -189,26 +195,25 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
|
||||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
|
||||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||||
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU=
|
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
|
||||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
|
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||||
|
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||||
|
@ -226,30 +231,31 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
|
||||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
|
||||||
github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
|
github.com/minio/sio v0.3.0 h1:syEFBewzOMOYVzSTFpp1MqpSZk8rUNbz8VIIc+PNzus=
|
||||||
github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
github.com/minio/sio v0.3.0/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw=
|
||||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||||
|
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||||
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
|
||||||
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c h1:OOQeE613BH93ICPq3eke5N78gWNeMjcBWkmD2NKyXVg=
|
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2 h1:mD9hU3v+zJcnHAVmHnZKt3I++tvn30gBj2rP2PocZMk=
|
||||||
github.com/nspcc-dev/go-ordered-json v0.0.0-20231123160306-3374ff1e7a3c/go.mod h1:79bEUDEviBHJMFV6Iq6in57FEOCMcRhfQnfaf0ETA5U=
|
github.com/nspcc-dev/go-ordered-json v0.0.0-20240301084351-0246b013f8b2/go.mod h1:U5VfmPNM88P4RORFb6KSUVBdJBDhlqggJZYGXGPxOcc=
|
||||||
github.com/nspcc-dev/neo-go v0.105.0 h1:vtNZYFEFySK8zRDhLzQYha849VzWrcKezlnq/oNQg/w=
|
github.com/nspcc-dev/neo-go v0.106.2 h1:KXSJ2J5Oacc7LrX3r4jvnC8ihKqHs5NB21q4f2S3r9o=
|
||||||
github.com/nspcc-dev/neo-go v0.105.0/go.mod h1:6pchIHg5okeZO955RxpTh5q0sUI0vtpgPM6Q+no1rlI=
|
github.com/nspcc-dev/neo-go v0.106.2/go.mod h1:Ojwfx3/lv0VTeEHMpQ17g0wTnXcCSoFQVq5GEeCZmGo=
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0 h1:N+dMIBmteXjJpkH6UZ7HmNftuFxkqszfGLbhsEctnv0=
|
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d h1:Vcb7YkZuUSSIC+WF/xV3UDfHbAxZgyT2zGleJP3Ig5k=
|
||||||
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20231127165613-b35f351f0ba0/go.mod h1:J/Mk6+nKeKSW4wygkZQFLQ6SkLOSGX5Ga0RuuuktEag=
|
github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20240521091047-78685785716d/go.mod h1:/vrbWSHc7YS1KSYhVOyyeucXW/e+1DkVBOgnBEXUCeY=
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.0 h1:3e1WNxrN60/6N0DW7+UYisLeZJyfqZTNOjeV/toYvOE=
|
github.com/nspcc-dev/rfc6979 v0.2.1 h1:8wWxkamHWFmO790GsewSoKUSJjVnL1fmdRpokU/RgRM=
|
||||||
github.com/nspcc-dev/rfc6979 v0.2.0/go.mod h1:exhIh1PdpDC5vQmyEsGvc4YDM/lyQp/452QxGq/UEso=
|
github.com/nspcc-dev/rfc6979 v0.2.1/go.mod h1:Tk7h5kyUWkhjyO3zUgFFhy1v2vQv3BvQEntakdtqrWc=
|
||||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
@ -267,18 +273,19 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
|
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||||
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
|
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
|
||||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
|
||||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
@ -308,26 +315,26 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
|
||||||
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
|
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
|
||||||
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
|
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
|
||||||
|
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4 h1:GpfJ7OdNjS7BFTVwNCUI9L4aCJOFRbr5fdHqjdhoYE8=
|
||||||
|
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4/go.mod h1:f3jBhpWvuZmue0HZK52GzRHJOYHYSILs/c8+K2S/J+o=
|
||||||
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
|
||||||
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
|
||||||
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
||||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||||
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
|
||||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||||
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74 h1:JwtAtbp7r/7QSyGz8mKUbYJBg2+6Cd7OjM8o/GNOcVo=
|
|
||||||
github.com/virtuald/go-ordered-json v0.0.0-20170621173500-b18e6e673d74/go.mod h1:RmMWU37GKR2s6pgrIEB4ixgpVCt/cf7dnJv3fuH1J1c=
|
|
||||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||||
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
|
@ -353,11 +360,12 @@ go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLk
|
||||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||||
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||||
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||||
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
@ -378,8 +386,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
|
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
|
||||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
|
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
@ -403,7 +411,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
|
||||||
|
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
@ -462,9 +471,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
@ -576,12 +584,14 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f
|
||||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||||
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
|
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
|
||||||
|
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||||
|
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||||
|
@ -646,12 +656,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
|
||||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg=
|
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
|
||||||
google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY=
|
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
|
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
@ -672,8 +682,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
|
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||||
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
|
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
@ -692,12 +702,11 @@ google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
@ -706,6 +715,7 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
@ -721,3 +731,4 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||||
|
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||||
|
|
|
@@ -4,18 +4,22 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
 	"strconv"
 	"time"
 
 	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/crdt"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"go.uber.org/zap"
 )
 
 const (
@@ -25,11 +29,12 @@
 // AuthmateFrostFS is a mediator which implements authmate.FrostFS through pool.Pool.
 type AuthmateFrostFS struct {
 	frostFS layer.FrostFS
+	log     *zap.Logger
 }
 
 // NewAuthmateFrostFS creates new AuthmateFrostFS using provided pool.Pool.
-func NewAuthmateFrostFS(frostFS layer.FrostFS) *AuthmateFrostFS {
-	return &AuthmateFrostFS{frostFS: frostFS}
+func NewAuthmateFrostFS(frostFS layer.FrostFS, log *zap.Logger) *AuthmateFrostFS {
+	return &AuthmateFrostFS{frostFS: frostFS, log: log}
 }
 
 // ContainerExists implements authmate.FrostFS interface method.
@@ -79,17 +84,27 @@ func (x *AuthmateFrostFS) GetCredsObject(ctx context.Context, addr oid.Address)
 		credObjID = last.ObjID
 	}
 
-	res, err := x.frostFS.ReadObject(ctx, layer.PrmObjectRead{
+	res, err := x.frostFS.GetObject(ctx, layer.PrmObjectGet{
 		Container:   addr.Container(),
 		Object:      credObjID,
-		WithPayload: true,
-		WithHeader:  true,
 	})
 	if err != nil {
 		return nil, err
 	}
 
-	return res.Head, err
+	defer func() {
+		if closeErr := res.Payload.Close(); closeErr != nil {
+			x.reqLogger(ctx).Warn(logs.CloseCredsObjectPayload, zap.Error(closeErr))
+		}
+	}()
+
+	data, err := io.ReadAll(res.Payload)
+	if err != nil {
+		return nil, err
+	}
+	res.Header.SetPayload(data)
+
+	return &res.Header, err
 }
 
 // CreateObject implements authmate.FrostFS interface method.
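The hunk above replaces the combined ReadObject call with GetObject: the payload now arrives as a stream, is buffered with io.ReadAll, and is stored back into the header via SetPayload, while a close failure is only logged so it cannot shadow the read result. A minimal sketch of that read-and-close pattern, using a plain io.ReadCloser and zap in place of the SDK object types:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"go.uber.org/zap"
)

// readAndClose drains r into memory and always closes it; a close error is
// logged rather than returned, so it never masks the read error.
func readAndClose(r io.ReadCloser, log *zap.Logger) ([]byte, error) {
	defer func() {
		if closeErr := r.Close(); closeErr != nil {
			log.Warn("could not close payload", zap.Error(closeErr))
		}
	}()
	return io.ReadAll(r)
}

func main() {
	payload := io.NopCloser(bytes.NewReader([]byte("access box payload")))
	data, err := readAndClose(payload, zap.NewNop())
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```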
@@ -122,12 +137,17 @@ func (x *AuthmateFrostFS) CreateObject(ctx context.Context, prm tokens.PrmObject
 		attributes = append(attributes, [2]string{attr.Key(), attr.Value()})
 	}
 
-	return x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
+	res, err := x.frostFS.CreateObject(ctx, layer.PrmObjectCreate{
 		Container:  prm.Container,
 		Filepath:   prm.Filepath,
 		Attributes: attributes,
 		Payload:    bytes.NewReader(prm.Payload),
 	})
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	return res.ObjectID, nil
 }
 
 func (x *AuthmateFrostFS) getCredVersions(ctx context.Context, addr oid.Address) (*crdt.ObjectVersions, error) {
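The CreateObject change follows from the layer call now returning a result struct instead of a bare object ID; the mediator unwraps it to keep its own narrow signature. A small illustrative sketch of the same adaptation, with hypothetical names standing in for the gateway's types:

```go
package main

import "fmt"

// createResult mimics a richer result struct returned by the lower layer.
type createResult struct {
	ObjectID string
	Size     uint64
}

func createObject() (createResult, error) {
	return createResult{ObjectID: "obj-1", Size: 42}, nil
}

// createID preserves the old narrow return type on top of the new call.
func createID() (string, error) {
	res, err := createObject()
	if err != nil {
		return "", err
	}
	return res.ObjectID, nil
}

func main() {
	id, _ := createID()
	fmt.Println(id)
}
```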
@ -143,21 +163,28 @@ func (x *AuthmateFrostFS) getCredVersions(ctx context.Context, addr oid.Address)
|
||||||
versions := crdt.NewObjectVersions(objCredSystemName)
|
versions := crdt.NewObjectVersions(objCredSystemName)
|
||||||
|
|
||||||
for _, id := range credVersions {
|
for _, id := range credVersions {
|
||||||
objVersion, err := x.frostFS.ReadObject(ctx, layer.PrmObjectRead{
|
objVersion, err := x.frostFS.HeadObject(ctx, layer.PrmObjectHead{
|
||||||
Container: addr.Container(),
|
Container: addr.Container(),
|
||||||
Object: id,
|
Object: id,
|
||||||
WithHeader: true,
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("head crdt access box '%s': %w", id.EncodeToString(), err)
|
return nil, fmt.Errorf("head crdt access box '%s': %w", id.EncodeToString(), err)
|
||||||
}
|
}
|
||||||
|
|
||||||
versions.AppendVersion(crdt.NewObjectVersion(objVersion.Head))
|
versions.AppendVersion(crdt.NewObjectVersion(objVersion))
|
||||||
}
|
}
|
||||||
|
|
||||||
return versions, nil
|
return versions, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (x *AuthmateFrostFS) reqLogger(ctx context.Context) *zap.Logger {
|
||||||
|
reqLogger := middleware.GetReqLog(ctx)
|
||||||
|
if reqLogger != nil {
|
||||||
|
return reqLogger
|
||||||
|
}
|
||||||
|
return x.log
|
||||||
|
}
|
||||||
|
|
||||||
func credVersionSysName(cnrID cid.ID, objID oid.ID) string {
|
func credVersionSysName(cnrID cid.ID, objID oid.ID) string {
|
||||||
return cnrID.EncodeToString() + "0" + objID.EncodeToString()
|
return cnrID.EncodeToString() + "0" + objID.EncodeToString()
|
||||||
}
|
}
|
||||||
|
|
|
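
Note on the hunk above: GetCredsObject now owns the payload stream returned by GetObject, so it must read it fully, attach it to the header, and close it. A minimal sketch of that pattern, assuming the object.Object and zap APIs from frostfs-sdk-go (the helper name is hypothetical):

// materialize illustrates the read-then-close pattern used above: the
// payload is fully consumed before Close, so a Close error is logged
// rather than returned (the data is already in memory at that point).
func materialize(hdr *object.Object, rc io.ReadCloser, log *zap.Logger) (*object.Object, error) {
	defer func() {
		if err := rc.Close(); err != nil {
			log.Warn("close payload", zap.Error(err))
		}
	}()

	data, err := io.ReadAll(rc)
	if err != nil {
		return nil, err
	}

	hdr.SetPayload(data) // callers receive a fully materialized object
	return hdr, nil
}

The reqLogger helper added in the same hunk takes the request-scoped logger from the context when the middleware put one there and falls back to the gateway-wide logger otherwise, so the warning is attributed to the right request.
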
@@ -14,6 +14,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zaptest"
 )
 
 func TestGetCredsObject(t *testing.T) {

@@ -35,7 +36,7 @@ func TestGetCredsObject(t *testing.T) {
 		},
 	}})
 
-	frostfs := NewAuthmateFrostFS(layer.NewTestFrostFS(key))
+	frostfs := NewAuthmateFrostFS(layer.NewTestFrostFS(key), zaptest.NewLogger(t))
 
 	cid, err := frostfs.CreateContainer(ctx, authmate.PrmContainerCreate{
 		FriendlyName: bktName,
@@ -15,6 +15,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"

@@ -51,7 +52,7 @@ func NewFrostFS(p *pool.Pool, key *keys.PrivateKey) *FrostFS {
 	}
 }
 
-// TimeToEpoch implements frostfs.FrostFS interface method.
+// TimeToEpoch implements layer.FrostFS interface method.
 func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (uint64, uint64, error) {
 	dur := futureTime.Sub(now)
 	if dur < 0 {

@@ -87,7 +88,7 @@ func (x *FrostFS) TimeToEpoch(ctx context.Context, now, futureTime time.Time) (u
 	return curr, epoch, nil
 }
 
-// Container implements frostfs.FrostFS interface method.
+// Container implements layer.FrostFS interface method.
 func (x *FrostFS) Container(ctx context.Context, layerPrm layer.PrmContainer) (*container.Container, error) {
 	prm := pool.PrmContainerGet{
 		ContainerID: layerPrm.ContainerID,

@@ -102,7 +103,7 @@ func (x *FrostFS) Container(ctx context.Context, layerPrm layer.PrmContainer) (*
 	return &res, nil
 }
 
-// CreateContainer implements frostfs.FrostFS interface method.
+// CreateContainer implements layer.FrostFS interface method.
 func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCreate) (*layer.ContainerCreateResult, error) {
 	var cnr container.Container
 	cnr.Init()

@@ -150,7 +151,7 @@ func (x *FrostFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCre
 	}, handleObjectError("save container via connection pool", err)
 }
 
-// UserContainers implements frostfs.FrostFS interface method.
+// UserContainers implements layer.FrostFS interface method.
 func (x *FrostFS) UserContainers(ctx context.Context, layerPrm layer.PrmUserContainers) ([]cid.ID, error) {
 	prm := pool.PrmContainerList{
 		OwnerID: layerPrm.UserID,

@@ -161,7 +162,7 @@ func (x *FrostFS) UserContainers(ctx context.Context, layerPrm layer.PrmUserCont
 	return r, handleObjectError("list user containers via connection pool", err)
 }
 
-// DeleteContainer implements frostfs.FrostFS interface method.
+// DeleteContainer implements layer.FrostFS interface method.
 func (x *FrostFS) DeleteContainer(ctx context.Context, id cid.ID, token *session.Container) error {
 	prm := pool.PrmContainerDelete{ContainerID: id, Session: token, WaitParams: &x.await}
 
@@ -169,8 +170,8 @@ func (x *FrostFS) DeleteContainer(ctx context.Context, id cid.ID, token *session
 	return handleObjectError("delete container via connection pool", err)
 }
 
-// CreateObject implements frostfs.FrostFS interface method.
-func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
+// CreateObject implements layer.FrostFS interface method.
+func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (*layer.CreateObjectResult, error) {
 	attrNum := len(prm.Attributes) + 1 // + creation time
 
 	if prm.Filepath != "" {

@@ -237,8 +238,15 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (
 		prmPut.UseKey(prm.PrivateKey)
 	}
 
-	idObj, err := x.pool.PutObject(ctx, prmPut)
-	return idObj, handleObjectError("save object via connection pool", err)
+	res, err := x.pool.PutObject(ctx, prmPut)
+	if err = handleObjectError("save object via connection pool", err); err != nil {
+		return nil, err
+	}
+
+	return &layer.CreateObjectResult{
+		ObjectID:      res.ObjectID,
+		CreationEpoch: res.Epoch,
+	}, nil
 }
 
 // wraps io.ReadCloser and transforms Read errors related to access violation
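
CreateObject's signature change above means callers now get more than an object ID back. Judging from how res.ObjectID and res.Epoch are used, the result type presumably looks like this (a sketch; the real declaration lives in the layer package, outside this diff, and the field types are assumptions):

// Sketch of the result type implied by the hunk above.
type CreateObjectResult struct {
	ObjectID      oid.ID // ID assigned to the stored object
	CreationEpoch uint64 // FrostFS epoch at which the object was stored
}

Carrying the creation epoch in the result spares callers a separate NetworkInfo round-trip when they need to reason in epochs, e.g. for lifecycle handling.
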
@@ -255,8 +263,31 @@ func (x payloadReader) Read(p []byte) (int, error) {
 	return n, handleObjectError("read payload", err)
 }
 
-// ReadObject implements frostfs.FrostFS interface method.
-func (x *FrostFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
+// HeadObject implements layer.FrostFS interface method.
+func (x *FrostFS) HeadObject(ctx context.Context, prm layer.PrmObjectHead) (*object.Object, error) {
+	var addr oid.Address
+	addr.SetContainer(prm.Container)
+	addr.SetObject(prm.Object)
+
+	var prmHead pool.PrmObjectHead
+	prmHead.SetAddress(addr)
+
+	if prm.BearerToken != nil {
+		prmHead.UseBearer(*prm.BearerToken)
+	} else {
+		prmHead.UseKey(prm.PrivateKey)
+	}
+
+	res, err := x.pool.HeadObject(ctx, prmHead)
+	if err != nil {
+		return nil, handleObjectError("read object header via connection pool", err)
+	}
+
+	return &res, nil
+}
+
+// GetObject implements layer.FrostFS interface method.
+func (x *FrostFS) GetObject(ctx context.Context, prm layer.PrmObjectGet) (*layer.Object, error) {
 	var addr oid.Address
 	addr.SetContainer(prm.Container)
 	addr.SetObject(prm.Object)

@@ -270,54 +301,22 @@ func (x *FrostFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*lay
 		prmGet.UseKey(prm.PrivateKey)
 	}
 
-	if prm.WithHeader {
-		if prm.WithPayload {
 	res, err := x.pool.GetObject(ctx, prmGet)
 	if err != nil {
 		return nil, handleObjectError("init full object reading via connection pool", err)
 	}
 
-	defer res.Payload.Close()
-
-	payload, err := io.ReadAll(res.Payload)
-	if err != nil {
-		return nil, handleObjectError("read full object payload", err)
-	}
-
-	res.Header.SetPayload(payload)
-
-	return &layer.ObjectPart{
-		Head: &res.Header,
-	}, nil
-		}
-
-		var prmHead pool.PrmObjectHead
-		prmHead.SetAddress(addr)
-
-		if prm.BearerToken != nil {
-			prmHead.UseBearer(*prm.BearerToken)
-		} else {
-			prmHead.UseKey(prm.PrivateKey)
-		}
-
-		hdr, err := x.pool.HeadObject(ctx, prmHead)
-		if err != nil {
-			return nil, handleObjectError("read object header via connection pool", err)
-		}
-
-		return &layer.ObjectPart{
-			Head: &hdr,
-		}, nil
-	} else if prm.PayloadRange[0]+prm.PayloadRange[1] == 0 {
-		res, err := x.pool.GetObject(ctx, prmGet)
-		if err != nil {
-			return nil, handleObjectError("init full payload range reading via connection pool", err)
-		}
-
-	return &layer.ObjectPart{
+	return &layer.Object{
+		Header:  res.Header,
 		Payload: res.Payload,
 	}, nil
 }
 
+// RangeObject implements layer.FrostFS interface method.
+func (x *FrostFS) RangeObject(ctx context.Context, prm layer.PrmObjectRange) (io.ReadCloser, error) {
+	var addr oid.Address
+	addr.SetContainer(prm.Container)
+	addr.SetObject(prm.Object)
 
 	var prmRange pool.PrmObjectRange
 	prmRange.SetAddress(addr)
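
The hunk above dissolves the flag-driven ReadObject into three single-purpose methods, so each call site states its intent in the method name instead of a WithHeader/WithPayload/PayloadRange combination. Collected from the new signatures, the read-side surface is roughly the following (a sketch; the actual interface is defined in the layer package, outside this diff):

// Read-side methods implied by this refactor.
type objectReader interface {
	// HeadObject fetches only the object header.
	HeadObject(ctx context.Context, prm layer.PrmObjectHead) (*object.Object, error)
	// GetObject returns the header plus a payload stream.
	GetObject(ctx context.Context, prm layer.PrmObjectGet) (*layer.Object, error)
	// RangeObject streams a byte range of the payload.
	RangeObject(ctx context.Context, prm layer.PrmObjectRange) (io.ReadCloser, error)
}

This also removes the hard-to-follow branch ladder deleted above.
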
@@ -335,12 +334,10 @@ func (x *FrostFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*lay
 		return nil, handleObjectError("init payload range reading via connection pool", err)
 	}
 
-	return &layer.ObjectPart{
-		Payload: payloadReader{&res},
-	}, nil
+	return payloadReader{&res}, nil
 }
 
-// DeleteObject implements frostfs.FrostFS interface method.
+// DeleteObject implements layer.FrostFS interface method.
 func (x *FrostFS) DeleteObject(ctx context.Context, prm layer.PrmObjectDelete) error {
 	var addr oid.Address
 	addr.SetContainer(prm.Container)

@@ -359,7 +356,7 @@ func (x *FrostFS) DeleteObject(ctx context.Context, prm layer.PrmObjectDelete) e
 	return handleObjectError("mark object removal via connection pool", err)
 }
 
-// SearchObjects implements frostfs.FrostFS interface method.
+// SearchObjects implements layer.FrostFS interface method.
 func (x *FrostFS) SearchObjects(ctx context.Context, prm layer.PrmObjectSearch) ([]oid.ID, error) {
 	filters := object.NewSearchFilters()
 	filters.AddRootFilter()
@@ -396,6 +393,48 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm layer.PrmObjectSearch)
 	return buf, handleObjectError("read object list", err)
 }
 
+// NetworkInfo implements layer.FrostFS interface method.
+func (x *FrostFS) NetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) {
+	ni, err := x.pool.NetworkInfo(ctx)
+	if err != nil {
+		return ni, handleObjectError("get network info via connection pool", err)
+	}
+
+	return ni, nil
+}
+
+func (x *FrostFS) PatchObject(ctx context.Context, prm layer.PrmObjectPatch) (oid.ID, error) {
+	var addr oid.Address
+	addr.SetContainer(prm.Container)
+	addr.SetObject(prm.Object)
+
+	var prmPatch pool.PrmObjectPatch
+	prmPatch.SetAddress(addr)
+
+	var rng object.Range
+	rng.SetOffset(prm.Offset)
+	rng.SetLength(prm.Length)
+	if prm.Length+prm.Offset > prm.ObjectSize {
+		rng.SetLength(prm.ObjectSize - prm.Offset)
+	}
+
+	prmPatch.SetRange(&rng)
+	prmPatch.SetPayloadReader(prm.Payload)
+
+	if prm.BearerToken != nil {
+		prmPatch.UseBearer(*prm.BearerToken)
+	} else {
+		prmPatch.UseKey(prm.PrivateKey)
+	}
+
+	res, err := x.pool.PatchObject(ctx, prmPatch)
+	if err != nil {
+		return oid.ID{}, handleObjectError("patch object via connection pool", err)
+	}
+
+	return res.ObjectID, nil
+}
+
 // ResolverFrostFS represents virtual connection to the FrostFS network.
 // It implements resolver.FrostFS.
 type ResolverFrostFS struct {
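
One detail of the new PatchObject worth spelling out: the patch range is clamped so it never runs past the end of the object. With hypothetical numbers (not taken from the diff):

// objectSize=100, offset=90, length=20: the raw range [90, 110) would
// overrun the object, so the length is trimmed to objectSize-offset = 10.
var rng object.Range
rng.SetOffset(90)
rng.SetLength(20)
if 20+90 > 100 {
	rng.SetLength(100 - 90) // effective range is [90, 100)
}
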
@@ -189,7 +189,7 @@ func (m *multiTX) wrapCall(method string, args []any) {
 	if err == nil {
 		return
 	}
-	if !errors.Is(commonclient.ErrTransactionTooLarge, err) {
+	if !errors.Is(err, commonclient.ErrTransactionTooLarge) {
 		m.err = err
 		return
 	}
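
This one-line fix corrects the argument order of errors.Is: the first argument is the error being inspected (whose wrap chain is unwrapped) and the second is the target sentinel. With the arguments swapped, the check only matched when err was exactly the sentinel and silently failed once anything wrapped it:

// Illustration (the fmt.Errorf wrapping is hypothetical; the sentinel is
// the one from the hunk above).
wrapped := fmt.Errorf("submit tx: %w", commonclient.ErrTransactionTooLarge)

fmt.Println(errors.Is(wrapped, commonclient.ErrTransactionTooLarge)) // true: unwraps 'wrapped' down to the sentinel
fmt.Println(errors.Is(commonclient.ErrTransactionTooLarge, wrapped)) // false: the sentinel wraps nothing, so nothing matches
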
@@ -19,16 +19,16 @@ type GetNodeByPathResponseInfoWrapper struct {
 	response *grpcService.GetNodeByPathResponse_Info
 }
 
-func (n GetNodeByPathResponseInfoWrapper) GetNodeID() uint64 {
-	return n.response.GetNodeId()
+func (n GetNodeByPathResponseInfoWrapper) GetNodeID() []uint64 {
+	return []uint64{n.response.GetNodeId()}
 }
 
-func (n GetNodeByPathResponseInfoWrapper) GetParentID() uint64 {
-	return n.response.GetParentId()
+func (n GetNodeByPathResponseInfoWrapper) GetParentID() []uint64 {
+	return []uint64{n.response.GetParentId()}
 }
 
-func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() uint64 {
-	return n.response.GetTimestamp()
+func (n GetNodeByPathResponseInfoWrapper) GetTimestamp() []uint64 {
+	return []uint64{n.response.GetTimestamp()}
 }
 
 func (n GetNodeByPathResponseInfoWrapper) GetMeta() []tree.Meta {

@@ -43,15 +43,21 @@ type GetSubTreeResponseBodyWrapper struct {
 	response *grpcService.GetSubTreeResponse_Body
 }
 
-func (n GetSubTreeResponseBodyWrapper) GetNodeID() uint64 {
+func (n GetSubTreeResponseBodyWrapper) GetNodeID() []uint64 {
 	return n.response.GetNodeId()
 }
 
-func (n GetSubTreeResponseBodyWrapper) GetParentID() uint64 {
-	return n.response.GetParentId()
+func (n GetSubTreeResponseBodyWrapper) GetParentID() []uint64 {
+	resp := n.response.GetParentId()
+	if resp == nil {
+		// storage sends nil that should be interpreted as []uint64{0}
+		// due to protobuf compatibility, see 'GetSubTree' function
+		return []uint64{0}
+	}
+	return resp
 }
 
-func (n GetSubTreeResponseBodyWrapper) GetTimestamp() uint64 {
+func (n GetSubTreeResponseBodyWrapper) GetTimestamp() []uint64 {
 	return n.response.GetTimestamp()
 }
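
Throughout this commit the tree node accessors change from scalar uint64 to []uint64. Presumably this is groundwork for split trees, where one logical node can temporarily exist under several IDs; the new "... node has multiple ids" log messages added further down point the same way. A caller-side sketch (the variable names are illustrative, not from the diff):

ids := node.GetNodeID() // now []uint64, usually of length 1
if len(ids) > 1 {
	// a split/duplicated tree node; handlers log this and pick one ID
	log.Warn(logs.SystemNodeHasMultipleIDs)
}
nodeID := ids[0]
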
@@ -96,13 +102,25 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
 	return res, nil
 }
 
-func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]tree.NodeResponse, error) {
+func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
+	order := treepool.NoneOrder
+	if sort {
+		order = treepool.AscendingOrder
+	}
 	poolPrm := treepool.GetSubTreeParams{
 		CID:         bktInfo.CID,
 		TreeID:      treeID,
 		RootID:      rootID,
 		Depth:       depth,
 		BearerToken: getBearer(ctx, bktInfo),
+		Order:       order,
+	}
+	if len(rootID) == 1 && rootID[0] == 0 {
+		// storage node interprets 'nil' value as []uint64{0}
+		// gate wants to send 'nil' value instead of []uint64{0}, because
+		// it provides compatibility with previous tree service api where
+		// single uint64(0) value is dropped from signature
+		poolPrm.RootID = nil
 	}
 
 	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)

@@ -162,7 +180,7 @@ func (s *SubTreeStreamImpl) Next() (tree.NodeResponse, error) {
 	return s.Next()
 }
 
-func (w *PoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) (tree.SubTreeStream, error) {
+func (w *PoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) (tree.SubTreeStream, error) {
 	poolPrm := treepool.GetSubTreeParams{
 		CID:    bktInfo.CID,
 		TreeID: treeID,

@@ -171,6 +189,13 @@ func (w *PoolWrapper) GetSubTreeStream(ctx context.Context, bktInfo *data.Bucket
 		BearerToken: getBearer(ctx, bktInfo),
 		Order:       treepool.AscendingOrder,
 	}
+	if len(rootID) == 1 && rootID[0] == 0 {
+		// storage node interprets 'nil' value as []uint64{0}
+		// gate wants to send 'nil' value instead of []uint64{0}, because
+		// it provides compatibility with previous tree service api where
+		// single uint64(0) value is dropped from signature
+		poolPrm.RootID = nil
+	}
 
 	subTreeReader, err := w.p.GetSubTree(ctx, poolPrm)
 	if err != nil {
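
The RootID handling above is symmetric with the GetParentID wrapper earlier in this file: outbound, a root of []uint64{0} must be encoded as nil for compatibility with the previous tree service API; inbound, a nil parent list from storage decodes back to []uint64{0}. Condensed into one sketch (poolPrm and resp are as in the hunks above):

// Outbound: normalize a lone zero root ID to nil before the request.
if len(rootID) == 1 && rootID[0] == 0 {
	poolPrm.RootID = nil
}

// Inbound: a nil parent list from the storage node means "root".
parents := resp.GetParentId()
if parents == nil {
	parents = []uint64{0}
}
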
@@ -20,7 +20,6 @@ const (
 	UsingCredentials    = "using credentials"    // Info in ../../cmd/s3-gw/app.go
 	ApplicationStarted  = "application started"  // Info in ../../cmd/s3-gw/app.go
 	ApplicationFinished = "application finished" // Info in ../../cmd/s3-gw/app.go
-	FetchDomainsPrepareToUseAPI = "fetch domains, prepare to use API" // Info in ../../cmd/s3-gw/app.go
 	StartingServer            = "starting server"               // Info in ../../cmd/s3-gw/app.go
 	StoppingServer            = "stopping server"               // Info in ../../cmd/s3-gw/app.go
 	SIGHUPConfigReloadStarted = "SIGHUP config reload started"  // Info in ../../cmd/s3-gw/app.go

@@ -62,7 +61,6 @@ const (
 	RequestFailed = "request failed"   // Error in ../../api/handler/util.go
 	GetBucketInfo = "get bucket info"  // Warn in ../../api/handler/cors.go
 	GetBucketCors = "get bucket cors"  // Warn in ../../api/handler/cors.go
-	SomeACLNotFullyMapped = "some acl not fully mapped" // Warn in ../../api/handler/acl.go
 	CouldntDeleteObject = "couldn't delete object" // Error in ../../api/layer/layer.go
 	BucketIsCreated     = "bucket is created"      // Info in ../../api/handler/put.go
 	CouldNotParseContainerObjectLockEnabledAttribute = "could not parse container object lock enabled attribute" // Error in ../../api/layer/container.go

@@ -84,6 +82,7 @@ const (
 	FailedToSubmitTaskToPool = "failed to submit task to pool" // Warn in ../../api/layer/object.go
 	CouldNotFetchObjectMeta  = "could not fetch object meta"   // Warn in ../../api/layer/object.go
 	GetTreeNode              = "get tree node"                 // Debug in ../../api/layer/tagging.go
+	GetTreeNodeToDelete      = "get tree node to delete"       // Debug in ../../api/layer/tagging.go
 	CouldntPutBucketInfoIntoCache      = "couldn't put bucket info into cache"     // Warn in ../../api/layer/cache.go
 	CouldntAddObjectToCache            = "couldn't add object to cache"            // Warn in ../../api/layer/cache.go
 	CouldntCacheAccessControlOperation = "couldn't cache access control operation" // Warn in ../../api/layer/cache.go

@@ -100,11 +99,6 @@ const (
 	FailedToPassAuthentication = "failed to pass authentication" // Error in ../../api/middleware/auth.go
 	FailedToResolveCID         = "failed to resolve CID"         // Debug in ../../api/middleware/metrics.go
 	RequestStart               = "request start"                 // Info in ../../api/middleware/reqinfo.go
-	RequestHTTP                  = "http request"                        // Info in ../../api/middleware/log_http.go
-	FailedToInitializeHTTPLogger = "failed to initialize http logger"    // Warn in ../../api/middleware/log_http.go
-	FailedToReloadHTTPFileLogger = "failed to reload http file logger"   // Warn in ../../api/middleware/log_http.go
-	FailedToGetRequestBody       = "failed to get request body"          // Warn in ../../api/middleware/log_http.go
-	LogHTTPDisabledInThisBuild   = "http logging disabled in this build" // Warn in ../../api/middleware/log_http_stub.go
 	FailedToUnescapeObjectName = "failed to unescape object name" // Warn in ../../api/middleware/reqinfo.go
 	InvalidDefaultMaxAge       = "invalid defaultMaxAge"          // Fatal in ../../cmd/s3-gw/app_settings.go
 	CantShutDownService        = "can't shut down service"        // Panic in ../../cmd/s3-gw/service.go

@@ -147,4 +141,25 @@ const (
 	CouldntCacheSubject   = "couldn't cache subject info"
 	UserGroupsListIsEmpty = "user groups list is empty, subject not found"
 	CouldntCacheUserKey   = "couldn't cache user key"
+	ObjectTaggingNodeHasMultipleIDs           = "object tagging node has multiple ids"
+	BucketTaggingNodeHasMultipleIDs           = "bucket tagging node has multiple ids"
+	BucketSettingsNodeHasMultipleIDs          = "bucket settings node has multiple ids"
+	BucketCORSNodeHasMultipleIDs              = "bucket cors node has multiple ids"
+	SystemNodeHasMultipleIDs                  = "system node has multiple ids"
+	FailedToRemoveOldSystemNode               = "failed to remove old system node"
+	FailedToParseAddressInTreeNode            = "failed to parse object addr in tree node"
+	UnexpectedMultiNodeIDsInSubTreeMultiParts = "unexpected multi node ids in sub tree multi parts"
+	FoundSeveralSystemNodes                   = "found several system nodes"
+	FailedToParsePartInfo                     = "failed to parse part info"
+	CouldNotFetchCORSContainerInfo            = "couldn't fetch CORS container info"
+	CloseCredsObjectPayload                   = "close creds object payload"
+	CouldntDeleteLifecycleObject              = "couldn't delete lifecycle configuration object"
+	CouldntCacheLifecycleConfiguration        = "couldn't cache lifecycle configuration"
+	CouldNotFetchLifecycleContainerInfo       = "couldn't fetch lifecycle container info"
+	BucketLifecycleNodeHasMultipleIDs         = "bucket lifecycle node has multiple ids"
+	GetBucketLifecycle                        = "get bucket lifecycle"
+	WarnDuplicateNamespaceVHS                 = "duplicate namespace with enabled VHS, config value skipped"
+	WarnValueVHSEnabledFlagWrongType          = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped"
+	WarnDomainContainsInvalidPlaceholder      = "the domain contains an invalid placeholder, domain skipped"
+	FailedToRemoveOldPartNode                 = "failed to remove old part node"
 )
@@ -20,6 +20,7 @@ type AppMetrics struct {
 type AppMetricsConfig struct {
 	Logger         *zap.Logger
 	PoolStatistics StatisticScraper
+	TreeStatistic  TreePoolStatistic
 	Registerer     prometheus.Registerer
 	Enabled        bool
 }

@@ -36,7 +37,7 @@ func NewAppMetrics(cfg AppMetricsConfig) *AppMetrics {
 
 	return &AppMetrics{
 		logger:  cfg.Logger,
-		gate:    NewGateMetrics(cfg.PoolStatistics, registry),
+		gate:    NewGateMetrics(cfg.PoolStatistics, cfg.TreeStatistic, registry),
 		enabled: cfg.Enabled,
 	}
 }
@@ -144,6 +144,16 @@ var appMetricsDesc = map[string]map[string]Description{
 			VariableLabels: []string{"endpoint"},
 		},
 	},
+	treePoolSubsystem: {
+		avgRequestDurationMetric: Description{
+			Type:           dto.MetricType_GAUGE,
+			Namespace:      namespace,
+			Subsystem:      treePoolSubsystem,
+			Name:           avgRequestDurationMetric,
+			Help:           "Average request duration (in milliseconds) for specific method in tree pool",
+			VariableLabels: []string{"method"},
+		},
+	},
 }
 
 type Description struct {
@@ -4,6 +4,7 @@ import (
 	"net/http"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	dto "github.com/prometheus/client_model/go"

@@ -15,6 +16,10 @@ type StatisticScraper interface {
 	Statistic() pool.Statistic
 }
 
+type TreePoolStatistic interface {
+	Statistic() tree.Statistic
+}
+
 type GateMetrics struct {
 	registry prometheus.Registerer
 	State    *StateMetrics

@@ -22,9 +27,10 @@ type GateMetrics struct {
 	Billing    *billingMetrics
 	Stats      *APIStatMetrics
 	HTTPServer *httpServerMetrics
+	TreePool   *treePoolMetricsCollector
 }
 
-func NewGateMetrics(scraper StatisticScraper, registry prometheus.Registerer) *GateMetrics {
+func NewGateMetrics(scraper StatisticScraper, treeScraper TreePoolStatistic, registry prometheus.Registerer) *GateMetrics {
 	stateMetric := newStateMetrics()
 	registry.MustRegister(stateMetric)
 
@@ -40,6 +46,9 @@ func NewGateMetrics(scraper StatisticScraper, registry prometheus.Registerer) *G
 	serverMetric := newHTTPServerMetrics()
 	registry.MustRegister(serverMetric)
 
+	treePoolMetric := newTreePoolMetricsCollector(treeScraper)
+	registry.MustRegister(treePoolMetric)
+
 	return &GateMetrics{
 		registry: registry,
 		State:    stateMetric,

@@ -47,6 +56,7 @@ func NewGateMetrics(scraper StatisticScraper, registry prometheus.Registerer) *G
 		Billing:    billingMetric,
 		Stats:      statsMetric,
 		HTTPServer: serverMetric,
+		TreePool:   treePoolMetric,
 	}
 }
metrics/treepool.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	treePoolSubsystem = "tree_pool"
+
+	methodGetNodes      = "get_nodes"
+	methodGetSubTree    = "get_sub_tree"
+	methodAddNode       = "add_node"
+	methodAddNodeByPath = "add_node_by_path"
+	methodMoveNode      = "move_node"
+	methodRemoveNode    = "remove_node"
+)
+
+type treePoolMetricsCollector struct {
+	statScraper     TreePoolStatistic
+	requestDuration *prometheus.GaugeVec
+}
+
+func newTreePoolMetricsCollector(stat TreePoolStatistic) *treePoolMetricsCollector {
+	return &treePoolMetricsCollector{
+		statScraper:     stat,
+		requestDuration: mustNewGaugeVec(appMetricsDesc[treePoolSubsystem][avgRequestDurationMetric]),
+	}
+}
+
+func (m *treePoolMetricsCollector) Collect(ch chan<- prometheus.Metric) {
+	m.updateStatistic()
+	m.requestDuration.Collect(ch)
+}
+
+func (m *treePoolMetricsCollector) Describe(descs chan<- *prometheus.Desc) {
+	m.requestDuration.Describe(descs)
+}
+
+func (m *treePoolMetricsCollector) updateStatistic() {
+	stat := m.statScraper.Statistic()
+
+	m.requestDuration.Reset()
+
+	m.requestDuration.WithLabelValues(methodGetNodes).Set(float64(stat.AverageGetNodes().Milliseconds()))
+	m.requestDuration.WithLabelValues(methodGetSubTree).Set(float64(stat.AverageGetSubTree().Milliseconds()))
+	m.requestDuration.WithLabelValues(methodAddNode).Set(float64(stat.AverageAddNode().Milliseconds()))
+	m.requestDuration.WithLabelValues(methodAddNodeByPath).Set(float64(stat.AverageAddNodeByPath().Milliseconds()))
+	m.requestDuration.WithLabelValues(methodMoveNode).Set(float64(stat.AverageMoveNode().Milliseconds()))
+	m.requestDuration.WithLabelValues(methodRemoveNode).Set(float64(stat.AverageRemoveNode().Milliseconds()))
+}
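
metrics/treepool.go follows the standard Prometheus custom-collector pattern: the type satisfies prometheus.Collector through Describe and Collect, and Collect recomputes the gauges from the pool statistics on every scrape, so the exported values are always the latest averages. A minimal sketch of exercising it in isolation (stubTreeStat is hypothetical and assumes the zero tree.Statistic reports zero durations):

// stubTreeStat satisfies the TreePoolStatistic interface defined above.
type stubTreeStat struct{}

func (stubTreeStat) Statistic() tree.Statistic { return tree.Statistic{} }

func collectOnce() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(newTreePoolMetricsCollector(stubTreeStat{}))

	// Gathering triggers Collect -> updateStatistic, producing one gauge
	// family with a sample per method label (6 in total).
	families, err := reg.Gather()
	if err == nil {
		fmt.Println(len(families)) // 1
	}
}
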
File diff suppressed because it is too large.
@@ -2,6 +2,7 @@ package tree
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"sort"

@@ -31,16 +32,16 @@ type nodeResponse struct {
 	timestamp uint64
 }
 
-func (n nodeResponse) GetNodeID() uint64 {
-	return n.nodeID
+func (n nodeResponse) GetNodeID() []uint64 {
+	return []uint64{n.nodeID}
 }
 
-func (n nodeResponse) GetParentID() uint64 {
-	return n.parentID
+func (n nodeResponse) GetParentID() []uint64 {
+	return []uint64{n.parentID}
 }
 
-func (n nodeResponse) GetTimestamp() uint64 {
-	return n.timestamp
+func (n nodeResponse) GetTimestamp() []uint64 {
+	return []uint64{n.timestamp}
 }
 
 func (n nodeResponse) GetMeta() []Meta {

@@ -233,7 +234,7 @@ func (c *ServiceClientMemory) GetNodes(_ context.Context, p *GetNodesParams) ([]
 	return res2, nil
 }
 
-func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) ([]NodeResponse, error) {
+func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error) {
 	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
 	if !ok {
 		return nil, nil

@@ -244,11 +245,19 @@ func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.Bucket
 		return nil, ErrNodeNotFound
 	}
 
-	node := tr.treeData.getNode(rootID)
+	if len(rootID) != 1 {
+		return nil, errors.New("invalid rootID")
+	}
+
+	node := tr.treeData.getNode(rootID[0])
 	if node == nil {
 		return nil, ErrNodeNotFound
 	}
 
+	if sort {
+		sortNode(tr.treeData)
+	}
+
 	// we depth-1 in case of uint32 and 0 as mark to get all subtree leads to overflow and depth is getting quite big to walk all tree levels
 	return node.listNodes(nil, depth-1), nil
 }

@@ -270,7 +279,7 @@ func (s *SubTreeStreamMemoryImpl) Next() (NodeResponse, error) {
 	return s.res[s.offset-1], nil
 }
 
-func (c *ServiceClientMemory) GetSubTreeStream(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID uint64, depth uint32) (SubTreeStream, error) {
+func (c *ServiceClientMemory) GetSubTreeStream(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) (SubTreeStream, error) {
 	cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
 	if !ok {
 		return &SubTreeStreamMemoryImpl{err: ErrNodeNotFound}, nil

@@ -281,7 +290,11 @@ func (c *ServiceClientMemory) GetSubTreeStream(_ context.Context, bktInfo *data.
 		return nil, ErrNodeNotFound
 	}
 
-	node := tr.treeData.getNode(rootID)
+	if len(rootID) != 1 {
+		return nil, errors.New("invalid rootID")
+	}
+
+	node := tr.treeData.getNode(rootID[0])
 	if node == nil {
 		return nil, ErrNodeNotFound
 	}
@@ -7,6 +7,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"

@@ -293,14 +294,68 @@ func TestGetLatestNode(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			actualNode, err := getLatestNode(tc.nodes)
+			actualNode, err := getLatestVersionNode(tc.nodes)
 			if tc.error {
 				require.Error(t, err)
 				return
 			}
 
 			require.NoError(t, err)
-			require.Equal(t, tc.expectedNodeID, actualNode.GetNodeID())
+			require.EqualValues(t, []uint64{tc.expectedNodeID}, actualNode.GetNodeID())
 		})
 	}
 }
 
+func TestSplitTreeMultiparts(t *testing.T) {
+	ctx := context.Background()
+
+	memCli, err := NewTreeServiceClientMemory()
+	require.NoError(t, err)
+	treeService := NewTree(memCli, zaptest.NewLogger(t))
+
+	bktInfo := &data.BucketInfo{
+		CID: cidtest.ID(),
+	}
+
+	multipartInfo := &data.MultipartInfo{
+		Key:      "multipart",
+		UploadID: "id",
+		Meta:     map[string]string{},
+		Owner:    usertest.ID(),
+	}
+
+	err = treeService.CreateMultipartUpload(ctx, bktInfo, multipartInfo)
+	require.NoError(t, err)
+
+	multipartInfo, err = treeService.GetMultipartUpload(ctx, bktInfo, multipartInfo.Key, multipartInfo.UploadID)
+	require.NoError(t, err)
+
+	var objIDs []oid.ID
+	for i := 0; i < 2; i++ {
+		objID := oidtest.ID()
+		_, err = memCli.AddNode(ctx, bktInfo, systemTree, multipartInfo.ID, map[string]string{
+			partNumberKV: "1",
+			oidKV:        objID.EncodeToString(),
+			ownerKV:      usertest.ID().EncodeToString(),
+		})
+		require.NoError(t, err)
+		objIDs = append(objIDs, objID)
+	}
+
+	parts, err := treeService.GetParts(ctx, bktInfo, multipartInfo.ID)
+	require.NoError(t, err)
+	require.Len(t, parts, 2)
+
+	objToDeletes, err := treeService.AddPart(ctx, bktInfo, multipartInfo.ID, &data.PartInfo{
+		Key:      multipartInfo.Key,
+		UploadID: multipartInfo.UploadID,
+		Number:   1,
+		OID:      oidtest.ID(),
+	})
+	require.NoError(t, err)
+	require.EqualValues(t, objIDs, objToDeletes, "oids to delete mismatched")
+
+	parts, err = treeService.GetParts(ctx, bktInfo, multipartInfo.ID)
+	require.NoError(t, err)
+	require.Len(t, parts, 1)
+}