forked from TrueCloudLab/frostfs-s3-gw
Compare commits
63 commits
master...feature/tr
| Author | SHA1 | Date |
|---|---|---|
| | 84bbd767ae | |
| | 424038de6c | |
| | 3cf27d281d | |
| | 3c7cb82553 | |
| | 57b7e83380 | |
| | 6a90f4e624 | |
| | cb3753f286 | |
| | 81209e308c | |
| | b78e55e101 | |
| | 25c24f5ce6 | |
| | 09c11262c6 | |
| | f120715a37 | |
| | aaed083d82 | |
| | e35b582fe2 | |
| | 39fc7aa3ee | |
| | da41f47826 | |
| | 9e5fb4be95 | |
| | 346243b159 | |
| | 03481274f0 | |
| | c2adbd758a | |
| | bc17ab5e47 | |
| | 9fadfbbc2f | |
| | 827ea1a41e | |
| | 968f10a72f | |
| | 582e6ac642 | |
| | 99f273f9af | |
| | cd96adef36 | |
| | 738ce14f50 | |
| | 5358e39f71 | |
| | 34c1426b9f | |
| | 8ca73e2079 | |
| | a87c636b4c | |
| | 26baf8a94e | |
| | f187141ae5 | |
| | 3cffc782e9 | |
| | d0e4d55772 | |
| | 42e72889a5 | |
| | 98815d5473 | |
| | 62615d7ab7 | |
| | 575ab4d294 | |
| | d919e6cce2 | |
| | 056f168d77 | |
| | 9bdfe2a016 | |
| | d6b506f6d9 | |
| | a2e0b92575 | |
| | b08f476ea7 | |
| | f4275d837a | |
| | 664f83b2b7 | |
| | 136b5521fe | |
| | a5f670d904 | |
| | d76c4fe2a2 | |
| | 0637133c61 | |
| | bf00fa6aa9 | |
| | ff690ce996 | |
| | 534ae7f0f1 | |
| | 77673797f9 | |
| | 9e1766ff74 | |
| | e73f11c251 | |
| | 5cb77018f8 | |
| | fa68a4ce40 | |
| | 0644067496 | |
| | 481520705a | |
| | 28723f4a68 | |
139 changed files with 9431 additions and 1978 deletions
```diff
@@ -1,13 +1,14 @@
-FROM golang:1.21 AS builder
+FROM golang:1.22 AS builder
 
 ARG BUILD=now
 ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
 ARG VERSION=dev
+ARG GOFLAGS=""
 
 WORKDIR /src
 COPY . /src
 
-RUN make
+RUN make GOFLAGS=${GOFLAGS}
 
 # Executable image
 FROM alpine AS frostfs-s3-gw
```
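The new `GOFLAGS` build argument threads custom Go build flags from `docker build` (and from the Makefile `image` target shown further down) into `make` inside the builder stage. A usage sketch; the flag value here is illustrative, not taken from this changeset:

```shell
# Pass Go build flags through the Makefile's image target...
make image GOFLAGS="-tags=integration"

# ...or directly via the build argument the Dockerfile now accepts.
docker build --build-arg GOFLAGS="-tags=integration" -f .docker/Dockerfile -t frostfs-s3-gw:dev .
```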
```diff
@@ -6,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.21', '1.22' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
```
```diff
@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.23'
 
       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
```
```diff
@@ -10,7 +10,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.23'
           cache: true
 
       - name: Install linters
```
```diff
@@ -24,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.21', '1.22' ]
+        go_versions: [ '1.22', '1.23' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3
```
```diff
@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.22'
+          go-version: '1.23'
 
       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
```
```diff
@@ -12,7 +12,8 @@ run:
 # output configuration options
 output:
   # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
-  format: tab
+  formats:
+    - format: tab
 
 # all available settings of specific linters
 linters-settings:
```
CHANGELOG.md (11 changed lines)
```diff
@@ -4,6 +4,17 @@ This document outlines major changes between releases.
 
 ## [Unreleased]
 
+### Added
+- Add support for virtual hosted style addressing (#446, #449)
+- Support new param `frostfs.graceful_close_on_switch_timeout` (#475)
+- Support patch object method (#479)
+- Add `sign` command to `frostfs-s3-authmate` (#467)
+- Support custom aws credentials (#509)
+
+### Changed
+- Update go version to go1.19 (#470)
+- Avoid maintenance mode storage node during object operations (#524)
+
 ## [0.30.0] - Kangshung - 2024-07-19
 
 ### Fixed
```
Makefile (44 changed lines)
```diff
@@ -4,8 +4,8 @@
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.56.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
+LINT_VERSION ?= 1.60.1
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
 BINDIR = bin
 
 METRICS_DUMP_OUT ?= ./metrics-dump.json
@@ -14,6 +14,8 @@ METRICS_DUMP_OUT ?= ./metrics-dump.json
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))
 
+GOFLAGS ?=
+
 # Variables for docker
 REPO_BASENAME = $(shell basename `go list -m`)
 HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
@@ -23,6 +25,12 @@ OUTPUT_LINT_DIR ?= $(shell pwd)/bin
 LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
 TMP_DIR := .cache
 
+# Variables for fuzzing
+FUZZ_NGFUZZ_DIR ?= ""
+FUZZ_TIMEOUT ?= 30
+FUZZ_FUNCTIONS ?= "all"
+FUZZ_AUX ?= ""
+
 .PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
 
 # .deb package versioning
@@ -38,6 +46,7 @@ all: $(BINS)
 $(BINS): $(BINDIR) dep
 	@echo "⇒ Build $@"
 	CGO_ENABLED=0 \
+	GOFLAGS=$(GOFLAGS) \
 	go build -v -trimpath \
 	-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
 	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
@@ -64,7 +73,7 @@ docker/%:
 		-w /src \
 		-u `stat -c "%u:%g" .` \
 		--env HOME=/src \
-		golang:$(GO_VERSION) make $*,\
+		golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
 		@echo "supported docker targets: all $(BINS) lint")
 
 # Run tests
@@ -76,6 +85,34 @@ cover:
 	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
 	@go tool cover -html=coverage.txt -o coverage.html
 
+# Run fuzzing
+CLANG := $(shell which clang-17 2>/dev/null)
+.PHONY: check-clang all
+check-clang:
+ifeq ($(CLANG),)
+	@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
+	@exit 1
+endif
+
+.PHONY: check-ngfuzz all
+check-ngfuzz:
+	@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
+		echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
+		exit 1; \
+	fi
+
+.PHONY: install-fuzzing-deps
+install-fuzzing-deps: check-clang check-ngfuzz
+
+.PHONY: fuzz
+fuzz: install-fuzzing-deps
+	@START_PATH=$$(pwd); \
+	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
+	cd $(FUZZ_NGFUZZ_DIR) && \
+	./ngfuzz -clean && \
+	./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
+	./ngfuzz -report
+
 # Reformat code
 format:
 	@echo "⇒ Processing gofmt check"
@@ -87,6 +124,7 @@ image:
 	@docker build \
 		--build-arg REPO=$(REPO) \
 		--build-arg VERSION=$(VERSION) \
+		--build-arg GOFLAGS=$(GOFLAGS) \
 		--rm \
 		-f .docker/Dockerfile \
 		-t $(HUB_IMAGE):$(HUB_TAG) .
```
README.md (18 changed lines)
````diff
@@ -93,6 +93,24 @@ HTTP/1.1 200 OK
 
 Also, you can configure domains using `.env` variables or `yaml` file.
 
+## Fuzzing
+To run fuzzing tests use the following command:
+
+```shell
+$ make fuzz
+```
+
+This command will install dependencies for the fuzzing process and run existing fuzzing tests.
+
+You can also use the following arguments:
+
+```
+FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
+FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
+FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
+FUZZ_NGFUZZ_DIR - path to ngfuzz tool
+```
+
 ## Documentation
 
 - [Configuration](./docs/configuration.md)
````
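Putting those arguments together, a concrete invocation might look like this; the ngfuzz path is an example, and `FuzzAuthenticate` is the fuzz target added in `api/auth/center_fuzz_test.go` below:

```shell
# Run only the Authenticate fuzz target for 60 seconds with verbose fuzzer output
make fuzz \
    FUZZ_NGFUZZ_DIR=/opt/ngfuzz \
    FUZZ_FUNCTIONS="FuzzAuthenticate" \
    FUZZ_TIMEOUT=60 \
    FUZZ_AUX="-debug"
```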
SECURITY.md (new file, 26 lines)
```diff
@@ -0,0 +1,26 @@
+# Security Policy
+
+
+## How To Report a Vulnerability
+
+If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
+
+**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
+
+Instead, you can report it using one of the following ways:
+
+* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
+
+Please include as much of the information listed below as you can to help us better understand and resolve the issue:
+
+* The type of issue (e.g., buffer overflow, or cross-site scripting)
+* Affected version(s)
+* Impact of the issue, including how an attacker might exploit the issue
+* Step-by-step instructions to reproduce the issue
+* The location of the affected source code (tag/branch/commit or direct URL)
+* Full paths of source file(s) related to the manifestation of the issue
+* Any special configuration required to reproduce the issue
+* Any log files that are related to this issue (if possible)
+* Proof-of-concept or exploit code (if possible)
+
+This information will help us triage your report more quickly.
```
```diff
@@ -14,16 +14,17 @@ import (
 	"time"
 
 	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 )
 
-// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
-var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
+// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
+var AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
 
 // postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
 var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
@@ -34,6 +35,11 @@ type (
 		postReg *RegexpSubmatcher
 		cli     tokens.Credentials
 		allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
+		settings                   CenterSettings
 	}
 
+	CenterSettings interface {
+		AccessBoxContainer() (cid.ID, bool)
+	}
+
 	//nolint:revive
@@ -50,7 +56,6 @@ type (
 )
 
 const (
-	accessKeyPartsNum  = 2
 	authHeaderPartsNum = 6
 	maxFormSizeMemory  = 50 * 1048576 // 50 MB
@@ -82,24 +87,20 @@ var ContentSHA256HeaderStandardValue = map[string]struct{}{
 }
 
 // New creates an instance of AuthCenter.
-func New(creds tokens.Credentials, prefixes []string) *Center {
+func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
 	return &Center{
 		cli:     creds,
-		reg:     NewRegexpMatcher(authorizationFieldRegexp),
+		reg:     NewRegexpMatcher(AuthorizationFieldRegexp),
 		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
 		allowedAccessKeyIDPrefixes: prefixes,
+		settings:                   settings,
 	}
 }
 
 func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
 	submatches := c.reg.GetSubmatches(header)
 	if len(submatches) != authHeaderPartsNum {
-		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), header)
-	}
-
-	accessKey := strings.Split(submatches["access_key_id"], "0")
-	if len(accessKey) != accessKeyPartsNum {
-		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKey)
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), header)
 	}
 
 	signedFields := strings.Split(submatches["signed_header_fields"], ";")
@@ -114,15 +115,6 @@ func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
 	}, nil
 }
 
-func getAddress(accessKeyID string) (oid.Address, error) {
-	var addr oid.Address
-	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
-		return addr, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKeyID)
-	}
-
-	return addr, nil
-}
-
 func IsStandardContentSHA256(key string) bool {
 	_, ok := ContentSHA256HeaderStandardValue[key]
 	return ok
@@ -181,14 +173,14 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
 		return nil, err
 	}
 
-	addr, err := getAddress(authHdr.AccessKeyID)
+	cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
 	if err != nil {
 		return nil, err
 	}
 
-	box, attrs, err := c.cli.GetBox(r.Context(), addr)
+	box, attrs, err := c.cli.GetBox(r.Context(), cnrID, authHdr.AccessKeyID)
 	if err != nil {
-		return nil, fmt.Errorf("get box '%s': %w", addr, err)
+		return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
 	}
 
 	if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
@@ -216,15 +208,29 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
 	return result, nil
 }
 
+func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
+	var addr oid.Address
+	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
+		return addr.Container(), nil
+	}
+
+	cnrID, ok := c.settings.AccessBoxContainer()
+	if ok {
+		return cnrID, nil
+	}
+
+	return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
+}
+
 func checkFormatHashContentSHA256(hash string) error {
 	if !IsStandardContentSHA256(hash) {
 		hashBinary, err := hex.DecodeString(hash)
 		if err != nil {
-			return fmt.Errorf("%w: decode hash: %s: %s", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch),
+			return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
 				hash, err.Error())
 		}
 		if len(hashBinary) != sha256.Size && len(hash) != 0 {
-			return fmt.Errorf("%w: invalid hash size %d", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch), len(hashBinary))
+			return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
 		}
 	}
 
@@ -242,12 +248,12 @@ func (c Center) checkAccessKeyID(accessKeyID string) error {
 		}
 	}
 
-	return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apiErrors.GetAPIError(apiErrors.ErrAccessDenied))
+	return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
 }
 
 func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
-		return nil, fmt.Errorf("%w: parse multipart form with max size %d", apiErrors.GetAPIError(apiErrors.ErrInvalidArgument), maxFormSizeMemory)
+		return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
 	}
 
 	if err := prepareForm(r.MultipartForm); err != nil {
@@ -262,7 +268,7 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	creds := MultipartFormValue(r, "x-amz-credential")
 	submatches := c.postReg.GetSubmatches(creds)
 	if len(submatches) != 4 {
-		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), creds)
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
 	}
 
 	signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
@@ -272,14 +278,14 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 
 	accessKeyID := submatches["access_key_id"]
 
-	addr, err := getAddress(accessKeyID)
+	cnrID, err := c.getAccessBoxContainer(accessKeyID)
 	if err != nil {
 		return nil, err
 	}
 
-	box, attrs, err := c.cli.GetBox(r.Context(), addr)
+	box, attrs, err := c.cli.GetBox(r.Context(), cnrID, accessKeyID)
 	if err != nil {
-		return nil, fmt.Errorf("get box '%s': %w", addr, err)
+		return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
 	}
 
 	secret := box.Gate.SecretKey
@@ -288,7 +294,7 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	signature := SignStr(secret, service, region, signatureDateTime, policy)
 	reqSignature := MultipartFormValue(r, "x-amz-signature")
 	if signature != reqSignature {
-		return nil, fmt.Errorf("%w: %s != %s", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
+		return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
 			reqSignature, signature)
 	}
 
@@ -333,11 +339,11 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 	if authHeader.IsPresigned {
 		now := time.Now()
 		if signatureDateTime.Add(authHeader.Expiration).Before(now) {
-			return fmt.Errorf("%w: expired: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest),
+			return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
 				now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
 		}
 		if now.Before(signatureDateTime) {
-			return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrBadRequest),
+			return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
 				now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
 		}
 		if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
@@ -352,7 +358,7 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 	}
 
 	if authHeader.SignatureV4 != signature {
-		return fmt.Errorf("%w: %s != %s: headers %v", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
+		return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
 			authHeader.SignatureV4, signature, authHeader.SignedFields)
 	}
 
```
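The thread running through these hunks: an access key ID is an `oid.Address` (container ID plus object ID, Base58) with `0` standing in for the `/` separator, and `getAccessBoxContainer` now falls back to a statically configured access-box container when the key ID is not a full address. A minimal standalone sketch of the encoding round trip under those assumptions (helper names are ours, not from the patch):

```go
package main

import (
    "fmt"
    "strings"

    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
)

// encodeAccessKeyID renders "<cid>/<oid>" with '0' as the delimiter,
// matching the credentials format described in the regexp comment.
func encodeAccessKeyID(addr oid.Address) string {
    return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
}

// decodeContainer mirrors the first branch of getAccessBoxContainer:
// recover the container ID when the key ID embeds a full address.
func decodeContainer(accessKeyID string) (cid.ID, error) {
    var addr oid.Address
    if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
        return cid.ID{}, err
    }
    return addr.Container(), nil
}

func main() {
    addr := oidtest.Address() // random address from the SDK's test helper
    key := encodeAccessKeyID(addr)
    cnr, err := decodeContainer(key)
    fmt.Println(key, cnr, err)
}
```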
api/auth/center_fuzz_test.go (new file, 88 lines)
```diff
@@ -0,0 +1,88 @@
+//go:build gofuzz
+// +build gofuzz
+
+package auth
+
+import (
+    "strings"
+    "testing"
+    "time"
+
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    utils "github.com/trailofbits/go-fuzz-utils"
+)
+
+const (
+    fuzzSuccessExitCode = 0
+    fuzzFailExitCode    = -1
+)
+
+func InitFuzzAuthenticate() {
+}
+
+func DoFuzzAuthenticate(input []byte) int {
+    // FUZZER INIT
+    if len(input) < 100 {
+        return fuzzFailExitCode
+    }
+
+    tp, err := utils.NewTypeProvider(input)
+    if err != nil {
+        return fuzzFailExitCode
+    }
+
+    var accessKeyAddr oid.Address
+    err = tp.Fill(accessKeyAddr)
+    if err != nil {
+        return fuzzFailExitCode
+    }
+
+    accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
+    secretKey, err := tp.GetString()
+    awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
+
+    reqData := RequestData{
+        Method:   "GET",
+        Endpoint: "http://localhost:8084",
+        Bucket:   "my-bucket",
+        Object:   "@obj/name",
+    }
+    presignData := PresignData{
+        Service:  "s3",
+        Region:   "spb",
+        Lifetime: 10 * time.Minute,
+        SignTime: time.Now().UTC(),
+    }
+
+    req, err := PresignRequest(awsCreds, reqData, presignData)
+    if req == nil {
+        return fuzzFailExitCode
+    }
+
+    expBox := &accessbox.Box{
+        Gate: &accessbox.GateData{
+            SecretKey: secretKey,
+        },
+    }
+
+    mock := newTokensFrostfsMock()
+    mock.addBox(accessKeyAddr, expBox)
+
+    c := &Center{
+        cli:     mock,
+        reg:     NewRegexpMatcher(AuthorizationFieldRegexp),
+        postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
+    }
+
+    _, _ = c.Authenticate(req)
+
+    return fuzzSuccessExitCode
+}
+
+func FuzzAuthenticate(f *testing.F) {
+    f.Fuzz(func(t *testing.T, data []byte) {
+        DoFuzzAuthenticate(data)
+    })
+}
```
```diff
@@ -17,8 +17,9 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -28,11 +29,23 @@ import (
 	"go.uber.org/zap/zaptest"
 )
 
+type centerSettingsMock struct {
+	accessBoxContainer *cid.ID
+}
+
+func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
+	if c.accessBoxContainer == nil {
+		return cid.ID{}, false
+	}
+	return *c.accessBoxContainer, true
+}
+
 func TestAuthHeaderParse(t *testing.T) {
 	defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"
 
 	center := &Center{
-		reg: NewRegexpMatcher(authorizationFieldRegexp),
+		reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
+		settings: &centerSettingsMock{},
 	}
 
 	for _, tc := range []struct {
@@ -57,11 +70,6 @@ func TestAuthHeaderParse(t *testing.T) {
 			err:      errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
 			expected: nil,
 		},
-		{
-			header:   strings.ReplaceAll(defaultHeader, "oid0cid", "oidcid"),
-			err:      errors.GetAPIError(errors.ErrInvalidAccessKeyID),
-			expected: nil,
-		},
 	} {
 		authHeader, err := center.parseAuthHeader(tc.header)
 		require.ErrorIs(t, err, tc.err, tc.header)
@@ -69,43 +77,6 @@ func TestAuthHeaderParse(t *testing.T) {
 	}
 }
 
-func TestAuthHeaderGetAddress(t *testing.T) {
-	defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
-
-	for _, tc := range []struct {
-		authHeader *AuthHeader
-		err        error
-	}{
-		{
-			authHeader: &AuthHeader{
-				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
-			},
-			err: nil,
-		},
-		{
-			authHeader: &AuthHeader{
-				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
-			},
-			err: defaulErr,
-		},
-		{
-			authHeader: &AuthHeader{
-				AccessKeyID: "oid0cid",
-			},
-			err: defaulErr,
-		},
-		{
-			authHeader: &AuthHeader{
-				AccessKeyID: "oidcid",
-			},
-			err: defaulErr,
-		},
-	} {
-		_, err := getAddress(tc.authHeader.AccessKeyID)
-		require.ErrorIs(t, err, tc.err, tc.authHeader.AccessKeyID)
-	}
-}
-
 func TestSignature(t *testing.T) {
 	secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
 	strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
@@ -171,17 +142,17 @@ func TestCheckFormatContentSHA256(t *testing.T) {
 }
 
 type frostFSMock struct {
-	objects map[oid.Address]*object.Object
+	objects map[string]*object.Object
 }
 
 func newFrostFSMock() *frostFSMock {
 	return &frostFSMock{
-		objects: map[oid.Address]*object.Object{},
+		objects: map[string]*object.Object{},
 	}
 }
 
-func (f *frostFSMock) GetCredsObject(_ context.Context, address oid.Address) (*object.Object, error) {
-	obj, ok := f.objects[address]
+func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
+	obj, ok := f.objects[prm.AccessKeyID]
 	if !ok {
 		return nil, fmt.Errorf("not found")
 	}
@@ -208,7 +179,7 @@ func TestAuthenticate(t *testing.T) {
 		GateKey: key.PublicKey(),
 	}}
 
-	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"))
+	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
 	require.NoError(t, err)
 	data, err := accessBox.Marshal()
 	require.NoError(t, err)
@@ -219,10 +190,10 @@ func TestAuthenticate(t *testing.T) {
 	obj.SetContainerID(addr.Container())
 	obj.SetID(addr.Object())
 
-	frostfs := newFrostFSMock()
-	frostfs.objects[addr] = &obj
+	accessKeyID := getAccessKeyID(addr)
 
-	accessKeyID := addr.Container().String() + "0" + addr.Object().String()
+	frostfs := newFrostFSMock()
+	frostfs.objects[accessKeyID] = &obj
 
 	awsCreds := credentials.NewStaticCredentials(accessKeyID, secret.SecretKey, "")
 	defaultSigner := v4.NewSigner(awsCreds)
@@ -413,13 +384,13 @@ func TestAuthenticate(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			creds := tokens.New(bigConfig)
-			cntr := New(creds, tc.prefixes)
+			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
 			box, err := cntr.Authenticate(tc.request)
 
 			if tc.err {
 				require.Error(t, err)
 				if tc.errCode > 0 {
-					err = frostfsErrors.UnwrapErr(err)
+					err = frosterr.UnwrapErr(err)
 					require.Equal(t, errors.GetAPIError(tc.errCode), err)
 				}
 			} else {
@@ -455,7 +426,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 		GateKey: key.PublicKey(),
 	}}
 
-	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"))
+	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
 	require.NoError(t, err)
 	data, err := accessBox.Marshal()
 	require.NoError(t, err)
@@ -466,10 +437,11 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 	obj.SetContainerID(addr.Container())
 	obj.SetID(addr.Object())
 
-	frostfs := newFrostFSMock()
-	frostfs.objects[addr] = &obj
+	accessKeyID := getAccessKeyID(addr)
+
+	frostfs := newFrostFSMock()
+	frostfs.objects[accessKeyID] = &obj
 
-	accessKeyID := addr.Container().String() + "0" + addr.Object().String()
+	invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()
 
 	timeToSign := time.Now()
@@ -590,13 +562,13 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			creds := tokens.New(bigConfig)
-			cntr := New(creds, tc.prefixes)
+			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
 			box, err := cntr.Authenticate(tc.request)
 
 			if tc.err {
 				require.Error(t, err)
 				if tc.errCode > 0 {
-					err = frostfsErrors.UnwrapErr(err)
+					err = frosterr.UnwrapErr(err)
 					require.Equal(t, errors.GetAPIError(tc.errCode), err)
 				}
 			} else {
@@ -633,3 +605,7 @@ func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldN
 
 	return req
 }
+
+func getAccessKeyID(addr oid.Address) string {
+	return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
+}
```
```diff
@@ -29,11 +29,11 @@ func newTokensFrostfsMock() *credentialsMock {
 }
 
 func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
-	m.boxes[addr.String()] = box
+	m.boxes[getAccessKeyID(addr)] = box
 }
 
-func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, []object.Attribute, error) {
-	box, ok := m.boxes[addr.String()]
+func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
+	box, ok := m.boxes[accessKeyID]
 	if !ok {
 		return nil, nil, &apistatus.ObjectNotFound{}
 	}
@@ -41,11 +41,11 @@ func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox
 	return box, nil, nil
 }
 
-func (m credentialsMock) Put(context.Context, cid.ID, tokens.CredentialsParam) (oid.Address, error) {
+func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
 	return oid.Address{}, nil
 }
 
-func (m credentialsMock) Update(context.Context, oid.Address, tokens.CredentialsParam) (oid.Address, error) {
+func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
 	return oid.Address{}, nil
 }
 
@@ -84,9 +84,10 @@ func TestCheckSign(t *testing.T) {
 	mock.addBox(accessKeyAddr, expBox)
 
 	c := &Center{
-		cli:     mock,
-		reg:     NewRegexpMatcher(authorizationFieldRegexp),
-		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
+		cli:      mock,
+		reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
+		postReg:  NewRegexpMatcher(postPolicyCredentialRegexp),
+		settings: &centerSettingsMock{},
 	}
 	box, err := c.Authenticate(req)
 	require.NoError(t, err)
```
api/cache/accessbox.go (vendored, 18 changed lines)
```diff
@@ -30,6 +30,7 @@ type (
 		Box        *accessbox.Box
 		Attributes []object.Attribute
 		PutTime    time.Time
+		Address    *oid.Address
 	}
 )
 
@@ -57,8 +58,8 @@ func NewAccessBoxCache(config *Config) *AccessBoxCache {
 }
 
 // Get returns a cached accessbox.
-func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
-	entry, err := o.cache.Get(address)
+func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
+	entry, err := o.cache.Get(accessKeyID)
 	if err != nil {
 		return nil
 	}
@@ -74,16 +75,11 @@ func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
 }
 
 // Put stores an accessbox to cache.
-func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box, attrs []object.Attribute) error {
-	val := &AccessBoxCacheValue{
-		Box:        box,
-		Attributes: attrs,
-		PutTime:    time.Now(),
-	}
-	return o.cache.Set(address, val)
+func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
+	return o.cache.Set(accessKeyID, val)
 }
 
 // Delete removes an accessbox from cache.
-func (o *AccessBoxCache) Delete(address oid.Address) {
-	o.cache.Remove(address)
+func (o *AccessBoxCache) Delete(accessKeyID string) {
+	o.cache.Remove(accessKeyID)
 }
```
api/cache/cache_test.go (vendored, 24 changed lines)
```diff
@@ -1,13 +1,14 @@
 package cache
 
 import (
+	"strings"
 	"testing"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/util"
@@ -22,18 +23,21 @@ func TestAccessBoxCacheType(t *testing.T) {
 
 	addr := oidtest.Address()
 	box := &accessbox.Box{}
-	var attrs []object.Attribute
+	val := &AccessBoxCacheValue{
+		Box: box,
+	}
 
-	err := cache.Put(addr, box, attrs)
+	accessKeyID := getAccessKeyID(addr)
+
+	err := cache.Put(accessKeyID, val)
 	require.NoError(t, err)
-	val := cache.Get(addr)
-	require.Equal(t, box, val.Box)
-	require.Equal(t, attrs, val.Attributes)
+	resVal := cache.Get(accessKeyID)
+	require.Equal(t, box, resVal.Box)
 	require.Equal(t, 0, observedLog.Len())
 
-	err = cache.cache.Set(addr, "tmp")
+	err = cache.cache.Set(accessKeyID, "tmp")
 	require.NoError(t, err)
-	assertInvalidCacheEntry(t, cache.Get(addr), observedLog)
+	assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
 }
 
 func TestBucketsCacheType(t *testing.T) {
@@ -230,3 +234,7 @@ func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
 	loggerCore, observedLog := observer.New(zap.WarnLevel)
 	return zap.New(loggerCore), observedLog
 }
+
+func getAccessKeyID(addr oid.Address) string {
+	return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
+}
```
api/cache/network_info.go (vendored, new file, 65 lines)
```diff
@@ -0,0 +1,65 @@
+package cache
+
+import (
+    "fmt"
+    "time"
+
+    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
+    "github.com/bluele/gcache"
+    "go.uber.org/zap"
+)
+
+type (
+    // NetworkInfoCache provides cache for network info.
+    NetworkInfoCache struct {
+        cache  gcache.Cache
+        logger *zap.Logger
+    }
+
+    // NetworkInfoCacheConfig stores expiration params for cache.
+    NetworkInfoCacheConfig struct {
+        Lifetime time.Duration
+        Logger   *zap.Logger
+    }
+)
+
+const (
+    DefaultNetworkInfoCacheLifetime = 1 * time.Minute
+    networkInfoCacheSize            = 1
+    networkInfoKey                  = "network_info"
+)
+
+// DefaultNetworkInfoConfig returns new default cache expiration values.
+func DefaultNetworkInfoConfig(logger *zap.Logger) *NetworkInfoCacheConfig {
+    return &NetworkInfoCacheConfig{
+        Lifetime: DefaultNetworkInfoCacheLifetime,
+        Logger:   logger,
+    }
+}
+
+// NewNetworkInfoCache creates an object of NetworkInfoCache.
+func NewNetworkInfoCache(config *NetworkInfoCacheConfig) *NetworkInfoCache {
+    gc := gcache.New(networkInfoCacheSize).LRU().Expiration(config.Lifetime).Build()
+    return &NetworkInfoCache{cache: gc, logger: config.Logger}
+}
+
+func (c *NetworkInfoCache) Get() *netmap.NetworkInfo {
+    entry, err := c.cache.Get(networkInfoKey)
+    if err != nil {
+        return nil
+    }
+
+    result, ok := entry.(netmap.NetworkInfo)
+    if !ok {
+        c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
+            zap.String("expected", fmt.Sprintf("%T", result)))
+        return nil
+    }
+
+    return &result
+}
+
+func (c *NetworkInfoCache) Put(info netmap.NetworkInfo) error {
+    return c.cache.Set(networkInfoKey, info)
+}
```
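The cache holds a single `netmap.NetworkInfo` entry under a fixed key and relies on the one-minute LRU expiration for freshness. A sketch of the likely read-through usage; the fetch callback is a stand-in, since the gateway-side caller is not part of this file:

```go
package main

import (
    "context"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
    "go.uber.org/zap"
)

// fetchFunc stands in for whatever actually queries the storage network.
type fetchFunc func(ctx context.Context) (netmap.NetworkInfo, error)

// networkInfo consults the single-entry cache first and refreshes it on a miss.
func networkInfo(ctx context.Context, c *cache.NetworkInfoCache, fetch fetchFunc) (netmap.NetworkInfo, error) {
    if ni := c.Get(); ni != nil {
        return *ni, nil // cached copy, at most DefaultNetworkInfoCacheLifetime old
    }

    ni, err := fetch(ctx)
    if err != nil {
        return netmap.NetworkInfo{}, err
    }

    _ = c.Put(ni) // best effort; a failed Put only costs a refetch later
    return ni, nil
}

func main() {
    c := cache.NewNetworkInfoCache(cache.DefaultNetworkInfoConfig(zap.NewNop()))
    ni, err := networkInfo(context.Background(), c, func(context.Context) (netmap.NetworkInfo, error) {
        return netmap.NetworkInfo{}, nil // stub fetch for the example
    })
    fmt.Printf("%v %v\n", ni, err)
}
```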
api/cache/system.go (vendored, 20 changed lines)
```diff
@@ -88,6 +88,22 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
 	return result
 }
 
+func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
+	entry, err := o.cache.Get(key)
+	if err != nil {
+		return nil
+	}
+
+	result, ok := entry.(*data.LifecycleConfiguration)
+	if !ok {
+		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
+			zap.String("expected", fmt.Sprintf("%T", result)))
+		return nil
+	}
+
+	return result
+}
+
 func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
 	entry, err := o.cache.Get(key)
 	if err != nil {
@@ -133,6 +149,10 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
 	return o.cache.Set(key, obj)
 }
 
+func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
+	return o.cache.Set(key, obj)
+}
+
 func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
 	return o.cache.Set(key, settings)
 }
```
```diff
@@ -6,14 +6,16 @@ import (
 	"time"
 
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )
 
 const (
-	bktSettingsObject          = ".s3-settings"
-	bktCORSConfigurationObject = ".s3-cors"
+	bktSettingsObject               = ".s3-settings"
+	bktCORSConfigurationObject      = ".s3-cors"
+	bktLifecycleConfigurationObject = ".s3-lifecycle"
 
 	VersioningUnversioned = "Unversioned"
 	VersioningEnabled     = "Enabled"
@@ -31,6 +33,7 @@ type (
 		LocationConstraint      string
 		ObjectLockEnabled       bool
 		HomomorphicHashDisabled bool
+		PlacementPolicy         netmap.PlacementPolicy
 	}
 
 	// ObjectInfo holds S3 object data.
@@ -81,6 +84,15 @@ type (
 		VersionID             string
 		NoErrorOnDeleteMarker bool
 	}
+
+	// CreatedObjectInfo stores created object info.
+	CreatedObjectInfo struct {
+		ID            oid.ID
+		Size          uint64
+		HashSum       []byte
+		MD5Sum        []byte
+		CreationEpoch uint64
+	}
 )
 
 // SettingsObjectName is a system name for a bucket settings file.
@@ -91,6 +103,10 @@ func (b *BucketInfo) CORSObjectName() string {
 	return b.CID.EncodeToString() + bktCORSConfigurationObject
 }
 
+func (b *BucketInfo) LifecycleConfigurationObjectName() string {
+	return b.CID.EncodeToString() + bktLifecycleConfigurationObject
+}
+
 // VersionID returns object version from ObjectInfo.
 func (o *ObjectInfo) VersionID() string { return o.ID.EncodeToString() }
```
api/data/lifecycle.go (new file, 55 lines)
```diff
@@ -0,0 +1,55 @@
+package data
+
+import "encoding/xml"
+
+const (
+    LifecycleStatusEnabled  = "Enabled"
+    LifecycleStatusDisabled = "Disabled"
+)
+
+type (
+    LifecycleConfiguration struct {
+        XMLName xml.Name        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
+        Rules   []LifecycleRule `xml:"Rule"`
+    }
+
+    LifecycleRule struct {
+        Status                         string                          `xml:"Status,omitempty"`
+        AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
+        Expiration                     *LifecycleExpiration            `xml:"Expiration,omitempty"`
+        Filter                         *LifecycleRuleFilter            `xml:"Filter,omitempty"`
+        ID                             string                          `xml:"ID,omitempty"`
+        NonCurrentVersionExpiration    *NonCurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
+    }
+
+    AbortIncompleteMultipartUpload struct {
+        DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
+    }
+
+    LifecycleExpiration struct {
+        Date                      string  `xml:"Date,omitempty"`
+        Days                      *int    `xml:"Days,omitempty"`
+        Epoch                     *uint64 `xml:"Epoch,omitempty"`
+        ExpiredObjectDeleteMarker *bool   `xml:"ExpiredObjectDeleteMarker,omitempty"`
+    }
+
+    LifecycleRuleFilter struct {
+        And                   *LifecycleRuleAndOperator `xml:"And,omitempty"`
+        ObjectSizeGreaterThan *uint64                   `xml:"ObjectSizeGreaterThan,omitempty"`
+        ObjectSizeLessThan    *uint64                   `xml:"ObjectSizeLessThan,omitempty"`
+        Prefix                string                    `xml:"Prefix,omitempty"`
+        Tag                   *Tag                      `xml:"Tag,omitempty"`
+    }
+
+    LifecycleRuleAndOperator struct {
+        ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
+        ObjectSizeLessThan    *uint64 `xml:"ObjectSizeLessThan,omitempty"`
+        Prefix                string  `xml:"Prefix,omitempty"`
+        Tags                  []Tag   `xml:"Tag"`
+    }
+
+    NonCurrentVersionExpiration struct {
+        NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
+        NonCurrentDays          *int `xml:"NoncurrentDays,omitempty"`
+    }
+)
```
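These types mirror the S3 lifecycle configuration document, extended with a FrostFS-specific `Epoch` field on expiration. As a sanity check that a typical S3 payload maps onto them, a small decoding sketch (the XML sample is illustrative, not from the changeset):

```go
package main

import (
    "encoding/xml"
    "fmt"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)

const sample = `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-logs</ID>
    <Status>Enabled</Status>
    <Filter><Prefix>logs/</Prefix></Filter>
    <Expiration><Days>30</Days></Expiration>
    <AbortIncompleteMultipartUpload><DaysAfterInitiation>7</DaysAfterInitiation></AbortIncompleteMultipartUpload>
  </Rule>
</LifecycleConfiguration>`

func main() {
    var cfg data.LifecycleConfiguration
    if err := xml.Unmarshal([]byte(sample), &cfg); err != nil {
        panic(err)
    }
    rule := cfg.Rules[0]
    fmt.Println(rule.ID, rule.Status, *rule.Expiration.Days) // expire-logs Enabled 30
}
```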
```diff
@@ -72,6 +72,7 @@ type BaseNodeVersion struct {
 	Created        *time.Time
 	Owner          *user.ID
 	IsDeleteMarker bool
+	CreationEpoch  uint64
 }
 
 func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
@@ -110,6 +111,7 @@ type MultipartInfo struct {
 	Meta          map[string]string
 	CopiesNumbers []uint32
 	Finished      bool
+	CreationEpoch uint64
 }
 
 // PartInfo is upload information about part.
@@ -124,6 +126,14 @@ type PartInfo struct {
 	Created time.Time `json:"created"`
 }
 
+type PartInfoExtended struct {
+	PartInfo
+
+	// Timestamp is used to find the latest version of part info in case of tree split
+	// when there are multiple nodes for the same part.
+	Timestamp uint64
+}
+
 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
 	// ETag value contains SHA256 checksum which is used while getting object parts attributes.
```
```diff
@@ -1,10 +1,13 @@
 package errors
 
 import (
 	"errors"
 	"fmt"
 	"net/http"
 
-	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
+	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 )
 
 type (
@@ -187,6 +190,9 @@ const (
 	ErrInvalidRequestLargeCopy
 	ErrInvalidStorageClass
 	VersionIDMarkerWithoutKeyMarker
+	ErrInvalidRangeLength
+	ErrRangeOutOfBounds
+	ErrMissingContentRange
 
 	ErrMalformedJSON
 	ErrInsecureClientRequest
@@ -1739,12 +1745,30 @@ var errorCodes = errorCodeMap{
 		Description:    "Part number must be an integer between 1 and 10000, inclusive",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrInvalidRangeLength: {
+		ErrCode:        ErrInvalidRangeLength,
+		Code:           "InvalidRange",
+		Description:    "Provided range length must be equal to content length",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
+	ErrRangeOutOfBounds: {
+		ErrCode:        ErrRangeOutOfBounds,
+		Code:           "InvalidRange",
+		Description:    "Provided range is outside of object bounds",
+		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
+	},
+	ErrMissingContentRange: {
+		ErrCode:        ErrMissingContentRange,
+		Code:           "MissingContentRange",
+		Description:    "Content-Range header is mandatory for this type of request",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	// Add your error structure here.
 }
 
 // IsS3Error checks if the provided error is a specific s3 error.
 func IsS3Error(err error, code ErrorCode) bool {
-	err = frosterrors.UnwrapErr(err)
+	err = frosterr.UnwrapErr(err)
 	e, ok := err.(Error)
 	return ok && e.ErrCode == code
 }
@@ -1781,6 +1805,26 @@ func GetAPIErrorWithError(code ErrorCode, err error) Error {
 	return errorCodes.toAPIErrWithErr(code, err)
 }
 
+// TransformToS3Error converts FrostFS error to the corresponding S3 error type.
+func TransformToS3Error(err error) error {
+	err = frosterr.UnwrapErr(err) // this wouldn't work with errors.Join
+	var s3err Error
+	if errors.As(err, &s3err) {
+		return err
+	}
+
+	if errors.Is(err, frostfs.ErrAccessDenied) ||
+		errors.Is(err, tree.ErrNodeAccessDenied) {
+		return GetAPIError(ErrAccessDenied)
+	}
+
+	if errors.Is(err, frostfs.ErrGatewayTimeout) {
+		return GetAPIError(ErrGatewayTimeout)
+	}
+
+	return GetAPIError(ErrInternalError)
+}
+
 // ObjectError -- error that is linked to a specific object.
 type ObjectError struct {
 	Err error
```
```diff
@@ -2,7 +2,12 @@ package errors
 
 import (
+	"errors"
+	"fmt"
 	"testing"
 
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	"github.com/stretchr/testify/require"
 )
 
 func BenchmarkErrCode(b *testing.B) {
@@ -24,3 +29,56 @@ func BenchmarkErrorsIs(b *testing.B) {
 		}
 	}
 }
+
+func TestTransformS3Errors(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		err      error
+		expected ErrorCode
+	}{
+		{
+			name:     "simple std error to internal error",
+			err:      errors.New("some error"),
+			expected: ErrInternalError,
+		},
+		{
+			name:     "layer access denied error to s3 access denied error",
+			err:      frostfs.ErrAccessDenied,
+			expected: ErrAccessDenied,
+		},
+		{
+			name:     "wrapped layer access denied error to s3 access denied error",
+			err:      fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
+			expected: ErrAccessDenied,
+		},
+		{
+			name:     "layer node access denied error to s3 access denied error",
+			err:      tree.ErrNodeAccessDenied,
+			expected: ErrAccessDenied,
+		},
+		{
+			name:     "layer gateway timeout error to s3 gateway timeout error",
+			err:      frostfs.ErrGatewayTimeout,
+			expected: ErrGatewayTimeout,
+		},
+		{
+			name:     "s3 error to s3 error",
+			err:      GetAPIError(ErrInvalidPart),
+			expected: ErrInvalidPart,
+		},
+		{
+			name:     "wrapped s3 error to s3 error",
+			err:      fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
+			expected: ErrInvalidPart,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			err := TransformToS3Error(tc.err)
+			s3err, ok := err.(Error)
+			require.True(t, ok, "error must be s3 error")
+			require.Equalf(t, tc.expected, s3err.ErrCode,
+				"expected: '%s', got: '%s'",
+				GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
+		})
+	}
+}
```
```diff
@@ -11,7 +11,7 @@ import (
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
@@ -28,12 +28,12 @@ func TestPutObjectACLErrorAPE(t *testing.T) {
 
 	info := createBucket(hc, bktName)
 
-	putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
+	putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported)
 	putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) // only `private` canned acl is allowed, that is actually ignored
 	putObjectWithHeaders(hc, bktName, objName, nil)
 
 	aclBody := &AccessControlPolicy{}
-	putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
+	putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported)
 
 	aclRes := getObjectACL(hc, bktName, objName)
 	checkPrivateACL(t, aclRes, info.Key.PublicKey())
@@ -49,7 +49,7 @@ func TestCreateObjectACLErrorAPE(t *testing.T) {
 	copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPublic}}, http.StatusBadRequest)
 	copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPrivate}}, http.StatusOK)
 
-	createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
+	createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported)
 	createMultipartUpload(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate})
 }
 
@@ -60,7 +60,7 @@ func TestBucketACLAPE(t *testing.T) {
 	info := createBucket(hc, bktName)
 
 	aclBody := &AccessControlPolicy{}
-	putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
+	putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported)
 
 	aclRes := getBucketACL(hc, bktName)
 	checkPrivateACL(t, aclRes, info.Key.PublicKey())
@@ -113,7 +113,7 @@ func TestBucketPolicy(t *testing.T) {
 
 	createTestBucket(hc, bktName)
 
-	getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
+	getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy)
 
 	newPolicy := engineiam.Policy{
 		Version: "2012-10-17",
@@ -125,7 +125,7 @@ func TestBucketPolicy(t *testing.T) {
 		}},
 	}
 
-	putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicy)
+	putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicy)
 
 	newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*"
 	putBucketPolicy(hc, bktName, newPolicy)
@@ -140,7 +140,7 @@ func TestBucketPolicyStatus(t *testing.T) {
 
 	createTestBucket(hc, bktName)
 
-	getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
+	getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy)
 
 	newPolicy := engineiam.Policy{
 		Version: "2012-10-17",
@@ -152,7 +152,7 @@ func TestBucketPolicyStatus(t *testing.T) {
 		}},
 	}
 
-	putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicyNotPrincipal)
+	putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicyNotPrincipal)
 
 	newPolicy.Statement[0].NotPrincipal = nil
 	newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}}
@@ -221,7 +221,7 @@ func TestPutBucketPolicy(t *testing.T) {
 	assertStatus(hc.t, w, http.StatusOK)
 }
 
-func getBucketPolicy(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) engineiam.Policy {
+func getBucketPolicy(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) engineiam.Policy {
 	w, r := prepareTestRequest(hc, bktName, "", nil)
 	hc.Handler().GetBucketPolicyHandler(w, r)
 
@@ -231,13 +231,13 @@ func getBucketPolicy(hc *handlerContext, bktName string, errCode ...s3errors.Err
 		err := json.NewDecoder(w.Result().Body).Decode(&policy)
 		require.NoError(hc.t, err)
 	} else {
-		assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
+		assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
 	}
 
 	return policy
 }
 
-func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) PolicyStatus {
+func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) PolicyStatus {
 	w, r := prepareTestRequest(hc, bktName, "", nil)
 	hc.Handler().GetBucketPolicyStatusHandler(w, r)
 
@@ -247,13 +247,13 @@ func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...s3erro
 		err := xml.NewDecoder(w.Result().Body).Decode(&policyStatus)
 		require.NoError(hc.t, err)
 	} else {
-		assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
+		assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
 	}
 
 	return policyStatus
 }
 
-func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...s3errors.ErrorCode) {
+func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...apierr.ErrorCode) {
 	body, err := json.Marshal(bktPolicy)
 	require.NoError(hc.t, err)
 
@@ -263,7 +263,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Pol
 	if len(errCode) == 0 {
 		assertStatus(hc.t, w, http.StatusOK)
 	} else {
-		assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
+		assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0]))
 	}
 }
 
@@ -312,9 +312,9 @@ func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
 	}
 }
 
-func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
+func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code apierr.ErrorCode) {
 	w := createBucketBase(hc, bktName, box)
-	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
 }
 
 func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
@@ -330,9 +330,9 @@ func putBucketACL(hc *handlerContext, bktName string, box *accessbox.Box, header
 	assertStatus(hc.t, w, http.StatusOK)
 }
 
-func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
+func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) {
 	w := putBucketACLBase(hc, bktName, box, header, body)
-	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
 }
 
 func putBucketACLBase(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
@@ -360,9 +360,9 @@ func getBucketACLBase(hc *handlerContext, bktName string) *httptest.ResponseReco
 	return w
 }
 
-func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
+func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) {
 	w := putObjectACLBase(hc, bktName, objName, box, header, body)
-	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
 }
 
 func putObjectACLBase(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
@@ -396,9 +396,9 @@ func putObjectWithHeaders(hc *handlerContext, bktName, objName string, headers m
 	return w.Header()
 }
 
-func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code s3errors.ErrorCode) {
+func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code apierr.ErrorCode) {
 	w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
-	assertS3Error(hc.t, w, s3errors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
 }
 
 func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, headers map[string]string, box *accessbox.Box, data []byte) *httptest.ResponseRecorder {
```
@@ -41,7 +41,6 @@ type (
		RetryMaxAttempts() int
		RetryMaxBackoff() time.Duration
		RetryStrategy() RetryStrategy
-		Domains() []string
	}

	FrostFSID interface {
@@ -81,10 +81,17 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
	p := &layer.DeleteObjectParams{
-		BktInfo:  bktInfo,
-		Objects:  versionedObject,
-		Settings: bktSettings,
+		BktInfo:     bktInfo,
+		Objects:     versionedObject,
+		Settings:    bktSettings,
+		NetworkInfo: networkInfo,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)
	deletedObject := deletedObjects[0]
@@ -181,11 +188,18 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
		return
	}

+	networkInfo, err := h.obj.GetNetworkInfo(ctx)
+	if err != nil {
+		h.logAndSendError(w, "could not get network info", reqInfo, err)
+		return
+	}
+
	p := &layer.DeleteObjectParams{
-		BktInfo:    bktInfo,
-		Objects:    toRemove,
-		Settings:   bktSettings,
-		IsMultiple: true,
+		BktInfo:     bktInfo,
+		Objects:     toRemove,
+		Settings:    bktSettings,
+		NetworkInfo: networkInfo,
+		IsMultiple:  true,
	}
	deletedObjects := h.obj.DeleteObjects(ctx, p)
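Both delete handlers now resolve network info once per request and thread it through `layer.DeleteObjectParams`, presumably so the layer can use epoch data when forming delete operations. A minimal sketch of the resulting call pattern, using only names visible in the hunks above; the wrapper function and the `[]*layer.VersionedObject` element type are assumptions:

// Hypothetical wrapper illustrating the new call pattern from this diff.
func deleteWithNetworkInfo(ctx context.Context, h *handler, bktInfo *data.BucketInfo,
	settings *data.BucketSettings, toRemove []*layer.VersionedObject) []*layer.VersionedObject {
	networkInfo, err := h.obj.GetNetworkInfo(ctx) // new prerequisite before any delete
	if err != nil {
		return nil // the real handlers respond with logAndSendError instead
	}

	return h.obj.DeleteObjects(ctx, &layer.DeleteObjectParams{
		BktInfo:     bktInfo,
		Objects:     toRemove,
		Settings:    settings,
		NetworkInfo: networkInfo, // threaded through to the layer
		IsMultiple:  len(toRemove) > 1,
	})
}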
@@ -10,7 +10,7 @@ import (

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -131,7 +131,7 @@ func TestDeleteObjectsError(t *testing.T) {
	addr.SetContainer(bktInfo.CID)
	addr.SetObject(nodeVersion.OID)

-	expectedError := apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
+	expectedError := apierr.GetAPIError(apierr.ErrAccessDenied)
	hc.tp.SetObjectError(addr, expectedError)

	w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
@@ -553,9 +553,9 @@ func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version s
	assertStatus(t, w, http.StatusNotFound)
}

-func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
+func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
	w := headObjectBase(hc, bktName, objName, version)
-	assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
@@ -16,6 +16,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
	"github.com/stretchr/testify/require"
)
@@ -37,7 +38,7 @@ func TestSimpleGetEncrypted(t *testing.T) {

	objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
	require.NoError(t, err)
-	obj, err := tc.MockedPool().GetObject(tc.Context(), layer.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
+	obj, err := tc.MockedPool().GetObject(tc.Context(), frostfs.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
	require.NoError(t, err)
	encryptedContent, err := io.ReadAll(obj.Payload)
	require.NoError(t, err)
@@ -288,6 +289,21 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID
	return w
}

+func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
+	w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
+	assertStatus(hc.t, w, http.StatusNoContent)
+}
+
+func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
+	query := make(url.Values)
+	query.Set(uploadIDQuery, uploadID)
+
+	w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
+	hc.Handler().AbortMultipartUploadHandler(w, r)
+
+	return w
+}
+
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
	return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
}
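The new abort helpers slot into the same pattern as the other multipart test helpers. A hypothetical test using them could look like this; `createMultipartUpload` and its `UploadID` field are assumptions based on the surrounding helper style, not part of this diff:

func TestAbortMultipart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-abort", "object-multipart"
	createTestBucket(hc, bktName)

	// Assumed helper: initiates the upload and returns its ID.
	multipartInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

	// New helper from this diff: issues AbortMultipartUpload and
	// asserts the expected 204 No Content status.
	abortMultipartUpload(hc, bktName, objName, multipartInfo.UploadID)
}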
@@ -2,7 +2,7 @@ package handler

import (
	"bytes"
-	stderrors "errors"
+	"errors"
	"fmt"
	"io"
	"net/http"
@@ -13,7 +13,7 @@ import (

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	"github.com/stretchr/testify/require"
@@ -89,7 +89,7 @@ func TestPreconditions(t *testing.T) {
			name:     "IfMatch false",
			info:     newInfo(etag, today),
			args:     &conditionalArgs{IfMatch: etag2},
-			expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
+			expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)},
		{
			name: "IfNoneMatch true",
			info: newInfo(etag, today),
@@ -99,7 +99,7 @@ func TestPreconditions(t *testing.T) {
			name:     "IfNoneMatch false",
			info:     newInfo(etag, today),
			args:     &conditionalArgs{IfNoneMatch: etag},
-			expected: errors.GetAPIError(errors.ErrNotModified)},
+			expected: apierr.GetAPIError(apierr.ErrNotModified)},
		{
			name: "IfModifiedSince true",
			info: newInfo(etag, today),
@@ -109,7 +109,7 @@ func TestPreconditions(t *testing.T) {
			name:     "IfModifiedSince false",
			info:     newInfo(etag, yesterday),
			args:     &conditionalArgs{IfModifiedSince: &today},
-			expected: errors.GetAPIError(errors.ErrNotModified)},
+			expected: apierr.GetAPIError(apierr.ErrNotModified)},
		{
			name: "IfUnmodifiedSince true",
			info: newInfo(etag, yesterday),
@@ -119,7 +119,7 @@ func TestPreconditions(t *testing.T) {
			name:     "IfUnmodifiedSince false",
			info:     newInfo(etag, today),
			args:     &conditionalArgs{IfUnmodifiedSince: &yesterday},
-			expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
+			expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)},

		{
			name: "IfMatch true, IfUnmodifiedSince false",
@@ -131,19 +131,19 @@ func TestPreconditions(t *testing.T) {
			name:     "IfMatch false, IfUnmodifiedSince true",
			info:     newInfo(etag, yesterday),
			args:     &conditionalArgs{IfMatch: etag2, IfUnmodifiedSince: &today},
-			expected: errors.GetAPIError(errors.ErrPreconditionFailed),
+			expected: apierr.GetAPIError(apierr.ErrPreconditionFailed),
		},
		{
			name:     "IfNoneMatch false, IfModifiedSince true",
			info:     newInfo(etag, today),
			args:     &conditionalArgs{IfNoneMatch: etag, IfModifiedSince: &yesterday},
-			expected: errors.GetAPIError(errors.ErrNotModified),
+			expected: apierr.GetAPIError(apierr.ErrNotModified),
		},
		{
			name:     "IfNoneMatch true, IfModifiedSince false",
			info:     newInfo(etag, yesterday),
			args:     &conditionalArgs{IfNoneMatch: etag2, IfModifiedSince: &today},
-			expected: errors.GetAPIError(errors.ErrNotModified),
+			expected: apierr.GetAPIError(apierr.ErrNotModified),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
@@ -151,7 +151,7 @@ func TestPreconditions(t *testing.T) {
			if tc.expected == nil {
				require.NoError(t, actual)
			} else {
-				require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
+				require.True(t, errors.Is(actual, tc.expected), tc.expected, actual)
			}
		})
	}
@@ -193,8 +193,8 @@ func TestGetObject(t *testing.T) {
	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
	hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})

-	getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), errors.ErrNoSuchVersion)
-	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
+	getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
+	getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
}

func TestGetObjectEnabledMD5(t *testing.T) {
@@ -228,9 +228,17 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
	return content
}

+func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte {
+	w := getObjectBaseResponse(tc, bktName, objName, version)
+	assertStatus(tc.t, w, http.StatusOK)
+	content, err := io.ReadAll(w.Result().Body)
+	require.NoError(tc.t, err)
+	return content
+}
+
-func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
+func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
	w := getObjectBaseResponse(hc, bktName, objName, version)
-	assertS3Error(hc.t, w, errors.GetAPIError(code))
+	assertS3Error(hc.t, w, apierr.GetAPIError(code))
}

func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
api/handler/handler_fuzz_test.go (new file, 1025 lines)
File diff suppressed because it is too large
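The fuzz harness itself is suppressed in this view. Judging by the surrounding refactor (error-returning `prepareHandlerContextBase` and `newTestAccessBox`), it most likely builds handler contexts without `require`-style assertions. A purely hypothetical shape, not the real file:

// Hypothetical sketch only; the real 1025-line file is not shown here.
func FuzzPutObjectSketch(f *testing.F) {
	f.Fuzz(func(t *testing.T, objName string, payload []byte) {
		hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
		if err != nil {
			t.Skip(err) // setup failures skip the input instead of asserting
		}
		_ = hc
		_, _ = objName, payload // a real harness would drive a handler here
	})
}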
@@ -20,6 +20,7 @@ import (
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
@@ -37,8 +38,12 @@ import (
)

type handlerContext struct {
+	*handlerContextBase
+	t *testing.T
+}
+
+type handlerContextBase struct {
	owner user.ID
-	t     *testing.T
	h     *handler
	tp    *layer.TestFrostFS
	tree  *tree.Tree
@@ -50,19 +55,19 @@ type handlerContext struct {
	cache *layer.Cache
}

-func (hc *handlerContext) Handler() *handler {
+func (hc *handlerContextBase) Handler() *handler {
	return hc.h
}

-func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
+func (hc *handlerContextBase) MockedPool() *layer.TestFrostFS {
	return hc.tp
}

-func (hc *handlerContext) Layer() *layer.Layer {
+func (hc *handlerContextBase) Layer() *layer.Layer {
	return hc.h.obj
}

-func (hc *handlerContext) Context() context.Context {
+func (hc *handlerContextBase) Context() context.Context {
	return hc.context
}

@@ -72,7 +77,6 @@ type configMock struct {
	defaultCopiesNumbers          []uint32
	bypassContentEncodingInChunks bool
	md5Enabled                    bool
-	domains                       []string
}

func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
@@ -136,27 +140,35 @@ func (c *configMock) RetryStrategy() RetryStrategy {
	return RetryStrategyConstant
}

-func (c *configMock) Domains() []string {
-	return c.domains
-}
-
func prepareHandlerContext(t *testing.T) *handlerContext {
-	return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
+	hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
+	require.NoError(t, err)
+	return &handlerContext{
+		handlerContextBase: hc,
+		t:                  t,
+	}
}

func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
-	return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
+	hc, err := prepareHandlerContextBase(getMinCacheConfig(zap.NewExample()))
+	require.NoError(t, err)
+	return &handlerContext{
+		handlerContextBase: hc,
+		t:                  t,
+	}
}

-func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
+func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBase, error) {
	key, err := keys.NewPrivateKey()
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

-	l := zap.NewExample()
+	log := zap.NewExample()
	tp := layer.NewTestFrostFS(key)

	testResolver := &resolver.Resolver{Name: "test_resolver"}
-	testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
+	testResolver.SetResolveFunc(func(_ context.Context, _, name string) (cid.ID, error) {
		return tp.ContainerID(name)
	})

@@ -164,7 +176,9 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	memCli, err := tree.NewTreeServiceClientMemory()
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

	treeMock := tree.NewTree(memCli, zap.NewExample())

@@ -181,32 +195,38 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *hand

	var pp netmap.PlacementPolicy
	err = pp.DecodeString("REP 1")
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

	cfg := &configMock{
		defaultPolicy: pp,
	}
	h := &handler{
-		log: l,
-		obj: layer.NewLayer(l, tp, layerCfg),
+		log:       log,
+		obj:       layer.NewLayer(log, tp, layerCfg),
		cfg:       cfg,
		ape:       newAPEMock(),
		frostfsid: newFrostfsIDMock(),
	}

-	return &handlerContext{
+	accessBox, err := newTestAccessBox(key)
+	if err != nil {
+		return nil, err
+	}
+
+	return &handlerContextBase{
		owner: owner,
-		t:     t,
		h:     h,
		tp:    tp,
		tree:  treeMock,
-		context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: newTestAccessBox(t, key)}),
+		context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: accessBox}),
		config:  cfg,

		layerFeatures: features,
		treeMock:      memCli,
		cache:         layerCfg.Cache,
-	}
+	}, nil
}

func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
@@ -224,12 +244,14 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
		Buckets:       minCacheCfg,
		System:        minCacheCfg,
		AccessControl: minCacheCfg,
+		NetworkInfo:   &cache.NetworkInfoCacheConfig{Lifetime: minCacheCfg.Lifetime},
	}
}

type apeMock struct {
	chainMap  map[engine.Target][]*chain.Chain
	policyMap map[string][]byte
+	err       error
}

func newAPEMock() *apeMock {
@@ -273,6 +295,10 @@ func (a *apeMock) DeletePolicy(namespace string, cnrID cid.ID) error {
}

func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error {
+	if a.err != nil {
+		return a.err
+	}
+
	if err := a.PutPolicy(ns, cnrID, policy); err != nil {
		return err
	}
@@ -287,6 +313,10 @@ func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain
}

func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error {
+	if a.err != nil {
+		return a.err
+	}
+
	if err := a.DeletePolicy(ns, cnrID); err != nil {
		return err
	}
@@ -300,6 +330,10 @@ func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.I
}

func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
+	if a.err != nil {
+		return nil, a.err
+	}
+
	policy, ok := a.policyMap[ns+cnrID.EncodeToString()]
	if !ok {
		return nil, errors.New("not found")
@@ -309,6 +343,10 @@ func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
}

func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error {
+	if a.err != nil {
+		return a.err
+	}
+
	for i := range chains {
		if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil {
			return err
@@ -350,10 +388,16 @@ func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
}

func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
-	res, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
+	rep := netmap.ReplicaDescriptor{}
+	rep.SetNumberOfObjects(1)
+	policy := netmap.PlacementPolicy{}
+	policy.AddReplicas(rep)
+
+	res, err := hc.MockedPool().CreateContainer(hc.Context(), frostfs.PrmContainerCreate{
		Creator:              hc.owner,
		Name:                 bktName,
		AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
+		Policy:               policy,
	})
	require.NoError(hc.t, err)
@@ -365,6 +409,7 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
		ObjectLockEnabled:       true,
		Owner:                   ownerID,
		HomomorphicHashDisabled: res.HomomorphicHashDisabled,
+		PlacementPolicy:         policy,
	}

	key, err := keys.NewPrivateKey()
@@ -397,7 +442,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
	extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
		BktInfo:    bktInfo,
		Object:     objName,
-		Size:       uint64(len(content)),
+		Size:       ptr(uint64(len(content))),
		Reader:     bytes.NewReader(content),
		Header:     header,
		Encryption: encryption,
@@ -6,7 +6,7 @@ import (
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@@ -95,8 +95,8 @@ func TestHeadObject(t *testing.T) {
	hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
	hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})

-	headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
-	headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
+	headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion)
+	headObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey)
}

func TestIsAvailableToResolve(t *testing.T) {
@@ -119,21 +119,25 @@ func TestIsAvailableToResolve(t *testing.T) {
	}
}

-func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
+func newTestAccessBox(key *keys.PrivateKey) (*accessbox.Box, error) {
	var err error
	if key == nil {
		key, err = keys.NewPrivateKey()
-		require.NoError(t, err)
+		if err != nil {
+			return nil, err
+		}
	}

	var btoken bearer.Token
	btoken.SetImpersonate(true)
	err = btoken.Sign(key.PrivateKey)
-	require.NoError(t, err)
+	if err != nil {
+		return nil, err
+	}

	return &accessbox.Box{
		Gate: &accessbox.GateData{
			BearerToken: &btoken,
		},
-	}
+	}, nil
}
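Dropping `*testing.T` from `newTestAccessBox` means call sites now decide how to handle the error themselves. A sketch of a test-side caller in the same package; the test function itself is illustrative:

func TestCustomAccessBoxSketch(t *testing.T) {
	// A nil key is allowed: the helper generates a fresh private key itself.
	box, err := newTestAccessBox(nil)
	require.NoError(t, err)

	ctx := middleware.SetBox(context.Background(), &middleware.Box{AccessBox: box})
	_ = ctx // pass this context into whatever handler call the test exercises
}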
api/handler/lifecycle.go (new file, 282 lines)
@@ -0,0 +1,282 @@
package handler

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

const (
	maxRules                   = 1000
	maxRuleIDLen               = 255
	maxNewerNoncurrentVersions = 100
)

func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	cfg, err := h.obj.GetBucketLifecycleConfiguration(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket lifecycle configuration", reqInfo, err)
		return
	}

	if err = middleware.EncodeToResponse(w, cfg); err != nil {
		h.logAndSendError(w, "could not encode GetBucketLifecycle response", reqInfo, err)
		return
	}
}

func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	var buf bytes.Buffer

	tee := io.TeeReader(r.Body, &buf)
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	// Content-Md5 is required and should be set
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
	if _, ok := r.Header[api.ContentMD5]; !ok {
		h.logAndSendError(w, "missing Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrMissingContentMD5))
		return
	}

	headerMD5, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5))
	if err != nil {
		h.logAndSendError(w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
		return
	}

	cfg := new(data.LifecycleConfiguration)
	if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
		h.logAndSendError(w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
		return
	}

	bodyMD5, err := getContentMD5(&buf)
	if err != nil {
		h.logAndSendError(w, "could not get content md5", reqInfo, err)
		return
	}

	if !bytes.Equal(headerMD5, bodyMD5) {
		h.logAndSendError(w, "Content-MD5 does not match", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	networkInfo, err := h.obj.GetNetworkInfo(ctx)
	if err != nil {
		h.logAndSendError(w, "could not get network info", reqInfo, err)
		return
	}

	if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
		h.logAndSendError(w, "invalid lifecycle configuration", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
		return
	}

	params := &layer.PutBucketLifecycleParams{
		BktInfo:      bktInfo,
		LifecycleCfg: cfg,
	}

	params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err)
		return
	}

	if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
		h.logAndSendError(w, "could not put bucket lifecycle configuration", reqInfo, err)
		return
	}
}

func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	if err = h.obj.DeleteBucketLifecycleConfiguration(ctx, bktInfo); err != nil {
		h.logAndSendError(w, "could not delete bucket lifecycle configuration", reqInfo, err)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}

func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfiguration, ni *netmap.NetworkInfo) error {
	now := layer.TimeNow(ctx)

	if len(cfg.Rules) > maxRules {
		return fmt.Errorf("number of rules cannot be greater than %d", maxRules)
	}

	ids := make(map[string]struct{}, len(cfg.Rules))
	for i, rule := range cfg.Rules {
		if _, ok := ids[rule.ID]; ok && rule.ID != "" {
			return fmt.Errorf("duplicate 'ID': %s", rule.ID)
		}
		ids[rule.ID] = struct{}{}

		if len(rule.ID) > maxRuleIDLen {
			return fmt.Errorf("'ID' value cannot be longer than %d characters", maxRuleIDLen)
		}

		if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
			return fmt.Errorf("invalid lifecycle status: %s", rule.Status)
		}

		if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
			return fmt.Errorf("at least one action needs to be specified in a rule")
		}

		if rule.AbortIncompleteMultipartUpload != nil {
			if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
				*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
				return fmt.Errorf("days after initiation must be a positive integer: %d", *rule.AbortIncompleteMultipartUpload.DaysAfterInitiation)
			}

			if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
				return fmt.Errorf("abort incomplete multipart upload cannot be specified with tags")
			}
		}

		if rule.Expiration != nil {
			if rule.Expiration.ExpiredObjectDeleteMarker != nil {
				if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
					return fmt.Errorf("expired object delete marker cannot be specified with days or date")
				}

				if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
					return fmt.Errorf("expired object delete marker cannot be specified with tags")
				}
			}

			if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
				return fmt.Errorf("expiration days must be a positive integer: %d", *rule.Expiration.Days)
			}

			if rule.Expiration.Date != "" {
				parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
				if err != nil {
					return fmt.Errorf("invalid value of expiration date: %s", rule.Expiration.Date)
				}

				epoch, err := util.TimeToEpoch(ni, now, parsedTime)
				if err != nil {
					return fmt.Errorf("convert time to epoch: %w", err)
				}

				cfg.Rules[i].Expiration.Epoch = &epoch
			}
		}

		if rule.NonCurrentVersionExpiration != nil {
			if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
				(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
					*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
				return fmt.Errorf("invalid value of newer noncurrent versions: %d", *rule.NonCurrentVersionExpiration.NewerNonCurrentVersions)
			}

			if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
				return fmt.Errorf("invalid value of noncurrent days: %d", *rule.NonCurrentVersionExpiration.NonCurrentDays)
			}
		}

		if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
			return err
		}
	}

	return nil
}

func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
	if filter == nil {
		return nil
	}

	var fields int

	if filter.And != nil {
		fields++
		for _, tag := range filter.And.Tags {
			err := checkTag(tag)
			if err != nil {
				return err
			}
		}

		if filter.And.ObjectSizeGreaterThan != nil && filter.And.ObjectSizeLessThan != nil &&
			*filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
			return fmt.Errorf("the maximum object size must be larger than the minimum object size")
		}
	}

	if filter.ObjectSizeGreaterThan != nil {
		fields++
	}

	if filter.ObjectSizeLessThan != nil {
		fields++
	}

	if filter.Prefix != "" {
		fields++
	}

	if filter.Tag != nil {
		fields++
		err := checkTag(*filter.Tag)
		if err != nil {
			return err
		}
	}

	if fields > 1 {
		return fmt.Errorf("filter cannot have more than one field")
	}

	return nil
}

func getContentMD5(reader io.Reader) ([]byte, error) {
	hash := md5.New()
	_, err := io.Copy(hash, reader)
	if err != nil {
		return nil, err
	}
	return hash.Sum(nil), nil
}
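Because `PutBucketLifecycleHandler` rejects requests whose Content-MD5 header does not match the body, a client has to hash the exact bytes it sends. A standard-library-only sketch; the endpoint URL and XML body are illustrative, not taken from this repository:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`<LifecycleConfiguration><Rule><Status>Enabled</Status>` +
		`<Expiration><Days>21</Days></Expiration></Rule></LifecycleConfiguration>`)

	// Mirror of the server-side check: MD5 over the raw body, base64-encoded.
	sum := md5.Sum(body)

	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:8080/bucket?lifecycle", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))

	fmt.Println("Content-MD5:", req.Header.Get("Content-MD5"))
}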
api/handler/lifecycle_test.go (new file, 464 lines)
@@ -0,0 +1,464 @@
package handler

import (
	"bytes"
	"crypto/md5"
	"crypto/rand"
	"encoding/base64"
	"encoding/xml"
	"io"
	"net/http"
	"net/http/httptest"
	"strconv"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"github.com/mr-tron/base58"
	"github.com/stretchr/testify/require"
)

func TestPutBucketLifecycleConfiguration(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)

	bktName := "bucket-lifecycle"
	createBucket(hc, bktName)

	for _, tc := range []struct {
		name  string
		body  *data.LifecycleConfiguration
		error bool
	}{
		{
			name: "correct configuration",
			body: &data.LifecycleConfiguration{
				XMLName: xml.Name{
					Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
					Local: "LifecycleConfiguration",
				},
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
							Date: time.Now().Format("2006-01-02T15:04:05.000Z"),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								Prefix:                "prefix/",
								Tags:                  []data.Tag{{Key: "key", Value: "value"}, {Key: "tag", Value: ""}},
								ObjectSizeGreaterThan: ptr(uint64(100)),
							},
						},
					},
					{
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(14),
						},
						Expiration: &data.LifecycleExpiration{
							ExpiredObjectDeleteMarker: ptr(true),
						},
						Filter: &data.LifecycleRuleFilter{
							ObjectSizeLessThan: ptr(uint64(100)),
						},
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NewerNonCurrentVersions: ptr(1),
							NonCurrentDays:          ptr(21),
						},
					},
				},
			},
		},
		{
			name: "too many rules",
			body: func() *data.LifecycleConfiguration {
				lifecycle := new(data.LifecycleConfiguration)
				for i := 0; i <= maxRules; i++ {
					lifecycle.Rules = append(lifecycle.Rules, data.LifecycleRule{
						ID: "Rule" + strconv.Itoa(i),
					})
				}
				return lifecycle
			}(),
			error: true,
		},
		{
			name: "duplicate rule ID",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						ID:     "Rule",
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
					},
					{
						ID:     "Rule",
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "too long rule ID",
			body: func() *data.LifecycleConfiguration {
				id := make([]byte, maxRuleIDLen+1)
				_, err := io.ReadFull(rand.Reader, id)
				require.NoError(t, err)
				return &data.LifecycleConfiguration{
					Rules: []data.LifecycleRule{
						{
							ID: base58.Encode(id)[:maxRuleIDLen+1],
						},
					},
				}
			}(),
			error: true,
		},
		{
			name: "invalid status",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: "invalid",
					},
				},
			},
			error: true,
		},
		{
			name: "no actions",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Filter: &data.LifecycleRuleFilter{
							Prefix: "prefix/",
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid days after initiation",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(0),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid expired object delete marker declaration",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days:                      ptr(21),
							ExpiredObjectDeleteMarker: ptr(false),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid expiration days",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(0),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid expiration date",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Date: "invalid",
						},
					},
				},
			},
			error: true,
		},
		{
			name: "newer noncurrent versions is too small",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NewerNonCurrentVersions: ptr(0),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "newer noncurrent versions is too large",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NewerNonCurrentVersions: ptr(101),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid noncurrent days",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
							NonCurrentDays: ptr(0),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "more than one filter field",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							Prefix:                "prefix/",
							ObjectSizeGreaterThan: ptr(uint64(100)),
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid tag in filter",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							Tag: &data.Tag{},
						},
					},
				},
			},
			error: true,
		},
		{
			name: "abort incomplete multipart upload with tag",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
							DaysAfterInitiation: ptr(14),
						},
						Filter: &data.LifecycleRuleFilter{
							Tag: &data.Tag{Key: "key", Value: "value"},
						},
					},
				},
			},
			error: true,
		},
		{
			name: "expired object delete marker with tag",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							ExpiredObjectDeleteMarker: ptr(true),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								Tags: []data.Tag{{Key: "key", Value: "value"}},
							},
						},
					},
				},
			},
			error: true,
		},
		{
			name: "invalid size range",
			body: &data.LifecycleConfiguration{
				Rules: []data.LifecycleRule{
					{
						Status: data.LifecycleStatusEnabled,
						Expiration: &data.LifecycleExpiration{
							Days: ptr(21),
						},
						Filter: &data.LifecycleRuleFilter{
							And: &data.LifecycleRuleAndOperator{
								ObjectSizeGreaterThan: ptr(uint64(100)),
								ObjectSizeLessThan:    ptr(uint64(100)),
							},
						},
					},
				},
			},
			error: true,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			if tc.error {
				putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(apierr.ErrMalformedXML))
				return
			}

			putBucketLifecycleConfiguration(hc, bktName, tc.body)

			cfg := getBucketLifecycleConfiguration(hc, bktName)
			require.Equal(t, *tc.body, *cfg)

			deleteBucketLifecycleConfiguration(hc, bktName)
			getBucketLifecycleConfigurationErr(hc, bktName, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration))
		})
	}
}

func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-lifecycle-md5"
	createBucket(hc, bktName)

	lifecycle := &data.LifecycleConfiguration{
		Rules: []data.LifecycleRule{
			{
				Status: data.LifecycleStatusEnabled,
				Expiration: &data.LifecycleExpiration{
					Days: ptr(21),
				},
			},
		},
	}

	w, r := prepareTestRequest(hc, bktName, "", lifecycle)
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))

	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
	r.Header.Set(api.ContentMD5, "")
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))

	w, r = prepareTestRequest(hc, bktName, "", lifecycle)
	r.Header.Set(api.ContentMD5, "some-hash")
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
}

func TestPutBucketLifecycleInvalidXML(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-lifecycle-invalid-xml"
	createBucket(hc, bktName)

	cfg := &data.CORSConfiguration{}
	body, err := xml.Marshal(cfg)
	require.NoError(t, err)
	contentMD5, err := getContentMD5(bytes.NewReader(body))
	require.NoError(t, err)

	w, r := prepareTestRequest(hc, bktName, "", cfg)
	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(contentMD5))
	hc.Handler().PutBucketLifecycleHandler(w, r)
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
}

func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
	assertStatus(hc.t, w, http.StatusOK)
}

func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apierr.Error) {
	w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
	assertS3Error(hc.t, w, err)
}

func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", cfg)

	rawBody, err := xml.Marshal(cfg)
	require.NoError(hc.t, err)

	hash := md5.New()
	hash.Write(rawBody)
	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
	hc.Handler().PutBucketLifecycleHandler(w, r)
	return w
}

func getBucketLifecycleConfiguration(hc *handlerContext, bktName string) *data.LifecycleConfiguration {
	w := getBucketLifecycleConfigurationBase(hc, bktName)
	assertStatus(hc.t, w, http.StatusOK)
	res := &data.LifecycleConfiguration{}
	parseTestResponse(hc.t, w, res)
	return res
}

func getBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, err apierr.Error) {
	w := getBucketLifecycleConfigurationBase(hc, bktName)
	assertS3Error(hc.t, w, err)
}

func getBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", nil)
	hc.Handler().GetBucketLifecycleHandler(w, r)
	return w
}

func deleteBucketLifecycleConfiguration(hc *handlerContext, bktName string) {
	w := deleteBucketLifecycleConfigurationBase(hc, bktName)
	assertStatus(hc.t, w, http.StatusNoContent)
}

func deleteBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
	w, r := prepareTestRequest(hc, bktName, "", nil)
	hc.Handler().DeleteBucketLifecycleHandler(w, r)
	return w
}

func ptr[T any](t T) *T {
	return &t
}
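The generic `ptr` helper at the bottom exists because Go has no address-of operator for literals; the table tests above rely on it heavily. Usage within the same package:

// Each call returns a pointer to a copy of its argument,
// so literals can populate pointer-typed struct fields directly.
exp := &data.LifecycleExpiration{
	Days:                      ptr(21),   // *int
	ExpiredObjectDeleteMarker: ptr(true), // *bool
}
_ = exp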
@@ -9,7 +9,7 @@ import (

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
)
@@ -36,7 +36,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt

	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "couldn't put object locking configuration", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed))
		return
	}

@@ -83,7 +83,7 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt

	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "object lock disabled", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
		return
	}

@@ -116,7 +116,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque

	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "object lock disabled", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
		return
	}

@@ -168,7 +168,7 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque

	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "object lock disabled", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
		return
	}

@@ -204,7 +204,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
	}
	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "object lock disabled", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
		return
	}

@@ -252,7 +252,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque

	if !bktInfo.ObjectLockEnabled {
		h.logAndSendError(w, "object lock disabled", reqInfo,
-			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
+			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
		return
	}

@@ -269,7 +269,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
	}

	if !lockInfo.IsRetentionSet() {
-		h.logAndSendError(w, "retention lock isn't set", reqInfo, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
+		h.logAndSendError(w, "retention lock isn't set", reqInfo, apierr.GetAPIError(apierr.ErrNoSuchKey))
		return
	}

@@ -314,7 +314,7 @@ func checkLockConfiguration(conf *data.ObjectLockConfiguration) error {
func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
	if !bktInfo.ObjectLockEnabled {
		if existLockHeaders(header) {
-			return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound)
+			return nil, apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound)
		}
		return nil, nil
	}
@@ -346,7 +346,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
	until := header.Get(api.AmzObjectLockRetainUntilDate)

	if mode != "" && until == "" || mode == "" && until != "" {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockInvalidHeaders)
+		return nil, apierr.GetAPIError(apierr.ErrObjectLockInvalidHeaders)
	}

	if mode != "" {
@@ -355,7 +355,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
	}

	if mode != complianceMode && mode != governanceMode {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrUnknownWORMModeDirective)
+		return nil, apierr.GetAPIError(apierr.ErrUnknownWORMModeDirective)
	}

	objectLock.Retention.IsCompliance = mode == complianceMode
@@ -364,7 +364,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
	if until != "" {
		retentionDate, err := time.Parse(time.RFC3339, until)
		if err != nil {
-			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidRetentionDate)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidRetentionDate)
		}
		if objectLock.Retention == nil {
			objectLock.Retention = &data.RetentionLock{}
@@ -382,7 +382,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
	}

	if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
+		return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
	}
}

@@ -397,16 +397,16 @@ func existLockHeaders(header http.Header) bool {

func formObjectLockFromRetention(ctx context.Context, retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
	if retention.Mode != governanceMode && retention.Mode != complianceMode {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
+		return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
	}

	retentionDate, err := time.Parse(time.RFC3339, retention.RetainUntilDate)
	if err != nil {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
+		return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
	}

	if retentionDate.Before(layer.TimeNow(ctx)) {
-		return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
+		return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
	}

	var bypass bool
@ -12,7 +12,7 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -270,23 +270,23 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {
|
|||
for _, tc := range []struct {
|
||||
name string
|
||||
bucket string
|
||||
expectedError apiErrors.Error
|
||||
expectedError apierr.Error
|
||||
noError bool
|
||||
configuration *data.ObjectLockConfiguration
|
||||
}{
|
||||
{
|
||||
name: "bkt not found",
|
||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
|
||||
expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
|
||||
},
|
||||
{
|
||||
name: "bkt lock disabled",
|
||||
bucket: bktLockDisabled,
|
||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed),
|
||||
expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed),
|
||||
},
|
||||
{
|
||||
name: "invalid configuration",
|
||||
bucket: bktLockEnabled,
|
||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrInternalError),
|
||||
expectedError: apierr.GetAPIError(apierr.ErrInternalError),
|
||||
configuration: &data.ObjectLockConfiguration{ObjectLockEnabled: "dummy"},
|
||||
},
|
||||
{
|
||||
|
@ -359,18 +359,18 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
|
|||
for _, tc := range []struct {
|
||||
name string
|
||||
bucket string
|
||||
expectedError apiErrors.Error
|
||||
expectedError apierr.Error
|
||||
noError bool
|
||||
expectedConf *data.ObjectLockConfiguration
|
||||
}{
|
||||
{
|
||||
name: "bkt not found",
|
||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
|
||||
expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
|
||||
},
|
||||
{
|
||||
name: "bkt lock disabled",
|
||||
bucket: bktLockDisabled,
|
||||
expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound),
|
||||
expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound),
|
||||
},
|
||||
{
|
||||
name: "bkt lock enabled empty default",
|
||||
|
@@ -407,7 +407,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
 	}
 }

-func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
+func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apierr.Error) {
 	actualErrorResponse := &middleware.ErrorResponse{}
 	err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
 	require.NoError(t, err)

@@ -415,7 +415,7 @@ func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError api
 	require.Equal(t, expectedError.HTTPStatusCode, w.Code)
 	require.Equal(t, expectedError.Code, actualErrorResponse.Code)

-	if expectedError.ErrCode != apiErrors.ErrInternalError {
+	if expectedError.ErrCode != apierr.ErrInternalError {
 		require.Equal(t, expectedError.Description, actualErrorResponse.Message)
 	}
 }
@@ -473,33 +473,33 @@ func TestObjectRetention(t *testing.T) {
 	objName := "obj-for-retention"
 	createTestObject(hc, bktInfo, objName, encryption.Params{})

-	getObjectRetention(hc, bktName, objName, nil, apiErrors.ErrNoSuchKey)
+	getObjectRetention(hc, bktName, objName, nil, apierr.ErrNoSuchKey)

 	retention := &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
 	putObjectRetention(hc, bktName, objName, retention, false, 0)
 	getObjectRetention(hc, bktName, objName, retention, 0)

 	retention = &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().UTC().Add(time.Minute).Format(time.RFC3339)}
-	putObjectRetention(hc, bktName, objName, retention, false, apiErrors.ErrInternalError)
+	putObjectRetention(hc, bktName, objName, retention, false, apierr.ErrInternalError)

 	retention = &data.Retention{Mode: complianceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
 	putObjectRetention(hc, bktName, objName, retention, true, 0)
 	getObjectRetention(hc, bktName, objName, retention, 0)

-	putObjectRetention(hc, bktName, objName, retention, true, apiErrors.ErrInternalError)
+	putObjectRetention(hc, bktName, objName, retention, true, apierr.ErrInternalError)
 }

-func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
+func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, nil)
 	hc.Handler().GetObjectRetentionHandler(w, r)
 	if errCode == 0 {
 		assertRetention(hc.t, w, retention)
 	} else {
-		assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
+		assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
 	}
 }

-func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apiErrors.ErrorCode) {
+func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apierr.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, retention)
 	if byPass {
 		r.Header.Set(api.AmzBypassGovernanceRetention, strconv.FormatBool(true))

@@ -508,7 +508,7 @@ func putObjectRetention(hc *handlerContext, bktName, objName string, retention *
 	if errCode == 0 {
 		assertStatus(hc.t, w, http.StatusOK)
 	} else {
-		assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
+		assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
 	}
 }
@@ -572,37 +572,37 @@ func TestPutLockErrors(t *testing.T) {
 	createTestBucketWithLock(hc, bktName, nil)

 	headers := map[string]string{api.AmzObjectLockMode: complianceMode}
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)

 	delete(headers, api.AmzObjectLockMode)
 	headers[api.AmzObjectLockRetainUntilDate] = time.Now().Add(time.Minute).Format(time.RFC3339)
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)

 	headers[api.AmzObjectLockMode] = "dummy"
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrUnknownWORMModeDirective)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrUnknownWORMModeDirective)

 	headers[api.AmzObjectLockMode] = complianceMode
 	headers[api.AmzObjectLockRetainUntilDate] = time.Now().Format(time.RFC3339)
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrPastObjectLockRetainDate)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrPastObjectLockRetainDate)

 	headers[api.AmzObjectLockRetainUntilDate] = "dummy"
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrInvalidRetentionDate)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrInvalidRetentionDate)

 	putObject(hc, bktName, objName)

 	retention := &data.Retention{Mode: governanceMode}
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)

 	retention.Mode = "dummy"
 	retention.RetainUntilDate = time.Now().Add(time.Minute).UTC().Format(time.RFC3339)
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)

 	retention.Mode = governanceMode
 	retention.RetainUntilDate = time.Now().UTC().Format(time.RFC3339)
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrPastObjectLockRetainDate)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrPastObjectLockRetainDate)
 }

-func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apiErrors.ErrorCode) {
+func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apierr.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, nil)

 	for key, val := range headers {

@@ -610,13 +610,13 @@ func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName
 	}

 	hc.Handler().PutObjectHandler(w, r)
-	assertS3Error(t, w, apiErrors.GetAPIError(errCode))
+	assertS3Error(t, w, apierr.GetAPIError(errCode))
 }

-func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
+func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, retention)
 	hc.Handler().PutObjectRetentionHandler(w, r)
-	assertS3Error(t, w, apiErrors.GetAPIError(errCode))
+	assertS3Error(t, w, apierr.GetAPIError(errCode))
 }

 func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, retention *data.Retention, delta float64) {
@@ -206,10 +206,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	var size uint64
-	if r.ContentLength > 0 {
-		size = uint64(r.ContentLength)
-	}
+	size := h.getPutPayloadSize(r)

 	p := &layer.UploadPartParams{
 		Info: &layer.UploadInfoParams{
@@ -429,7 +426,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
 		Bucket:   objInfo.Bucket,
 		Key:      objInfo.Name,
 		ETag:     data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
-		Location: getObjectLocation(r, h.cfg.Domains(), reqInfo.BucketName, reqInfo.ObjectName),
+		Location: getObjectLocation(r, reqInfo.BucketName, reqInfo.ObjectName, reqInfo.RequestVHSEnabled),
 	}

 	if settings.VersioningEnabled() {

@@ -450,7 +447,7 @@ func getURLScheme(r *http.Request) string {
 }

 // getObjectLocation gets the fully qualified URL of an object.
-func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
+func getObjectLocation(r *http.Request, bucket, object string, vhsEnabled bool) string {
 	proto := middleware.GetSourceScheme(r)
 	if proto == "" {
 		proto = getURLScheme(r)

@@ -460,13 +457,12 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 		Path:   path.Join("/", bucket, object),
 		Scheme: proto,
 	}
-	// If domain is set then we need to use bucket DNS style.
-	for _, domain := range domains {
-		if strings.HasPrefix(r.Host, bucket+"."+domain) {
-			u.Path = path.Join("/", object)
-			break
-		}
-	}
+
+	// If vhs enabled then we need to use bucket DNS style.
+	if vhsEnabled {
+		u.Path = path.Join("/", object)
+	}

 	return u.String()
 }
|
@ -530,6 +526,11 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
|||
UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
|
||||
}
|
||||
|
||||
if p.EncodingType != "" && strings.ToLower(p.EncodingType) != urlEncodingType {
|
||||
h.logAndSendError(w, "invalid encoding type", reqInfo, errors.GetAPIError(errors.ErrInvalidEncodingMethod))
|
||||
return
|
||||
}
|
||||
|
||||
list, err := h.obj.ListMultipartUploads(r.Context(), p)
|
||||
if err != nil {
|
||||
h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
|
||||
|
@@ -640,14 +641,14 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
 	res := ListMultipartUploadsResponse{
 		Bucket:             params.Bkt.Name,
 		CommonPrefixes:     fillPrefixes(info.Prefixes, params.EncodingType),
-		Delimiter:          params.Delimiter,
+		Delimiter:          s3PathEncode(params.Delimiter, params.EncodingType),
 		EncodingType:       params.EncodingType,
 		IsTruncated:        info.IsTruncated,
-		KeyMarker:          params.KeyMarker,
+		KeyMarker:          s3PathEncode(params.KeyMarker, params.EncodingType),
 		MaxUploads:         params.MaxUploads,
-		NextKeyMarker:      info.NextKeyMarker,
+		NextKeyMarker:      s3PathEncode(info.NextKeyMarker, params.EncodingType),
 		NextUploadIDMarker: info.NextUploadIDMarker,
-		Prefix:             params.Prefix,
+		Prefix:             s3PathEncode(params.Prefix, params.EncodingType),
 		UploadIDMarker:     params.UploadIDMarker,
 	}

@@ -659,7 +660,7 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
 				ID:          u.Owner.String(),
 				DisplayName: u.Owner.String(),
 			},
-			Key: u.Key,
+			Key: s3PathEncode(u.Key, params.EncodingType),
 			Owner: Owner{
 				ID:          u.Owner.String(),
 				DisplayName: u.Owner.String(),
@@ -2,21 +2,27 @@ package handler

 import (
+	"crypto/md5"
 	"crypto/rand"
 	"crypto/tls"
+	"encoding/base64"
 	"encoding/hex"
 	"encoding/xml"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"strconv"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
+	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
 	"github.com/stretchr/testify/require"
 )

@@ -35,7 +41,7 @@ func TestMultipartUploadInvalidPart(t *testing.T) {
 	etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
 	etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
 	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
-	assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
+	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))
 }

 func TestDeleteMultipartAllParts(t *testing.T) {

@@ -99,7 +105,7 @@ func TestMultipartReUploadPart(t *testing.T) {
 	require.Equal(t, etag2, list.Parts[1].ETag)

 	w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
-	assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
+	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))

 	etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
 	etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)
@@ -122,6 +128,108 @@ func TestMultipartReUploadPart(t *testing.T) {
 	equalDataSlices(t, append(data1, data2...), data)
 }

+func TestMultipartRemovePartsSplit(t *testing.T) {
+	bktName, objName := "bucket-to-upload-part", "object-multipart"
+	partSize := 8
+
+	t.Run("reupload part", func(t *testing.T) {
+		hc := prepareHandlerContext(t)
+		bktInfo := createTestBucket(hc, bktName)
+		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
+
+		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
+
+		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
+		require.NoError(t, err)
+
+		objID := oidtest.ID()
+		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
+			"Number": "1",
+			"OID":    objID.EncodeToString(),
+			"Owner":  usertest.ID().EncodeToString(),
+			"ETag":   "etag",
+		})
+		require.NoError(t, err)
+
+		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
+		require.Len(t, hc.tp.Objects(), 2)
+
+		list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
+		require.Len(t, list.Parts, 1)
+		require.Equal(t, `"etag"`, list.Parts[0].ETag)
+
+		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
+		list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
+		require.Len(t, list.Parts, 1)
+		require.Equal(t, etag1, list.Parts[0].ETag)
+
+		require.Len(t, hc.tp.Objects(), 1)
+	})
+
+	t.Run("abort multipart", func(t *testing.T) {
+		hc := prepareHandlerContext(t)
+		bktInfo := createTestBucket(hc, bktName)
+		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
+
+		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
+
+		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
+		require.NoError(t, err)
+
+		objID := oidtest.ID()
+		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
+			"Number": "1",
+			"OID":    objID.EncodeToString(),
+			"Owner":  usertest.ID().EncodeToString(),
+			"ETag":   "etag",
+		})
+		require.NoError(t, err)
+
+		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
+		require.Len(t, hc.tp.Objects(), 2)
+
+		abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
+		require.Empty(t, hc.tp.Objects())
+	})
+
+	t.Run("complete multipart", func(t *testing.T) {
+		hc := prepareHandlerContext(t)
+		bktInfo := createTestBucket(hc, bktName)
+		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
+
+		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
+
+		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
+		require.NoError(t, err)
+
+		objID := oidtest.ID()
+		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
+			"Number": "1",
+			"OID":    objID.EncodeToString(),
+			"Owner":  usertest.ID().EncodeToString(),
+			"ETag":   "etag",
+		})
+		require.NoError(t, err)
+
+		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
+		require.Len(t, hc.tp.Objects(), 2)
+
+		completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
+		require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
+	})
+}
+
+func containsOID(objects []*object.Object, objID oid.ID) bool {
+	for _, o := range objects {
+		oID, _ := o.ID()
+		if oID.Equals(objID) {
+			return true
+		}
+	}
+
+	return false
+}
+
 func TestListMultipartUploads(t *testing.T) {
 	hc := prepareHandlerContext(t)

@@ -145,14 +253,14 @@ func TestListMultipartUploads(t *testing.T) {
 	})

 	t.Run("check max uploads", func(t *testing.T) {
-		listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", "", 2)
+		listUploads := listMultipartUploads(hc, bktName, "", "", "", "", 2)
 		require.Len(t, listUploads.Uploads, 2)
 		require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
 		require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
 	})

 	t.Run("check prefix", func(t *testing.T) {
-		listUploads := listMultipartUploadsBase(hc, bktName, "/my", "", "", "", -1)
+		listUploads := listMultipartUploads(hc, bktName, "/my", "", "", "", -1)
 		require.Len(t, listUploads.Uploads, 2)
 		require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
 		require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
@@ -160,7 +268,7 @@ func TestListMultipartUploads(t *testing.T) {

 	t.Run("check markers", func(t *testing.T) {
 		t.Run("check only key-marker", func(t *testing.T) {
-			listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", objName2, -1)
+			listUploads := listMultipartUploads(hc, bktName, "", "", "", objName2, -1)
 			require.Len(t, listUploads.Uploads, 1)
 			// If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
 			require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)

@@ -171,7 +279,7 @@ func TestListMultipartUploads(t *testing.T) {
 			if uploadIDMarker > uploadInfo2.UploadID {
 				uploadIDMarker = uploadInfo2.UploadID
 			}
-			listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, "", -1)
+			listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, "", -1)
 			// If key-marker is not specified, the upload-id-marker parameter is ignored.
 			require.Len(t, listUploads.Uploads, 3)
 		})

@@ -179,7 +287,7 @@ func TestListMultipartUploads(t *testing.T) {
 		t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
 			uploadIDMarker := "00000000-0000-0000-0000-000000000000"

-			listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, objName3, -1)
+			listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, objName3, -1)
 			require.Len(t, listUploads.Uploads, 1)
 			// If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
 			// provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
@@ -414,7 +522,7 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
 			r.Header.Set(api.AmzContentSha256, tc.hash)
 			hc.Handler().UploadPartHandler(w, r)
 			if tc.error {
-				assertS3Error(t, w, s3Errors.GetAPIError(s3Errors.ErrContentSHA256Mismatch))
+				assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch))

 				list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
 				require.Len(t, list.Parts, 1)
@@ -443,11 +551,11 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {

 func TestMultipartObjectLocation(t *testing.T) {
 	for _, tc := range []struct {
-		req      *http.Request
-		bucket   string
-		object   string
-		domains  []string
-		expected string
+		req        *http.Request
+		bucket     string
+		object     string
+		vhsEnabled bool
+		expected   string
 	}{
 		{
 			req: &http.Request{
@@ -492,29 +600,96 @@ func TestMultipartObjectLocation(t *testing.T) {
 			req: &http.Request{
 				Host: "mybucket.s3dev.frostfs.devenv",
 			},
-			domains:  []string{"s3dev.frostfs.devenv"},
-			bucket:   "mybucket",
-			object:   "test/1.txt",
-			expected: "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
+			bucket:     "mybucket",
+			object:     "test/1.txt",
+			vhsEnabled: true,
+			expected:   "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
 		},
 		{
 			req: &http.Request{
 				Host:   "mybucket.s3dev.frostfs.devenv",
 				Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
 			},
-			domains:  []string{"s3dev.frostfs.devenv"},
-			bucket:   "mybucket",
-			object:   "test/1.txt",
-			expected: "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
+			bucket:     "mybucket",
+			object:     "test/1.txt",
+			vhsEnabled: true,
+			expected:   "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
 		},
 	} {
 		t.Run("", func(t *testing.T) {
-			location := getObjectLocation(tc.req, tc.domains, tc.bucket, tc.object)
+			location := getObjectLocation(tc.req, tc.bucket, tc.object, tc.vhsEnabled)
 			require.Equal(t, tc.expected, location)
 		})
 	}
 }

+func TestUploadPartWithNegativeContentLength(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "bucket-to-upload-part", "object-multipart"
+	createTestBucket(hc, bktName)
+	partSize := 5 * 1024 * 1024
+
+	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
+
+	partBody := make([]byte, partSize)
+	_, err := rand.Read(partBody)
+	require.NoError(hc.t, err)
+
+	query := make(url.Values)
+	query.Set(uploadIDQuery, multipartUpload.UploadID)
+	query.Set(partNumberQuery, "1")
+
+	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
+	r.ContentLength = -1
+	hc.Handler().UploadPartHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
+
+	completeMultipartUpload(hc, bktName, objName, multipartUpload.UploadID, []string{w.Header().Get(api.ETag)})
+	res, _ := getObject(hc, bktName, objName)
+	equalDataSlices(t, partBody, res)
+
+	resp := getObjectAttributes(hc, bktName, objName, objectParts)
+	require.Len(t, resp.ObjectParts.Parts, 1)
+	require.Equal(t, partSize, resp.ObjectParts.Parts[0].Size)
+}
+
+func TestListMultipartUploadsEncoding(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName := "bucket-to-list-uploads-encoding"
+	createTestBucket(hc, bktName)
+
+	listAllMultipartUploadsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
+
+	objects := []string{"foo()/bar", "foo()/bar/xyzzy", "asdf+b"}
+	for _, objName := range objects {
+		createMultipartUpload(hc, bktName, objName, nil)
+	}
+
+	listResponse := listMultipartUploadsURL(hc, bktName, "foo(", ")", "", "", -1)
+
+	require.Len(t, listResponse.CommonPrefixes, 1)
+	require.Equal(t, "foo%28%29", listResponse.CommonPrefixes[0].Prefix)
+	require.Equal(t, "foo%28", listResponse.Prefix)
+	require.Equal(t, "%29", listResponse.Delimiter)
+	require.Equal(t, "url", listResponse.EncodingType)
+	require.Equal(t, maxObjectList, listResponse.MaxUploads)
+
+	listResponse = listMultipartUploads(hc, bktName, "", "", "", "", 1)
+	require.Empty(t, listResponse.EncodingType)
+
+	listResponse = listMultipartUploadsURL(hc, bktName, "", "", "", listResponse.NextKeyMarker, 1)
+
+	require.Len(t, listResponse.CommonPrefixes, 0)
+	require.Len(t, listResponse.Uploads, 1)
+	require.Equal(t, "foo%28%29/bar", listResponse.Uploads[0].Key)
+	require.Equal(t, "asdf%2Bb", listResponse.KeyMarker)
+	require.Equal(t, "foo%28%29/bar", listResponse.NextKeyMarker)
+	require.Equal(t, "url", listResponse.EncodingType)
+	require.Equal(t, 1, listResponse.MaxUploads)
+}
+
 func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
 	return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
 }
@@ -540,16 +715,42 @@ func uploadPartCopyBase(hc *handlerContext, bktName, objName string, encrypted b
 	return uploadPartCopyResponse
 }

-func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
-	return listMultipartUploadsBase(hc, bktName, "", "", "", "", -1)
+func listMultipartUploads(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
+	w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, "", maxUploads)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListMultipartUploadsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
 }

-func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
+func listMultipartUploadsURL(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
+	w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, urlEncodingType, maxUploads)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListMultipartUploadsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
+}
+
+func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
+	w := listMultipartUploadsBase(hc, bktName, "", "", "", "", "", -1)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListMultipartUploadsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
+}
+
+func listAllMultipartUploadsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) {
+	w := listMultipartUploadsBase(hc, bktName, "", "", "", "", encoding, -1)
+	assertS3Error(hc.t, w, err)
+}
+
+func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker, encoding string, maxUploads int) *httptest.ResponseRecorder {
 	query := make(url.Values)
 	query.Set(prefixQueryName, prefix)
 	query.Set(delimiterQueryName, delimiter)
 	query.Set(uploadIDMarkerQueryName, uploadIDMarker)
 	query.Set(keyMarkerQueryName, keyMarker)
+	query.Set(encodingTypeQueryName, encoding)
 	if maxUploads != -1 {
 		query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
 	}

@@ -557,10 +758,7 @@ func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, up
 	w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)

 	hc.Handler().ListMultipartUploadsHandler(w, r)
-	listPartsResponse := &ListMultipartUploadsResponse{}
-	readResponse(hc.t, w, http.StatusOK, listPartsResponse)
-
-	return listPartsResponse
+	return w
 }

 func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {
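
The refactor above follows one pattern for all the listing helpers: the *Base function only issues the request and returns the raw *httptest.ResponseRecorder, and thin wrappers decide whether to parse a typed response or assert an S3 error. A hedged sketch of the two call paths, using identifiers from this diff:

	// Success path: wrapper asserts 200 and decodes the XML body.
	w := listMultipartUploadsBase(hc, bktName, "", "", "", "", urlEncodingType, -1)
	assertStatus(hc.t, w, http.StatusOK)

	// Error path: the same base call, but the recorder is checked for an
	// S3 error instead (e.g. an unsupported encoding-type value).
	w = listMultipartUploadsBase(hc, bktName, "", "", "", "", "invalid", -1)
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))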
@@ -7,10 +7,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 )

-func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
-}
-
 func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
 	h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
 }
@@ -4,6 +4,7 @@ import (
 	"net/http"
 	"net/url"
 	"strconv"
+	"strings"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"

@@ -153,6 +154,10 @@ func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsC
 	res.Delimiter = queryValues.Get("delimiter")
 	res.Encode = queryValues.Get("encoding-type")

+	if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
+		return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
+	}
+
 	if queryValues.Get("max-keys") == "" {
 		res.MaxKeys = maxObjectList
 	} else if res.MaxKeys, err = strconv.Atoi(queryValues.Get("max-keys")); err != nil || res.MaxKeys < 0 {

@@ -257,6 +262,10 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
 	res.Encode = queryValues.Get("encoding-type")
 	res.VersionIDMarker = queryValues.Get("version-id-marker")

+	if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
+		return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
+	}
+
 	if res.VersionIDMarker != "" && res.KeyMarker == "" {
 		return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker)
 	}
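
Both parsers now reject any encoding-type other than url, compared case-insensitively. A behavior sketch, inferred from the checks above:

	// encoding-type=url     -> accepted
	// encoding-type=URL     -> accepted (strings.ToLower before comparing)
	// encoding-type=invalid -> ErrInvalidEncodingMethod
	// (parameter absent)    -> accepted, no encoding applied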
@@ -4,19 +4,25 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/http/httptest"
 	"net/url"
 	"sort"
 	"strconv"
+	"strings"
 	"testing"
 	"time"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest"
+	"go.uber.org/zap/zaptest/observer"
 )

 func TestParseContinuationToken(t *testing.T) {
@@ -93,8 +99,35 @@ func TestListObjectsWithOldTreeNodes(t *testing.T) {
 	checkListVersionsOldNodes(hc, listVers.Version, objInfos)
 }

+func TestListObjectsVersionsSkipLogTaggingNodesError(t *testing.T) {
+	loggerCore, observedLog := observer.New(zap.DebugLevel)
+	log := zap.New(loggerCore)
+
+	hcBase, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
+	require.NoError(t, err)
+	hc := &handlerContext{
+		handlerContextBase: hcBase,
+		t:                  t,
+	}
+
+	bktName, objName := "bucket-versioning-enabled", "versions/object"
+	bktInfo := createTestBucket(hc, bktName)
+
+	createTestObject(hc, bktInfo, objName, encryption.Params{})
+	createTestObject(hc, bktInfo, objName, encryption.Params{})
+
+	putObjectTagging(hc.t, hc, bktName, objName, map[string]string{"tag1": "val1"})
+
+	listObjectsVersions(hc, bktName, "", "", "", "", -1)
+
+	filtered := observedLog.Filter(func(entry observer.LoggedEntry) bool {
+		return strings.Contains(entry.Message, logs.ParseTreeNode)
+	})
+	require.Empty(t, filtered)
+}
+
 func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
-	nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0)
+	nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0, true)
 	require.NoError(hc.t, err)

 	for _, node := range nodes {
@@ -138,11 +171,17 @@ func checkListVersionsOldNodes(hc *handlerContext, list []ObjectVersionResponse,
 }

 func TestListObjectsContextCanceled(t *testing.T) {
-	layerCfg := layer.DefaultCachesConfigs(zaptest.NewLogger(t))
+	log := zaptest.NewLogger(t)
+	layerCfg := layer.DefaultCachesConfigs(log)
 	layerCfg.SessionList.Lifetime = time.Hour
 	layerCfg.SessionList.Size = 1

-	hc := prepareHandlerContextBase(t, layerCfg)
+	hcBase, err := prepareHandlerContextBase(layerCfg)
+	require.NoError(t, err)
+	hc := &handlerContext{
+		handlerContextBase: hcBase,
+		t:                  t,
+	}

 	bktName := "bucket-versioning-enabled"
 	bktInfo := createTestBucket(hc, bktName)
@@ -718,6 +757,16 @@ func TestListObjectVersionsEncoding(t *testing.T) {
 	require.Equal(t, 3, listResponse.MaxKeys)
 }

+func TestListingsWithInvalidEncodingType(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName := "bucket-for-listing-invalid-encoding"
+	createTestBucket(hc, bktName)
+
+	listObjectsVersionsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
+	listObjectsV2Err(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
+	listObjectsV1Err(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
+}
+
 func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) {
 	for i, v := range versions.Version {
 		require.Equal(t, names[i], v.Key)
@@ -725,10 +774,19 @@ func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, nam
 }

 func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
-	return listObjectsV2Ext(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys)
+	w := listObjectsV2Base(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListObjectsV2Response{}
+	parseTestResponse(hc.t, w, res)
+	return res
 }

-func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *ListObjectsV2Response {
+func listObjectsV2Err(hc *handlerContext, bktName, encoding string, err apierr.Error) {
+	w := listObjectsV2Base(hc, bktName, "", "", "", "", encoding, -1)
+	assertS3Error(hc.t, w, err)
+}
+
+func listObjectsV2Base(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *httptest.ResponseRecorder {
 	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
 	query.Add("fetch-owner", "true")
 	if len(startAfter) != 0 {

@@ -743,10 +801,7 @@ func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter

 	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
 	hc.Handler().ListObjectsV2Handler(w, r)
-	assertStatus(hc.t, w, http.StatusOK)
-	res := &ListObjectsV2Response{}
-	parseTestResponse(hc.t, w, res)
-	return res
+	return w
 }

 func validateListV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int,
@@ -806,28 +861,54 @@ func prepareCommonListObjectsQuery(prefix, delimiter string, maxKeys int) url.Va
 }

 func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
-	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
-	if len(marker) != 0 {
-		query.Add("marker", marker)
-	}
-
-	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
-	hc.Handler().ListObjectsV1Handler(w, r)
+	w := listObjectsV1Base(hc, bktName, prefix, delimiter, marker, "", maxKeys)
 	assertStatus(hc.t, w, http.StatusOK)
 	res := &ListObjectsV1Response{}
 	parseTestResponse(hc.t, w, res)
 	return res
 }

+func listObjectsV1Err(hc *handlerContext, bktName, encoding string, err apierr.Error) {
+	w := listObjectsV1Base(hc, bktName, "", "", "", encoding, -1)
+	assertS3Error(hc.t, w, err)
+}
+
+func listObjectsV1Base(hc *handlerContext, bktName, prefix, delimiter, marker, encoding string, maxKeys int) *httptest.ResponseRecorder {
+	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
+	if len(marker) != 0 {
+		query.Add("marker", marker)
+	}
+	if len(encoding) != 0 {
+		query.Add("encoding-type", encoding)
+	}
+
+	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
+	hc.Handler().ListObjectsV1Handler(w, r)
+	return w
+}
+
 func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
-	return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, false)
+	w := listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, "", maxKeys)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListObjectsVersionsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
 }

 func listObjectsVersionsURL(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
-	return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, true)
+	w := listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, urlEncodingType, maxKeys)
+	assertStatus(hc.t, w, http.StatusOK)
+	res := &ListObjectsVersionsResponse{}
+	parseTestResponse(hc.t, w, res)
+	return res
+}
+
-func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int, encode bool) *ListObjectsVersionsResponse {
+func listObjectsVersionsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) {
+	w := listObjectsVersionsBase(hc, bktName, "", "", "", "", encoding, -1)
+	assertS3Error(hc.t, w, err)
+}
+
+func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker, encoding string, maxKeys int) *httptest.ResponseRecorder {
 	query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
 	if len(keyMarker) != 0 {
 		query.Add("key-marker", keyMarker)
@@ -835,14 +916,11 @@ func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, key
 	if len(versionIDMarker) != 0 {
 		query.Add("version-id-marker", versionIDMarker)
 	}
-	if encode {
-		query.Add("encoding-type", "url")
+	if len(encoding) != 0 {
+		query.Add("encoding-type", encoding)
 	}

 	w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
 	hc.Handler().ListBucketObjectVersionsHandler(w, r)
-	assertStatus(hc.t, w, http.StatusOK)
-	res := &ListObjectsVersionsResponse{}
-	parseTestResponse(hc.t, w, res)
-	return res
+	return w
 }
api/handler/patch.go (new file, 195 lines)
@@ -0,0 +1,195 @@
+package handler
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	"go.uber.org/zap"
+)
+
+const maxPatchSize = 5 * 1024 * 1024 * 1024 // 5GB
+
+func (h *handler) PatchObjectHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	reqInfo := middleware.GetReqInfo(ctx)
+
+	if _, ok := r.Header[api.ContentRange]; !ok {
+		h.logAndSendError(w, "missing Content-Range", reqInfo, errors.GetAPIError(errors.ErrMissingContentRange))
+		return
+	}
+
+	if _, ok := r.Header[api.ContentLength]; !ok {
+		h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
+		return
+	}
+
+	conditional, err := parsePatchConditionalHeaders(r.Header)
+	if err != nil {
+		h.logAndSendError(w, "could not parse conditional headers", reqInfo, err)
+		return
+	}
+
+	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
+	if err != nil {
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
+		return
+	}
+
+	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	if err != nil {
+		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
+		return
+	}
+
+	srcObjPrm := &layer.HeadObjectParams{
+		Object:    reqInfo.ObjectName,
+		BktInfo:   bktInfo,
+		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
+	}
+
+	extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
+	if err != nil {
+		h.logAndSendError(w, "could not find object", reqInfo, err)
+		return
+	}
+	srcObjInfo := extendedSrcObjInfo.ObjectInfo
+
+	if err = checkPreconditions(srcObjInfo, conditional, h.cfg.MD5Enabled()); err != nil {
+		h.logAndSendError(w, "precondition failed", reqInfo, err)
+		return
+	}
+
+	srcSize, err := layer.GetObjectSize(srcObjInfo)
+	if err != nil {
+		h.logAndSendError(w, "failed to get source object size", reqInfo, err)
+		return
+	}
+
+	byteRange, err := parsePatchByteRange(r.Header.Get(api.ContentRange), srcSize)
+	if err != nil {
+		h.logAndSendError(w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
+		return
+	}
+
+	if maxPatchSize < byteRange.End-byteRange.Start+1 {
+		h.logAndSendError(w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
+		return
+	}
+
+	if uint64(r.ContentLength) != (byteRange.End - byteRange.Start + 1) {
+		h.logAndSendError(w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength))
+		return
+	}
+
+	if byteRange.Start > srcSize {
+		h.logAndSendError(w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
+		return
+	}
+
+	params := &layer.PatchObjectParams{
+		Object:            extendedSrcObjInfo,
+		BktInfo:           bktInfo,
+		NewBytes:          r.Body,
+		Range:             byteRange,
+		VersioningEnabled: settings.VersioningEnabled(),
+	}
+
+	params.CopiesNumbers, err = h.pickCopiesNumbers(nil, reqInfo.Namespace, bktInfo.LocationConstraint)
+	if err != nil {
+		h.logAndSendError(w, "invalid copies number", reqInfo, err)
+		return
+	}
+
+	extendedObjInfo, err := h.obj.PatchObject(ctx, params)
+	if err != nil {
+		if isErrObjectLocked(err) {
+			h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
+		} else {
+			h.logAndSendError(w, "could not patch object", reqInfo, err)
+		}
+		return
+	}
+
+	if settings.VersioningEnabled() {
+		w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
+	}
+
+	w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
+
+	resp := PatchObjectResult{
+		Object: PatchObject{
+			LastModified: extendedObjInfo.ObjectInfo.Created.UTC().Format(time.RFC3339),
+			ETag:         data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())),
+		},
+	}
+
+	if err = middleware.EncodeToResponse(w, resp); err != nil {
+		h.logAndSendError(w, "could not encode PatchObjectResult to response", reqInfo, err)
+		return
+	}
+}
+
+func parsePatchConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
+	var err error
+	args := &conditionalArgs{
+		IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
+	}
+
+	if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
+		return nil, err
+	}
+
+	return args, nil
+}
+
+func parsePatchByteRange(rangeStr string, objSize uint64) (*layer.RangeParams, error) {
+	const prefix = "bytes "
+
+	if rangeStr == "" {
+		return nil, fmt.Errorf("empty range")
+	}
+
+	if !strings.HasPrefix(rangeStr, prefix) {
+		return nil, fmt.Errorf("unknown unit in range header")
+	}
+
+	rangeStr, _, found := strings.Cut(strings.TrimPrefix(rangeStr, prefix), "/") // value after / is ignored
+	if !found {
+		return nil, fmt.Errorf("invalid range: %s", rangeStr)
+	}
+
+	startStr, endStr, found := strings.Cut(rangeStr, "-")
+	if !found {
+		return nil, fmt.Errorf("invalid range: %s", rangeStr)
+	}
+
+	start, err := strconv.ParseUint(startStr, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("invalid start byte: %s", startStr)
+	}
+
+	end := objSize - 1
+	if len(endStr) > 0 {
+		end, err = strconv.ParseUint(endStr, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("invalid end byte: %s", endStr)
+		}
+	}
+
+	if start > end {
+		return nil, fmt.Errorf("start byte is greater than end byte")
+	}
+
+	return &layer.RangeParams{
+		Start: start,
+		End:   end,
+	}, nil
+}
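
To make the accepted Content-Range grammar concrete, here is an illustrative mapping for parsePatchByteRange with an 18-byte source object (inferred from the parser above; the value after "/" is ignored):

	// "bytes 0-2/*"  -> {Start: 0, End: 2}
	// "bytes 5-/*"   -> {Start: 5, End: 17}  // open end: through the last byte
	// "bytes 0-2"    -> error: no "/" separator
	// "kilo 0-2/*"   -> error: unknown unit in range header
	// "bytes 7-3/*"  -> error: start byte is greater than end byte
	rng, err := parsePatchByteRange("bytes 0-2/*", 18)
	_, _ = rng, err

Note that the handler layers two further checks on top of the parser: Content-Length must equal the range length, and the range start must not exceed the current object size.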
api/handler/patch_test.go (new file, 524 lines)
@@ -0,0 +1,524 @@
+package handler
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPatch(t *testing.T) {
+	tc := prepareHandlerContext(t)
+	tc.config.md5Enabled = true
+
+	bktName, objName := "bucket-for-patch", "object-for-patch"
+	createTestBucket(tc, bktName)
+
+	content := []byte("old object content")
+	md5Hash := md5.New()
+	md5Hash.Write(content)
+	etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))
+
+	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
+	created := time.Now()
+	tc.Handler().PutObjectHandler(w, r)
+	require.Equal(t, etag, w.Header().Get(api.ETag))
+
+	patchPayload := []byte("new")
+	sha256Hash := sha256.New()
+	sha256Hash.Write(patchPayload)
+	sha256Hash.Write(content[len(patchPayload):])
+	hash := hex.EncodeToString(sha256Hash.Sum(nil))
+
+	for _, tt := range []struct {
+		name    string
+		rng     string
+		headers map[string]string
+		code    apierr.ErrorCode
+	}{
+		{
+			name: "success",
+			rng:  "bytes 0-2/*",
+			headers: map[string]string{
+				api.IfUnmodifiedSince: created.Format(http.TimeFormat),
+				api.IfMatch:           etag,
+			},
+		},
+		{
+			name: "invalid range syntax",
+			rng:  "bytes 0-2",
+			code: apierr.ErrInvalidRange,
+		},
+		{
+			name: "invalid range length",
+			rng:  "bytes 0-5/*",
+			code: apierr.ErrInvalidRangeLength,
+		},
+		{
+			name: "invalid range start",
+			rng:  "bytes 20-22/*",
+			code: apierr.ErrRangeOutOfBounds,
+		},
+		{
+			name: "range is too long",
+			rng:  "bytes 0-5368709120/*",
+			code: apierr.ErrInvalidRange,
+		},
+		{
+			name: "If-Unmodified-Since precondition are not satisfied",
+			rng:  "bytes 0-2/*",
+			headers: map[string]string{
+				api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
+			},
+			code: apierr.ErrPreconditionFailed,
+		},
+		{
+			name: "If-Match precondition are not satisfied",
+			rng:  "bytes 0-2/*",
+			headers: map[string]string{
+				api.IfMatch: "etag",
+			},
+			code: apierr.ErrPreconditionFailed,
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.code == 0 {
+				res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
+				require.Equal(t, data.Quote(hash), res.Object.ETag)
+			} else {
+				patchObjectErr(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
+			}
+		})
+	}
+}
+
+func TestPatchMultipartObject(t *testing.T) {
+	tc := prepareHandlerContextWithMinCache(t)
+	tc.config.md5Enabled = true
+
+	bktName, objName, partSize := "bucket-for-multipart-patch", "object-for-multipart-patch", 5*1024*1024
+	createTestBucket(tc, bktName)
+
+	t.Run("patch beginning of the first part", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchSize := partSize / 2
+		patchBody := make([]byte, patchSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(patchSize-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{patchBody, data1[patchSize:], data2, data3}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch middle of the first part", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchSize := partSize / 2
+		patchBody := make([]byte, patchSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/4)+"-"+strconv.Itoa(partSize*3/4-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/4], patchBody, data1[partSize*3/4:], data2, data3}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch first and second parts", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchSize := partSize / 2
+		patchBody := make([]byte, patchSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3/4)+"-"+strconv.Itoa(partSize*5/4-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize*3/4], patchBody, data2[partSize/4:], data3}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch all parts", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchSize := partSize * 2
+		patchBody := make([]byte, patchSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2-1)+"-"+strconv.Itoa(partSize/2+patchSize-2)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2-1], patchBody, data3[partSize/2-1:]}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch all parts and append bytes", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchSize := partSize * 3
+		patchBody := make([]byte, patchSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2)+"-"+strconv.Itoa(partSize/2+patchSize-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2], patchBody}, []byte("")), object)
+		require.Equal(t, partSize*7/2, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch second part", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchBody := make([]byte, partSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize)+"-"+strconv.Itoa(partSize*2-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1, patchBody, data3}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch last part, equal size", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchBody := make([]byte, partSize)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3-1)+"/*", patchBody, nil)
+		object, header := getObject(tc, bktName, objName)
+		contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
+		require.NoError(t, err)
+		equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
+		require.Equal(t, partSize*3, contentLen)
+		require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
+	})
+
+	t.Run("patch last part, increase size", func(t *testing.T) {
+		multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
+		etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
+		etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
+		etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
+		completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
+
+		patchBody := make([]byte, partSize+1)
+		_, err := rand.Read(patchBody)
+		require.NoError(t, err)
+
+		patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3)+"/*", patchBody, nil)
|
||||
object, header := getObject(tc, bktName, objName)
|
||||
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||
require.NoError(t, err)
|
||||
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
|
||||
require.Equal(t, partSize*3+1, contentLen)
|
||||
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||
})
|
||||
|
||||
t.Run("patch last part with offset and append bytes", func(t *testing.T) {
|
||||
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||
|
||||
patchBody := make([]byte, partSize)
|
||||
_, err := rand.Read(patchBody)
|
||||
require.NoError(t, err)
|
||||
|
||||
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2+3)+"-"+strconv.Itoa(partSize*3+2)+"/*", patchBody, nil)
|
||||
object, header := getObject(tc, bktName, objName)
|
||||
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||
require.NoError(t, err)
|
||||
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3[:3], patchBody}, []byte("")), object)
|
||||
require.Equal(t, partSize*3+3, contentLen)
|
||||
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||
})
|
||||
|
||||
t.Run("append bytes", func(t *testing.T) {
|
||||
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
|
||||
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
|
||||
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
|
||||
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
|
||||
|
||||
patchBody := make([]byte, partSize)
|
||||
_, err := rand.Read(patchBody)
|
||||
require.NoError(t, err)
|
||||
|
||||
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3)+"-"+strconv.Itoa(partSize*4-1)+"/*", patchBody, nil)
|
||||
object, header := getObject(tc, bktName, objName)
|
||||
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||
require.NoError(t, err)
|
||||
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3, patchBody}, []byte("")), object)
|
||||
require.Equal(t, partSize*4, contentLen)
|
||||
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
|
||||
})
|
||||
|
||||
t.Run("patch empty multipart", func(t *testing.T) {
|
||||
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
|
||||
etag, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, 0)
|
||||
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag})
|
||||
|
||||
patchBody := make([]byte, partSize)
|
||||
_, err := rand.Read(patchBody)
|
||||
require.NoError(t, err)
|
||||
|
||||
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(partSize-1)+"/*", patchBody, nil)
|
||||
object, header := getObject(tc, bktName, objName)
|
||||
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
|
||||
require.NoError(t, err)
|
||||
equalDataSlices(t, patchBody, object)
|
||||
require.Equal(t, partSize, contentLen)
|
||||
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-1"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPatchWithVersion(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)
	bktName, objName := "bucket", "obj"
	createVersionedBucket(hc, bktName)
	objHeader := putObjectContent(hc, bktName, objName, "content")

	putObjectContent(hc, bktName, objName, "some content")

	patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))

	res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
	require.False(t, res.IsTruncated)
	require.Len(t, res.Version, 3)

	for _, version := range res.Version {
		content := getObjectVersion(hc, bktName, objName, version.VersionID)
		if version.IsLatest {
			require.Equal(t, []byte("content updated"), content)
			continue
		}
		if version.VersionID == objHeader.Get(api.AmzVersionID) {
			require.Equal(t, []byte("content"), content)
			continue
		}
		require.Equal(t, []byte("some content"), content)
	}
}

func TestPatchEncryptedObject(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	patchObjectErr(t, tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, apierr.ErrInternalError)
}

func TestPatchMissingHeaders(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentRange))

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	r.Header.Set(api.ContentRange, "bytes 0-2/*")
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentLength))
}

func TestParsePatchByteRange(t *testing.T) {
	for _, tt := range []struct {
		rng      string
		size     uint64
		expected *layer.RangeParams
		err      bool
	}{
		{
			rng: "bytes 2-7/*",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng: "bytes 2-7/3",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng:  "bytes 2-/*",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng:  "bytes 2-/3",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng: "",
			err: true,
		},
		{
			rng: "2-7/*",
			err: true,
		},
		{
			rng: "bytes 7-2/*",
			err: true,
		},
		{
			rng: "bytes 2-7",
			err: true,
		},
		{
			rng: "bytes 2/*",
			err: true,
		},
		{
			rng: "bytes a-7/*",
			err: true,
		},
		{
			rng: "bytes 2-a/*",
			err: true,
		},
	} {
		t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
			rng, err := parsePatchByteRange(tt.rng, tt.size)
			if tt.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected.Start, rng.Start)
				require.Equal(t, tt.expected.End, rng.End)
			}
		})
	}
}

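For context, the table above pins down the grammar parsePatchByteRange must accept: "bytes <start>-[<end>]/<total>", where the total part must be present but is ignored, and an omitted end resolves to the last byte of the object. A minimal sketch consistent with these cases (the real implementation is not shown in this diff and may differ):

// parsePatchByteRangeSketch is a hypothetical reconstruction derived only
// from the test table above; names and error texts are illustrative.
func parsePatchByteRangeSketch(rng string, size uint64) (*layer.RangeParams, error) {
	const prefix = "bytes "
	if !strings.HasPrefix(rng, prefix) {
		return nil, fmt.Errorf("invalid range prefix: %s", rng)
	}
	rng = strings.TrimPrefix(rng, prefix)

	// The "/total" part must be present, but its value is ignored.
	idx := strings.IndexByte(rng, '/')
	if idx < 0 {
		return nil, fmt.Errorf("missing total part: %s", rng)
	}
	rng = rng[:idx]

	startStr, endStr, found := strings.Cut(rng, "-")
	if !found {
		return nil, fmt.Errorf("missing '-' separator: %s", rng)
	}

	start, err := strconv.ParseUint(startStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid start byte: %s", startStr)
	}

	end := size - 1 // "bytes 2-/*" means "from 2 through the end of the object"
	if endStr != "" {
		if end, err = strconv.ParseUint(endStr, 10, 64); err != nil {
			return nil, fmt.Errorf("invalid end byte: %s", endStr)
		}
	}
	if start > end {
		return nil, fmt.Errorf("inverted range %d-%d", start, end)
	}
	return &layer.RangeParams{Start: start, End: end}, nil
}
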
func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectErr(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code apierr.ErrorCode) {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertS3Error(t, w, apierr.GetAPIError(code))
}

func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
	query := make(url.Values)
	if len(version) > 0 {
		query.Add(api.QueryVersionID, version)
	}

	w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
	r.Header.Set(api.ContentRange, rng)
	r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
	for k, v := range headers {
		r.Header.Set(k, v)
	}

	tc.Handler().PatchObjectHandler(w, r)
	return w
}

@@ -2,11 +2,12 @@ package handler

 import (
 	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/base64"
+	"encoding/json"
 	"encoding/xml"
-	stderrors "errors"
+	"errors"
 	"fmt"
 	"io"
 	"mime/multipart"
@@ -20,7 +21,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
@@ -91,11 +92,11 @@ func (p *postPolicy) CheckField(key string, value string) error {
 	}
 	cond := p.condition(key)
 	if cond == nil {
-		return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
+		return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
 	}

 	if !cond.match(value) {
-		return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
+		return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
 	}

 	return nil
@@ -203,7 +204,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}

 	if cannedACLStatus == aclStatusYes {
-		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
+		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported))
 		return
 	}

@@ -242,22 +243,22 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		metadata[api.ContentEncoding] = encodings
 	}

-	var size uint64
-	if r.ContentLength > 0 {
-		size = uint64(r.ContentLength)
-	}
+	size := h.getPutPayloadSize(r)

 	params := &layer.PutObjectParams{
 		BktInfo:           bktInfo,
 		Object:            reqInfo.ObjectName,
 		Reader:            body,
-		Size:              size,
 		Header:            metadata,
 		Encryption:        encryptionParams,
 		ContentMD5:        r.Header.Get(api.ContentMD5),
 		ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
 	}

+	if size > 0 {
+		params.Size = &size
+	}
+
 	params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
 	if err != nil {
 		h.logAndSendError(w, "invalid copies number", reqInfo, err)
@@ -332,16 +333,16 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {

 	if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
 		return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
-			errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
+			apierr.GetAPIError(apierr.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
 	}

 	decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
 	if len(decodeContentSize) == 0 {
-		return nil, errors.GetAPIError(errors.ErrMissingContentLength)
+		return nil, apierr.GetAPIError(apierr.ErrMissingContentLength)
 	}

 	if _, err := strconv.Atoi(decodeContentSize); err != nil {
-		return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
+		return nil, fmt.Errorf("%w: parse decoded content length: %s", apierr.GetAPIError(apierr.ErrMissingContentLength), err.Error())
 	}

 	chunkReader, err := newSignV4ChunkedReader(r)
@@ -377,43 +378,43 @@ func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryptio
 	}

 	if r.TLS == nil {
-		return enc, errors.GetAPIError(errors.ErrInsecureSSECustomerRequest)
+		return enc, apierr.GetAPIError(apierr.ErrInsecureSSECustomerRequest)
 	}

 	if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 {
-		return enc, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm)
+		return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerAlgorithm)
 	}
 	if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 {
-		return enc, errors.GetAPIError(errors.ErrMissingSSECustomerKey)
+		return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerKey)
 	}

 	if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
-		return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
+		return enc, apierr.GetAPIError(apierr.ErrInvalidEncryptionAlgorithm)
 	}

 	key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
 	if err != nil {
 		if isCopySource {
-			return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
+			return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters)
 		}
-		return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
+		return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey)
 	}

 	if len(key) != layer.AESKeySize {
 		if isCopySource {
-			return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
+			return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters)
 		}
-		return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
+		return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey)
 	}

 	keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5)
 	if err != nil {
-		return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
+		return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch)
 	}

 	md5Sum := md5.Sum(key)
 	if !bytes.Equal(md5Sum[:], keyMD5) {
-		return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
+		return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch)
 	}

 	params, err := encryption.NewParams(key)
@@ -443,7 +444,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 		tags := new(data.Tagging)
 		if err = h.cfg.NewXMLDecoder(buffer).Decode(tags); err != nil {
 			h.logAndSendError(w, "could not decode tag set", reqInfo,
-				fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
+				fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
 			return
 		}
 		tagSet, err = h.readTagSet(tags)
@@ -466,7 +467,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 	}

 	if acl := auth.MultipartFormValue(r, "acl"); acl != "" && acl != basicACLPrivate {
-		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
+		h.logAndSendError(w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported))
 		return
 	}

@@ -507,12 +508,12 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 	}

 	if reqInfo.ObjectName == "" {
-		h.logAndSendError(w, "missing object name", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
+		h.logAndSendError(w, "missing object name", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument))
 		return
 	}

 	if !policy.CheckContentLength(size) {
-		h.logAndSendError(w, "invalid content-length", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
+		h.logAndSendError(w, "invalid content-length", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument))
 		return
 	}

@@ -520,7 +521,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
 		BktInfo: bktInfo,
 		Object:  reqInfo.ObjectName,
 		Reader:  contentReader,
-		Size:    size,
+		Size:    &size,
 		Header:  metadata,
 	}

@@ -594,13 +595,13 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
 			return nil, fmt.Errorf("could not unmarshal policy: %w", err)
 		}
 		if policy.Expiration.Before(time.Now()) {
-			return nil, fmt.Errorf("policy is expired: %w", errors.GetAPIError(errors.ErrInvalidArgument))
+			return nil, fmt.Errorf("policy is expired: %w", apierr.GetAPIError(apierr.ErrInvalidArgument))
 		}
 		policy.empty = false
 	}

 	if r.MultipartForm == nil {
-		return nil, stderrors.New("empty multipart form")
+		return nil, errors.New("empty multipart form")
 	}

 	for key, v := range r.MultipartForm.Value {
@@ -631,7 +632,7 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
 	for _, cond := range policy.Conditions {
 		if cond.Key == "bucket" {
 			if !cond.match(reqInfo.BucketName) {
-				return nil, errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
+				return nil, apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat)
 			}
 		}
 	}
@@ -673,10 +674,10 @@ func parseTaggingHeader(header http.Header) (map[string]string, error) {
 	if tagging := header.Get(api.AmzTagging); len(tagging) > 0 {
 		queries, err := url.ParseQuery(tagging)
 		if err != nil {
-			return nil, errors.GetAPIError(errors.ErrInvalidArgument)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidArgument)
 		}
 		if len(queries) > maxTags {
-			return nil, errors.GetAPIError(errors.ErrInvalidTagsSizeExceed)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidTagsSizeExceed)
 		}
 		tagSet = make(map[string]string, len(queries))
 		for k, v := range queries {
@@ -726,7 +727,7 @@ func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, box
 	}

 	if p.SessionContainerCreation == nil {
-		return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
+		return nil, nil, fmt.Errorf("%w: couldn't find session token for put", apierr.GetAPIError(apierr.ErrAccessDenied))
 	}

 	if err := checkBucketName(reqInfo.BucketName); err != nil {
@@ -783,7 +784,8 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque

 	chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID)
 	if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil {
-		h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err)
+		cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains)
+		h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err, zap.NamedError("cleanup_error", cleanErr))
 		return
 	}

@@ -804,8 +806,9 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
 		return h.obj.PutBucketSettings(ctx, sp)
 	}, h.putBucketSettingsRetryer())
 	if err != nil {
+		cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains)
 		h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
-			zap.String("container_id", bktInfo.CID.EncodeToString()))
+			zap.String("container_id", bktInfo.CID.EncodeToString()), zap.NamedError("cleanup_error", cleanErr))
 		return
 	}

@@ -815,6 +818,28 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
 	}
 }

+func (h *handler) cleanupBucketCreation(ctx context.Context, reqInfo *middleware.ReqInfo, bktInfo *data.BucketInfo, boxData *accessbox.Box, chains []*chain.Chain) error {
+	prm := &layer.DeleteBucketParams{
+		BktInfo:      bktInfo,
+		SessionToken: boxData.Gate.SessionTokenForDelete(),
+	}
+
+	if err := h.obj.DeleteContainer(ctx, prm); err != nil {
+		return err
+	}
+
+	chainIDs := make([]chain.ID, len(chains))
+	for i, c := range chains {
+		chainIDs[i] = c.ID
+	}
+
+	if err := h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
+		return fmt.Errorf("delete bucket acl policy: %w", err)
+	}
+
+	return nil
+}
+
 func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 {
 	return retry.NewStandard(func(options *retry.StandardOptions) {
 		options.MaxAttempts = h.cfg.RetryMaxAttempts()
@@ -828,7 +853,7 @@ func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 {
 	}

 	options.Retryables = []retry.IsErrorRetryable{retry.IsErrorRetryableFunc(func(err error) aws.Ternary {
-		if stderrors.Is(err, tree.ErrNodeAccessDenied) {
+		if errors.Is(err, tree.ErrNodeAccessDenied) {
 			return aws.TrueTernary
 		}
 		return aws.FalseTernary
@@ -957,7 +982,7 @@ func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, lo
 		return nil
 	}

-	return errors.GetAPIError(errors.ErrInvalidLocationConstraint)
+	return apierr.GetAPIError(apierr.ErrInvalidLocationConstraint)
 }

 func isLockEnabled(log *zap.Logger, header http.Header) bool {
@@ -976,28 +1001,22 @@ func isLockEnabled(log *zap.Logger, header http.Header) bool {

 func checkBucketName(bucketName string) error {
 	if len(bucketName) < 3 || len(bucketName) > 63 {
-		return errors.GetAPIError(errors.ErrInvalidBucketName)
+		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
 	}

 	if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") {
-		return errors.GetAPIError(errors.ErrInvalidBucketName)
+		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
 	}
 	if net.ParseIP(bucketName) != nil {
-		return errors.GetAPIError(errors.ErrInvalidBucketName)
+		return apierr.GetAPIError(apierr.ErrInvalidBucketName)
 	}

-	labels := strings.Split(bucketName, ".")
-	for _, label := range labels {
-		if len(label) == 0 {
-			return errors.GetAPIError(errors.ErrInvalidBucketName)
-		}
-		for i, r := range label {
-			if !isAlphaNum(r) && r != '-' {
-				return errors.GetAPIError(errors.ErrInvalidBucketName)
-			}
-			if (i == 0 || i == len(label)-1) && r == '-' {
-				return errors.GetAPIError(errors.ErrInvalidBucketName)
-			}
+	for i, r := range bucketName {
+		if r == '.' || (!isAlphaNum(r) && r != '-') {
+			return apierr.GetAPIError(apierr.ErrInvalidBucketName)
+		}
+		if (i == 0 || i == len(bucketName)-1) && r == '-' {
+			return apierr.GetAPIError(apierr.ErrInvalidBucketName)
 		}
 	}

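The rewritten loop rejects dots outright, which lines up with the virtual-hosted-style addressing work noted in the changelog: presumably a dot in a bucket name would break "<bucket>.<domain>" host matching. The isAlphaNum helper is not part of this hunk; a plausible definition, assuming S3's lowercase-letter-and-digit rule, would be:

// Sketch only: the actual helper lives elsewhere in this package.
func isAlphaNum(r rune) bool {
	return ('a' <= r && r <= 'z') || ('0' <= r && r <= '9')
}
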
@@ -1015,7 +1034,7 @@ func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams,

 	params := new(createBucketParams)
 	if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
-		return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error())
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())
 	}
 	return params, nil
 }

@@ -7,6 +7,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -20,7 +21,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
 	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
@@ -29,6 +30,11 @@ import (
 	"github.com/stretchr/testify/require"
 )

+const (
+	awsChunkedRequestExampleDecodedContentLength = 66560
+	awsChunkedRequestExampleContentLength        = 66824
+)
+
 func TestCheckBucketName(t *testing.T) {
 	for _, tc := range []struct {
 		name string
@@ -36,10 +42,10 @@ func TestCheckBucketName(t *testing.T) {
 	}{
 		{name: "bucket"},
 		{name: "2bucket"},
-		{name: "buc.ket"},
 		{name: "buc-ket"},
 		{name: "abc"},
 		{name: "63aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
+		{name: "buc.ket", err: true},
 		{name: "buc.-ket", err: true},
 		{name: "bucket.", err: true},
 		{name: ".bucket", err: true},
@@ -199,7 +205,7 @@ func TestPostObject(t *testing.T) {
 		t.Run(tc.key+";"+tc.filename, func(t *testing.T) {
 			w := postObjectBase(hc, ns, bktName, tc.key, tc.filename, tc.content)
 			if tc.err {
-				assertS3Error(hc.t, w, s3errors.GetAPIError(s3errors.ErrInternalError))
+				assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInternalError))
 				return
 			}
 			assertStatus(hc.t, w, http.StatusNoContent)
@@ -245,6 +251,10 @@ func TestPutObjectWithNegativeContentLength(t *testing.T) {
 	tc.Handler().HeadObjectHandler(w, r)
 	assertStatus(t, w, http.StatusOK)
 	require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
+
+	result := listVersions(t, tc, bktName)
+	require.Len(t, result.Version, 1)
+	require.EqualValues(t, len(content), result.Version[0].Size)
 }

 func TestPutObjectWithStreamBodyError(t *testing.T) {
@@ -258,7 +268,7 @@ func TestPutObjectWithStreamBodyError(t *testing.T) {
 	r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
 	r.Header.Set(api.ContentEncoding, api.AwsChunked)
 	tc.Handler().PutObjectHandler(w, r)
-	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
+	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentLength))

 	checkNotFound(t, tc, bktName, objName, emptyVersion)
 }
@@ -274,7 +284,7 @@ func TestPutObjectWithInvalidContentMD5(t *testing.T) {
 	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
 	r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
 	tc.Handler().PutObjectHandler(w, r)
-	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidDigest))
+	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))

 	checkNotFound(t, tc, bktName, objName, emptyVersion)
 }
@@ -337,7 +347,7 @@ func TestPutObjectCheckContentSHA256(t *testing.T) {
 			hc.Handler().PutObjectHandler(w, r)

 			if tc.error {
-				assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch))
+				assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch))

 				w, r := prepareTestRequest(hc, bktName, objName, nil)
 				hc.Handler().GetObjectHandler(w, r)
@@ -361,12 +371,37 @@ func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
 	hc.Handler().PutObjectHandler(w, req)
 	assertStatus(t, w, http.StatusOK)

-	data := getObjectRange(t, hc, bktName, objName, 0, 66824)
+	w, req = prepareTestRequest(hc, bktName, objName, nil)
+	hc.Handler().HeadObjectHandler(w, req)
+	assertStatus(t, w, http.StatusOK)
+	require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
+
+	data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
 	for i := range chunk {
 		require.Equal(t, chunk[i], data[i])
 	}
 }

+func TestPutObjectWithStreamEmptyBodyAWSExample(t *testing.T) {
+	hc := prepareHandlerContext(t)
+
+	bktName, objName := "dkirillov", "tmp"
+	createTestBucket(hc, bktName)
+
+	w, req := getEmptyChunkedRequest(hc.context, t, bktName, objName)
+	hc.Handler().PutObjectHandler(w, req)
+	assertStatus(t, w, http.StatusOK)
+
+	w, req = prepareTestRequest(hc, bktName, objName, nil)
+	hc.Handler().HeadObjectHandler(w, req)
+	assertStatus(t, w, http.StatusOK)
+	require.Equal(t, "0", w.Header().Get(api.ContentLength))
+
+	res := listObjectsV1(hc, bktName, "", "", "", -1)
+	require.Len(t, res.Contents, 1)
+	require.Empty(t, res.Contents[0].Size)
+}
+
 func TestPutChunkedTestContentEncoding(t *testing.T) {
 	hc := prepareHandlerContext(t)

@@ -385,7 +420,7 @@ func TestPutChunkedTestContentEncoding(t *testing.T) {
 	w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
 	req.Header.Set(api.ContentEncoding, "gzip")
 	hc.Handler().PutObjectHandler(w, req)
-	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))
+	assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))

 	hc.config.bypassContentEncodingInChunks = true
 	w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
@@ -397,6 +432,8 @@ func TestPutChunkedTestContentEncoding(t *testing.T) {
 	require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
 }

+// getChunkedRequest implements request example from
+// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
 func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
 	chunk := make([]byte, 65*1024)
 	for i := range chunk {
@@ -424,9 +461,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
 	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
 	require.NoError(t, err)
 	req.Header.Set("content-encoding", "aws-chunked")
-	req.Header.Set("content-length", "66824")
+	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
 	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
-	req.Header.Set("x-amz-decoded-content-length", "66560")
+	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
 	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

 	signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
@@ -457,15 +494,72 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
 	return w, req, chunk
 }

+func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request) {
+	AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
+	AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"
+
+	//awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
+	//signer := v4.NewSigner(awsCreds)
+
+	reqBody := bytes.NewBufferString("0;chunk-signature=311a7142c8f3a07972c3aca65c36484b513a8fee48ab7178c7225388f2ae9894\r\n\r\n")
+
+	req, err := http.NewRequest("PUT", "http://localhost:8084/"+bktName+"/"+objName, reqBody)
+	require.NoError(t, err)
+	req.Header.Set("Amz-Sdk-Invocation-Id", "8a8cd4be-aef8-8034-f08d-a6144ade41f9")
+	req.Header.Set("Amz-Sdk-Request", "attempt=1; max=2")
+	req.Header.Set(api.Authorization, "AWS4-HMAC-SHA256 Credential=48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh/20241003/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-encoding;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length, Signature=4b530ab4af2381f214941af591266b209968264a2c94337fa1efc048c7dff352")
+	req.Header.Set(api.ContentEncoding, "aws-chunked")
+	req.Header.Set(api.ContentLength, "86")
+	req.Header.Set(api.ContentType, "text/plain; charset=UTF-8")
+	req.Header.Set(api.AmzDate, "20241003T100055Z")
+	req.Header.Set(api.AmzContentSha256, "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
+	req.Header.Set(api.AmzDecodedContentLength, "0")
+
+	signTime, err := time.Parse("20060102T150405Z", "20241003T100055Z")
+	require.NoError(t, err)
+
+	w := httptest.NewRecorder()
+	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
+	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
+	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
+		ClientTime: signTime,
+		AuthHeaders: &middleware.AuthHeader{
+			AccessKeyID: AWSAccessKeyID,
+			SignatureV4: "4b530ab4af2381f214941af591266b209968264a2c94337fa1efc048c7dff352",
+			Region:      "us-east-1",
+		},
+		AccessBox: &accessbox.Box{
+			Gate: &accessbox.GateData{
+				SecretKey: AWSSecretAccessKey,
+			},
+		},
+	}))
+
+	return w, req
+}
+
 func TestCreateBucket(t *testing.T) {
 	hc := prepareHandlerContext(t)
 	bktName := "bkt-name"

 	info := createBucket(hc, bktName)
-	createBucketAssertS3Error(hc, bktName, info.Box, s3errors.ErrBucketAlreadyOwnedByYou)
+	createBucketAssertS3Error(hc, bktName, info.Box, apierr.ErrBucketAlreadyOwnedByYou)

 	box2, _ := createAccessBox(t)
-	createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
+	createBucketAssertS3Error(hc, bktName, box2, apierr.ErrBucketAlreadyExists)
 }

+func TestCreateBucketWithoutPermissions(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName := "bkt-name"
+
+	hc.h.ape.(*apeMock).err = errors.New("no permissions")
+
+	box, _ := createAccessBox(t)
+	createBucketAssertS3Error(hc, bktName, box, apierr.ErrInternalError)
+
+	_, err := hc.tp.ContainerID(bktName)
+	require.Errorf(t, err, "container exists after failed creation, but shouldn't")
+}
+
 func TestCreateNamespacedBucket(t *testing.T) {
@@ -195,6 +195,15 @@ type PostResponse struct {
 	ETag string `xml:"Etag"`
 }

+type PatchObjectResult struct {
+	Object PatchObject `xml:"Object"`
+}
+
+type PatchObject struct {
+	LastModified string `xml:"LastModified"`
+	ETag         string `xml:"ETag"`
+}
+
 // MarshalXML -- StringMap marshals into XML.
 func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 	tokens := []xml.Token{start}

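These types back the PatchObject response body. A standalone illustration of the XML shape they produce (field values are invented):

package main

import (
	"encoding/xml"
	"fmt"
)

// Local copies of the response types above, so the demo compiles on its own.
type PatchObject struct {
	LastModified string `xml:"LastModified"`
	ETag         string `xml:"ETag"`
}

type PatchObjectResult struct {
	Object PatchObject `xml:"Object"`
}

func main() {
	res := PatchObjectResult{Object: PatchObject{
		LastModified: "2024-10-03T10:00:55Z",
		ETag:         `"5f363e0e58a95f06cbe9bbc662c5dfb6-3"`,
	}}
	out, _ := xml.MarshalIndent(res, "", "  ")
	fmt.Println(string(out))
	// Prints (note that encoding/xml escapes the quotes as &#34;):
	// <PatchObjectResult>
	//   <Object>
	//     <LastModified>2024-10-03T10:00:55Z</LastModified>
	//     <ETag>&#34;5f363e0e58a95f06cbe9bbc662c5dfb6-3&#34;</ETag>
	//   </Object>
	// </PatchObjectResult>
}
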
@@ -6,7 +6,7 @@ import (
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"github.com/stretchr/testify/require"
 )
@@ -98,7 +98,7 @@ func TestPutObjectTaggingCheckUniqueness(t *testing.T) {
 			middleware.GetReqInfo(r.Context()).Tagging = tc.body
 			hc.Handler().PutObjectTaggingHandler(w, r)
 			if tc.error {
-				assertS3Error(t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidTagKeyUniqueness))
+				assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidTagKeyUniqueness))
 				return
 			}
 			assertStatus(t, w, http.StatusOK)

@@ -11,10 +11,6 @@ func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Requ
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

-func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
-}
-
 func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }
@@ -51,10 +47,6 @@ func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request)
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

-func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
-}
-
 func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
 	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

@@ -10,10 +10,9 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
-	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"go.opentelemetry.io/otel/trace"
@@ -30,7 +29,7 @@ func (h *handler) reqLogger(ctx context.Context) *zap.Logger {

 func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
 	err = handleDeleteMarker(w, err)
-	if code, wrErr := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err)); wrErr != nil {
+	if code, wrErr := middleware.WriteErrorResponse(w, reqInfo, apierr.TransformToS3Error(err)); wrErr != nil {
 		additional = append(additional, zap.NamedError("write_response_error", wrErr))
 	} else {
 		additional = append(additional, zap.Int("status", code))
@@ -57,25 +56,7 @@ func handleDeleteMarker(w http.ResponseWriter, err error) error {
 	}

 	w.Header().Set(api.AmzDeleteMarker, "true")
-	return fmt.Errorf("%w: %s", s3errors.GetAPIError(target.ErrorCode), err)
-}
-
-func transformToS3Error(err error) error {
-	err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
-	if _, ok := err.(s3errors.Error); ok {
-		return err
-	}
-
-	if errors.Is(err, layer.ErrAccessDenied) ||
-		errors.Is(err, layer.ErrNodeAccessDenied) {
-		return s3errors.GetAPIError(s3errors.ErrAccessDenied)
-	}
-
-	if errors.Is(err, layer.ErrGatewayTimeout) {
-		return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
-	}
-
-	return s3errors.GetAPIError(s3errors.ErrInternalError)
+	return fmt.Errorf("%w: %s", apierr.GetAPIError(target.ErrorCode), err)
 }

 func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
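The handler now delegates to apierr.TransformToS3Error, so this mapping presumably moved into the api/errors package. A hypothetical reconstruction based on the local helper deleted just above (the layer sentinels shown are what the old code matched; the relocated function may test them differently, not least to avoid an import cycle between the errors and layer packages):

// Sketch of the relocated mapping, per the deleted transformToS3Error.
func TransformToS3Error(err error) error {
	err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
	if _, ok := err.(Error); ok {
		return err // already an S3 API error
	}

	if errors.Is(err, layer.ErrAccessDenied) ||
		errors.Is(err, layer.ErrNodeAccessDenied) {
		return GetAPIError(ErrAccessDenied)
	}

	if errors.Is(err, layer.ErrGatewayTimeout) {
		return GetAPIError(ErrGatewayTimeout)
	}

	return GetAPIError(ErrInternalError)
}
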
@@ -106,6 +87,19 @@ func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header
 	return bktInfo, checkOwner(bktInfo, expected)
 }

+func (h *handler) getPutPayloadSize(r *http.Request) uint64 {
+	decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
+	decodedSize, err := strconv.Atoi(decodeContentSize)
+	if err != nil {
+		if r.ContentLength >= 0 {
+			return uint64(r.ContentLength)
+		}
+		return 0
+	}
+
+	return uint64(decodedSize)
+}
+
 func parseRange(s string) (*layer.RangeParams, error) {
 	if s == "" {
 		return nil, nil
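In plain terms: for aws-chunked uploads the wire Content-Length includes chunk framing, so the object size must come from x-amz-decoded-content-length; only when that header is absent or unparsable does the plain Content-Length apply. A small self-contained illustration of that precedence (header values follow the AWS streaming example used in the tests above):

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// putPayloadSize mirrors the handler method above, for demonstration only.
func putPayloadSize(r *http.Request) uint64 {
	decoded, err := strconv.Atoi(r.Header.Get("X-Amz-Decoded-Content-Length"))
	if err != nil {
		if r.ContentLength >= 0 {
			return uint64(r.ContentLength)
		}
		return 0
	}
	return uint64(decoded)
}

func main() {
	r, _ := http.NewRequest(http.MethodPut, "http://example/bkt/obj", nil)
	r.ContentLength = 66824 // framed aws-chunked body
	r.Header.Set("X-Amz-Decoded-Content-Length", "66560")
	fmt.Println(putPayloadSize(r)) // 66560: the decoded length wins
}
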
@@ -114,26 +108,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
 	prefix := "bytes="

 	if !strings.HasPrefix(s, prefix) {
-		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
 	}

 	s = strings.TrimPrefix(s, prefix)

 	valuesStr := strings.Split(s, "-")
 	if len(valuesStr) != 2 {
-		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
 	}

 	values := make([]uint64, 0, len(valuesStr))
 	for _, v := range valuesStr {
 		num, err := strconv.ParseUint(v, 10, 64)
 		if err != nil {
-			return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
 		}
 		values = append(values, num)
 	}
 	if values[0] > values[1] {
-		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
+		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
 	}

 	return &layer.RangeParams{

@@ -1,64 +1 @@
 package handler
-
-import (
-	"errors"
-	"fmt"
-	"testing"
-
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
-	"github.com/stretchr/testify/require"
-)
-
-func TestTransformS3Errors(t *testing.T) {
-	for _, tc := range []struct {
-		name     string
-		err      error
-		expected s3errors.ErrorCode
-	}{
-		{
-			name:     "simple std error to internal error",
-			err:      errors.New("some error"),
-			expected: s3errors.ErrInternalError,
-		},
-		{
-			name:     "layer access denied error to s3 access denied error",
-			err:      layer.ErrAccessDenied,
-			expected: s3errors.ErrAccessDenied,
-		},
-		{
-			name:     "wrapped layer access denied error to s3 access denied error",
-			err:      fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
-			expected: s3errors.ErrAccessDenied,
-		},
-		{
-			name:     "layer node access denied error to s3 access denied error",
-			err:      layer.ErrNodeAccessDenied,
-			expected: s3errors.ErrAccessDenied,
-		},
-		{
-			name:     "layer gateway timeout error to s3 gateway timeout error",
-			err:      layer.ErrGatewayTimeout,
-			expected: s3errors.ErrGatewayTimeout,
-		},
-		{
-			name:     "s3 error to s3 error",
-			err:      s3errors.GetAPIError(s3errors.ErrInvalidPart),
-			expected: s3errors.ErrInvalidPart,
-		},
-		{
-			name:     "wrapped s3 error to s3 error",
-			err:      fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
-			expected: s3errors.ErrInvalidPart,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			err := transformToS3Error(tc.err)
-			s3err, ok := err.(s3errors.Error)
-			require.True(t, ok, "error must be s3 error")
-			require.Equalf(t, tc.expected, s3err.ErrCode,
-				"expected: '%s', got: '%s'",
-				s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
-		})
-	}
-}

@@ -5,6 +5,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"go.uber.org/zap"
@@ -19,6 +20,7 @@ type Cache struct {
 	bucketCache      *cache.BucketCache
 	systemCache      *cache.SystemCache
 	accessCache      *cache.AccessControlCache
+	networkInfoCache *cache.NetworkInfoCache
 }

 // CachesConfig contains params for caches.
@@ -31,6 +33,7 @@ type CachesConfig struct {
 	Buckets       *cache.Config
 	System        *cache.Config
 	AccessControl *cache.Config
+	NetworkInfo   *cache.NetworkInfoCacheConfig
 }

 // DefaultCachesConfigs returns filled configs.
@@ -44,6 +47,7 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
 		Buckets:       cache.DefaultBucketConfig(logger),
 		System:        cache.DefaultSystemConfig(logger),
 		AccessControl: cache.DefaultAccessControlConfig(logger),
+		NetworkInfo:   cache.DefaultNetworkInfoConfig(logger),
 	}
 }

@@ -57,6 +61,7 @@ func NewCache(cfg *CachesConfig) *Cache {
 		bucketCache:      cache.NewBucketCache(cfg.Buckets),
 		systemCache:      cache.NewSystemCache(cfg.System),
 		accessCache:      cache.NewAccessControlCache(cfg.AccessControl),
+		networkInfoCache: cache.NewNetworkInfoCache(cfg.NetworkInfo),
 	}
 }

@@ -257,3 +262,39 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf
 func (c *Cache) DeleteCORS(bktInfo *data.BucketInfo) {
 	c.systemCache.Delete(bktInfo.CORSObjectName())
 }
+
+func (c *Cache) GetLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo) *data.LifecycleConfiguration {
+	key := bkt.LifecycleConfigurationObjectName()
+
+	if !c.accessCache.Get(owner, key) {
+		return nil
+	}
+
+	return c.systemCache.GetLifecycleConfiguration(key)
+}
+
+func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, cfg *data.LifecycleConfiguration) {
+	key := bkt.LifecycleConfigurationObjectName()
+
+	if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil {
+		c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err))
+	}
+
+	if err := c.accessCache.Put(owner, key); err != nil {
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
+	}
+}
+
+func (c *Cache) DeleteLifecycleConfiguration(bktInfo *data.BucketInfo) {
+	c.systemCache.Delete(bktInfo.LifecycleConfigurationObjectName())
+}
+
+func (c *Cache) GetNetworkInfo() *netmap.NetworkInfo {
+	return c.networkInfoCache.Get()
+}
+
+func (c *Cache) PutNetworkInfo(info netmap.NetworkInfo) {
+	if err := c.networkInfoCache.Put(info); err != nil {
+		c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err))
+	}
+}

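Taken together, the lifecycle helpers form an owner-scoped read-through pair: the system cache stores the configuration itself, while the access-control cache records that the given owner may see it. A sketch of the intended lookup flow (loadLifecycleFromStorage is a hypothetical stand-in for the real backing read):

// Sketch of a read-through lookup over the new lifecycle cache.
func lifecycleConfig(c *Cache, owner user.ID, bkt *data.BucketInfo) (*data.LifecycleConfiguration, error) {
	if cfg := c.GetLifecycleConfiguration(owner, bkt); cfg != nil {
		return cfg, nil // both caches hit: entry is valid for this owner
	}

	cfg, err := loadLifecycleFromStorage(bkt)
	if err != nil {
		return nil, err
	}

	c.PutLifecycleConfiguration(owner, bkt, cfg) // populate both caches
	return cfg, nil
}

// loadLifecycleFromStorage is a placeholder for the real object read.
func loadLifecycleFromStorage(_ *data.BucketInfo) (*data.LifecycleConfiguration, error) {
	return &data.LifecycleConfiguration{}, nil
}
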
@@ -6,7 +6,8 @@ import (
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 )

 func (n *Layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *data.ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, data.LockInfo, error) {
@@ -29,8 +30,8 @@ func (n *Layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *data.Ob

 	tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, data.LockInfo{}, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return nil, data.LockInfo{}, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return nil, data.LockInfo{}, err
 	}

@@ -7,7 +7,8 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
@@ -20,7 +21,7 @@ const (
 	AttributeLockEnabled = "LockEnabled"
 )

-func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.BucketInfo, error) {
+func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*data.BucketInfo, error) {
 	var (
 		err error
 		res *container.Container
@@ -37,7 +38,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
 	res, err = n.frostFS.Container(ctx, prm)
 	if err != nil {
 		if client.IsErrContainerNotFound(err) {
-			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
+			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchBucket), err.Error())
 		}
 		return nil, fmt.Errorf("get frostfs container: %w", err)
 	}
@@ -52,6 +53,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
 	info.Created = container.CreatedAt(cnr)
 	info.LocationConstraint = cnr.Attribute(attributeLocationConstraint)
 	info.HomomorphicHashDisabled = container.IsHomomorphicHashingDisabled(cnr)
+	info.PlacementPolicy = cnr.PlacementPolicy()

 	attrLockEnabled := cnr.Attribute(AttributeLockEnabled)
 	if len(attrLockEnabled) > 0 {
@@ -64,7 +66,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
 		}
 	}

-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)
 	if zone != info.Zone {
 		return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, prm.ContainerID)
 	}
@@ -77,7 +79,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
 func (n *Layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 	stoken := n.SessionTokenForRead(ctx)

-	prm := PrmUserContainers{
+	prm := frostfs.PrmUserContainers{
 		UserID:       n.BearerOwner(ctx),
 		SessionToken: stoken,
 	}
@@ -90,7 +92,7 @@ func (n *Layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {

 	list := make([]*data.BucketInfo, 0, len(res))
 	for i := range res {
-		getPrm := PrmContainer{
+		getPrm := frostfs.PrmContainer{
 			ContainerID:  res[i],
 			SessionToken: stoken,
 		}
@@ -111,7 +113,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
 	}

-	zone, _ := n.features.FormContainerZone(p.Namespace)
+	zone := n.features.FormContainerZone(p.Namespace)

 	bktInfo := &data.BucketInfo{
 		Name: p.Name,
@@ -132,7 +134,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		})
 	}

-	res, err := n.frostFS.CreateContainer(ctx, PrmContainerCreate{
+	res, err := n.frostFS.CreateContainer(ctx, frostfs.PrmContainerCreate{
 		Creator: bktInfo.Owner,
 		Policy:  p.Policy,
 		Name:    p.Name,
@@ -148,6 +150,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da

 	bktInfo.CID = res.ContainerID
 	bktInfo.HomomorphicHashDisabled = res.HomomorphicHashDisabled
+	bktInfo.PlacementPolicy = p.Policy

 	n.cache.PutBucket(bktInfo)

@@ -3,12 +3,14 @@ package layer
 import (
 	"bytes"
 	"context"
-	errorsStd "errors"
+	"errors"
 	"fmt"
 	"io"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -31,14 +33,14 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 	}

 	if cors.CORSRules == nil {
-		return errors.GetAPIError(errors.ErrMalformedXML)
+		return apierr.GetAPIError(apierr.ErrMalformedXML)
 	}

 	if err := checkCORS(cors); err != nil {
 		return err
 	}

-	prm := PrmObjectCreate{
+	prm := frostfs.PrmObjectCreate{
 		Payload:      &buf,
 		Filepath:     p.BktInfo.CORSObjectName(),
 		CreationTime: TimeNow(ctx),

@@ -55,13 +57,13 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

 	prm.Container = corsBkt.CID

-	_, objID, _, _, err := n.objectPutAndHash(ctx, prm, corsBkt)
+	createdObj, err := n.objectPutAndHash(ctx, prm, corsBkt)
 	if err != nil {
 		return fmt.Errorf("put cors object: %w", err)
 	}

-	objsToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, newAddress(corsBkt.CID, objID))
-	objToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
+	objsToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, newAddress(corsBkt.CID, createdObj.ID))
+	objToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
 	if err != nil && !objToDeleteNotFound {
 		return err
 	}

@@ -79,7 +81,7 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

 // deleteCORSObject removes object and logs in case of error.
 func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
-	var prmAuth PrmAuth
+	var prmAuth frostfs.PrmAuth
 	corsBkt := bktInfo
 	if !addr.Container().Equals(bktInfo.CID) && !addr.Container().Equals(cid.ID{}) {
 		corsBkt = &data.BucketInfo{CID: addr.Container()}

@@ -104,7 +106,7 @@ func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*d

 func (n *Layer) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error {
 	objs, err := n.treeService.DeleteBucketCORS(ctx, bktInfo)
-	objNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
+	objNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
 	if err != nil && !objNotFound {
 		return err
 	}

@@ -124,12 +126,12 @@ func checkCORS(cors *data.CORSConfiguration) error {
 	for _, r := range cors.CORSRules {
 		for _, m := range r.AllowedMethods {
 			if _, ok := supportedMethods[m]; !ok {
-				return errors.GetAPIErrorWithError(errors.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
+				return apierr.GetAPIErrorWithError(apierr.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
 			}
 		}
 		for _, h := range r.ExposeHeaders {
 			if h == wildcard {
-				return errors.GetAPIError(errors.ErrCORSWildcardExposeHeaders)
+				return apierr.GetAPIError(apierr.ErrCORSWildcardExposeHeaders)
 			}
 		}
 	}

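The checkCORS loop above is easiest to see on a failing input. A minimal sketch, assuming the rule type is data.CORSRule with the field names used in that loop; the helper function itself is illustrative only:

package layer

import "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"

// exampleInvalidCORS builds a configuration that checkCORS rejects:
// PROPFIND is not in supportedMethods, and "*" may not appear in ExposeHeaders.
func exampleInvalidCORS() error {
	cfg := &data.CORSConfiguration{
		CORSRules: []data.CORSRule{{
			AllowedMethods: []string{"PROPFIND"}, // -> ErrCORSUnsupportedMethod
			ExposeHeaders:  []string{"*"},        // -> ErrCORSWildcardExposeHeaders (once methods are valid)
		}},
	}
	return checkCORS(cfg) // reports the unsupported method first
}
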
@@ -1,4 +1,4 @@
-package layer
+package frostfs

 import (
 	"context"

@@ -166,6 +166,12 @@ type PrmObjectCreate struct {
 	BufferMaxSize uint64
 }

+// CreateObjectResult is a result parameter of FrostFS.CreateObject operation.
+type CreateObjectResult struct {
+	ObjectID      oid.ID
+	CreationEpoch uint64
+}
+
 // PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
 type PrmObjectDelete struct {
 	// Authentication parameters.

@@ -194,6 +200,27 @@ type PrmObjectSearch struct {
 	FilePrefix string
 }

+// PrmObjectPatch groups parameters of FrostFS.PatchObject operation.
+type PrmObjectPatch struct {
+	// Authentication parameters.
+	PrmAuth
+
+	// Container of the patched object.
+	Container cid.ID
+
+	// Identifier of the patched object.
+	Object oid.ID
+
+	// Object patch payload encapsulated in io.Reader primitive.
+	Payload io.Reader
+
+	// Object range to patch.
+	Offset, Length uint64
+
+	// Size of original object payload.
+	ObjectSize uint64
+}
+
 var (
 	// ErrAccessDenied is returned from FrostFS in case of access violation.
 	ErrAccessDenied = errors.New("access denied")

@@ -261,15 +288,15 @@ type FrostFS interface {

 	// CreateObject creates and saves a parameterized object in the FrostFS container.
 	// It sets 'Timestamp' attribute to the current time.
-	// It returns the ID of the saved object.
+	// It returns the ID and creation epoch of the saved object.
 	//
 	// Creation time should be written into the object (UTC).
 	//
 	// It returns ErrAccessDenied on write access violation.
 	//
-	// It returns exactly one non-zero value. It returns any error encountered which
-	// prevented the container from being created.
-	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the object from being created.
+	CreateObject(context.Context, PrmObjectCreate) (*CreateObjectResult, error)

 	// DeleteObject marks the object to be removed from the FrostFS container by identifier.
 	// Successful return does not guarantee actual removal.

@@ -288,6 +315,15 @@ type FrostFS interface {
 	// prevented the objects from being selected.
 	SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)

+	// PatchObject performs object patch in the FrostFS container.
+	// It returns the ID of the patched object.
+	//
+	// It returns ErrAccessDenied on selection access violation.
+	//
+	// It returns exactly one non-nil value. It returns any error encountered which
+	// prevented the objects from being patched.
+	PatchObject(context.Context, PrmObjectPatch) (oid.ID, error)
+
 	// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
 	// Note:
 	// * future time must be after the now

@@ -295,4 +331,7 @@ type FrostFS interface {
 	//
 	// It returns any error encountered which prevented computing epochs.
 	TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error)
+
+	// NetworkInfo returns parameters of FrostFS network.
+	NetworkInfo(context.Context) (netmap.NetworkInfo, error)
 }

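For orientation, a minimal caller-side sketch of the two reshaped operations. The parameter and result fields are exactly the ones declared in the interface above; the wrapper functions and their wiring are illustrative assumptions:

package example

import (
	"bytes"
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// replaceRange overwrites the byte range [off, off+length) of an existing object
// with the patch payload and returns the ID of the patched object; length may
// differ from len(patch). f can be any frostfs.FrostFS implementation.
func replaceRange(ctx context.Context, f frostfs.FrostFS, cnr cid.ID, obj oid.ID,
	patch []byte, off, length, objSize uint64) (oid.ID, error) {
	return f.PatchObject(ctx, frostfs.PrmObjectPatch{
		Container:  cnr,
		Object:     obj,
		Payload:    bytes.NewReader(patch),
		Offset:     off,
		Length:     length,
		ObjectSize: objSize,
	})
}

// createObject shows the new result shape: callers now receive the creation
// epoch alongside the object ID instead of a bare oid.ID.
func createObject(ctx context.Context, f frostfs.FrostFS, prm frostfs.PrmObjectCreate) (oid.ID, uint64, error) {
	res, err := f.CreateObject(ctx, prm)
	if err != nil {
		return oid.ID{}, 0, err
	}
	return res.ObjectID, res.CreationEpoch, nil
}
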
@@ -12,14 +12,17 @@ import (

 	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
 	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"

@@ -50,16 +53,16 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
 	k.md5Enabled = md5Enabled
 }

-func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
+func (k *FeatureSettingsMock) FormContainerZone(ns string) string {
 	if ns == "" {
-		return v2container.SysAttributeZoneDefault, true
+		return v2container.SysAttributeZoneDefault
 	}

-	return ns + ".ns", false
+	return ns + ".ns"
 }

 type TestFrostFS struct {
-	FrostFS
+	frostfs.FrostFS

 	objects      map[string]*object.Object
 	objectErrors map[string]error

@@ -137,7 +140,7 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
 	t.containers[cnrID.EncodeToString()] = cnr
 }

-func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (*ContainerCreateResult, error) {
+func (t *TestFrostFS) CreateContainer(_ context.Context, prm frostfs.PrmContainerCreate) (*frostfs.ContainerCreateResult, error) {
 	var cnr container.Container
 	cnr.Init()
 	cnr.SetOwner(prm.Creator)

@@ -172,7 +175,7 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate)
 	id.SetSHA256(sha256.Sum256(b))
 	t.containers[id.EncodeToString()] = &cnr

-	return &ContainerCreateResult{ContainerID: id}, nil
+	return &frostfs.ContainerCreateResult{ContainerID: id}, nil
 }

 func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *session.Container) error {

@@ -181,7 +184,7 @@ func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *sessio
 	return nil
 }

-func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
+func (t *TestFrostFS) Container(_ context.Context, prm frostfs.PrmContainer) (*container.Container, error) {
 	for k, v := range t.containers {
 		if k == prm.ContainerID.EncodeToString() {
 			return v, nil

@@ -191,7 +194,7 @@ func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container
 	return nil, fmt.Errorf("container not found %s", prm.ContainerID)
 }

-func (t *TestFrostFS) UserContainers(context.Context, PrmUserContainers) ([]cid.ID, error) {
+func (t *TestFrostFS) UserContainers(context.Context, frostfs.PrmUserContainers) ([]cid.ID, error) {
 	var res []cid.ID
 	for k := range t.containers {
 		var idCnr cid.ID

@@ -218,7 +221,7 @@ func (t *TestFrostFS) retrieveObject(ctx context.Context, cnrID cid.ID, objID oi
 	if obj, ok := t.objects[sAddr]; ok {
 		owner := getBearerOwner(ctx)
 		if !t.checkAccess(cnrID, owner) {
-			return nil, ErrAccessDenied
+			return nil, frostfs.ErrAccessDenied
 		}

 		return obj, nil

@@ -227,23 +230,23 @@
 	return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
 }

-func (t *TestFrostFS) HeadObject(ctx context.Context, prm PrmObjectHead) (*object.Object, error) {
+func (t *TestFrostFS) HeadObject(ctx context.Context, prm frostfs.PrmObjectHead) (*object.Object, error) {
 	return t.retrieveObject(ctx, prm.Container, prm.Object)
 }

-func (t *TestFrostFS) GetObject(ctx context.Context, prm PrmObjectGet) (*Object, error) {
+func (t *TestFrostFS) GetObject(ctx context.Context, prm frostfs.PrmObjectGet) (*frostfs.Object, error) {
 	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
 	if err != nil {
 		return nil, err
 	}

-	return &Object{
+	return &frostfs.Object{
 		Header:  *obj,
 		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
 	}, nil
 }

-func (t *TestFrostFS) RangeObject(ctx context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
+func (t *TestFrostFS) RangeObject(ctx context.Context, prm frostfs.PrmObjectRange) (io.ReadCloser, error) {
 	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
 	if err != nil {
 		return nil, err

@@ -255,10 +258,10 @@ func (t *TestFrostFS) RangeObject(ctx context.Context, prm PrmObjectRange) (io.R
 	return io.NopCloser(bytes.NewReader(payload)), nil
 }

-func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
+func (t *TestFrostFS) CreateObject(_ context.Context, prm frostfs.PrmObjectCreate) (*frostfs.CreateObjectResult, error) {
 	b := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, b); err != nil {
-		return oid.ID{}, err
+		return nil, err
 	}
 	var id oid.ID
 	id.SetSHA256(sha256.Sum256(b))

@@ -266,7 +269,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 	attrs := make([]object.Attribute, 0)

 	if err := t.objectPutErrors[prm.Filepath]; err != nil {
-		return oid.ID{}, err
+		return nil, err
 	}

 	if prm.Filepath != "" {

@@ -311,7 +314,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 	if prm.Payload != nil {
 		all, err := io.ReadAll(prm.Payload)
 		if err != nil {
-			return oid.ID{}, err
+			return nil, err
 		}
 		obj.SetPayload(all)
 		obj.SetPayloadSize(uint64(len(all)))

@@ -325,10 +328,13 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.

 	addr := newAddress(cnrID, objID)
 	t.objects[addr.EncodeToString()] = obj
-	return objID, nil
+	return &frostfs.CreateObjectResult{
+		ObjectID:      objID,
+		CreationEpoch: t.currentEpoch - 1,
+	}, nil
 }

-func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
+func (t *TestFrostFS) DeleteObject(ctx context.Context, prm frostfs.PrmObjectDelete) error {
 	var addr oid.Address
 	addr.SetContainer(prm.Container)
 	addr.SetObject(prm.Object)

@@ -340,7 +346,7 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) err
 	if _, ok := t.objects[addr.EncodeToString()]; ok {
 		owner := getBearerOwner(ctx)
 		if !t.checkAccess(prm.Container, owner) {
-			return ErrAccessDenied
+			return frostfs.ErrAccessDenied
 		}

 		delete(t.objects, addr.EncodeToString())

@@ -367,7 +373,7 @@ func (t *TestFrostFS) AllObjects(cnrID cid.ID) []oid.ID {
 	return result
 }

-func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) ([]oid.ID, error) {
+func (t *TestFrostFS) SearchObjects(_ context.Context, prm frostfs.PrmObjectSearch) ([]oid.ID, error) {
 	filters := object.NewSearchFilters()
 	filters.AddRootFilter()

@@ -404,6 +410,51 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) ([]o
 	return res, nil
 }

+func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
+	ni := netmap.NetworkInfo{}
+	ni.SetCurrentEpoch(t.currentEpoch)
+	ni.SetEpochDuration(60)
+	ni.SetMsPerBlock(1000)
+
+	return ni, nil
+}
+
+func (t *TestFrostFS) PatchObject(ctx context.Context, prm frostfs.PrmObjectPatch) (oid.ID, error) {
+	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	newObj := *obj
+
+	patchBytes, err := io.ReadAll(prm.Payload)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	var newPayload []byte
+	if prm.Offset > 0 {
+		newPayload = append(newPayload, obj.Payload()[:prm.Offset]...)
+	}
+	newPayload = append(newPayload, patchBytes...)
+	if prm.Offset+prm.Length < obj.PayloadSize() {
+		newPayload = append(newPayload, obj.Payload()[prm.Offset+prm.Length:]...)
+	}
+	newObj.SetPayload(newPayload)
+	newObj.SetPayloadSize(uint64(len(newPayload)))
+
+	var hash checksum.Checksum
+	checksum.Calculate(&hash, checksum.SHA256, newPayload)
+	newObj.SetPayloadChecksum(hash)
+
+	newID := oidtest.ID()
+	newObj.SetID(newID)
+
+	t.objects[newAddress(prm.Container, newID).EncodeToString()] = &newObj
+
+	return newID, nil
+}
+
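The mock above splices the patch into the payload by byte ranges: the prefix before Offset is kept, the next Length bytes are dropped in favor of the patch bytes, and the suffix is appended. A tiny self-contained illustration of the same arithmetic in plain Go, with no FrostFS types involved:

package main

import "fmt"

func main() {
	orig := []byte("hello world")
	patch := []byte("frostfs")
	off, length := 6, 5 // replace the 5 bytes of "world"

	var out []byte
	out = append(out, orig[:off]...)        // prefix kept as-is
	out = append(out, patch...)             // patch bytes substituted for the range
	out = append(out, orig[off+length:]...) // suffix kept as-is

	fmt.Println(string(out)) // hello frostfs
}
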
 func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID) bool {
 	cnr, ok := t.containers[cnrID.EncodeToString()]
 	if !ok {

@@ -6,7 +6,7 @@ import (
 	"crypto/rand"
 	"encoding/json"
 	"encoding/xml"
-	stderrors "errors"
+	"errors"
 	"fmt"
 	"io"
 	"net/url"

@@ -17,9 +17,10 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"

@@ -35,39 +36,41 @@ import (

 type (
 	BucketResolver interface {
-		Resolve(ctx context.Context, name string) (cid.ID, error)
+		Resolve(ctx context.Context, zone, name string) (cid.ID, error)
 	}

 	FeatureSettings interface {
 		ClientCut() bool
 		BufferMaxSizeForPut() uint64
 		MD5Enabled() bool
-		FormContainerZone(ns string) (zone string, isDefault bool)
+		FormContainerZone(ns string) string
 	}

 	Layer struct {
-		frostFS     FrostFS
-		gateOwner   user.ID
-		log         *zap.Logger
-		anonKey     AnonymousKey
-		resolver    BucketResolver
-		cache       *Cache
-		treeService TreeService
-		features    FeatureSettings
-		gateKey     *keys.PrivateKey
-		corsCnrInfo *data.BucketInfo
+		frostFS          frostfs.FrostFS
+		gateOwner        user.ID
+		log              *zap.Logger
+		anonKey          AnonymousKey
+		resolver         BucketResolver
+		cache            *Cache
+		treeService      tree.Service
+		features         FeatureSettings
+		gateKey          *keys.PrivateKey
+		corsCnrInfo      *data.BucketInfo
+		lifecycleCnrInfo *data.BucketInfo
 	}

 	Config struct {
-		GateOwner    user.ID
-		ChainAddress string
-		Cache        *Cache
-		AnonKey      AnonymousKey
-		Resolver     BucketResolver
-		TreeService  TreeService
-		Features     FeatureSettings
-		GateKey      *keys.PrivateKey
-		CORSCnrInfo  *data.BucketInfo
+		GateOwner        user.ID
+		ChainAddress     string
+		Cache            *Cache
+		AnonKey          AnonymousKey
+		Resolver         BucketResolver
+		TreeService      tree.Service
+		Features         FeatureSettings
+		GateKey          *keys.PrivateKey
+		CORSCnrInfo      *data.BucketInfo
+		LifecycleCnrInfo *data.BucketInfo
 	}

 	// AnonymousKey contains data for anonymous requests.

@@ -101,7 +104,7 @@ type (
 	PutObjectParams struct {
 		BktInfo *data.BucketInfo
 		Object  string
-		Size    uint64
+		Size    *uint64
 		Reader  io.Reader
 		Header  map[string]string
 		Lock    *data.ObjectLock

@@ -122,10 +125,11 @@ type (
 	}

 	DeleteObjectParams struct {
-		BktInfo    *data.BucketInfo
-		Objects    []*VersionedObject
-		Settings   *data.BucketSettings
-		IsMultiple bool
+		BktInfo     *data.BucketInfo
+		Objects     []*VersionedObject
+		Settings    *data.BucketSettings
+		NetworkInfo netmap.NetworkInfo
+		IsMultiple  bool
 	}

 	// PutSettingsParams stores object copy request parameters.

@@ -157,6 +161,7 @@ type (
 		DstEncryption encryption.Params
 		CopiesNumbers []uint32
 	}

+	// CreateBucketParams stores bucket create request parameters.
 	CreateBucketParams struct {
 		Name string

@@ -231,18 +236,19 @@ func (p HeadObjectParams) Versioned() bool {

 // NewLayer creates an instance of a Layer. It checks credentials
 // and establishes gRPC connection with the node.
-func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) *Layer {
+func NewLayer(log *zap.Logger, frostFS frostfs.FrostFS, config *Config) *Layer {
 	return &Layer{
-		frostFS:     frostFS,
-		log:         log,
-		gateOwner:   config.GateOwner,
-		anonKey:     config.AnonKey,
-		resolver:    config.Resolver,
-		cache:       config.Cache,
-		treeService: config.TreeService,
-		features:    config.Features,
-		gateKey:     config.GateKey,
-		corsCnrInfo: config.CORSCnrInfo,
+		frostFS:          frostFS,
+		log:              log,
+		gateOwner:        config.GateOwner,
+		anonKey:          config.AnonKey,
+		resolver:         config.Resolver,
+		cache:            config.Cache,
+		treeService:      config.TreeService,
+		features:         config.Features,
+		gateKey:          config.GateKey,
+		corsCnrInfo:      config.CORSCnrInfo,
+		lifecycleCnrInfo: config.LifecycleCnrInfo,
 	}
 }

@@ -294,7 +300,7 @@ func (n *Layer) reqLogger(ctx context.Context) *zap.Logger {
 	return n.log
 }

-func (n *Layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
+func (n *Layer) prepareAuthParameters(ctx context.Context, prm *frostfs.PrmAuth, bktOwner user.ID) {
 	if prm.BearerToken != nil || prm.PrivateKey != nil {
 		return
 	}

@@ -317,21 +323,21 @@ func (n *Layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
 	}

 	reqInfo := middleware.GetReqInfo(ctx)
-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)

 	if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
 		return bktInfo, nil
 	}

-	containerID, err := n.ResolveBucket(ctx, name)
+	containerID, err := n.ResolveBucket(ctx, zone, name)
 	if err != nil {
 		if strings.Contains(err.Error(), "not found") {
-			return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
+			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchBucket), err.Error())
 		}
 		return nil, err
 	}

-	prm := PrmContainer{
+	prm := frostfs.PrmContainer{
 		ContainerID:  containerID,
 		SessionToken: n.SessionTokenForRead(ctx),
 	}

@@ -347,13 +353,13 @@ func (n *Layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {
 	}

 	reqInfo := middleware.GetReqInfo(ctx)
-	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
+	zone := n.features.FormContainerZone(reqInfo.Namespace)

 	if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
 		return bktInfo.CID, nil
 	}

-	return n.ResolveBucket(ctx, name)
+	return n.ResolveBucket(ctx, zone, name)
 }

 // ListBuckets returns all user containers. The name of the bucket is a container

@@ -392,9 +398,9 @@ func (n *Layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPaylo
 	if err != nil {
 		if client.IsErrObjectNotFound(err) {
 			if p.Versioned {
-				err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchVersion), err.Error())
+				err = fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error())
 			} else {
-				err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchKey), err.Error())
+				err = fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 			}
 		}

@@ -523,7 +529,7 @@ func (n *Layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
 	return n.PutObject(ctx, &PutObjectParams{
 		BktInfo:    p.DstBktInfo,
 		Object:     p.DstObject,
-		Size:       p.DstSize,
+		Size:       &p.DstSize,
 		Reader:     objPayload,
 		Header:     p.Header,
 		Encryption: p.DstEncryption,

@@ -542,7 +548,8 @@ func getRandomOID() (oid.ID, error) {
 	return objID, nil
 }

-func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject) *VersionedObject {
+func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject,
+	networkInfo netmap.NetworkInfo) *VersionedObject {
 	if len(obj.VersionID) != 0 || settings.Unversioned() {
 		var nodeVersions []*data.NodeVersion
 		if nodeVersions, obj.Error = n.getNodeVersionsToDelete(ctx, bkt, obj); obj.Error != nil {

@@ -624,6 +631,7 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
 			Created:        &now,
 			Owner:          &n.gateOwner,
 			IsDeleteMarker: true,
+			CreationEpoch:  networkInfo.CurrentEpoch(),
 		},
 		IsUnversioned: settings.VersioningSuspended(),
 	}

@@ -648,22 +656,22 @@ func (n *Layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
 }

 func isNotFoundError(err error) bool {
-	return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
-		errors.IsS3Error(err, errors.ErrNoSuchVersion)
+	return apierr.IsS3Error(err, apierr.ErrNoSuchKey) ||
+		apierr.IsS3Error(err, apierr.ErrNoSuchVersion)
 }

 func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) ([]*data.NodeVersion, error) {
 	var versionsToDelete []*data.NodeVersion
 	versions, err := n.treeService.GetVersions(ctx, bkt, obj.Name)
 	if err != nil {
-		if stderrors.Is(err, ErrNodeNotFound) {
-			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return nil, err
 	}

 	if len(versions) == 0 {
-		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion))
 	}

 	sort.Slice(versions, func(i, j int) bool {

@@ -705,7 +713,7 @@ func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInf
 	}

 	if len(versionsToDelete) == 0 {
-		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+		return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion))
 	}

 	n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids))

@@ -766,7 +774,7 @@ func (n *Layer) removeCombinedObject(ctx context.Context, bkt *data.BucketInfo,
 // DeleteObjects from the storage.
 func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject {
 	for i, obj := range p.Objects {
-		p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj)
+		p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj, p.NetworkInfo)
 		if p.IsMultiple && p.Objects[i].Error != nil {
 			n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error))
 		}

@@ -778,23 +786,23 @@ func (n *Layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.
 	bktInfo, err := n.GetBucketInfo(ctx, p.Name)
 	if err != nil {
-		if errors.IsS3Error(err, errors.ErrNoSuchBucket) {
+		if apierr.IsS3Error(err, apierr.ErrNoSuchBucket) {
 			return n.createContainer(ctx, p)
 		}
 		return nil, err
 	}

 	if p.SessionContainerCreation != nil && session.IssuedBy(*p.SessionContainerCreation, bktInfo.Owner) {
-		return nil, errors.GetAPIError(errors.ErrBucketAlreadyOwnedByYou)
+		return nil, apierr.GetAPIError(apierr.ErrBucketAlreadyOwnedByYou)
 	}

-	return nil, errors.GetAPIError(errors.ErrBucketAlreadyExists)
+	return nil, apierr.GetAPIError(apierr.ErrBucketAlreadyExists)
 }

-func (n *Layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
+func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, error) {
 	var cnrID cid.ID
 	if err := cnrID.DecodeString(name); err != nil {
-		if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
+		if cnrID, err = n.resolver.Resolve(ctx, zone, name); err != nil {
 			return cid.ID{}, err
 		}

@@ -815,7 +823,7 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
 		return err
 	}
 	if len(res) != 0 {
-		return errors.GetAPIError(errors.ErrBucketNotEmpty)
+		return apierr.GetAPIError(apierr.ErrBucketNotEmpty)
 	}
 }

@@ -826,6 +834,11 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
 		n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err))
 	}

+	lifecycleObj, treeErr := n.treeService.GetBucketLifecycleConfiguration(ctx, p.BktInfo)
+	if treeErr != nil {
+		n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr))
+	}
+
 	err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
 	if err != nil {
 		return fmt.Errorf("delete container: %w", err)

@@ -835,5 +848,33 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
 		n.deleteCORSObject(ctx, p.BktInfo, corsObj)
 	}

+	if treeErr == nil && !lifecycleObj.Container().Equals(p.BktInfo.CID) {
+		n.deleteLifecycleObject(ctx, p.BktInfo, lifecycleObj)
+	}
+
 	return nil
 }

+func (n *Layer) DeleteContainer(ctx context.Context, p *DeleteBucketParams) error {
+	n.cache.DeleteBucket(p.BktInfo)
+	if err := n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken); err != nil {
+		return fmt.Errorf("delete container: %w", err)
+	}
+	return nil
+}
+
+func (n *Layer) GetNetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) {
+	cachedInfo := n.cache.GetNetworkInfo()
+	if cachedInfo != nil {
+		return *cachedInfo, nil
+	}
+
+	networkInfo, err := n.frostFS.NetworkInfo(ctx)
+	if err != nil {
+		return netmap.NetworkInfo{}, fmt.Errorf("get network info: %w", err)
+	}
+
+	n.cache.PutNetworkInfo(networkInfo)
+
+	return networkInfo, nil
+}

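GetNetworkInfo above is a read-through cache over FrostFS.NetworkInfo, and DeleteObjectParams now carries the result so delete markers can record their creation epoch. A hedged sketch of the caller-side wiring; the function and its name are illustrative, the Layer methods and parameter fields are the ones defined above:

package example

import (
	"context"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
)

func deleteObjects(ctx context.Context, n *layer.Layer, bkt *data.BucketInfo,
	settings *data.BucketSettings, objs []*layer.VersionedObject) ([]*layer.VersionedObject, error) {
	ni, err := n.GetNetworkInfo(ctx) // cached after the first successful call
	if err != nil {
		return nil, err
	}
	return n.DeleteObjects(ctx, &layer.DeleteObjectParams{
		BktInfo:     bkt,
		Objects:     objs,
		Settings:    settings,
		NetworkInfo: ni, // CurrentEpoch() stamps any delete markers created here
	}), nil
}
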
146	api/layer/lifecycle.go	Normal file

@@ -0,0 +1,146 @@
+package layer
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"errors"
+	"fmt"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"go.uber.org/zap"
+)
+
+type PutBucketLifecycleParams struct {
+	BktInfo       *data.BucketInfo
+	LifecycleCfg  *data.LifecycleConfiguration
+	CopiesNumbers []uint32
+}
+
+func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
+	cfgBytes, err := xml.Marshal(p.LifecycleCfg)
+	if err != nil {
+		return fmt.Errorf("marshal lifecycle configuration: %w", err)
+	}
+
+	prm := frostfs.PrmObjectCreate{
+		Payload:      bytes.NewReader(cfgBytes),
+		Filepath:     p.BktInfo.LifecycleConfigurationObjectName(),
+		CreationTime: TimeNow(ctx),
+	}
+
+	var lifecycleBkt *data.BucketInfo
+	if n.lifecycleCnrInfo == nil {
+		lifecycleBkt = p.BktInfo
+		prm.CopiesNumber = p.CopiesNumbers
+	} else {
+		lifecycleBkt = n.lifecycleCnrInfo
+		prm.PrmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	prm.Container = lifecycleBkt.CID
+
+	createdObj, err := n.objectPutAndHash(ctx, prm, lifecycleBkt)
+	if err != nil {
+		return fmt.Errorf("put lifecycle object: %w", err)
+	}
+
+	objsToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
+	objsToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
+	if err != nil && !objsToDeleteNotFound {
+		return err
+	}
+
+	if !objsToDeleteNotFound {
+		for _, addr := range objsToDelete {
+			n.deleteLifecycleObject(ctx, p.BktInfo, addr)
+		}
+	}
+
+	n.cache.PutLifecycleConfiguration(n.BearerOwner(ctx), p.BktInfo, p.LifecycleCfg)
+
+	return nil
+}
+
+// deleteLifecycleObject removes object and logs in case of error.
+func (n *Layer) deleteLifecycleObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
+	var prmAuth frostfs.PrmAuth
+	lifecycleBkt := bktInfo
+	if !addr.Container().Equals(bktInfo.CID) {
+		lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	if err := n.objectDeleteWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth); err != nil {
+		n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err),
+			zap.String("cid", lifecycleBkt.CID.EncodeToString()),
+			zap.String("oid", addr.Object().EncodeToString()))
+	}
+}
+
+func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.LifecycleConfiguration, error) {
+	owner := n.BearerOwner(ctx)
+	if cfg := n.cache.GetLifecycleConfiguration(owner, bktInfo); cfg != nil {
+		return cfg, nil
+	}
+
+	addr, err := n.treeService.GetBucketLifecycleConfiguration(ctx, bktInfo)
+	objNotFound := errors.Is(err, tree.ErrNodeNotFound)
+	if err != nil && !objNotFound {
+		return nil, err
+	}
+
+	if objNotFound {
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), err.Error())
+	}
+
+	var prmAuth frostfs.PrmAuth
+	lifecycleBkt := bktInfo
+	if !addr.Container().Equals(bktInfo.CID) {
+		lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
+		prmAuth.PrivateKey = &n.gateKey.PrivateKey
+	}
+
+	obj, err := n.objectGetWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth)
+	if err != nil {
+		return nil, fmt.Errorf("get lifecycle object: %w", err)
+	}
+
+	lifecycleCfg := &data.LifecycleConfiguration{}
+
+	if err = xml.NewDecoder(obj.Payload).Decode(&lifecycleCfg); err != nil {
+		return nil, fmt.Errorf("unmarshal lifecycle configuration: %w", err)
+	}
+
+	n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)
+
+	for i := range lifecycleCfg.Rules {
+		if lifecycleCfg.Rules[i].Expiration != nil {
+			lifecycleCfg.Rules[i].Expiration.Epoch = nil
+		}
+	}
+
+	return lifecycleCfg, nil
+}
+
+func (n *Layer) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) error {
+	objs, err := n.treeService.DeleteBucketLifecycleConfiguration(ctx, bktInfo)
+	objsNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
+	if err != nil && !objsNotFound {
+		return err
+	}
+	if !objsNotFound {
+		for _, addr := range objs {
+			n.deleteLifecycleObject(ctx, bktInfo, addr)
+		}
+	}
+
+	n.cache.DeleteLifecycleConfiguration(bktInfo)
+
+	return nil
+}

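For reference, a configuration with one enabled 21-day expiration rule (the shape the test below builds) marshals to standard S3 lifecycle XML roughly as follows. The exact element names depend on the data package's struct tags, so treat this as an assumed rendering:

package example

// assumed rendering of the payload PutBucketLifecycleConfiguration stores
const lifecycleXML = `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <Status>Enabled</Status>
    <Expiration>
      <Days>21</Days>
    </Expiration>
  </Rule>
</LifecycleConfiguration>`
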
61	api/layer/lifecycle_test.go	Normal file

@@ -0,0 +1,61 @@
+package layer
+
+import (
+	"crypto/md5"
+	"encoding/xml"
+	"testing"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBucketLifecycle(t *testing.T) {
+	tc := prepareContext(t)
+
+	lifecycle := &data.LifecycleConfiguration{
+		XMLName: xml.Name{
+			Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
+			Local: "LifecycleConfiguration",
+		},
+		Rules: []data.LifecycleRule{
+			{
+				Status: data.LifecycleStatusEnabled,
+				Expiration: &data.LifecycleExpiration{
+					Days: ptr(21),
+				},
+			},
+		},
+	}
+	lifecycleBytes, err := xml.Marshal(lifecycle)
+	require.NoError(t, err)
+	hash := md5.New()
+	hash.Write(lifecycleBytes)
+
+	_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.Equal(t, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), frosterr.UnwrapErr(err))
+
+	err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+
+	err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
+		BktInfo:      tc.bktInfo,
+		LifecycleCfg: lifecycle,
+	})
+	require.NoError(t, err)
+
+	cfg, err := tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+	require.Equal(t, *lifecycle, *cfg)
+
+	err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.NoError(t, err)
+
+	_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
+	require.Equal(t, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), frosterr.UnwrapErr(err))
+}
+
+func ptr[T any](t T) *T {
+	return &t
+}

@@ -11,7 +11,7 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"github.com/panjf2000/ants/v2"

@@ -691,10 +691,10 @@ func filterVersionsByMarker(objects []*data.ExtendedNodeVersion, p *ListObjectVe
 				return objects[j+1:], nil
 			}
 		}
-		return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
+		return nil, apierr.GetAPIError(apierr.ErrInvalidVersion)
 	} else if obj.NodeVersion.FilePath > p.KeyMarker {
 		if p.VersionIDMarker != "" {
-			return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidVersion)
 		}
 		return objects[i:], nil
 	}

@ -17,8 +17,10 @@ import (
|
|||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
|
||||
|
@ -150,6 +152,11 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
|
|||
metaSize += len(p.Data.TagSet)
|
||||
}
|
||||
|
||||
networkInfo, err := n.GetNetworkInfo(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
info := &data.MultipartInfo{
|
||||
Key: p.Info.Key,
|
||||
UploadID: p.Info.UploadID,
|
||||
|
@ -157,6 +164,7 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
|
|||
Created: TimeNow(ctx),
|
||||
Meta: make(map[string]string, metaSize),
|
||||
CopiesNumbers: p.CopiesNumbers,
|
||||
CreationEpoch: networkInfo.CurrentEpoch(),
|
||||
}
|
||||
|
||||
for key, val := range p.Header {
|
||||
|
@ -181,14 +189,14 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
|
|||
func (n *Layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
|
||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return "", fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
|
||||
if errors.Is(err, tree.ErrNodeNotFound) {
|
||||
return "", fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error())
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
if p.Size > UploadMaxSize {
|
||||
return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize)
|
||||
return "", fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooLarge), p.Size, UploadMaxSize)
|
||||
}
|
||||
|
||||
objInfo, err := n.uploadPart(ctx, multipartInfo, p)
|
||||
|
@ -203,11 +211,11 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
||||
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
|
||||
return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters)
|
||||
}
|
||||
|
||||
bktInfo := p.Info.Bkt
|
||||
prm := PrmObjectCreate{
|
||||
prm := frostfs.PrmObjectCreate{
|
||||
Container: bktInfo.CID,
|
||||
Attributes: make([][2]string, 2),
|
||||
Payload: p.Reader,
|
||||
|
@ -229,76 +237,78 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
|
||||
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
|
||||
|
||||
size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p.ContentMD5) > 0 {
|
||||
hashBytes, err := base64.StdEncoding.DecodeString(p.ContentMD5)
|
||||
if err != nil {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
|
||||
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
|
||||
}
|
||||
if hex.EncodeToString(hashBytes) != hex.EncodeToString(md5Hash) {
|
||||
prm := PrmObjectDelete{
|
||||
Object: id,
|
||||
if hex.EncodeToString(hashBytes) != hex.EncodeToString(createdObj.MD5Sum) {
|
||||
prm := frostfs.PrmObjectDelete{
|
||||
Object: createdObj.ID,
|
||||
Container: bktInfo.CID,
|
||||
}
|
||||
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
|
||||
err = n.frostFS.DeleteObject(ctx, prm)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
|
||||
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
|
||||
}
|
||||
}
|
||||
if p.Info.Encryption.Enabled() {
|
||||
size = decSize
|
||||
createdObj.Size = decSize
|
||||
}
|
||||
|
||||
if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
|
||||
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
|
||||
if err != nil {
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
|
||||
}
|
||||
if !bytes.Equal(contentHashBytes, hash) {
|
||||
err = n.objectDelete(ctx, bktInfo, id)
|
||||
if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
|
||||
err = n.objectDelete(ctx, bktInfo, createdObj.ID)
|
||||
if err != nil {
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
|
||||
}
|
||||
return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
|
||||
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
|
||||
}
|
||||
}
|
||||
|
||||
n.reqLogger(ctx).Debug(logs.UploadPart,
|
||||
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
|
||||
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID))
|
||||
|
||||
partInfo := &data.PartInfo{
|
||||
Key: p.Info.Key,
|
||||
UploadID: p.Info.UploadID,
|
||||
Number: p.PartNumber,
|
||||
OID: id,
|
||||
Size: size,
|
||||
ETag: hex.EncodeToString(hash),
|
||||
OID: createdObj.ID,
|
||||
Size: createdObj.Size,
|
||||
ETag: hex.EncodeToString(createdObj.HashSum),
|
||||
Created: prm.CreationTime,
|
||||
MD5: hex.EncodeToString(md5Hash),
|
||||
MD5: hex.EncodeToString(createdObj.MD5Sum),
|
||||
}
|
||||
|
||||
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
|
||||
oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
|
||||
oldPartIDs, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
|
||||
oldPartIDNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
|
||||
if err != nil && !oldPartIDNotFound {
|
||||
return nil, err
|
||||
}
|
||||
if !oldPartIDNotFound {
|
||||
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
|
||||
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
|
||||
zap.String("cid", bktInfo.CID.EncodeToString()),
|
||||
zap.String("oid", oldPartID.EncodeToString()))
|
||||
for _, oldPartID := range oldPartIDs {
|
||||
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
|
||||
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
|
||||
zap.String("cid", bktInfo.CID.EncodeToString()),
|
||||
zap.String("oid", oldPartID.EncodeToString()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
objInfo := &data.ObjectInfo{
|
||||
ID: id,
|
||||
ID: createdObj.ID,
|
||||
CID: bktInfo.CID,
|
||||
|
||||
Owner: bktInfo.Owner,
|
||||
|
@ -315,8 +325,8 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
|
|||
func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
|
||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
|
||||
if errors.Is(err, tree.ErrNodeNotFound) {
|
||||
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error())
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -332,11 +342,11 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
if p.Range != nil {
|
||||
size = p.Range.End - p.Range.Start + 1
|
||||
if p.Range.End > srcObjectSize {
|
||||
return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, srcObjectSize)
|
||||
return nil, fmt.Errorf("%w: %d-%d/%d", apierr.GetAPIError(apierr.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, srcObjectSize)
|
||||
}
|
||||
}
|
||||
if size > UploadMaxSize {
|
||||
return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize)
|
||||
return nil, fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooLarge), size, UploadMaxSize)
|
||||
}
|
||||
|
||||
objPayload, err := n.GetObject(ctx, &GetObjectParams{
|
||||
|
@ -363,7 +373,7 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
|
|||
func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
|
||||
for i := 1; i < len(p.Parts); i++ {
|
||||
if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
|
||||
return nil, nil, s3errors.GetAPIError(s3errors.ErrInvalidPartOrder)
|
||||
return nil, nil, apierr.GetAPIError(apierr.ErrInvalidPartOrder)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -374,25 +384,24 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
||||
|
||||
if len(partsInfo) < len(p.Parts) {
|
||||
return nil, nil, fmt.Errorf("%w: found %d parts, need %d", s3errors.GetAPIError(s3errors.ErrInvalidPart), len(partsInfo), len(p.Parts))
|
||||
return nil, nil, fmt.Errorf("%w: found %d parts, need %d", apierr.GetAPIError(apierr.ErrInvalidPart), len(partsInfo), len(p.Parts))
|
||||
}
|
||||
|
||||
var multipartObjetSize uint64
|
||||
var encMultipartObjectSize uint64
|
||||
parts := make([]*data.PartInfo, 0, len(p.Parts))
|
||||
parts := make([]*data.PartInfoExtended, 0, len(p.Parts))
|
||||
|
||||
var completedPartsHeader strings.Builder
|
||||
md5Hash := md5.New()
|
||||
for i, part := range p.Parts {
|
||||
partInfo := partsInfo[part.PartNumber]
|
||||
if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
|
||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
|
||||
partInfo := partsInfo.Extract(part.PartNumber, data.UnQuote(part.ETag), n.features.MD5Enabled())
|
||||
if partInfo == nil {
|
||||
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", apierr.GetAPIError(apierr.ErrInvalidPart), part.PartNumber)
|
||||
}
|
||||
delete(partsInfo, part.PartNumber)
|
||||
|
||||
// for the last part we have no minimum size limit
|
||||
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
|
||||
return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
|
||||
return nil, nil, fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
|
||||
}
|
||||
parts = append(parts, partInfo)
|
||||
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
|
||||
|
@ -453,7 +462,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
|
|||
Object: p.Info.Key,
|
||||
Reader: bytes.NewReader(partsData),
|
||||
Header: initMetadata,
|
||||
Size: multipartObjetSize,
|
||||
Size: &multipartObjetSize,
|
||||
Encryption: p.Info.Encryption,
|
||||
 			CopiesNumbers: multipartInfo.CopiesNumbers,
 		CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),

@@ -464,19 +473,21 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
 			zap.String("uploadKey", p.Info.Key),
 			zap.Error(err))

-		return nil, nil, s3errors.GetAPIError(s3errors.ErrInternalError)
+		return nil, nil, apierr.GetAPIError(apierr.ErrInternalError)
 	}

 	var addr oid.Address
 	addr.SetContainer(p.Info.Bkt.CID)
-	for _, partInfo := range partsInfo {
-		if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
-			n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
-				zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
-				zap.Error(err))
+	for _, prts := range partsInfo {
+		for _, partInfo := range prts {
+			if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
+				n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
+					zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
+					zap.Error(err))
+			}
+			addr.SetObject(partInfo.OID)
+			n.cache.DeleteObject(addr)
 		}
-		addr.SetObject(partInfo.OID)
-		n.cache.DeleteObject(addr)
 	}

 	return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
@@ -548,10 +559,12 @@ func (n *Layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
 		return err
 	}

-	for _, info := range parts {
-		if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
-			n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
-				zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
+	for _, infos := range parts {
+		for _, info := range infos {
+			if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
+				n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
+					zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
+			}
 		}
 	}

@@ -568,14 +581,19 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
 	encInfo := FormEncryptionInfo(multipartInfo.Meta)
 	if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
 		n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
-		return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
+		return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters)
 	}

 	res.Owner = multipartInfo.Owner

 	parts := make([]*Part, 0, len(partsInfo))

-	for _, partInfo := range partsInfo {
+	for _, infos := range partsInfo {
+		sort.Slice(infos, func(i, j int) bool {
+			return infos[i].Timestamp < infos[j].Timestamp
+		})
+
+		partInfo := infos[len(infos)-1]
 		parts = append(parts, &Part{
 			ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
 			LastModified: partInfo.Created.UTC().Format(time.RFC3339),
@@ -612,11 +630,26 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
 	return &res, nil
 }

-func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
+type PartsInfo map[int][]*data.PartInfoExtended
+
+func (p PartsInfo) Extract(part int, etag string, md5Enabled bool) *data.PartInfoExtended {
+	parts := p[part]
+
+	for i, info := range parts {
+		if info.GetETag(md5Enabled) == etag {
+			p[part] = append(parts[:i], parts[i+1:]...)
+			return info
+		}
+	}
+
+	return nil
+}
+
+func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, PartsInfo, error) {
 	multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return nil, nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error())
 		}
 		return nil, nil, err
 	}
@@ -626,11 +659,11 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
 		return nil, nil, err
 	}

-	res := make(map[int]*data.PartInfo, len(parts))
+	res := make(map[int][]*data.PartInfoExtended, len(parts))
 	partsNumbers := make([]int, len(parts))
 	oids := make([]string, len(parts))
 	for i, part := range parts {
-		res[part.Number] = part
+		res[part.Number] = append(res[part.Number], part)
 		partsNumbers[i] = part.Number
 		oids[i] = part.OID.EncodeToString()
 	}
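Re-uploaded parts are no longer overwritten in the tree: `getUploadParts` now returns a `PartsInfo` map from part number to every stored copy, and `Extract` pops the copy whose ETag the client referenced. A minimal sketch of that contract follows; the part values and ETags are made up for illustration and this helper is not gateway code.

package layer

import "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"

// examplePartsExtract is a sketch: it shows how Extract selects one copy of
// a twice-uploaded part by ETag and removes it from the map, leaving the
// stale copies behind so the caller can delete them from FrostFS.
func examplePartsExtract() *data.PartInfoExtended {
	parts := PartsInfo{
		1: {
			{PartInfo: data.PartInfo{Number: 1, ETag: "etag-old"}, Timestamp: 1},
			{PartInfo: data.PartInfo{Number: 1, ETag: "etag-new"}, Timestamp: 2},
		},
	}

	// Pops the copy the client named in CompleteMultipartUpload; whatever
	// is still left in parts afterwards is garbage to delete.
	return parts.Extract(1, "etag-new", false)
}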
@@ -12,6 +12,7 @@ import (
 	"fmt"
 	"io"
 	"mime"
+	"net/http"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -19,8 +20,11 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/detector"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@@ -47,7 +51,7 @@ type (
 	}

 	DeleteMarkerError struct {
-		ErrorCode apiErrors.ErrorCode
+		ErrorCode apierr.ErrorCode
 	}
 )

@@ -68,7 +72,7 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {

 // objectHead returns all object's headers.
 func (n *Layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
-	prm := PrmObjectHead{
+	prm := frostfs.PrmObjectHead{
 		Container: bktInfo.CID,
 		Object: idObj,
 	}
@@ -126,11 +130,11 @@ func (n *Layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Re
 // initializes payload reader of the FrostFS object.
 // Zero range corresponds to full payload (panics if only offset is set).
 func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
-	var prmAuth PrmAuth
+	var prmAuth frostfs.PrmAuth
 	n.prepareAuthParameters(ctx, &prmAuth, p.bktInfo.Owner)

 	if p.off+p.ln != 0 {
-		prm := PrmObjectRange{
+		prm := frostfs.PrmObjectRange{
 			PrmAuth: prmAuth,
 			Container: p.bktInfo.CID,
 			Object: p.oid,
@@ -140,7 +144,7 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
 		return n.frostFS.RangeObject(ctx, prm)
 	}

-	prm := PrmObjectGet{
+	prm := frostfs.PrmObjectGet{
 		PrmAuth: prmAuth,
 		Container: p.bktInfo.CID,
 		Object: p.oid,
@@ -155,17 +159,17 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
 }

 // objectGet returns an object with payload in the object.
-func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*Object, error) {
-	return n.objectGetBase(ctx, bktInfo, objID, PrmAuth{})
+func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*frostfs.Object, error) {
+	return n.objectGetBase(ctx, bktInfo, objID, frostfs.PrmAuth{})
 }

 // objectGetWithAuth returns an object with payload in the object. Uses provided PrmAuth.
-func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
+func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth frostfs.PrmAuth) (*frostfs.Object, error) {
 	return n.objectGetBase(ctx, bktInfo, objID, auth)
 }

-func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
-	prm := PrmObjectGet{
+func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth frostfs.PrmAuth) (*frostfs.Object, error) {
+	prm := frostfs.PrmObjectGet{
 		PrmAuth: auth,
 		Container: bktInfo.CID,
 		Object: objID,
@@ -230,40 +234,46 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend

 	r := p.Reader
 	if p.Encryption.Enabled() {
-		p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
+		var size uint64
+		if p.Size != nil {
+			size = *p.Size
+		}
+		p.Header[AttributeDecryptedSize] = strconv.FormatUint(size, 10)
 		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
 			return nil, fmt.Errorf("add encryption header: %w", err)
 		}

 		var encSize uint64
-		if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
+		if r, encSize, err = encryptionReader(p.Reader, size, p.Encryption.Key()); err != nil {
 			return nil, fmt.Errorf("create encrypter: %w", err)
 		}
-		p.Size = encSize
+		p.Size = &encSize
 	}

 	if r != nil {
 		if len(p.Header[api.ContentType]) == 0 {
 			if contentType := MimeByFilePath(p.Object); len(contentType) == 0 {
-				d := newDetector(r)
+				d := detector.NewDetector(r, http.DetectContentType)
 				if contentType, err := d.Detect(); err == nil {
 					p.Header[api.ContentType] = contentType
 				}
-				r = d.MultiReader()
+				r = d.RestoredReader()
 			} else {
 				p.Header[api.ContentType] = contentType
 			}
 		}
 	}

-	prm := PrmObjectCreate{
+	prm := frostfs.PrmObjectCreate{
 		Container: p.BktInfo.CID,
-		PayloadSize: p.Size,
 		Filepath: p.Object,
 		Payload: r,
 		CreationTime: TimeNow(ctx),
 		CopiesNumber: p.CopiesNumbers,
 	}
+	if p.Size != nil {
+		prm.PayloadSize = *p.Size
+	}

 	prm.Attributes = make([][2]string, 0, len(p.Header))

@@ -271,56 +281,64 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		prm.Attributes = append(prm.Attributes, [2]string{k, v})
 	}

-	size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
+	createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
 	if err != nil {
 		return nil, err
 	}
-	if len(p.ContentMD5) > 0 {
+
+	if !p.Encryption.Enabled() && len(p.ContentMD5) > 0 {
 		headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
 		if err != nil {
-			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
 		}
-		if !bytes.Equal(headerMd5Hash, md5Hash) {
-			err = n.objectDelete(ctx, p.BktInfo, id)
+		if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) {
+			err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
-			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
+			return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
 		}
 	}

 	if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
 		contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
 		if err != nil {
-			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
+			return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
 		}
-		if !bytes.Equal(contentHashBytes, hash) {
-			err = n.objectDelete(ctx, p.BktInfo, id)
+		if !bytes.Equal(contentHashBytes, createdObj.HashSum) {
+			err = n.objectDelete(ctx, p.BktInfo, createdObj.ID)
 			if err != nil {
-				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+				n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 			}
-			return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
+			return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)
 		}
 	}

-	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
+	n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID))
 	now := TimeNow(ctx)
 	newVersion := &data.NodeVersion{
 		BaseNodeVersion: data.BaseNodeVersion{
-			OID: id,
-			ETag: hex.EncodeToString(hash),
-			FilePath: p.Object,
-			Size: p.Size,
-			Created: &now,
-			Owner: &n.gateOwner,
+			OID: createdObj.ID,
+			ETag: hex.EncodeToString(createdObj.HashSum),
+			FilePath: p.Object,
+			Created: &now,
+			Owner: &n.gateOwner,
+			CreationEpoch: createdObj.CreationEpoch,
 		},
 		IsUnversioned: !bktSettings.VersioningEnabled(),
 		IsCombined: p.Header[MultipartObjectSize] != "",
 	}

 	if len(p.CompleteMD5Hash) > 0 {
 		newVersion.MD5 = p.CompleteMD5Hash
 	} else {
-		newVersion.MD5 = hex.EncodeToString(md5Hash)
+		newVersion.MD5 = hex.EncodeToString(createdObj.MD5Sum)
 	}

+	if p.Size != nil {
+		newVersion.Size = *p.Size
+	} else {
+		newVersion.Size = createdObj.Size
+	}
+
 	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
@@ -332,7 +350,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 		ObjVersion: &data.ObjectVersion{
 			BktInfo: p.BktInfo,
 			ObjectName: p.Object,
-			VersionID: id.EncodeToString(),
+			VersionID: createdObj.ID.EncodeToString(),
 		},
 		NewLock: p.Lock,
 		CopiesNumbers: p.CopiesNumbers,
@@ -347,13 +365,13 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
 	n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

 	objInfo := &data.ObjectInfo{
-		ID: id,
+		ID: createdObj.ID,
 		CID: p.BktInfo.CID,

 		Owner: n.gateOwner,
 		Bucket: p.BktInfo.Name,
 		Name: p.Object,
-		Size: size,
+		Size: createdObj.Size,
 		Created: prm.CreationTime,
 		Headers: p.Header,
 		ContentType: p.Header[api.ContentType],
@@ -379,20 +397,20 @@ func (n *Layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke

 	node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return nil, err
 	}

 	if node.IsDeleteMarker {
-		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
+		return nil, DeleteMarkerError{ErrorCode: apierr.ErrNoSuchKey}
 	}

 	meta, err := n.objectHead(ctx, bkt, node.OID)
 	if err != nil {
 		if client.IsErrObjectNotFound(err) {
-			return nil, fmt.Errorf("%w: %s; %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
+			return nil, fmt.Errorf("%w: %s; %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
 		}
 		return nil, err
 	}
@@ -415,8 +433,8 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
 	if p.VersionID == data.UnversionedObjectVersionID {
 		foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
 		if err != nil {
-			if errors.Is(err, ErrNodeNotFound) {
-				return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
+			if errors.Is(err, tree.ErrNodeNotFound) {
+				return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error())
 			}
 			return nil, err
 		}
@@ -433,7 +451,7 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
 			}
 		}
 		if foundVersion == nil {
-			return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
+			return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion))
 		}
 	}

@@ -443,13 +461,13 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
 	}

 	if foundVersion.IsDeleteMarker {
-		return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
+		return nil, DeleteMarkerError{ErrorCode: apierr.ErrMethodNotAllowed}
 	}

 	meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
 	if err != nil {
 		if client.IsErrObjectNotFound(err) {
-			return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
+			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error())
 		}
 		return nil, err
 	}
@@ -468,16 +486,16 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb

 // objectDelete puts tombstone object into frostfs.
 func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
-	return n.objectDeleteBase(ctx, bktInfo, idObj, PrmAuth{})
+	return n.objectDeleteBase(ctx, bktInfo, idObj, frostfs.PrmAuth{})
 }

 // objectDeleteWithAuth puts tombstone object into frostfs. Uses provided PrmAuth.
-func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
+func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth frostfs.PrmAuth) error {
 	return n.objectDeleteBase(ctx, bktInfo, idObj, auth)
 }

-func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
-	prm := PrmObjectDelete{
+func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth frostfs.PrmAuth) error {
+	prm := frostfs.PrmObjectDelete{
 		PrmAuth: auth,
 		Container: bktInfo.CID,
 		Object: idObj,
@@ -491,8 +509,7 @@ func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo,
 }

 // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
-// Returns object ID and payload sha256 hash.
-func (n *Layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
+func (n *Layer) objectPutAndHash(ctx context.Context, prm frostfs.PrmObjectCreate, bktInfo *data.BucketInfo) (*data.CreatedObjectInfo, error) {
 	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
 	prm.ClientCut = n.features.ClientCut()
 	prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
@@ -505,15 +522,21 @@ func (n *Layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktIn
 		hash.Write(buf)
 		md5Hash.Write(buf)
 	})
-	id, err := n.frostFS.CreateObject(ctx, prm)
+	res, err := n.frostFS.CreateObject(ctx, prm)
 	if err != nil {
 		if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
 			n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
 		}

-		return 0, oid.ID{}, nil, nil, err
+		return nil, err
 	}
-	return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
+	return &data.CreatedObjectInfo{
+		ID: res.ObjectID,
+		Size: size,
+		HashSum: hash.Sum(nil),
+		MD5Sum: md5Hash.Sum(nil),
+		CreationEpoch: res.CreationEpoch,
+	}, nil
 }

 type logWrapper struct {
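`objectPutAndHash` now returns a single `*data.CreatedObjectInfo` instead of five separate values. A hedged sketch of a caller under the new signature (the helper name `putAndDescribe` is invented for illustration; everything else follows the diff above):

package layer

import (
	"context"
	"encoding/hex"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
)

// putAndDescribe is a sketch, not gateway code: it consumes the new
// single-struct return value of objectPutAndHash.
func (n *Layer) putAndDescribe(ctx context.Context, prm frostfs.PrmObjectCreate, bkt *data.BucketInfo) (etag, md5 string, err error) {
	createdObj, err := n.objectPutAndHash(ctx, prm, bkt)
	if err != nil {
		return "", "", err
	}
	// HashSum is the payload SHA-256 and MD5Sum the payload MD5; before
	// this change both were separate return values alongside ID and Size.
	return hex.EncodeToString(createdObj.HashSum), hex.EncodeToString(createdObj.MD5Sum), nil
}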
@@ -8,6 +8,7 @@ import (
 	"io"
 	"testing"

+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"github.com/stretchr/testify/require"
 )

@@ -37,14 +38,14 @@ func TestGoroutinesDontLeakInPutAndHash(t *testing.T) {
 	require.NoError(t, err)
 	payload := bytes.NewReader(content)

-	prm := PrmObjectCreate{
+	prm := frostfs.PrmObjectCreate{
 		Filepath: tc.obj,
 		Payload: payload,
 	}

 	expErr := errors.New("some error")
 	tc.testFrostFS.SetObjectPutError(tc.obj, expErr)
-	_, _, _, _, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
+	_, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
 	require.ErrorIs(t, err, expErr)
 	require.Empty(t, payload.Len(), "body must be read out otherwise goroutines can leak in wrapReader")
 }
api/layer/patch.go (new file, 265 lines)
@@ -0,0 +1,265 @@
package layer

import (
	"bytes"
	"context"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
)

type PatchObjectParams struct {
	Object *data.ExtendedObjectInfo
	BktInfo *data.BucketInfo
	NewBytes io.Reader
	Range *RangeParams
	VersioningEnabled bool
	CopiesNumbers []uint32
}

func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
	if p.Object.ObjectInfo.Headers[AttributeDecryptedSize] != "" {
		return nil, fmt.Errorf("patch encrypted object")
	}

	if p.Object.ObjectInfo.Headers[MultipartObjectSize] != "" {
		return n.patchMultipartObject(ctx, p)
	}

	prmPatch := frostfs.PrmObjectPatch{
		Container: p.BktInfo.CID,
		Object: p.Object.ObjectInfo.ID,
		Payload: p.NewBytes,
		Offset: p.Range.Start,
		Length: p.Range.End - p.Range.Start + 1,
		ObjectSize: p.Object.ObjectInfo.Size,
	}
	n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)

	createdObj, err := n.patchObject(ctx, prmPatch)
	if err != nil {
		return nil, fmt.Errorf("patch object: %w", err)
	}

	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID: createdObj.ID,
			ETag: hex.EncodeToString(createdObj.HashSum),
			FilePath: p.Object.ObjectInfo.Name,
			Size: createdObj.Size,
			Created: &p.Object.ObjectInfo.Created,
			Owner: &n.gateOwner,
			CreationEpoch: p.Object.NodeVersion.CreationEpoch,
		},
		IsUnversioned: !p.VersioningEnabled,
		IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
	}

	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
	}

	p.Object.ObjectInfo.ID = createdObj.ID
	p.Object.ObjectInfo.Size = createdObj.Size
	p.Object.ObjectInfo.MD5Sum = ""
	p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
	p.Object.NodeVersion = newVersion

	return p.Object, nil
}

func (n *Layer) patchObject(ctx context.Context, p frostfs.PrmObjectPatch) (*data.CreatedObjectInfo, error) {
	objID, err := n.frostFS.PatchObject(ctx, p)
	if err != nil {
		return nil, fmt.Errorf("patch object: %w", err)
	}

	prmHead := frostfs.PrmObjectHead{
		PrmAuth: p.PrmAuth,
		Container: p.Container,
		Object: objID,
	}
	obj, err := n.frostFS.HeadObject(ctx, prmHead)
	if err != nil {
		return nil, fmt.Errorf("head object: %w", err)
	}

	payloadChecksum, _ := obj.PayloadChecksum()

	return &data.CreatedObjectInfo{
		ID: objID,
		Size: obj.PayloadSize(),
		HashSum: payloadChecksum.Value(),
	}, nil
}

func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
	combinedObj, err := n.objectGet(ctx, p.BktInfo, p.Object.ObjectInfo.ID)
	if err != nil {
		return nil, fmt.Errorf("get combined object '%s': %w", p.Object.ObjectInfo.ID.EncodeToString(), err)
	}

	var parts []*data.PartInfo
	if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
		return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
	}

	prmPatch := frostfs.PrmObjectPatch{
		Container: p.BktInfo.CID,
	}
	n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)

	off, ln := p.Range.Start, p.Range.End-p.Range.Start+1
	var multipartObjectSize uint64
	for i, part := range parts {
		if off > part.Size || (off == part.Size && i != len(parts)-1) || ln == 0 {
			multipartObjectSize += part.Size
			if ln != 0 {
				off -= part.Size
			}
			continue
		}

		var createdObj *data.CreatedObjectInfo
		createdObj, off, ln, err = n.patchPart(ctx, part, p, &prmPatch, off, ln, i == len(parts)-1)
		if err != nil {
			return nil, fmt.Errorf("patch part: %w", err)
		}

		parts[i].OID = createdObj.ID
		parts[i].Size = createdObj.Size
		parts[i].MD5 = ""
		parts[i].ETag = hex.EncodeToString(createdObj.HashSum)

		multipartObjectSize += createdObj.Size
	}

	return n.updateCombinedObject(ctx, parts, multipartObjectSize, p)
}

// Returns patched part info, updated offset and length.
func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObjectParams, prmPatch *frostfs.PrmObjectPatch, off, ln uint64, lastPart bool) (*data.CreatedObjectInfo, uint64, uint64, error) {
	if off == 0 && ln >= part.Size {
		curLen := part.Size
		if lastPart {
			curLen = ln
		}
		prm := frostfs.PrmObjectCreate{
			Container: p.BktInfo.CID,
			Payload: io.LimitReader(p.NewBytes, int64(curLen)),
			CreationTime: part.Created,
			CopiesNumber: p.CopiesNumbers,
		}

		createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
		if err != nil {
			return nil, 0, 0, fmt.Errorf("put new part object '%s': %w", part.OID.EncodeToString(), err)
		}

		ln -= curLen

		return createdObj, off, ln, err
	}

	curLen := ln
	if off+curLen > part.Size && !lastPart {
		curLen = part.Size - off
	}
	prmPatch.Object = part.OID
	prmPatch.ObjectSize = part.Size
	prmPatch.Offset = off
	prmPatch.Length = curLen

	prmPatch.Payload = io.LimitReader(p.NewBytes, int64(prmPatch.Length))

	createdObj, err := n.patchObject(ctx, *prmPatch)
	if err != nil {
		return nil, 0, 0, fmt.Errorf("patch part object '%s': %w", part.OID.EncodeToString(), err)
	}

	ln -= curLen
	off = 0

	return createdObj, off, ln, nil
}

func (n *Layer) updateCombinedObject(ctx context.Context, parts []*data.PartInfo, fullObjSize uint64, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
	newParts, err := json.Marshal(parts)
	if err != nil {
		return nil, fmt.Errorf("marshal parts for combined object: %w", err)
	}

	var headerParts strings.Builder
	for i, part := range parts {
		headerPart := part.ToHeaderString()
		if i != len(parts)-1 {
			headerPart += ","
		}
		headerParts.WriteString(headerPart)
	}

	prm := frostfs.PrmObjectCreate{
		Container: p.BktInfo.CID,
		PayloadSize: fullObjSize,
		Filepath: p.Object.ObjectInfo.Name,
		Payload: bytes.NewReader(newParts),
		CreationTime: p.Object.ObjectInfo.Created,
		CopiesNumber: p.CopiesNumbers,
	}

	prm.Attributes = make([][2]string, 0, len(p.Object.ObjectInfo.Headers)+1)

	for k, v := range p.Object.ObjectInfo.Headers {
		switch k {
		case MultipartObjectSize:
			prm.Attributes = append(prm.Attributes, [2]string{MultipartObjectSize, strconv.FormatUint(fullObjSize, 10)})
		case UploadCompletedParts:
			prm.Attributes = append(prm.Attributes, [2]string{UploadCompletedParts, headerParts.String()})
		case api.ContentType:
		default:
			prm.Attributes = append(prm.Attributes, [2]string{k, v})
		}
	}
	prm.Attributes = append(prm.Attributes, [2]string{api.ContentType, p.Object.ObjectInfo.ContentType})

	createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
	if err != nil {
		return nil, fmt.Errorf("put new combined object: %w", err)
	}

	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID: createdObj.ID,
			ETag: hex.EncodeToString(createdObj.HashSum),
			MD5: hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts)),
			FilePath: p.Object.ObjectInfo.Name,
			Size: fullObjSize,
			Created: &p.Object.ObjectInfo.Created,
			Owner: &n.gateOwner,
			CreationEpoch: p.Object.NodeVersion.CreationEpoch,
		},
		IsUnversioned: !p.VersioningEnabled,
		IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
	}

	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
	}

	p.Object.ObjectInfo.ID = createdObj.ID
	p.Object.ObjectInfo.Size = createdObj.Size
	p.Object.ObjectInfo.MD5Sum = hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts))
	p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
	p.Object.ObjectInfo.Headers[MultipartObjectSize] = strconv.FormatUint(fullObjSize, 10)
	p.Object.ObjectInfo.Headers[UploadCompletedParts] = headerParts.String()
	p.Object.NodeVersion = newVersion

	return p.Object, nil
}
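The part-walking arithmetic in patchMultipartObject/patchPart is easiest to see on concrete numbers. The standalone sketch below replays only the offset bookkeeping (the three 100-byte parts and the range are hypothetical): parts wholly before the range are skipped and shrink the offset, the first touched part clamps the patch length, and the offset resets to zero afterwards.

package main

import "fmt"

// Replays the skip/clamp/reset logic of patchMultipartObject on made-up
// part sizes; no FrostFS calls are involved.
func main() {
	sizes := []uint64{100, 100, 100}    // hypothetical part sizes
	off, ln := uint64(150), uint64(100) // patch range [150, 249]

	for i, size := range sizes {
		if off > size || (off == size && i != len(sizes)-1) || ln == 0 {
			if ln != 0 {
				off -= size // range starts after this part
			}
			continue
		}
		curLen := ln
		if off+curLen > size && i != len(sizes)-1 {
			curLen = size - off // clamp to what fits in this part
		}
		fmt.Printf("part %d: patch bytes [%d, %d)\n", i, off, off+curLen)
		ln -= curLen
		off = 0 // later parts are patched from their start
	}
}

Running it prints "part 1: patch bytes [50, 100)" and "part 2: patch bytes [0, 50)": part 0 is skipped entirely, and the 100-byte range is split across parts 1 and 2.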
@@ -3,7 +3,7 @@ package layer
 import (
 	"context"
 	"encoding/xml"
-	errorsStd "errors"
+	"errors"
 	"fmt"
 	"math"
 	"strconv"
@@ -11,7 +13,9 @@ import (

 	"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )
@@ -40,7 +42,7 @@ func (n *Layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
 	}

 	lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID)
-	if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
+	if err != nil && !errors.Is(err, tree.ErrNodeNotFound) {
 		return err
 	}

@@ -113,7 +115,7 @@ func (n *Layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
 }

 func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
-	prm := PrmObjectCreate{
+	prm := frostfs.PrmObjectCreate{
 		Container: bktInfo.CID,
 		Locks: []oid.ID{objID},
 		CreationTime: TimeNow(ctx),
@@ -126,8 +128,12 @@ func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
 		return oid.ID{}, err
 	}

-	_, id, _, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
-	return id, err
+	createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo)
+	if err != nil {
+		return oid.ID{}, err
+	}
+
+	return createdObj.ID, nil
 }

 func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion) (*data.LockInfo, error) {
@@ -142,7 +148,7 @@ func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion)
 	}

 	lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID)
-	if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
+	if err != nil && !errors.Is(err, tree.ErrNodeNotFound) {
 		return nil, err
 	}
 	if lockInfo == nil {
@@ -161,16 +167,16 @@ func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
 	}

 	addr, err := n.treeService.GetBucketCORS(ctx, bkt)
-	objNotFound := errorsStd.Is(err, ErrNodeNotFound)
+	objNotFound := errors.Is(err, tree.ErrNodeNotFound)
 	if err != nil && !objNotFound {
 		return nil, err
 	}

 	if objNotFound {
-		return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchCORSConfiguration), err.Error())
 	}

-	var prmAuth PrmAuth
+	var prmAuth frostfs.PrmAuth
 	corsBkt := bkt
 	if !addr.Container().Equals(bkt.CID) && !addr.Container().Equals(cid.ID{}) {
 		corsBkt = &data.BucketInfo{CID: addr.Container()}
@@ -205,7 +211,7 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo)

 	settings, err := n.treeService.GetSettingsNode(ctx, bktInfo)
 	if err != nil {
-		if !errorsStd.Is(err, ErrNodeNotFound) {
+		if !errors.Is(err, tree.ErrNodeNotFound) {
 			return nil, err
 		}
 		settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
@@ -6,7 +6,8 @@ import (
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@@ -39,8 +40,8 @@ func (n *Layer) GetObjectTagging(ctx context.Context, p *data.GetObjectTaggingPa

 	tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return "", nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return "", nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return "", nil, err
 	}
@@ -62,8 +63,8 @@ func (n *Layer) PutObjectTagging(ctx context.Context, p *data.PutObjectTaggingPa

 	err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return err
 	}
@@ -81,8 +82,8 @@ func (n *Layer) DeleteObjectTagging(ctx context.Context, p *data.ObjectVersion)

 	err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
 	if err != nil {
-		if errors.Is(err, ErrNodeNotFound) {
-			return fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		if errors.Is(err, tree.ErrNodeNotFound) {
+			return fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 		}
 		return err
 	}
@@ -102,7 +103,7 @@ func (n *Layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo)
 	}

 	tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
-	if err != nil && !errors.Is(err, ErrNodeNotFound) {
+	if err != nil && !errors.Is(err, tree.ErrNodeNotFound) {
 		return nil, err
 	}

@@ -155,14 +156,14 @@ func (n *Layer) getNodeVersion(ctx context.Context, objVersion *data.ObjectVersi
 		}
 		}
 		if version == nil {
-			err = fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
+			err = fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion))
 		}
 	}

 	if err == nil && version.IsDeleteMarker && !objVersion.NoErrorOnDeleteMarker {
-		return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
-	} else if errors.Is(err, ErrNodeNotFound) {
-		return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
+		return nil, fmt.Errorf("%w: found version is delete marker", apierr.GetAPIError(apierr.ErrNoSuchKey))
+	} else if errors.Is(err, tree.ErrNodeNotFound) {
+		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
 	}

 	if err == nil && version != nil && !version.IsDeleteMarker {
@@ -1,4 +1,4 @@
-package layer
+package tree

 import (
 	"context"
@@ -8,8 +8,8 @@ import (
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 )

-// TreeService provide interface to interact with tree service using s3 data models.
-type TreeService interface {
+// Service provide interface to interact with tree service using s3 data models.
+type Service interface {
 	// PutSettingsNode update or create new settings node in tree service.
 	PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error

@@ -57,11 +57,15 @@ type TreeService interface {
 	GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)

 	// AddPart puts a node to a system tree as a child of appropriate multipart upload
-	// and returns objectID of a previous part which must be deleted in FrostFS.
+	// and returns objectIDs of a previous part/s which must be deleted in FrostFS.
 	//
-	// If object id to remove is not found returns ErrNoNodeToRemove error.
-	AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
-	GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error)
+	// If object ids to remove is not found returns ErrNoNodeToRemove error.
+	AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error)
+	GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error)
+
+	PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error)
+	GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)
+	DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error)

 	// Compound methods for optimizations

|
|||
"io"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
|
||||
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
|
||||
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
|
||||
)
|
||||
|
||||
|
@ -33,7 +35,7 @@ type TreeServiceMock struct {
|
|||
locks map[string]map[uint64]*data.LockInfo
|
||||
tags map[string]map[uint64]map[string]string
|
||||
multiparts map[string]map[string][]*data.MultipartInfo
|
||||
parts map[string]map[int]*data.PartInfo
|
||||
parts map[string]map[int]*data.PartInfoExtended
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
|
||||
|
@ -92,7 +94,7 @@ func NewTreeService() *TreeServiceMock {
|
|||
locks: make(map[string]map[uint64]*data.LockInfo),
|
||||
tags: make(map[string]map[uint64]map[string]string),
|
||||
multiparts: make(map[string]map[string][]*data.MultipartInfo),
|
||||
parts: make(map[string]map[int]*data.PartInfo),
|
||||
parts: make(map[string]map[int]*data.PartInfoExtended),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -104,7 +106,7 @@ func (t *TreeServiceMock) PutSettingsNode(_ context.Context, bktInfo *data.Bucke
|
|||
func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
|
||||
settings, ok := t.settings[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
return settings, nil
|
||||
|
@ -139,7 +141,7 @@ func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketI
|
|||
|
||||
t.system[bktInfo.CID.EncodeToString()] = systemMap
|
||||
|
||||
return nil, ErrNoNodeToRemove
|
||||
return nil, tree.ErrNoNodeToRemove
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([]oid.Address, error) {
|
||||
|
@ -149,12 +151,12 @@ func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([
|
|||
func (t *TreeServiceMock) GetVersions(_ context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
versions, ok := cnrVersionsMap[objectName]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
return versions, nil
|
||||
|
@ -163,12 +165,12 @@ func (t *TreeServiceMock) GetVersions(_ context.Context, bktInfo *data.BucketInf
|
|||
func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
versions, ok := cnrVersionsMap[objectName]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
sort.Slice(versions, func(i, j int) bool {
|
||||
|
@ -179,13 +181,13 @@ func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.Buck
|
|||
return versions[len(versions)-1], nil
|
||||
}
|
||||
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
var result []*data.NodeVersion
|
||||
|
@ -217,12 +219,12 @@ func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo
|
|||
func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
versions, ok := cnrVersionsMap[objectName]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
for _, version := range versions {
|
||||
|
@ -231,7 +233,7 @@ func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.Bucket
|
|||
}
|
||||
}
|
||||
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) AddVersion(_ context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error) {
|
||||
|
@ -277,7 +279,7 @@ func (t *TreeServiceMock) AddVersion(_ context.Context, bktInfo *data.BucketInfo
|
|||
func (t *TreeServiceMock) RemoveVersion(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) error {
|
||||
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return ErrNodeNotFound
|
||||
return tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
for key, versions := range cnrVersionsMap {
|
||||
|
@ -289,7 +291,7 @@ func (t *TreeServiceMock) RemoveVersion(_ context.Context, bktInfo *data.BucketI
|
|||
}
|
||||
}
|
||||
|
||||
return ErrNodeNotFound
|
||||
return tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetAllVersionsByPrefix(_ context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
|
||||
|
@ -333,7 +335,7 @@ func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.Buc
|
|||
func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) {
|
||||
cnrMultipartsMap, ok := t.multiparts[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
multiparts := cnrMultipartsMap[objectName]
|
||||
|
@ -343,31 +345,34 @@ func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.Bu
|
|||
}
|
||||
}
|
||||
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
|
||||
func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) {
|
||||
multipartInfo, err := t.GetMultipartUpload(ctx, bktInfo, info.Key, info.UploadID)
|
||||
if err != nil {
|
||||
return oid.ID{}, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if multipartInfo.ID != multipartNodeID {
|
||||
return oid.ID{}, fmt.Errorf("invalid multipart info id")
|
||||
return nil, fmt.Errorf("invalid multipart info id")
|
||||
}
|
||||
|
||||
partsMap, ok := t.parts[info.UploadID]
|
||||
if !ok {
|
||||
partsMap = make(map[int]*data.PartInfo)
|
||||
partsMap = make(map[int]*data.PartInfoExtended)
|
||||
}
|
||||
|
||||
partsMap[info.Number] = info
|
||||
partsMap[info.Number] = &data.PartInfoExtended{
|
||||
PartInfo: *info,
|
||||
Timestamp: uint64(time.Now().UnixMicro()),
|
||||
}
|
||||
|
||||
t.parts[info.UploadID] = partsMap
|
||||
return oid.ID{}, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
|
||||
func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) {
|
||||
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
|
||||
|
||||
var foundMultipart *data.MultipartInfo
|
||||
|
@ -383,11 +388,11 @@ LOOP:
|
|||
}
|
||||
|
||||
if foundMultipart == nil {
|
||||
return nil, ErrNodeNotFound
|
||||
return nil, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
partsMap := t.parts[foundMultipart.UploadID]
|
||||
result := make([]*data.PartInfo, 0, len(partsMap))
|
||||
result := make([]*data.PartInfoExtended, 0, len(partsMap))
|
||||
for _, part := range partsMap {
|
||||
result = append(result, part)
|
||||
}
|
||||
|
@ -395,6 +400,51 @@ LOOP:
|
|||
return result, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) PutBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
|
||||
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
systemMap = make(map[string]*data.BaseNodeVersion)
|
||||
}
|
||||
|
||||
systemMap["lifecycle"] = &data.BaseNodeVersion{
|
||||
OID: addr.Object(),
|
||||
}
|
||||
|
||||
t.system[bktInfo.CID.EncodeToString()] = systemMap
|
||||
|
||||
return nil, tree.ErrNoNodeToRemove
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) GetBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
|
||||
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return oid.Address{}, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
node, ok := systemMap["lifecycle"]
|
||||
if !ok {
|
||||
return oid.Address{}, tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
return newAddress(bktInfo.CID, node.OID), nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error) {
|
||||
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
|
||||
if !ok {
|
||||
return nil, tree.ErrNoNodeToRemove
|
||||
}
|
||||
|
||||
node, ok := systemMap["lifecycle"]
|
||||
if !ok {
|
||||
return nil, tree.ErrNoNodeToRemove
|
||||
}
|
||||
|
||||
delete(systemMap, "lifecycle")
|
||||
|
||||
return []oid.Address{newAddress(bktInfo.CID, node.OID)}, nil
|
||||
}
|
||||
|
||||
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
|
||||
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
|
||||
|
||||
|
@ -412,7 +462,7 @@ LOOP:
|
|||
}
|
||||
|
||||
if uploadID == "" {
|
||||
return ErrNodeNotFound
|
||||
return tree.ErrNodeNotFound
|
||||
}
|
||||
|
||||
delete(t.parts, uploadID)
|
||||
|
|
|
@@ -7,6 +7,7 @@ import (
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
@@ -23,7 +24,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
 	extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
 		BktInfo: tc.bktInfo,
 		Object: tc.obj,
-		Size: uint64(len(content)),
+		Size: ptr(uint64(len(content))),
 		Reader: bytes.NewReader(content),
 		Header: make(map[string]string),
 	})
@@ -154,7 +155,7 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
 	tp := NewTestFrostFS(key)

 	bktName := "testbucket1"
-	res, err := tp.CreateContainer(ctx, PrmContainerCreate{
+	res, err := tp.CreateContainer(ctx, frostfs.PrmContainerCreate{
 		Name: bktName,
 	})
 	require.NoError(t, err)
api/middleware/address_style.go (new file, 155 lines)
@@ -0,0 +1,155 @@
package middleware

import (
	"net/http"
	"net/url"
	"strings"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"go.uber.org/zap"
)

const (
	wildcardPlaceholder = "<wildcard>"

	enabledVHS = "enabled"
	disabledVHS = "disabled"
)

type VHSSettings interface {
	Domains() []string
	GlobalVHS() bool
	VHSHeader() string
	ServernameHeader() string
	VHSNamespacesEnabled() map[string]bool
}

func PrepareAddressStyle(settings VHSSettings, log *zap.Logger) Func {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()
			reqInfo := GetReqInfo(ctx)
			reqLogger := reqLogOrDefault(ctx, log)
			statusVHS := r.Header.Get(settings.VHSHeader())

			if isVHSAddress(statusVHS, settings.GlobalVHS(), settings.VHSNamespacesEnabled(), reqInfo.Namespace) {
				prepareVHSAddress(reqInfo, r, settings)
			} else {
				preparePathStyleAddress(reqInfo, r, reqLogger)
			}

			h.ServeHTTP(w, r)
		})
	}
}

func isVHSAddress(statusVHS string, enabledFlag bool, vhsNamespaces map[string]bool, namespace string) bool {
	switch statusVHS {
	case enabledVHS:
		return true
	case disabledVHS:
		return false
	default:
		result := enabledFlag
		if v, ok := vhsNamespaces[namespace]; ok {
			result = v
		}

		return result
	}
}

func prepareVHSAddress(reqInfo *ReqInfo, r *http.Request, settings VHSSettings) {
	reqInfo.RequestVHSEnabled = true
	bktName, match := checkDomain(r.Host, getDomains(r, settings))
	if match {
		if bktName == "" {
			reqInfo.RequestType = noneType
		} else {
			if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
				reqInfo.RequestType = objectType
				reqInfo.ObjectName = objName
				reqInfo.BucketName = bktName
			} else {
				reqInfo.RequestType = bucketType
				reqInfo.BucketName = bktName
			}
		}
	} else {
		parts := strings.Split(r.Host, ".")
		reqInfo.BucketName = parts[0]

		if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
			reqInfo.RequestType = objectType
			reqInfo.ObjectName = objName
		} else {
			reqInfo.RequestType = bucketType
		}
	}
}

func getDomains(r *http.Request, settings VHSSettings) []string {
	if headerServername := r.Header.Get(settings.ServernameHeader()); headerServername != "" {
		return []string{headerServername}
	}

	return settings.Domains()
}

func preparePathStyleAddress(reqInfo *ReqInfo, r *http.Request, reqLogger *zap.Logger) {
	bktObj := strings.TrimPrefix(r.URL.Path, "/")
	if bktObj == "" {
		reqInfo.RequestType = noneType
	} else if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
		reqInfo.RequestType = objectType
		reqInfo.BucketName = bktObj[:ind]
		reqInfo.ObjectName = bktObj[ind+1:]

		if r.URL.RawPath != "" {
			// we have to do this because of
			// https://github.com/go-chi/chi/issues/641
			// https://github.com/go-chi/chi/issues/642
			if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
				reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
			} else {
				reqInfo.ObjectName = obj
			}
		}
	} else {
		reqInfo.RequestType = bucketType
		reqInfo.BucketName = strings.TrimSuffix(bktObj, "/")
	}
}

func checkDomain(host string, domains []string) (bktName string, match bool) {
	partsHost := strings.Split(host, ".")
	for _, pattern := range domains {
		partsPattern := strings.Split(pattern, ".")
		bktName, match = compareMatch(partsHost, partsPattern)
		if match {
			break
		}
	}
	return
}

func compareMatch(host, pattern []string) (bktName string, match bool) {
	if len(host) < len(pattern) {
		return "", false
	}

	i, j := len(host)-1, len(pattern)-1
	for j >= 0 && (pattern[j] == wildcardPlaceholder || host[i] == pattern[j]) {
		i--
		j--
	}

	switch {
	case i == -1:
		return "", true
	case i == 0 && (j != 0 || host[i] == pattern[j]):
		return host[0], true
	default:
		return "", false
	}
}
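compareMatch walks host and pattern labels right to left: a full match means the request addresses the configured domain itself (list-buckets style), while exactly one unmatched leading label is taken as the bucket name. A runnable sketch follows; the hosts and the "<wildcard>.domain.com" pattern are examples, and compareMatch is copied verbatim from the file above so the demo stays self-contained.

package main

import (
	"fmt"
	"strings"
)

const wildcardPlaceholder = "<wildcard>"

// compareMatch is copied from the middleware above.
func compareMatch(host, pattern []string) (bktName string, match bool) {
	if len(host) < len(pattern) {
		return "", false
	}

	i, j := len(host)-1, len(pattern)-1
	for j >= 0 && (pattern[j] == wildcardPlaceholder || host[i] == pattern[j]) {
		i--
		j--
	}

	switch {
	case i == -1:
		return "", true // host equals the pattern: no bucket in the host
	case i == 0 && (j != 0 || host[i] == pattern[j]):
		return host[0], true // one extra leading label: the bucket name
	default:
		return "", false
	}
}

func main() {
	pattern := strings.Split("<wildcard>.domain.com", ".")
	for _, host := range []string{"ns.domain.com", "bkt.ns.domain.com", "a.b.ns.domain.com"} {
		bkt, ok := compareMatch(strings.Split(host, "."), pattern)
		fmt.Printf("%-20s match=%v bucket=%q\n", host, ok, bkt)
	}
}

The first host matches with no bucket, the second yields bucket "bkt", and the third (two extra labels) does not match at all.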
443
api/middleware/address_style_test.go
Normal file
443
api/middleware/address_style_test.go
Normal file
@ -0,0 +1,443 @@
package middleware

import (
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"

    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

const (
    FrostfsVHSHeader        = "X-Frostfs-S3-VHS"
    FrostfsServernameHeader = "X-Frostfs-Servername"
)

type VHSSettingsMock struct {
    domains []string
}

func (v *VHSSettingsMock) Domains() []string {
    return v.domains
}

func (v *VHSSettingsMock) GlobalVHS() bool {
    return false
}

func (v *VHSSettingsMock) VHSHeader() string {
    return FrostfsVHSHeader
}

func (v *VHSSettingsMock) ServernameHeader() string {
    return FrostfsServernameHeader
}

func (v *VHSSettingsMock) VHSNamespacesEnabled() map[string]bool {
    return make(map[string]bool)
}

func TestIsVHSAddress(t *testing.T) {
    for _, tc := range []struct {
        name            string
        headerStatusVHS string
        vhsEnabledFlag  bool
        vhsNamespaced   map[string]bool
        namespace       string
        expected        bool
    }{
        {
            name:     "vhs disabled",
            expected: false,
        },
        {
            name:           "vhs disabled for namespace",
            vhsEnabledFlag: true,
            vhsNamespaced: map[string]bool{
                "kapusta": false,
            },
            namespace: "kapusta",
            expected:  false,
        },
        {
            name:           "vhs enabled (global vhs flag)",
            vhsEnabledFlag: true,
            expected:       true,
        },
        {
            name: "vhs enabled for namespace",
            vhsNamespaced: map[string]bool{
                "kapusta": true,
            },
            namespace: "kapusta",
            expected:  true,
        },
        {
            name:            "vhs enabled (header)",
            headerStatusVHS: enabledVHS,
            vhsEnabledFlag:  false,
            vhsNamespaced: map[string]bool{
                "kapusta": false,
            },
            namespace: "kapusta",
            expected:  true,
        },
        {
            name:            "vhs disabled (header)",
            headerStatusVHS: disabledVHS,
            vhsEnabledFlag:  true,
            vhsNamespaced: map[string]bool{
                "kapusta": true,
            },
            namespace: "kapusta",
            expected:  false,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            actual := isVHSAddress(tc.headerStatusVHS, tc.vhsEnabledFlag, tc.vhsNamespaced, tc.namespace)
            require.Equal(t, tc.expected, actual)
        })
    }
}

func TestPreparePathStyleAddress(t *testing.T) {
    bkt, obj := "test-bucket", "test-object"

    for _, tc := range []struct {
        name            string
        urlParams       string
        expectedReqType ReqType
        expectedBktName string
        expectedObjName string
    }{
        {
            name:            "bucket request",
            urlParams:       "/" + bkt,
            expectedReqType: bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "bucket request with slash",
            urlParams:       "/" + bkt + "/",
            expectedReqType: bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "object request",
            urlParams:       "/" + bkt + "/" + obj,
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj,
        },
        {
            name:            "object request with slash",
            urlParams:       "/" + bkt + "/" + obj + "/",
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj + "/",
        },
        {
            name:            "none type request",
            urlParams:       "/",
            expectedReqType: noneType,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            reqInfo := &ReqInfo{}
            r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)

            preparePathStyleAddress(reqInfo, r, reqLogOrDefault(r.Context(), zaptest.NewLogger(t)))
            require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
            require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
            require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
        })
    }
}

func TestPrepareVHSAddress(t *testing.T) {
    bkt, obj, domain := "test-bucket", "test-object", "domain.com"

    for _, tc := range []struct {
        name            string
        domains         []string
        host            string
        urlParams       string
        expectedReqType ReqType
        expectedBktName string
        expectedObjName string
    }{
        {
            name:            "bucket request, the domain matched",
            domains:         []string{domain},
            host:            bkt + "." + domain,
            urlParams:       "/",
            expectedReqType: bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "object request, the domain matched",
            domains:         []string{domain},
            host:            bkt + "." + domain,
            urlParams:       "/" + obj,
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj,
        },
        {
            name:            "object request with slash, the domain matched",
            domains:         []string{domain},
            host:            bkt + "." + domain,
            urlParams:       "/" + obj + "/",
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj + "/",
        },
        {
            name:            "list-buckets request, the domain matched",
            domains:         []string{domain},
            host:            domain,
            urlParams:       "/",
            expectedReqType: noneType,
        },
        {
            name:            "bucket request, the domain doesn't match",
            host:            bkt + "." + domain,
            urlParams:       "/",
            expectedReqType: bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "object request, the domain doesn't match",
            host:            bkt + "." + domain,
            urlParams:       "/" + obj,
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj,
        },
        {
            name:            "object request with slash, the domain doesn't match",
            host:            bkt + "." + domain,
            urlParams:       "/" + obj + "/",
            expectedReqType: objectType,
            expectedBktName: bkt,
            expectedObjName: obj + "/",
        },
        {
            name:            "list-buckets request, the domain doesn't match (list-buckets isn't supported if the domains don't match)",
            host:            domain,
            urlParams:       "/",
            expectedReqType: bucketType,
            expectedBktName: strings.Split(domain, ".")[0],
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            reqInfo := &ReqInfo{}
            vhsSettings := &VHSSettingsMock{domains: tc.domains}
            r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)
            r.Host = tc.host

            prepareVHSAddress(reqInfo, r, vhsSettings)
            require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
            require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
            require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
        })
    }
}

func TestCheckDomains(t *testing.T) {
    for _, tc := range []struct {
        name            string
        domains         []string
        requestURL      string
        expectedBktName string
        expectedMatch   bool
    }{
        {
            name:            "valid url with bktName and namespace (wildcard after protocol infix)",
            domains:         []string{"s3.<wildcard>.domain.com"},
            requestURL:      "bktA.s3.kapusta.domain.com",
            expectedBktName: "bktA",
            expectedMatch:   true,
        },
        {
            name:            "valid url without bktName and namespace (wildcard after protocol infix)",
            domains:         []string{"s3.<wildcard>.domain.com"},
            requestURL:      "s3.kapusta.domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:          "invalid url with invalid bktName (wildcard after protocol infix)",
            domains:       []string{"s3.<wildcard>.domain.com"},
            requestURL:    "bktA.bktB.s3.kapusta.domain.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url without namespace (wildcard after protocol infix)",
            domains:       []string{"s3.<wildcard>.domain.com"},
            requestURL:    "bktA.s3.domain.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url with invalid infix (wildcard after protocol infix)",
            domains:       []string{"s3.<wildcard>.domain.com"},
            requestURL:    "bktA.s4.kapusta.domain.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url with invalid postfix (wildcard after protocol infix)",
            domains:       []string{"s3.<wildcard>.domain.com"},
            requestURL:    "bktA.s3.kapusta.dom.su",
            expectedMatch: false,
        },
        {
            name:            "valid url with bktName and namespace (wildcard at the beginning of the domain)",
            domains:         []string{"<wildcard>.domain.com"},
            requestURL:      "bktA.kapusta.domain.com",
            expectedBktName: "bktA",
            expectedMatch:   true,
        },
        {
            name:            "valid url without bktName and namespace (wildcard at the beginning of the domain)",
            domains:         []string{"<wildcard>.domain.com"},
            requestURL:      "kapusta.domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:          "invalid url with invalid bktName (wildcard at the beginning of the domain)",
            domains:       []string{"<wildcard>.domain.com"},
            requestURL:    "bktA.bktB.kapusta.domain.com",
            expectedMatch: false,
        },
        {
            name:          "collision test - true, because we cannot clearly distinguish a namespace from a bucket (wildcard at the beginning of the domain)",
            domains:       []string{"<wildcard>.domain.com"},
            requestURL:    "bktA.domain.com",
            expectedMatch: true,
        },
        {
            name:          "invalid url (fewer hosts)",
            domains:       []string{"<wildcard>.domain.com"},
            requestURL:    "domain.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url with invalid postfix (wildcard at the beginning of the domain)",
            domains:       []string{"<wildcard>.domain.com"},
            requestURL:    "bktA.kapusta.dom.su",
            expectedMatch: false,
        },
        {
            name:            "valid url with bktName and without wildcard (root namespace)",
            domains:         []string{"domain.com"},
            requestURL:      "bktA.domain.com",
            expectedBktName: "bktA",
            expectedMatch:   true,
        },
        {
            name:            "valid url without bktName and without wildcard (root namespace)",
            domains:         []string{"domain.com"},
            requestURL:      "domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:          "invalid url with bktName without wildcard (root namespace)",
            domains:       []string{"domain.com"},
            requestURL:    "bktA.dom.su",
            expectedMatch: false,
        },
        {
            name:          "invalid url without wildcard (root namespace)",
            domains:       []string{"domain.com"},
            requestURL:    "dom.su",
            expectedMatch: false,
        },
        {
            name:            "valid url, with a sorted list of domains",
            domains:         []string{"s3.<wildcard>.domain.com", "<wildcard>.domain.com", "domain.com"},
            requestURL:      "s3.kapusta.domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:            "valid url with bktName, multiple wildcards (wildcards at the beginning of the domain)",
            domains:         []string{"<wildcard>.<wildcard>.domain.com"},
            requestURL:      "bktA.s3.kapusta.domain.com",
            expectedBktName: "bktA",
            expectedMatch:   true,
        },
        {
            name:            "valid url without bktName, multiple wildcards (wildcards at the beginning of the domain)",
            domains:         []string{"<wildcard>.<wildcard>.domain.com"},
            requestURL:      "s3.kapusta.domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:            "valid url with bktName, multiple wildcards",
            domains:         []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
            requestURL:      "bktA.s3.kapusta.subdomain.domain.com",
            expectedBktName: "bktA",
            expectedMatch:   true,
        },
        {
            name:            "valid url without bktName, multiple wildcards",
            domains:         []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
            requestURL:      "s3.kapusta.subdomain.domain.com",
            expectedBktName: "",
            expectedMatch:   true,
        },
        {
            name:          "invalid url without one wildcard",
            domains:       []string{"<wildcard>.<wildcard>.domain.com"},
            requestURL:    "kapusta.domain.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url, multiple wildcards",
            domains:       []string{"<wildcard>.<wildcard>.domain.com"},
            requestURL:    "s3.kapusta.dom.com",
            expectedMatch: false,
        },
        {
            name:          "invalid url with invalid bktName, multiple wildcards",
            domains:       []string{"<wildcard>.<wildcard>.domain.com"},
            requestURL:    "bktA.bktB.s3.kapusta.domain.com",
            expectedMatch: false,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            bktName, match := checkDomain(tc.requestURL, tc.domains)
            require.Equal(t, tc.expectedBktName, bktName)
            require.Equal(t, tc.expectedMatch, match)
        })
    }
}

func TestGetDomains(t *testing.T) {
    req := httptest.NewRequest(http.MethodGet, "/", nil)
    settings := &VHSSettingsMock{
        domains: []string{
            "s3.domain.com",
            "s3.<wildcard>.domain.com",
            "domain.com",
        },
    }

    t.Run("the request does not contain the X-Frostfs-Servername header", func(t *testing.T) {
        actualDomains := getDomains(req, settings)
        require.Equal(t, settings.domains, actualDomains)
    })

    serverName := "domain.com"
    req.Header.Set(settings.ServernameHeader(), serverName)

    t.Run("the request contains the X-Frostfs-Servername header", func(t *testing.T) {
        actualDomains := getDomains(req, settings)
        require.Equal(t, []string{serverName}, actualDomains)
    })
}
@ -8,9 +8,8 @@ import (
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
    apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@ -57,9 +56,9 @@ func Auth(center Center, log *zap.Logger) Func {
                reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err))
            } else {
                reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err))
                err = frostfsErrors.UnwrapErr(err)
                if _, ok := err.(apiErrors.Error); !ok {
                    err = apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
                err = apierr.TransformToS3Error(err)
                if err.(apierr.Error).ErrCode == apierr.ErrInternalError {
                    err = apierr.GetAPIError(apierr.ErrAccessDenied)
                }
                if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
                    reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
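The intent of the new branch, sketched with clearly assumed semantics (TransformToS3Error lives in api/errors; it is assumed here to always yield an apierr.Error, defaulting to ErrInternalError for anything it cannot classify):

    // clientFacing is a hypothetical helper mirroring the middleware logic:
    // unknown backend failures must not leak details to the client.
    func clientFacing(err error) error {
        s3err := apierr.TransformToS3Error(err).(apierr.Error)
        if s3err.ErrCode == apierr.ErrInternalError {
            return apierr.GetAPIError(apierr.ErrAccessDenied)
        }
        return s3err
    }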
@ -74,6 +74,7 @@ const (
    AbortMultipartUploadOperation = "AbortMultipartUpload"
    DeleteObjectTaggingOperation  = "DeleteObjectTagging"
    DeleteObjectOperation         = "DeleteObject"
    PatchObjectOperation          = "PatchObject"
)

const (

@ -106,3 +107,9 @@ const (
    PartNumberQuery = "partNumber"
    LegalHoldQuery  = "legal-hold"
)

const (
    StdoutPath = "stdout"
    StderrPath = "stderr"
    SinkName   = "lumberjack"
)
237 api/middleware/log_http.go Normal file
@ -0,0 +1,237 @@
//go:build loghttp

package middleware

import (
    "bytes"
    "io"
    "net/http"
    "os"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/detector"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/xmlutils"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
    "gopkg.in/natefinch/lumberjack.v2"
)

type (
    LogHTTPSettings interface {
        LogHTTPConfig() LogHTTPConfig
    }
    LogHTTPConfig struct {
        Enabled    bool
        MaxBody    int64
        MaxLogSize int
        OutputPath string
        UseGzip    bool
        log        *httpLogger
    }
    httpLogger struct {
        *zap.Logger
        logRoller *lumberjack.Logger
    }
    // responseReadWriter helps read http response body.
    responseReadWriter struct {
        http.ResponseWriter
        response   *bytes.Buffer
        statusCode int
    }
)

const (
    payloadLabel  = "payload"
    responseLabel = "response"
)

func (lc *LogHTTPConfig) InitHTTPLogger(log *zap.Logger) {
    if err := lc.initHTTPLogger(); err != nil {
        log.Error(logs.FailedToInitializeHTTPLogger, zap.Error(err))
    }
}

// initHTTPLogger registers a zap sink and initializes the httpLogger.
func (lc *LogHTTPConfig) initHTTPLogger() (err error) {
    lc.log = &httpLogger{
        Logger:    zap.NewNop(),
        logRoller: &lumberjack.Logger{},
    }
    c := newLoggerConfig()
    lc.log.Logger, err = c.Build()
    if err != nil {
        return err
    }
    lc.setLogOutput()

    return nil
}

// newLoggerConfig creates a new zap.Config with disabled base fields.
func newLoggerConfig() zap.Config {
    c := zap.NewProductionConfig()
    c.DisableCaller = true
    c.DisableStacktrace = true
    c.EncoderConfig = newEncoderConfig()
    c.Sampling = nil

    return c
}

func (lc *LogHTTPConfig) setLogOutput() {
    var output zapcore.WriteSyncer
    switch lc.OutputPath {
    case "", StdoutPath:
        output = zapcore.AddSync(os.Stdout)
    case StderrPath:
        output = zapcore.AddSync(os.Stderr)
    default:
        output = zapcore.AddSync(&lumberjack.Logger{
            Filename: lc.OutputPath,
            MaxSize:  lc.MaxLogSize,
            Compress: lc.UseGzip,
        })
    }

    // recreate the logger core with the new write syncer
    lc.log.Logger = lc.log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
        return zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), output, zapcore.InfoLevel)
    }))
}

func newEncoderConfig() zapcore.EncoderConfig {
    c := zap.NewProductionEncoderConfig()
    c.MessageKey = zapcore.OmitKey
    c.LevelKey = zapcore.OmitKey
    c.TimeKey = zapcore.OmitKey
    c.FunctionKey = zapcore.OmitKey

    return c
}

func (ww *responseReadWriter) Write(data []byte) (int, error) {
    ww.response.Write(data)
    return ww.ResponseWriter.Write(data)
}

func (ww *responseReadWriter) WriteHeader(code int) {
    ww.statusCode = code
    ww.ResponseWriter.WriteHeader(code)
}

func (ww *responseReadWriter) Flush() {
    if f, ok := ww.ResponseWriter.(http.Flusher); ok {
        f.Flush()
    }
}

// LogHTTP logs http parameters from s3 request.
func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
    return func(h http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            config := settings.LogHTTPConfig()
            if !config.Enabled || config.log == nil {
                h.ServeHTTP(w, r)
                return
            }

            httplog := config.log.getHTTPLogger(r).
                withFieldIfExist("query", r.URL.Query()).
                withFieldIfExist("headers", r.Header)

            payload := getBody(r.Body, l)
            r.Body = io.NopCloser(bytes.NewReader(payload))

            payloadReader := io.LimitReader(bytes.NewReader(payload), config.MaxBody)
            httplog = httplog.withProcessedBody(payloadLabel, payloadReader, l)

            wr := newResponseReadWriter(w)
            h.ServeHTTP(wr, r)

            respReader := io.LimitReader(wr.response, config.MaxBody)
            httplog = httplog.withProcessedBody(responseLabel, respReader, l)
            httplog = httplog.with(zap.Int("status", wr.statusCode))

            httplog.Info(logs.LogHTTP)
        })
    }
}

// withFieldIfExist checks whether data is not empty and attaches it to the log output.
func (lg *httpLogger) withFieldIfExist(label string, data map[string][]string) *httpLogger {
    if len(data) != 0 {
        return lg.with(zap.Any(label, data))
    }
    return lg
}

func (lg *httpLogger) with(fields ...zap.Field) *httpLogger {
    return &httpLogger{
        Logger:    lg.Logger.With(fields...),
        logRoller: lg.logRoller,
    }
}

func (lg *httpLogger) getHTTPLogger(r *http.Request) *httpLogger {
    return lg.with(
        zap.String("from", r.RemoteAddr),
        zap.String("URI", r.RequestURI),
        zap.String("method", r.Method),
        zap.String("protocol", r.Proto),
    )
}

func (lg *httpLogger) withProcessedBody(label string, bodyReader io.Reader, l *zap.Logger) *httpLogger {
    resp, err := processBody(bodyReader)
    if err != nil {
        l.Error(logs.FailedToProcessHTTPBody,
            zap.Error(err),
            zap.String("body type", payloadLabel))
        return lg
    }

    return lg.with(zap.ByteString(label, resp))
}

func newResponseReadWriter(w http.ResponseWriter) *responseReadWriter {
    return &responseReadWriter{
        ResponseWriter: w,
        response:       &bytes.Buffer{},
    }
}

func getBody(httpBody io.ReadCloser, l *zap.Logger) []byte {
    defer func(httpBody io.ReadCloser) {
        if err := httpBody.Close(); err != nil {
            l.Error(logs.FailedToCloseHTTPBody, zap.Error(err))
        }
    }(httpBody)

    body, err := io.ReadAll(httpBody)
    if err != nil {
        l.Error(logs.FailedToReadHTTPBody,
            zap.Error(err),
            zap.String("body type", payloadLabel))
        return nil
    }
    return body
}

// processBody reads the body and base64-encodes it if it's not XML.
func processBody(bodyReader io.Reader) ([]byte, error) {
    resultBody := &bytes.Buffer{}
    detect := detector.NewDetector(bodyReader, xmlutils.DetectXML)
    dataType, err := detect.Detect()
    if err != nil {
        return nil, err
    }
    writer := xmlutils.ChooseWriter(dataType, resultBody)
    if _, err = io.Copy(writer, detect.RestoredReader()); err != nil {
        return nil, err
    }
    if err = writer.Close(); err != nil {
        return nil, err
    }

    return resultBody.Bytes(), nil
}
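A minimal wiring sketch for the middleware above (appSettings and newAPI are hypothetical names; LogHTTPConfig, InitHTTPLogger and LogHTTP are taken from the file itself):

    package main

    import (
        "net/http"

        s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
        "github.com/go-chi/chi/v5"
        "go.uber.org/zap"
    )

    type appSettings struct{ logHTTP s3middleware.LogHTTPConfig }

    func (s *appSettings) LogHTTPConfig() s3middleware.LogHTTPConfig { return s.logHTTP }

    func newAPI(log *zap.Logger) http.Handler {
        settings := &appSettings{logHTTP: s3middleware.LogHTTPConfig{
            Enabled:    true,
            MaxBody:    1 << 20, // log at most 1 MiB of each payload/response
            OutputPath: "/var/log/s3-gw/http.log",
        }}
        settings.logHTTP.InitHTTPLogger(log) // sink errors are logged, not fatal

        r := chi.NewRouter()
        r.Use(s3middleware.LogHTTP(log, settings))
        return r
    }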
36 api/middleware/log_http_stub.go Normal file
@ -0,0 +1,36 @@
//go:build !loghttp

package middleware

import (
    "net/http"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "go.uber.org/zap"
)

type (
    LogHTTPSettings interface {
        LogHTTPConfig() LogHTTPConfig
    }
    LogHTTPConfig struct {
        Enabled    bool
        MaxBody    int64
        MaxLogSize int
        OutputPath string
        UseGzip    bool
    }
)

func LogHTTP(l *zap.Logger, _ LogHTTPSettings) Func {
    l.Warn(logs.LogHTTPDisabledInThisBuild)
    return func(h http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            h.ServeHTTP(w, r)
        })
    }
}

func (*LogHTTPConfig) InitHTTPLogger(*zap.Logger) {
    // ignore
}
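Which variant gets compiled is decided purely by the loghttp build tag, per the //go:build lines of the two files above:

    // go build ./...               -> log_http_stub.go: LogHTTP is a pass-through
    // go build -tags loghttp ./... -> log_http.go: full request/response logging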
@ -11,8 +11,7 @@ import (
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
    "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"

@ -25,11 +24,15 @@ import (
)

const (
    QueryVersionID = "versionId"
    QueryPrefix    = "prefix"
    QueryDelimiter = "delimiter"
    QueryMaxKeys   = "max-keys"
    amzTagging     = "x-amz-tagging"
    QueryVersionID    = "versionId"
    QueryPrefix       = "prefix"
    QueryDelimiter    = "delimiter"
    QueryMaxKeys      = "max-keys"
    QueryMarker       = "marker"
    QueryEncodingType = "encoding-type"
    amzTagging        = "x-amz-tagging"

    unmatchedBucketOperation = "UnmatchedBucketOperation"
)

// In these operations we don't check resource tags because

@ -73,7 +76,6 @@ type PolicyConfig struct {
    Storage        engine.ChainRouter
    FrostfsID      FrostFSIDInformer
    Settings       PolicySettings
    Domains        []string
    Log            *zap.Logger
    BucketResolver BucketResolveFunc
    Decoder        XMLDecoder

@ -86,7 +88,7 @@ func PolicyCheck(cfg PolicyConfig) Func {
            ctx := r.Context()
            if err := policyCheck(r, cfg); err != nil {
                reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err))
                err = frostfsErrors.UnwrapErr(err)
                err = apierr.TransformToS3Error(err)
                if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil {
                    reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
                }

@ -99,21 +101,21 @@ func PolicyCheck(cfg PolicyConfig) Func {
}

func policyCheck(r *http.Request, cfg PolicyConfig) error {
    reqType, bktName, objName := getBucketObject(r, cfg.Domains)
    req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqType, bktName, objName)
    reqInfo := GetReqInfo(r.Context())

    req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqInfo.RequestType, reqInfo.BucketName, reqInfo.ObjectName)
    if err != nil {
        return err
    }

    var bktInfo *data.BucketInfo
    if reqType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
        bktInfo, err = cfg.BucketResolver(r.Context(), bktName)
    if reqInfo.RequestType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
        bktInfo, err = cfg.BucketResolver(r.Context(), reqInfo.BucketName)
        if err != nil {
            return err
        }
    }

    reqInfo := GetReqInfo(r.Context())
    target := engine.NewRequestTargetWithNamespace(reqInfo.Namespace)
    if bktInfo != nil {
        cnrTarget := engine.ContainerTarget(bktInfo.CID.EncodeToString())

@ -146,11 +148,11 @@ func policyCheck(r *http.Request, cfg PolicyConfig) error {
    case st == chain.Allow:
        return nil
    case st != chain.NoRuleFound:
        return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
        return apierr.GetAPIErrorWithError(apierr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
    }

    if cfg.Settings.PolicyDenyByDefault() {
        return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
        return apierr.GetAPIErrorWithError(apierr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
    }

    return nil

@ -208,33 +210,6 @@ const (
    objectType
)

func getBucketObject(r *http.Request, domains []string) (reqType ReqType, bktName string, objName string) {
    for _, domain := range domains {
        ind := strings.Index(r.Host, "."+domain)
        if ind == -1 {
            continue
        }

        bkt := r.Host[:ind]
        if obj := strings.TrimPrefix(r.URL.Path, "/"); obj != "" {
            return objectType, bkt, obj
        }

        return bucketType, bkt, ""
    }

    bktObj := strings.TrimPrefix(r.URL.Path, "/")
    if bktObj == "" {
        return noneType, "", ""
    }

    if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
        return objectType, bktObj[:ind], bktObj[ind+1:]
    }

    return bucketType, strings.TrimSuffix(bktObj, "/"), ""
}

func determineOperation(r *http.Request, reqType ReqType) (operation string) {
    switch reqType {
    case objectType:

@ -297,8 +272,17 @@ func determineBucketOperation(r *http.Request) string {
            return ListObjectsV2MOperation
        case query.Get(ListTypeQuery) == "2":
            return ListObjectsV2Operation
        default:
        case len(query) == 0 || func() bool {
            for key := range query {
                if key != QueryDelimiter && key != QueryMaxKeys && key != QueryPrefix && key != QueryMarker && key != QueryEncodingType {
                    return false
                }
            }
            return true
        }():
            return ListObjectsV1Operation
        default:
            return unmatchedBucketOperation
        }
    case http.MethodPut:
        switch {

@ -320,8 +304,10 @@ func determineBucketOperation(r *http.Request) string {
            return PutBucketVersioningOperation
        case query.Has(NotificationQuery):
            return PutBucketNotificationOperation
        default:
        case len(query) == 0:
            return CreateBucketOperation
        default:
            return unmatchedBucketOperation
        }
    case http.MethodPost:
        switch {

@ -344,12 +330,14 @@ func determineBucketOperation(r *http.Request) string {
            return DeleteBucketLifecycleOperation
        case query.Has(EncryptionQuery):
            return DeleteBucketEncryptionOperation
        default:
        case len(query) == 0:
            return DeleteBucketOperation
        default:
            return unmatchedBucketOperation
        }
    }

    return "UnmatchedBucketOperation"
    return unmatchedBucketOperation
}

func determineObjectOperation(r *http.Request) string {

@ -357,6 +345,8 @@ func determineObjectOperation(r *http.Request) string {
    switch r.Method {
    case http.MethodOptions:
        return OptionsObjectOperation
    case http.MethodPatch:
        return PatchObjectOperation
    case http.MethodHead:
        return HeadObjectOperation
    case http.MethodGet:

@ -487,7 +477,7 @@ func determineRequestTags(r *http.Request, decoder XMLDecoder, op string) (map[s
    if strings.HasSuffix(op, PutObjectTaggingOperation) || strings.HasSuffix(op, PutBucketTaggingOperation) {
        tagging := new(data.Tagging)
        if err := decoder.NewXMLDecoder(r.Body).Decode(tagging); err != nil {
            return nil, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error())
            return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())
        }
        GetReqInfo(r.Context()).Tagging = tagging

@ -499,7 +489,7 @@ func determineRequestTags(r *http.Request, decoder XMLDecoder, op string) (map[s
    if tagging := r.Header.Get(amzTagging); len(tagging) > 0 {
        queries, err := url.ParseQuery(tagging)
        if err != nil {
            return nil, apiErr.GetAPIError(apiErr.ErrInvalidArgument)
            return nil, apierr.GetAPIError(apierr.ErrInvalidArgument)
        }
        for key := range queries {
            tags[fmt.Sprintf(s3.PropertyKeyFormatRequestTag, key)] = queries.Get(key)
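The rewritten GET branch above only classifies a request as ListObjectsV1 when every query key belongs to the listing whitelist; a short illustration (requests are hypothetical):

    // GET /bucket?prefix=a&max-keys=10 -> ListObjectsV1Operation
    // GET /bucket?query=               -> unmatchedBucketOperation, as the tests below assert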
@ -8,79 +8,6 @@ import (
    "github.com/stretchr/testify/require"
)

func TestReqTypeDetermination(t *testing.T) {
    bkt, obj, domain := "test-bucket", "test-object", "domain"

    for _, tc := range []struct {
        name            string
        target          string
        host            string
        domains         []string
        expectedType    ReqType
        expectedBktName string
        expectedObjName string
    }{
        {
            name:            "bucket request, path-style",
            target:          "/" + bkt,
            expectedType:    bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "bucket request with slash, path-style",
            target:          "/" + bkt + "/",
            expectedType:    bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "object request, path-style",
            target:          "/" + bkt + "/" + obj,
            expectedType:    objectType,
            expectedBktName: bkt,
            expectedObjName: obj,
        },
        {
            name:            "object request with slash, path-style",
            target:          "/" + bkt + "/" + obj + "/",
            expectedType:    objectType,
            expectedBktName: bkt,
            expectedObjName: obj + "/",
        },
        {
            name:         "none type request",
            target:       "/",
            expectedType: noneType,
        },
        {
            name:            "bucket request, virtual-hosted style",
            target:          "/",
            host:            bkt + "." + domain,
            domains:         []string{"some-domain", domain},
            expectedType:    bucketType,
            expectedBktName: bkt,
        },
        {
            name:            "object request, virtual-hosted style",
            target:          "/" + obj,
            host:            bkt + "." + domain,
            domains:         []string{"some-domain", domain},
            expectedType:    objectType,
            expectedBktName: bkt,
            expectedObjName: obj,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
            r := httptest.NewRequest(http.MethodPut, tc.target, nil)
            r.Host = tc.host

            reqType, bktName, objName := getBucketObject(r, tc.domains)
            require.Equal(t, tc.expectedType, reqType)
            require.Equal(t, tc.expectedBktName, bktName)
            require.Equal(t, tc.expectedObjName, objName)
        })
    }
}

func TestDetermineBucketOperation(t *testing.T) {
    const defaultValue = "value"

@ -225,6 +152,12 @@ func TestDetermineBucketOperation(t *testing.T) {
            method:   http.MethodGet,
            expected: ListObjectsV1Operation,
        },
        {
            name:       "UnmatchedBucketOperation GET",
            method:     http.MethodGet,
            queryParam: map[string]string{"query": ""},
            expected:   unmatchedBucketOperation,
        },
        {
            name:   "PutBucketCorsOperation",
            method: http.MethodPut,

@ -284,6 +217,12 @@ func TestDetermineBucketOperation(t *testing.T) {
            method:   http.MethodPut,
            expected: CreateBucketOperation,
        },
        {
            name:       "UnmatchedBucketOperation PUT",
            method:     http.MethodPut,
            queryParam: map[string]string{"query": ""},
            expected:   unmatchedBucketOperation,
        },
        {
            name:   "DeleteMultipleObjectsOperation",
            method: http.MethodPost,

@ -336,10 +275,16 @@ func TestDetermineBucketOperation(t *testing.T) {
            method:   http.MethodDelete,
            expected: DeleteBucketOperation,
        },
        {
            name:       "UnmatchedBucketOperation DELETE",
            method:     http.MethodDelete,
            queryParam: map[string]string{"query": ""},
            expected:   unmatchedBucketOperation,
        },
        {
            name:     "UnmatchedBucketOperation",
            method:   "invalid-method",
            expected: "UnmatchedBucketOperation",
            expected: unmatchedBucketOperation,
        },
    } {
        t.Run(tc.name, func(t *testing.T) {
@ -12,7 +12,6 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
    "github.com/go-chi/chi/v5"
    "github.com/google/uuid"
    "go.uber.org/zap"
    "google.golang.org/grpc/metadata"

@ -28,19 +27,21 @@ type (
    // ReqInfo stores the request info.
    ReqInfo struct {
        sync.RWMutex
        RemoteHost   string   // Client Host/IP
        Host         string   // Node Host/IP
        UserAgent    string   // User Agent
        DeploymentID string   // randomly generated s3-deployment-id
        RequestID    string   // x-amz-request-id
        API          string   // API name -- GetObject PutObject NewMultipartUpload etc.
        BucketName   string   // Bucket name
        ObjectName   string   // Object name
        TraceID      string   // Trace ID
        URL          *url.URL // Request url
        Namespace    string
        User         string // User owner id
        Tagging      *data.Tagging
        RemoteHost        string   // Client Host/IP
        Host              string   // Node Host/IP
        UserAgent         string   // User Agent
        DeploymentID      string   // randomly generated s3-deployment-id
        RequestID         string   // x-amz-request-id
        API               string   // API name -- GetObject PutObject NewMultipartUpload etc.
        BucketName        string   // Bucket name
        ObjectName        string   // Object name
        TraceID           string   // Trace ID
        URL               *url.URL // Request url
        Namespace         string
        User              string // User owner id
        Tagging           *data.Tagging
        RequestVHSEnabled bool
        RequestType       ReqType
    }

    // ObjectRequest represents object request data.

@ -61,10 +62,6 @@ const (

const HdrAmzRequestID = "x-amz-request-id"

const (
    BucketURLPrm = "bucket"
)

var deploymentID = uuid.Must(uuid.NewRandom())

var (

@ -202,57 +199,6 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
    }
}

// AddBucketName adds bucket name to ReqInfo from context.
func AddBucketName(l *zap.Logger) Func {
    return func(h http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ctx := r.Context()

            reqInfo := GetReqInfo(ctx)
            reqInfo.BucketName = chi.URLParam(r, BucketURLPrm)

            if reqInfo.BucketName != "" {
                reqLogger := reqLogOrDefault(ctx, l)
                r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
            }

            h.ServeHTTP(w, r)
        })
    }
}

// AddObjectName adds object name to ReqInfo from context.
func AddObjectName(l *zap.Logger) Func {
    return func(h http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            ctx := r.Context()
            reqInfo := GetReqInfo(ctx)
            reqLogger := reqLogOrDefault(ctx, l)

            rctx := chi.RouteContext(ctx)
            // trim leading slash (always present)
            reqInfo.ObjectName = rctx.RoutePath[1:]

            if r.URL.RawPath != "" {
                // we have to do this because of
                // https://github.com/go-chi/chi/issues/641
                // https://github.com/go-chi/chi/issues/642
                if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
                    reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
                } else {
                    reqInfo.ObjectName = obj
                }
            }

            if reqInfo.ObjectName != "" {
                r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
            }

            h.ServeHTTP(w, r)
        })
    }
}

// getSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
// else fails.
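Handlers downstream of the middleware chain now read the parsed addressing data from ReqInfo instead of chi URL parameters; a hedged sketch (handle is a hypothetical handler):

    func handle(w http.ResponseWriter, r *http.Request) {
        reqInfo := s3middleware.GetReqInfo(r.Context())
        _ = reqInfo.RequestVHSEnabled // true when the bucket name came from the Host header
        _, _ = reqInfo.BucketName, reqInfo.ObjectName // filled by the address-style middleware
    }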
@ -62,7 +62,7 @@ const (
    hdrSSE = "X-Amz-Server-Side-Encryption"

    // hdrSSECustomerKey is the HTTP header key referencing the
    // SSE-C client-provided key..
    // SSE-C client-provided key.
    hdrSSECustomerKey = hdrSSE + "-Customer-Key"

    // hdrSSECopyKey is the HTTP header key referencing the SSE-C

@ -74,7 +74,7 @@ var (
    xmlHeader = []byte(xml.Header)
)

// Non exhaustive list of AWS S3 standard error responses -
// Non-exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{
    "AccessDenied": "Access Denied.",
@ -6,7 +6,7 @@ import (
    "fmt"
    "sync"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"

@ -29,20 +29,14 @@ type FrostFS interface {
    SystemDNS(context.Context) (string, error)
}

type Settings interface {
    FormContainerZone(ns string) (zone string, isDefault bool)
}

type Config struct {
    FrostFS    FrostFS
    RPCAddress string
    Settings   Settings
}

type BucketResolver struct {
    rpcAddress string
    frostfs    FrostFS
    settings   Settings

    mu        sync.RWMutex
    resolvers []*Resolver

@ -50,15 +44,15 @@ type BucketResolver struct {

type Resolver struct {
    Name    string
    resolve func(context.Context, string) (cid.ID, error)
    resolve func(context.Context, string, string) (cid.ID, error)
}

func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (cid.ID, error)) {
func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (cid.ID, error)) {
    r.resolve = fn
}

func (r *Resolver) Resolve(ctx context.Context, name string) (cid.ID, error) {
    return r.resolve(ctx, name)
func (r *Resolver) Resolve(ctx context.Context, zone, name string) (cid.ID, error) {
    return r.resolve(ctx, zone, name)
}

func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, error) {

@ -87,12 +81,12 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
    return resolvers, nil
}

func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid.ID, err error) {
func (r *BucketResolver) Resolve(ctx context.Context, zone, bktName string) (cnrID cid.ID, err error) {
    r.mu.RLock()
    defer r.mu.RUnlock()

    for _, resolver := range r.resolvers {
        cnrID, resolverErr := resolver.Resolve(ctx, bktName)
        cnrID, resolverErr := resolver.Resolve(ctx, zone, bktName)
        if resolverErr != nil {
            resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
            if err == nil {

@ -123,7 +117,6 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
    cfg := &Config{
        FrostFS:    r.frostfs,
        RPCAddress: r.rpcAddress,
        Settings:   r.settings,
    }

    resolvers, err := createResolvers(resolverNames, cfg)

@ -152,30 +145,25 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
    switch name {
    case DNSResolver:
        return NewDNSResolver(cfg.FrostFS, cfg.Settings)
        return NewDNSResolver(cfg.FrostFS)
    case NNSResolver:
        return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
        return NewNNSResolver(cfg.RPCAddress)
    default:
        return nil, fmt.Errorf("unknown resolver: %s", name)
    }
}

func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
    if frostFS == nil {
        return nil, fmt.Errorf("pool must not be nil for DNS resolver")
    }
    if settings == nil {
        return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
    }

    var dns ns.DNS

    resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
    resolveFunc := func(ctx context.Context, zone, name string) (cid.ID, error) {
        var err error
        reqInfo := middleware.GetReqInfo(ctx)

        zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
        if isDefault {
        if zone == v2container.SysAttributeZoneDefault {
            zone, err = frostFS.SystemDNS(ctx)
            if err != nil {
                return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)

@ -196,13 +184,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
    }, nil
}

func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
func NewNNSResolver(address string) (*Resolver, error) {
    if address == "" {
        return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
    }
    if settings == nil {
        return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
    }

    var nns ns.NNS

@ -210,12 +195,9 @@ func NewNNSResolver(address string) (*Resolver, error) {
        return nil, fmt.Errorf("dial %s: %w", address, err)
    }

    resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
    resolveFunc := func(_ context.Context, zone, name string) (cid.ID, error) {
        var d container.Domain
        d.SetName(name)

        reqInfo := middleware.GetReqInfo(ctx)
        zone, _ := settings.FormContainerZone(reqInfo.Namespace)
        d.SetZone(zone)

        cnrID, err := nns.ResolveContainerDomain(d)
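Callers now supply the container zone explicitly instead of the resolver deriving it from the request context; a hedged usage sketch (the zone and bucket name are hypothetical, and "container" is assumed to be the default zone per v2container.SysAttributeZoneDefault):

    cnrID, err := bucketResolver.Resolve(ctx, "container", "my-bucket")
    if err != nil {
        return fmt.Errorf("resolve bucket: %w", err)
    }
    _ = cnrID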
110 api/router.go
@ -87,6 +87,7 @@ type (
        AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
        ListPartsHandler(w http.ResponseWriter, r *http.Request)
        ListMultipartUploadsHandler(http.ResponseWriter, *http.Request)
        PatchObjectHandler(http.ResponseWriter, *http.Request)

        ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
        ResolveCID(ctx context.Context, bucket string) (cid.ID, error)

@ -97,6 +98,8 @@ type Settings interface {
    s3middleware.RequestSettings
    s3middleware.PolicySettings
    s3middleware.MetricsSettings
    s3middleware.VHSSettings
    s3middleware.LogHTTPSettings
}

type FrostFSID interface {

@ -113,9 +116,6 @@ type Config struct {

    MiddlewareSettings Settings

    // Domains optional. If empty no virtual hosted domains will be attached.
    Domains []string

    FrostfsID FrostFSID

    FrostFSIDValidation bool

@ -128,7 +128,9 @@ type Config struct {

func NewRouter(cfg Config) *chi.Mux {
    api := chi.NewRouter()

    api.Use(
        s3middleware.LogHTTP(cfg.Log, cfg.MiddlewareSettings),
        s3middleware.Request(cfg.Log, cfg.MiddlewareSettings),
        middleware.ThrottleWithOpts(cfg.Throttle),
        middleware.Recoverer,

@ -142,11 +144,11 @@ func NewRouter(cfg Config) *chi.Mux {
        api.Use(s3middleware.FrostfsIDValidation(cfg.FrostfsID, cfg.Log))
    }

    api.Use(s3middleware.PrepareAddressStyle(cfg.MiddlewareSettings, cfg.Log))
    api.Use(s3middleware.PolicyCheck(s3middleware.PolicyConfig{
        Storage:        cfg.PolicyChecker,
        FrostfsID:      cfg.FrostfsID,
        Settings:       cfg.MiddlewareSettings,
        Domains:        cfg.Domains,
        Log:            cfg.Log,
        BucketResolver: cfg.Handler.ResolveBucket,
        Decoder:        cfg.XMLDecoder,

@ -154,22 +156,41 @@ func NewRouter(cfg Config) *chi.Mux {
    }))

    defaultRouter := chi.NewRouter()
    defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(cfg.Handler, cfg.Log))
    defaultRouter.Get("/", named("ListBuckets", cfg.Handler.ListBucketsHandler))
    defaultRouter.Mount("/{bucket}", bucketRouter(cfg.Handler))
    defaultRouter.Get("/", named(s3middleware.ListBucketsOperation, cfg.Handler.ListBucketsHandler))
    attachErrorHandler(defaultRouter)

    hr := NewHostBucketRouter("bucket")
    hr.Default(defaultRouter)
    for _, domain := range cfg.Domains {
        hr.Map(domain, bucketRouter(cfg.Handler, cfg.Log))
    }
    api.Mount("/", hr)
    vhsRouter := bucketRouter(cfg.Handler)
    router := newGlobalRouter(defaultRouter, vhsRouter)

    api.Mount("/", router)

    attachErrorHandler(api)

    return api
}

type globalRouter struct {
    pathStyleRouter chi.Router
    vhsRouter       chi.Router
}

func newGlobalRouter(pathStyleRouter, vhsRouter chi.Router) *globalRouter {
    return &globalRouter{
        pathStyleRouter: pathStyleRouter,
        vhsRouter:       vhsRouter,
    }
}

func (g *globalRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    router := g.pathStyleRouter
    if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.RequestVHSEnabled {
        router = g.vhsRouter
    }

    router.ServeHTTP(w, r)
}

func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        reqInfo := s3middleware.GetReqInfo(r.Context())

@ -205,6 +226,28 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
    }
}

func notSupportedHandler() http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        ctx := r.Context()
        reqInfo := s3middleware.GetReqInfo(ctx)

        _, wrErr := s3middleware.WriteErrorResponse(w, reqInfo, errors.GetAPIError(errors.ErrNotSupported))

        if log := s3middleware.GetReqLog(ctx); log != nil {
            fields := []zap.Field{
                zap.String("http method", r.Method),
                zap.String("url", r.RequestURI),
            }

            if wrErr != nil {
                fields = append(fields, zap.NamedError("write_response_error", wrErr))
            }

            log.Error(logs.NotSupported, fields...)
        }
    }
}

// attachErrorHandler set NotFoundHandler and MethodNotAllowedHandler for chi.Router.
func attachErrorHandler(api *chi.Mux) {
    errorHandler := http.HandlerFunc(errorResponseHandler)

@ -214,14 +257,13 @@ func attachErrorHandler(api *chi.Mux) {
    api.MethodNotAllowed(named("MethodNotAllowed", errorHandler))
}

func bucketRouter(h Handler, log *zap.Logger) chi.Router {
func bucketRouter(h Handler) chi.Router {
    bktRouter := chi.NewRouter()
    bktRouter.Use(
        s3middleware.AddBucketName(log),
        s3middleware.WrapHandler(h.AppendCORSHeaders),
    )

    bktRouter.Mount("/", objectRouter(h, log))
    bktRouter.Mount("/", objectRouter(h))

    bktRouter.Options("/", named(s3middleware.OptionsBucketOperation, h.Preflight))

@ -293,7 +335,14 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
            Add(NewFilter().
                Queries(s3middleware.VersionsQuery).
                Handler(named(s3middleware.ListBucketObjectVersionsOperation, h.ListBucketObjectVersionsHandler))).
            DefaultHandler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler)))
            Add(NewFilter().
                AllowedQueries(s3middleware.QueryDelimiter, s3middleware.QueryMaxKeys, s3middleware.QueryPrefix,
                    s3middleware.QueryMarker, s3middleware.QueryEncodingType).
                Handler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler))).
            Add(NewFilter().
                NoQueries().
                Handler(listWrapper(h))).
            DefaultHandler(notSupportedHandler()))
    })

    // PUT method handlers

@ -326,7 +375,10 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
            Add(NewFilter().
                Queries(s3middleware.NotificationQuery).
                Handler(named(s3middleware.PutBucketNotificationOperation, h.PutBucketNotificationHandler))).
            DefaultHandler(named(s3middleware.CreateBucketOperation, h.CreateBucketHandler)))
            Add(NewFilter().
                NoQueries().
                Handler(named(s3middleware.CreateBucketOperation, h.CreateBucketHandler))).
            DefaultHandler(notSupportedHandler()))
    })

    // POST method handlers

@ -356,11 +408,14 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
                Handler(named(s3middleware.DeleteBucketPolicyOperation, h.DeleteBucketPolicyHandler))).
            Add(NewFilter().
                Queries(s3middleware.LifecycleQuery).
                Handler(named(s3middleware.PutBucketLifecycleOperation, h.PutBucketLifecycleHandler))).
                Handler(named(s3middleware.DeleteBucketLifecycleOperation, h.DeleteBucketLifecycleHandler))).
            Add(NewFilter().
                Queries(s3middleware.EncryptionQuery).
                Handler(named(s3middleware.DeleteBucketEncryptionOperation, h.DeleteBucketEncryptionHandler))).
            DefaultHandler(named(s3middleware.DeleteBucketOperation, h.DeleteBucketHandler)))
            Add(NewFilter().
                NoQueries().
                Handler(named(s3middleware.DeleteBucketOperation, h.DeleteBucketHandler))).
            DefaultHandler(notSupportedHandler()))
    })

    attachErrorHandler(bktRouter)

@ -368,14 +423,27 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
    return bktRouter
}

func objectRouter(h Handler, l *zap.Logger) chi.Router {
func listWrapper(h Handler) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.BucketName == "" {
            reqInfo.API = s3middleware.ListBucketsOperation
            h.ListBucketsHandler(w, r)
        } else {
            reqInfo.API = s3middleware.ListObjectsV1Operation
            h.ListObjectsV1Handler(w, r)
        }
    }
}

func objectRouter(h Handler) chi.Router {
    objRouter := chi.NewRouter()
    objRouter.Use(s3middleware.AddObjectName(l))

    objRouter.Options("/*", named(s3middleware.OptionsObjectOperation, h.Preflight))

    objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))

    objRouter.Patch("/*", named(s3middleware.PatchObjectOperation, h.PatchObjectHandler))

    // GET method handlers
    objRouter.Group(func(r chi.Router) {
        r.Method(http.MethodGet, "/*", NewHandlerFilter().
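Request flow through the new dispatch, sketched from NewRouter above (the hosts are hypothetical):

    // 1. PrepareAddressStyle inspects Host and headers; for a virtual-hosted
    //    request it sets reqInfo.RequestVHSEnabled and fills BucketName/ObjectName.
    // 2. globalRouter.ServeHTTP then picks the sub-router:
    //      GET bktA.s3.domain.com/obj -> vhsRouter (bucket taken from Host)
    //      GET s3.domain.com/bktA/obj -> pathStyleRouter (bucket taken from path)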
@ -11,9 +11,11 @@ type HandlerFilters struct {
}

type Filter struct {
    queries []Pair
    headers []Pair
    h       http.Handler
    queries        []Pair
    headers        []Pair
    allowedQueries map[string]struct{}
    noQueries      bool
    h              http.Handler
}

type Pair struct {

@ -105,6 +107,22 @@ func (f *Filter) Queries(queries ...string) *Filter {
    return f
}

// NoQueries sets a flag indicating that the request shouldn't have query parameters.
func (f *Filter) NoQueries() *Filter {
    f.noQueries = true
    return f
}

// AllowedQueries adds query parameter keys that may be present in the request.
func (f *Filter) AllowedQueries(queries ...string) *Filter {
    f.allowedQueries = make(map[string]struct{}, len(queries))
    for _, query := range queries {
        f.allowedQueries[query] = struct{}{}
    }

    return f
}

func (hf *HandlerFilters) DefaultHandler(handler http.HandlerFunc) *HandlerFilters {
    hf.defaultHandler = handler
    return hf

@ -122,6 +140,17 @@ func (hf *HandlerFilters) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (hf *HandlerFilters) match(r *http.Request) http.Handler {
LOOP:
    for _, filter := range hf.filters {
        if filter.noQueries && len(r.URL.Query()) > 0 {
            continue
        }
        if len(filter.allowedQueries) > 0 {
            queries := r.URL.Query()
            for key := range queries {
                if _, ok := filter.allowedQueries[key]; !ok {
                    continue LOOP
                }
            }
        }
        for _, header := range filter.headers {
            hdrVals := r.Header.Values(header.Key)
            if len(hdrVals) == 0 || header.Value != "" && header.Value != hdrVals[0] {
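A hedged sketch of how the new predicates combine in a route definition (names follow the router code above; the query set is illustrative, not the full listing whitelist):

    r.Method(http.MethodGet, "/", NewHandlerFilter().
        Add(NewFilter().
            AllowedQueries(s3middleware.QueryPrefix, s3middleware.QueryMarker).
            Handler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler))).
        Add(NewFilter().
            NoQueries().
            Handler(listWrapper(h))).
        DefaultHandler(notSupportedHandler()))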
@@ -10,7 +10,7 @@ import (
    "testing"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"

@@ -23,7 +23,11 @@ import (
    "github.com/stretchr/testify/require"
)

const FrostfsNamespaceHeader = "X-Frostfs-Namespace"
const (
    FrostfsNamespaceHeader  = "X-Frostfs-Namespace"
    FrostfsVHSHeader        = "X-Frostfs-S3-VHS"
    FrostfsServernameHeader = "X-Frostfs-Servername"
)

type poolStatisticMock struct {
}

@@ -36,8 +40,9 @@ type centerMock struct {
    t            *testing.T
    anon         bool
    noAuthHeader bool
    isError      bool
    err          error
    attrs        []object.Attribute
    key          *keys.PrivateKey
}

func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {

@@ -45,8 +50,8 @@ func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
        return nil, middleware.ErrNoAuthorizationHeader
    }

    if c.isError {
        return nil, fmt.Errorf("some error")
    if c.err != nil {
        return nil, c.err
    }

    var token *bearer.Token

@@ -54,8 +59,12 @@ func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
    if !c.anon {
        bt := bearertest.Token()
        token = &bt
        key, err := keys.NewPrivateKey()
        require.NoError(c.t, err)
        key := c.key
        if key == nil {
            var err error
            key, err = keys.NewPrivateKey()
            require.NoError(c.t, err)
        }
        require.NoError(c.t, token.Sign(key.PrivateKey))
    }

@@ -71,8 +80,12 @@ func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
}

type middlewareSettingsMock struct {
    denyByDefault        bool
    sourceIPHeader       string
    domains              []string
    vhsEnabled           bool
    vhsNamespacesEnabled map[string]bool
    logHTTP              middleware.LogHTTPConfig
}

func (r *middlewareSettingsMock) SourceIPHeader() string {

@@ -91,6 +104,29 @@ func (r *middlewareSettingsMock) PolicyDenyByDefault() bool {
    return r.denyByDefault
}

func (r *middlewareSettingsMock) Domains() []string {
    return r.domains
}

func (r *middlewareSettingsMock) GlobalVHS() bool {
    return r.vhsEnabled
}

func (r *middlewareSettingsMock) VHSHeader() string {
    return FrostfsVHSHeader
}

func (r *middlewareSettingsMock) ServernameHeader() string {
    return FrostfsServernameHeader
}

func (r *middlewareSettingsMock) VHSNamespacesEnabled() map[string]bool {
    return r.vhsNamespacesEnabled
}

func (r *middlewareSettingsMock) LogHTTPConfig() middleware.LogHTTPConfig {
    return r.logHTTP
}

type frostFSIDMock struct {
    tags          map[string]string
    validateError bool

@@ -120,22 +156,21 @@ func (m *xmlMock) NewXMLDecoder(r io.Reader) *xml.Decoder {
}

type resourceTaggingMock struct {
    bucketTags      map[string]string
    objectTags      map[string]string
    noSuchObjectKey bool
    noSuchBucketKey bool
    err             error
}

func (m *resourceTaggingMock) GetBucketTagging(context.Context, *data.BucketInfo) (map[string]string, error) {
    if m.noSuchBucketKey {
        return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
    if m.err != nil {
        return nil, m.err
    }
    return m.bucketTags, nil
}

func (m *resourceTaggingMock) GetObjectTagging(context.Context, *data.GetObjectTaggingParams) (string, map[string]string, error) {
    if m.noSuchObjectKey {
        return "", nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
    if m.err != nil {
        return "", nil, m.err
    }
    return "", m.objectTags, nil
}

@@ -534,11 +569,15 @@ func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
    h.writeResponse(w, res)
}

func (h *handlerMock) PatchObjectHandler(http.ResponseWriter, *http.Request) {
    panic("implement me")
}

func (h *handlerMock) ResolveBucket(ctx context.Context, name string) (*data.BucketInfo, error) {
    reqInfo := middleware.GetReqInfo(ctx)
    bktInfo, ok := h.buckets[reqInfo.Namespace+name]
    if !ok {
        return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket)
        return nil, apierr.GetAPIError(apierr.ErrNoSuchBucket)
    }
    return bktInfo, nil
}
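The mock changes above all follow one refactor: boolean failure switches (isError, noSuchBucketKey, noSuchObjectKey) become a single injectable err field, so each test chooses the exact error the code under test should see. A stripped-down sketch of the pattern, with names invented for illustration:

package main

import (
    "errors"
    "fmt"
)

var errNoSuchKey = errors.New("no such key")

// taggingMock returns its injected error verbatim instead of mapping a
// boolean flag to one hard-coded failure, so one mock covers many cases.
type taggingMock struct {
    tags map[string]string
    err  error
}

func (m *taggingMock) GetTagging() (map[string]string, error) {
    if m.err != nil {
        return nil, m.err
    }
    return m.tags, nil
}

func main() {
    m := &taggingMock{err: errNoSuchKey}
    _, err := m.GetTagging()
    fmt.Println(errors.Is(err, errNoSuchKey)) // true: the injected error surfaces unchanged
}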
@@ -14,7 +14,8 @@ import (
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
    apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
    s3middleware "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/metrics"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"

@@ -26,6 +27,7 @@ import (
    "git.frostfs.info/TrueCloudLab/policy-engine/schema/s3"
    "github.com/go-chi/chi/v5"
    "github.com/go-chi/chi/v5/middleware"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"

@@ -78,7 +80,6 @@ func prepareRouter(t *testing.T, opts ...option) *routerMock {
        Metrics:            metrics.NewAppMetrics(metricsConfig),
        MiddlewareSettings: middlewareSettings,
        PolicyChecker:      policyChecker,
        Domains:            []string{"domain1", "domain2"},
        FrostfsID:          &frostFSIDMock{},
        XMLDecoder:         &xmlMock{},
        Tagging:            &resourceTaggingMock{},

@@ -215,18 +216,18 @@ func TestPolicyChecker(t *testing.T) {
    deleteObject(chiRouter, ns2, bktName2, objName2, nil)

    // check we cannot access 'bucket' in custom namespace
    putObjectErr(chiRouter, ns2, bktName1, objName2, nil, apiErrors.ErrAccessDenied)
    deleteObjectErr(chiRouter, ns2, bktName1, objName2, nil, apiErrors.ErrAccessDenied)
    putObjectErr(chiRouter, ns2, bktName1, objName2, nil, apierr.ErrAccessDenied)
    deleteObjectErr(chiRouter, ns2, bktName1, objName2, nil, apierr.ErrAccessDenied)
}

func TestPolicyCheckerError(t *testing.T) {
    chiRouter := prepareRouter(t)
    ns1, bktName1, objName1 := "", "bucket", "object"
    putObjectErr(chiRouter, ns1, bktName1, objName1, nil, apiErrors.ErrNoSuchBucket)
    putObjectErr(chiRouter, ns1, bktName1, objName1, nil, apierr.ErrNoSuchBucket)

    chiRouter = prepareRouter(t)
    chiRouter.cfg.FrostfsID.(*frostFSIDMock).userGroupsError = true
    putObjectErr(chiRouter, ns1, bktName1, objName1, nil, apiErrors.ErrInternalError)
    putObjectErr(chiRouter, ns1, bktName1, objName1, nil, apierr.ErrInternalError)
}

func TestPolicyCheckerReqTypeDetermination(t *testing.T) {

@@ -275,6 +276,36 @@ func TestPolicyCheckerReqTypeDetermination(t *testing.T) {
    })
}

func TestPolicyCheckFrostfsErrors(t *testing.T) {
    chiRouter := prepareRouter(t)
    ns1, bktName1, objName1 := "", "bucket", "object"

    createBucket(chiRouter, ns1, bktName1)
    key, err := keys.NewPrivateKey()
    require.NoError(t, err)
    chiRouter.cfg.Center.(*centerMock).key = key
    chiRouter.cfg.MiddlewareSettings.(*middlewareSettingsMock).denyByDefault = true

    ruleChain := &chain.Chain{
        ID: chain.ID("id"),
        Rules: []chain.Rule{{
            Status:    chain.Allow,
            Actions:   chain.Actions{Names: []string{"*"}},
            Resources: chain.Resources{Names: []string{fmt.Sprintf(s3.ResourceFormatS3BucketObjects, bktName1)}},
        }},
    }

    _, _, err = chiRouter.policyChecker.MorphRuleChainStorage().AddMorphRuleChain(chain.S3, engine.UserTarget(ns1+":"+key.Address()), ruleChain)
    require.NoError(t, err)

    // check we can access 'bucket' in default namespace
    putObject(chiRouter, ns1, bktName1, objName1, nil)

    chiRouter.cfg.Center.(*centerMock).anon = true
    chiRouter.cfg.Tagging.(*resourceTaggingMock).err = frostfs.ErrAccessDenied
    getObjectErr(chiRouter, ns1, bktName1, objName1, apierr.ErrAccessDenied)
}

func TestDefaultBehaviorPolicyChecker(t *testing.T) {
    chiRouter := prepareRouter(t)
    ns, bktName := "", "bucket"

@@ -284,7 +315,7 @@ func TestDefaultBehaviorPolicyChecker(t *testing.T) {

    // check we cannot access if rules not found when settings is enabled
    chiRouter.middlewareSettings.denyByDefault = true
    createBucketErr(chiRouter, ns, bktName, nil, apiErrors.ErrAccessDenied)
    createBucketErr(chiRouter, ns, bktName, nil, apierr.ErrAccessDenied)
}

func TestDefaultPolicyCheckerWithUserTags(t *testing.T) {

@@ -295,7 +326,7 @@ func TestDefaultPolicyCheckerWithUserTags(t *testing.T) {
    allowOperations(router, ns, []string{"s3:CreateBucket"}, engineiam.Conditions{
        engineiam.CondStringEquals: engineiam.Condition{fmt.Sprintf(common.PropertyKeyFormatFrostFSIDUserClaim, "tag-test"): []string{"test"}},
    })
    createBucketErr(router, ns, bktName, nil, apiErrors.ErrAccessDenied)
    createBucketErr(router, ns, bktName, nil, apierr.ErrAccessDenied)

    tags := make(map[string]string)
    tags["tag-test"] = "test"

@@ -322,8 +353,8 @@ func TestRequestParametersCheck(t *testing.T) {
        })

        listObjectsV1(router, ns, bktName, prefix, "", "")
        listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "invalid", "", "", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", "", apierr.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "invalid", "", "", apierr.ErrAccessDenied)
    })

    t.Run("delimiter parameter, prohibit specific value", func(t *testing.T) {

@@ -345,7 +376,7 @@ func TestRequestParametersCheck(t *testing.T) {

        listObjectsV1(router, ns, bktName, "", "", "")
        listObjectsV1(router, ns, bktName, "", "some-delimiter", "")
        listObjectsV1Err(router, ns, bktName, "", delimiter, "", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", delimiter, "", apierr.ErrAccessDenied)
    })

    t.Run("max-keys parameter, allow specific value", func(t *testing.T) {

@@ -366,9 +397,9 @@ func TestRequestParametersCheck(t *testing.T) {
        })

        listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys))
        listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1), apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", "invalid", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", "", apierr.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1), apierr.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", "invalid", apierr.ErrAccessDenied)
    })

    t.Run("max-keys parameter, allow range of values", func(t *testing.T) {

@@ -390,7 +421,7 @@ func TestRequestParametersCheck(t *testing.T) {

        listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys))
        listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1))
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys+1), apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys+1), apierr.ErrAccessDenied)
    })

    t.Run("max-keys parameter, prohibit specific value", func(t *testing.T) {

@@ -412,7 +443,7 @@ func TestRequestParametersCheck(t *testing.T) {

        listObjectsV1(router, ns, bktName, "", "", "")
        listObjectsV1(router, ns, bktName, "", "", strconv.Itoa(maxKeys-1))
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys), apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", strconv.Itoa(maxKeys), apierr.ErrAccessDenied)
    })
}

@@ -440,10 +471,10 @@ func TestRequestTagsCheck(t *testing.T) {

        tagging, err = xml.Marshal(data.Tagging{TagSet: []data.Tag{{Key: "key", Value: tagValue}}})
        require.NoError(t, err)
        putBucketTaggingErr(router, ns, bktName, tagging, apiErrors.ErrAccessDenied)
        putBucketTaggingErr(router, ns, bktName, tagging, apierr.ErrAccessDenied)

        tagging = nil
        putBucketTaggingErr(router, ns, bktName, tagging, apiErrors.ErrMalformedXML)
        putBucketTaggingErr(router, ns, bktName, tagging, apierr.ErrMalformedXML)
    })

    t.Run("put object with tag", func(t *testing.T) {

@@ -465,7 +496,7 @@ func TestRequestTagsCheck(t *testing.T) {

        putObject(router, ns, bktName, objName, &data.Tag{Key: tagKey, Value: tagValue})

        putObjectErr(router, ns, bktName, objName, &data.Tag{Key: "key", Value: tagValue}, apiErrors.ErrAccessDenied)
        putObjectErr(router, ns, bktName, objName, &data.Tag{Key: "key", Value: tagValue}, apierr.ErrAccessDenied)
    })
}

@@ -491,7 +522,7 @@ func TestResourceTagsCheck(t *testing.T) {
        listObjectsV1(router, ns, bktName, "", "", "")

        router.cfg.Tagging.(*resourceTaggingMock).bucketTags = map[string]string{}
        listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
        listObjectsV1Err(router, ns, bktName, "", "", "", apierr.ErrAccessDenied)
    })

    t.Run("object tagging", func(t *testing.T) {

@@ -516,22 +547,21 @@ func TestResourceTagsCheck(t *testing.T) {
        getObject(router, ns, bktName, objName)

        router.cfg.Tagging.(*resourceTaggingMock).objectTags = map[string]string{}
        getObjectErr(router, ns, bktName, objName, apiErrors.ErrAccessDenied)
        getObjectErr(router, ns, bktName, objName, apierr.ErrAccessDenied)
    })

    t.Run("non-existent resources", func(t *testing.T) {
        router := prepareRouter(t)
        ns, bktName, objName := "", "bucket", "object"

        listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrNoSuchBucket)
        listObjectsV1Err(router, ns, bktName, "", "", "", apierr.ErrNoSuchBucket)

        router.cfg.Tagging.(*resourceTaggingMock).noSuchBucketKey = true
        router.cfg.Tagging.(*resourceTaggingMock).err = apierr.GetAPIError(apierr.ErrNoSuchKey)
        createBucket(router, ns, bktName)
        getBucketErr(router, ns, bktName, apiErrors.ErrNoSuchKey)
        getBucketErr(router, ns, bktName, apierr.ErrNoSuchKey)

        router.cfg.Tagging.(*resourceTaggingMock).noSuchObjectKey = true
        createBucket(router, ns, bktName)
        getObjectErr(router, ns, bktName, objName, apiErrors.ErrNoSuchKey)
        getObjectErr(router, ns, bktName, objName, apierr.ErrNoSuchKey)
    })
}

@@ -549,7 +579,7 @@ func TestAccessBoxAttributesCheck(t *testing.T) {
        engineiam.CondBool: engineiam.Condition{fmt.Sprintf(s3.PropertyKeyFormatAccessBoxAttr, attrKey): []string{attrValue}},
    })

    listObjectsV1Err(router, ns, bktName, "", "", "", apiErrors.ErrAccessDenied)
    listObjectsV1Err(router, ns, bktName, "", "", "", apierr.ErrAccessDenied)

    var attr object.Attribute
    attr.SetKey(attrKey)

@@ -571,7 +601,7 @@ func TestSourceIPCheck(t *testing.T) {

    router.middlewareSettings.sourceIPHeader = hdr
    header := map[string][]string{hdr: {"192.0.3.0"}}
    createBucketErr(router, ns, bktName, header, apiErrors.ErrAccessDenied)
    createBucketErr(router, ns, bktName, header, apierr.ErrAccessDenied)

    router.middlewareSettings.sourceIPHeader = ""
    createBucket(router, ns, bktName)

@@ -587,7 +617,7 @@ func TestMFAPolicy(t *testing.T) {
    denyOperations(router, ns, []string{"s3:CreateBucket"}, engineiam.Conditions{
        engineiam.CondBool: engineiam.Condition{s3.PropertyKeyAccessBoxAttrMFA: []string{"false"}},
    })
    createBucketErr(router, ns, bktName, nil, apiErrors.ErrAccessDenied)
    createBucketErr(router, ns, bktName, nil, apierr.ErrAccessDenied)

    var attr object.Attribute
    attr.SetKey("IAM-MFA")

@@ -631,7 +661,7 @@ func createBucket(router *routerMock, namespace, bktName string) {
    require.Equal(router.t, s3middleware.CreateBucketOperation, resp.Method)
}

func createBucketErr(router *routerMock, namespace, bktName string, header http.Header, errCode apiErrors.ErrorCode) {
func createBucketErr(router *routerMock, namespace, bktName string, header http.Header, errCode apierr.ErrorCode) {
    w := createBucketBase(router, namespace, bktName, header)
    assertAPIError(router.t, w, errCode)
}

@@ -647,7 +677,7 @@ func createBucketBase(router *routerMock, namespace, bktName string, header http
    return w
}

func getBucketErr(router *routerMock, namespace, bktName string, errCode apiErrors.ErrorCode) {
func getBucketErr(router *routerMock, namespace, bktName string, errCode apierr.ErrorCode) {
    w := getBucketBase(router, namespace, bktName)
    assertAPIError(router.t, w, errCode)
}

@@ -666,7 +696,7 @@ func putObject(router *routerMock, namespace, bktName, objName string, tag *data
    return resp
}

func putObjectErr(router *routerMock, namespace, bktName, objName string, tag *data.Tag, errCode apiErrors.ErrorCode) {
func putObjectErr(router *routerMock, namespace, bktName, objName string, tag *data.Tag, errCode apierr.ErrorCode) {
    w := putObjectBase(router, namespace, bktName, objName, tag)
    assertAPIError(router.t, w, errCode)
}

@@ -691,7 +721,7 @@ func deleteObject(router *routerMock, namespace, bktName, objName string, tag *d
    return resp
}

func deleteObjectErr(router *routerMock, namespace, bktName, objName string, tag *data.Tag, errCode apiErrors.ErrorCode) {
func deleteObjectErr(router *routerMock, namespace, bktName, objName string, tag *data.Tag, errCode apierr.ErrorCode) {
    w := deleteObjectBase(router, namespace, bktName, objName, tag)
    assertAPIError(router.t, w, errCode)
}

@@ -716,7 +746,7 @@ func putBucketTagging(router *routerMock, namespace, bktName string, tagging []b
    return resp
}

func putBucketTaggingErr(router *routerMock, namespace, bktName string, tagging []byte, errCode apiErrors.ErrorCode) {
func putBucketTaggingErr(router *routerMock, namespace, bktName string, tagging []byte, errCode apierr.ErrorCode) {
    w := putBucketTaggingBase(router, namespace, bktName, tagging)
    assertAPIError(router.t, w, errCode)
}

@@ -739,7 +769,7 @@ func getObject(router *routerMock, namespace, bktName, objName string) handlerRe
    return resp
}

func getObjectErr(router *routerMock, namespace, bktName, objName string, errCode apiErrors.ErrorCode) {
func getObjectErr(router *routerMock, namespace, bktName, objName string, errCode apierr.ErrorCode) {
    w := getObjectBase(router, namespace, bktName, objName)
    assertAPIError(router.t, w, errCode)
}

@@ -758,7 +788,7 @@ func listObjectsV1(router *routerMock, namespace, bktName, prefix, delimiter, ma
    return resp
}

func listObjectsV1Err(router *routerMock, namespace, bktName, prefix, delimiter, maxKeys string, errCode apiErrors.ErrorCode) {
func listObjectsV1Err(router *routerMock, namespace, bktName, prefix, delimiter, maxKeys string, errCode apierr.ErrorCode) {
    w := listObjectsV1Base(router, namespace, bktName, prefix, delimiter, maxKeys)
    assertAPIError(router.t, w, errCode)
}

@@ -827,8 +857,17 @@ func TestAuthenticate(t *testing.T) {
    createBucket(chiRouter, "", "bkt-2")

    chiRouter = prepareRouter(t)
    chiRouter.cfg.Center.(*centerMock).isError = true
    createBucketErr(chiRouter, "", "bkt-3", nil, apiErrors.ErrAccessDenied)
    chiRouter.cfg.Center.(*centerMock).err = apierr.GetAPIError(apierr.ErrAccessDenied)
    createBucketErr(chiRouter, "", "bkt-3", nil, apierr.ErrAccessDenied)

    chiRouter.cfg.Center.(*centerMock).err = frostfs.ErrGatewayTimeout
    createBucketErr(chiRouter, "", "bkt-3", nil, apierr.ErrGatewayTimeout)

    chiRouter.cfg.Center.(*centerMock).err = apierr.GetAPIError(apierr.ErrInternalError)
    createBucketErr(chiRouter, "", "bkt-3", nil, apierr.ErrAccessDenied)

    chiRouter.cfg.Center.(*centerMock).err = apierr.GetAPIError(apierr.ErrBadRequest)
    createBucketErr(chiRouter, "", "bkt-3", nil, apierr.ErrBadRequest)
}

func TestFrostFSIDValidation(t *testing.T) {

@@ -844,7 +883,32 @@ func TestFrostFSIDValidation(t *testing.T) {
    // frostFSID validation failed
    chiRouter = prepareRouter(t, frostFSIDValidation(true))
    chiRouter.cfg.FrostfsID.(*frostFSIDMock).validateError = true
    createBucketErr(chiRouter, "", "bkt-3", nil, apiErrors.ErrInternalError)
    createBucketErr(chiRouter, "", "bkt-3", nil, apierr.ErrInternalError)
}

func TestRouterListObjectsV2Domains(t *testing.T) {
    chiRouter := prepareRouter(t, enableVHSDomains("domain.com"))

    chiRouter.handler.buckets["bucket"] = &data.BucketInfo{}

    w := httptest.NewRecorder()
    r := httptest.NewRequest(http.MethodGet, "/", nil)
    r.Host = "bucket.domain.com"
    query := make(url.Values)
    query.Set(s3middleware.ListTypeQuery, "2")
    r.URL.RawQuery = query.Encode()

    chiRouter.ServeHTTP(w, r)
    resp := readResponse(t, w)
    require.Equal(t, s3middleware.ListObjectsV2Operation, resp.Method)
}

func enableVHSDomains(domains ...string) option {
    return func(cfg *Config) {
        setting := cfg.MiddlewareSettings.(*middlewareSettingsMock)
        setting.vhsEnabled = true
        setting.domains = domains
    }
}

func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {

@@ -858,17 +922,17 @@ func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
    return res
}

func assertAPIError(t *testing.T, w *httptest.ResponseRecorder, expectedErrorCode apiErrors.ErrorCode) {
func assertAPIError(t *testing.T, w *httptest.ResponseRecorder, expectedErrorCode apierr.ErrorCode) {
    actualErrorResponse := &s3middleware.ErrorResponse{}
    err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
    require.NoError(t, err)

    expectedError := apiErrors.GetAPIError(expectedErrorCode)
    expectedError := apierr.GetAPIError(expectedErrorCode)

    require.Equal(t, expectedError.HTTPStatusCode, w.Code)
    require.Equal(t, expectedError.Code, actualErrorResponse.Code)

    if expectedError.ErrCode != apiErrors.ErrInternalError {
    if expectedError.ErrCode != apierr.ErrInternalError {
        require.Contains(t, actualErrorResponse.Message, expectedError.Description)
    }
}
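For intuition on what assertAPIError verifies: the recorded body is an S3-style XML error document whose Code field must match the expected API error. A rough, self-contained sketch with a minimal stand-in for ErrorResponse (the real type lives in the s3middleware package and carries more fields):

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

type errorResponse struct {
    XMLName xml.Name `xml:"Error"`
    Code    string   `xml:"Code"`
    Message string   `xml:"Message"`
}

func main() {
    // The body an S3 gateway writes on failure, reduced to its two key fields.
    body := `<Error><Code>AccessDenied</Code><Message>Access Denied.</Message></Error>`

    var resp errorResponse
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&resp); err != nil {
        panic(err)
    }
    fmt.Println(resp.Code == "AccessDenied") // true
}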
@@ -98,7 +98,9 @@ type (

    // IssueSecretOptions contains options for passing to Agent.IssueSecret method.
    IssueSecretOptions struct {
        Container       ContainerOptions
        Container       cid.ID
        AccessKeyID     string
        SecretAccessKey string
        FrostFSKey      *keys.PrivateKey
        GatesPublicKeys []*keys.PublicKey
        Impersonate     bool

@@ -114,7 +116,9 @@ type (
    UpdateSecretOptions struct {
        FrostFSKey       *keys.PrivateKey
        GatesPublicKeys  []*keys.PublicKey
        Address          oid.Address
        IsCustom         bool
        AccessKeyID      string
        ContainerID      cid.ID
        GatePrivateKey   *keys.PrivateKey
        CustomAttributes []object.Attribute
    }

@@ -141,7 +145,8 @@ type (

    // ObtainSecretOptions contains options for passing to Agent.ObtainSecret method.
    ObtainSecretOptions struct {
        SecretAddress  string
        Container      cid.ID
        AccessKeyID    string
        GatePrivateKey *keys.PrivateKey
    }
)

@@ -168,32 +173,9 @@ type (
    }
)

func (a *Agent) checkContainer(ctx context.Context, opts ContainerOptions, idOwner user.ID) (cid.ID, error) {
    if !opts.ID.Equals(cid.ID{}) {
        a.log.Info(logs.CheckContainer, zap.Stringer("cid", opts.ID))
        return opts.ID, a.frostFS.ContainerExists(ctx, opts.ID)
    }

    a.log.Info(logs.CreateContainer,
        zap.String("friendly_name", opts.FriendlyName),
        zap.String("placement_policy", opts.PlacementPolicy))

    var prm PrmContainerCreate

    err := prm.Policy.DecodeString(opts.PlacementPolicy)
    if err != nil {
        return cid.ID{}, fmt.Errorf("failed to build placement policy: %w", err)
    }

    prm.Owner = idOwner
    prm.FriendlyName = opts.FriendlyName

    cnrID, err := a.frostFS.CreateContainer(ctx, prm)
    if err != nil {
        return cid.ID{}, fmt.Errorf("create container in FrostFS: %w", err)
    }

    return cnrID, nil
func (a *Agent) checkContainer(ctx context.Context, cnrID cid.ID) error {
    a.log.Info(logs.CheckContainer, zap.Stringer("cid", cnrID))
    return a.frostFS.ContainerExists(ctx, cnrID)
}

func checkPolicy(policyString string) (*netmap.PlacementPolicy, error) {

@@ -255,20 +237,24 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
        return fmt.Errorf("create tokens: %w", err)
    }

    box, secrets, err := accessbox.PackTokens(gatesData, nil)
    var secret []byte
    isCustom := options.AccessKeyID != ""
    if isCustom {
        secret = []byte(options.SecretAccessKey)
    }
    box, secrets, err := accessbox.PackTokens(gatesData, secret, isCustom)
    if err != nil {
        return fmt.Errorf("pack tokens: %w", err)
    }

    box.ContainerPolicy = policies

    var idOwner user.ID
    user.IDFromKey(&idOwner, options.FrostFSKey.PrivateKey.PublicKey)
    id, err := a.checkContainer(ctx, options.Container, idOwner)
    if err != nil {
    if err = a.checkContainer(ctx, options.Container); err != nil {
        return fmt.Errorf("check container: %w", err)
    }

    var idOwner user.ID
    user.IDFromKey(&idOwner, options.FrostFSKey.PrivateKey.PublicKey)
    a.log.Info(logs.StoreBearerTokenIntoFrostFS,
        zap.Stringer("owner_tkn", idOwner))

@@ -281,26 +267,31 @@ func (a *Agent) IssueSecret(ctx context.Context, w io.Writer, options *IssueSecr
    creds := tokens.New(cfg)

    prm := tokens.CredentialsParam{
        OwnerID:          idOwner,
        Container:        options.Container,
        AccessKeyID:      options.AccessKeyID,
        AccessBox:        box,
        Expiration:       lifetime.Exp,
        Keys:             options.GatesPublicKeys,
        CustomAttributes: options.CustomAttributes,
    }

    addr, err := creds.Put(ctx, id, prm)
    addr, err := creds.Put(ctx, prm)
    if err != nil {
        return fmt.Errorf("failed to put creds: %w", err)
    }

    accessKeyID := accessKeyIDFromAddr(addr)
    accessKeyID := options.AccessKeyID
    if accessKeyID == "" {
        accessKeyID = accessKeyIDFromAddr(addr)
    }

    ir := &issuingResult{
        InitialAccessKeyID: accessKeyID,
        AccessKeyID:        accessKeyID,
        SecretAccessKey:    secrets.SecretKey,
        OwnerPrivateKey:    hex.EncodeToString(secrets.EphemeralKey.Bytes()),
        WalletPublicKey:    hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),
        ContainerID:        id.EncodeToString(),
        ContainerID:        options.Container.EncodeToString(),
    }

    enc := json.NewEncoder(w)

@@ -337,13 +328,15 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe

    creds := tokens.New(cfg)

    box, _, err := creds.GetBox(ctx, options.Address)
    box, _, err := creds.GetBox(ctx, options.ContainerID, options.AccessKeyID)
    if err != nil {
        return fmt.Errorf("get accessbox: %w", err)
    }

    secret, err := hex.DecodeString(box.Gate.SecretKey)
    if err != nil {
    var secret []byte
    if options.IsCustom {
        secret = []byte(box.Gate.SecretKey)
    } else if secret, err = hex.DecodeString(box.Gate.SecretKey); err != nil {
        return fmt.Errorf("failed to decode secret key access box: %w", err)
    }

@@ -360,7 +353,7 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe
        return fmt.Errorf("create tokens: %w", err)
    }

    updatedBox, secrets, err := accessbox.PackTokens(gatesData, secret)
    updatedBox, secrets, err := accessbox.PackTokens(gatesData, secret, options.IsCustom)
    if err != nil {
        return fmt.Errorf("pack tokens: %w", err)
    }

@@ -371,22 +364,26 @@ func (a *Agent) UpdateSecret(ctx context.Context, w io.Writer, options *UpdateSe
        zap.Stringer("owner_tkn", idOwner))

    prm := tokens.CredentialsParam{
        OwnerID:          idOwner,
        Container:        options.ContainerID,
        AccessBox:        updatedBox,
        Expiration:       lifetime.Exp,
        Keys:             options.GatesPublicKeys,
        CustomAttributes: options.CustomAttributes,
    }

    oldAddr := options.Address
    addr, err := creds.Update(ctx, oldAddr, prm)
    addr, err := creds.Update(ctx, prm)
    if err != nil {
        return fmt.Errorf("failed to update creds: %w", err)
    }

    accessKeyID := options.AccessKeyID
    if !options.IsCustom {
        accessKeyID = accessKeyIDFromAddr(addr)
    }

    ir := &issuingResult{
        AccessKeyID:        accessKeyIDFromAddr(addr),
        InitialAccessKeyID: accessKeyIDFromAddr(oldAddr),
        AccessKeyID:        accessKeyID,
        InitialAccessKeyID: options.AccessKeyID,
        SecretAccessKey:    secrets.SecretKey,
        OwnerPrivateKey:    hex.EncodeToString(secrets.EphemeralKey.Bytes()),
        WalletPublicKey:    hex.EncodeToString(options.FrostFSKey.PublicKey().Bytes()),

@@ -419,12 +416,7 @@ func (a *Agent) ObtainSecret(ctx context.Context, w io.Writer, options *ObtainSe

    bearerCreds := tokens.New(cfg)

    var addr oid.Address
    if err := addr.DecodeString(options.SecretAddress); err != nil {
        return fmt.Errorf("failed to parse secret address: %w", err)
    }

    box, _, err := bearerCreds.GetBox(ctx, addr)
    box, _, err := bearerCreds.GetBox(ctx, options.Container, options.AccessKeyID)
    if err != nil {
        return fmt.Errorf("failed to get tokens: %w", err)
    }
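The UpdateSecret change above hinges on how the secret is stored: custom credentials keep the secret access key as the raw string, while issued ones store it hex-encoded in the accessbox. The branch in isolation, as a small sketch (assuming only the encoding differs between the two cases):

package main

import (
    "encoding/hex"
    "fmt"
)

// decodeSecret mirrors the isCustom branch: raw bytes for custom
// credentials, hex decoding for derived ones.
func decodeSecret(stored string, isCustom bool) ([]byte, error) {
    if isCustom {
        return []byte(stored), nil
    }
    return hex.DecodeString(stored)
}

func main() {
    b, _ := decodeSecret("6d79", false) // "6d79" is hex for "my"
    fmt.Printf("%s\n", b)               // my

    b, _ = decodeSecret("my-secret-key", true)
    fmt.Printf("%s\n", b) // my-secret-key, used verbatim
}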
@@ -4,22 +4,34 @@ import (
    "context"
    "fmt"
    "os"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "go.uber.org/zap"
)

var issueSecretCmd = &cobra.Command{
    Use:   "issue-secret",
    Short: "Issue a secret in FrostFS network",
    Long:  "Creates new s3 credentials to use with frostfs-s3-gw",
    Example: `frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a --attributes LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt`,
    Example: `To create new s3 credentials use:
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a --attributes LOGIN=NUUb82KR2JrVByHs2YSKgtK29gKnF5q6Vt

To create new s3 credentials using specific access key id and secret access key use:
frostfs-s3-authmate issue-secret --wallet wallet.json --peer s01.frostfs.devenv:8080 --gate-public-key 031a6c6fbbdf02ca351745fa86b9ba5a9452d785ac4f7fc2b7548ca2a46c4fcf4a --access-key-id my-access-key-id --secret-access-key my-secret-key --container-id BpExV76416Vo7GrkJsGwXGoLM35xsBwup8voedDZR3c6
`,
    RunE: runIssueSecretCmd,
}

@@ -54,6 +66,9 @@ const (
    poolHealthcheckTimeoutFlag = "pool-healthcheck-timeout"
    poolRebalanceIntervalFlag  = "pool-rebalance-interval"
    poolStreamTimeoutFlag      = "pool-stream-timeout"

    accessKeyIDFlag     = "access-key-id"
    secretAccessKeyFlag = "secret-access-key"
)

func initIssueSecretCmd() {

@@ -73,6 +88,9 @@ func initIssueSecretCmd() {
    issueSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
    issueSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
    issueSecretCmd.Flags().String(attributesFlag, "", "User attributes in form of Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
    issueSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential that must be created")
    issueSecretCmd.Flags().String(secretAccessKeyFlag, "", "Secret access key of s3 credential that must be used")
    issueSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address (must be provided if container-id is nns name)")

    _ = issueSecretCmd.MarkFlagRequired(walletFlag)
    _ = issueSecretCmd.MarkFlagRequired(peerFlag)

@@ -91,14 +109,6 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
        return wrapPreparationError(fmt.Errorf("failed to load frostfs private key: %s", err))
    }

    var cnrID cid.ID
    containerID := viper.GetString(containerIDFlag)
    if len(containerID) > 0 {
        if err = cnrID.DecodeString(containerID); err != nil {
            return wrapPreparationError(fmt.Errorf("failed to parse auth container id: %s", err))
        }
    }

    var gatesPublicKeys []*keys.PublicKey
    for _, keyStr := range viper.GetStringSlice(gatePublicKeyFlag) {
        gpk, err := keys.NewPublicKeyFromString(keyStr)

@@ -137,17 +147,29 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
        return wrapFrostFSInitError(fmt.Errorf("failed to create FrostFS component: %s", err))
    }

    var accessBox cid.ID
    if viper.IsSet(containerIDFlag) {
        if accessBox, err = util.ResolveContainerID(viper.GetString(containerIDFlag), viper.GetString(rpcEndpointFlag)); err != nil {
            return wrapPreparationError(fmt.Errorf("resolve accessbox container id (make sure you provided %s): %w", rpcEndpointFlag, err))
        }
    } else if accessBox, err = createAccessBox(ctx, frostFS, key, log); err != nil {
        return wrapPreparationError(err)
    }

    accessKeyID, secretAccessKey, err := parseAccessKeys()
    if err != nil {
        return wrapPreparationError(err)
    }

    customAttrs, err := parseObjectAttrs(viper.GetString(attributesFlag))
    if err != nil {
        return wrapPreparationError(fmt.Errorf("failed to parse attributes: %s", err))
    }

    issueSecretOptions := &authmate.IssueSecretOptions{
        Container: authmate.ContainerOptions{
            ID:              cnrID,
            FriendlyName:    viper.GetString(containerFriendlyNameFlag),
            PlacementPolicy: viper.GetString(containerPlacementPolicyFlag),
        },
        Container:       accessBox,
        AccessKeyID:     accessKeyID,
        SecretAccessKey: secretAccessKey,
        FrostFSKey:      key,
        GatesPublicKeys: gatesPublicKeys,
        Impersonate:     true,

@@ -164,3 +186,59 @@ func runIssueSecretCmd(cmd *cobra.Command, _ []string) error {
    }
    return nil
}

func parseAccessKeys() (accessKeyID, secretAccessKey string, err error) {
    accessKeyID = viper.GetString(accessKeyIDFlag)
    secretAccessKey = viper.GetString(secretAccessKeyFlag)

    if accessKeyID == "" && secretAccessKey != "" || accessKeyID != "" && secretAccessKey == "" {
        return "", "", fmt.Errorf("flags %s and %s must be both provided or not", accessKeyIDFlag, secretAccessKeyFlag)
    }

    if accessKeyID != "" {
        if !isCustomCreds(accessKeyID) {
            return "", "", fmt.Errorf("invalid custom AccessKeyID format: %s", accessKeyID)
        }
        if !checkAccessKeyLength(accessKeyID) {
            return "", "", fmt.Errorf("invalid custom AccessKeyID length: %s", accessKeyID)
        }
        if !checkAccessKeyLength(secretAccessKey) {
            return "", "", fmt.Errorf("invalid custom SecretAccessKey length: %s", secretAccessKey)
        }
    }

    return accessKeyID, secretAccessKey, nil
}

func isCustomCreds(accessKeyID string) bool {
    var addr oid.Address
    return addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")) != nil
}

func checkAccessKeyLength(key string) bool {
    return 4 <= len(key) && len(key) <= 128
}

func createAccessBox(ctx context.Context, frostFS *frostfs.AuthmateFrostFS, key *keys.PrivateKey, log *zap.Logger) (cid.ID, error) {
    friendlyName := viper.GetString(containerFriendlyNameFlag)
    placementPolicy := viper.GetString(containerPlacementPolicyFlag)

    log.Info(logs.CreateContainer, zap.String("friendly_name", friendlyName), zap.String("placement_policy", placementPolicy))

    prm := authmate.PrmContainerCreate{
        FriendlyName: friendlyName,
    }

    user.IDFromKey(&prm.Owner, key.PrivateKey.PublicKey)

    if err := prm.Policy.DecodeString(placementPolicy); err != nil {
        return cid.ID{}, fmt.Errorf("failed to build placement policy: %w", err)
    }

    accessBox, err := frostFS.CreateContainer(ctx, prm)
    if err != nil {
        return cid.ID{}, fmt.Errorf("create container in FrostFS: %w", err)
    }

    return accessBox, nil
}
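A compact sketch of the constraints parseAccessKeys enforces on custom credentials — both flags together or neither, and 4 to 128 characters each (the address-format check done by isCustomCreds is left out of this sketch):

package main

import (
    "errors"
    "fmt"
)

func validate(accessKeyID, secretAccessKey string) error {
    // exactly one of the two provided -> reject
    if (accessKeyID == "") != (secretAccessKey == "") {
        return errors.New("access-key-id and secret-access-key must be both provided or not")
    }
    if accessKeyID == "" {
        return nil // derived credentials, nothing to validate here
    }
    okLen := func(k string) bool { return 4 <= len(k) && len(k) <= 128 }
    if !okLen(accessKeyID) || !okLen(secretAccessKey) {
        return errors.New("custom keys must be 4..128 characters long")
    }
    return nil
}

func main() {
    fmt.Println(validate("my-access-key-id", "my-secret-key")) // <nil>
    fmt.Println(validate("abc", "my-secret-key"))              // length error
    fmt.Println(validate("only-one", ""))                      // pairing error
}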
@@ -4,7 +4,6 @@ import (
    "context"
    "fmt"
    "os"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"

@@ -24,7 +23,6 @@ var obtainSecretCmd = &cobra.Command{
const (
    gateWalletFlag  = "gate-wallet"
    gateAddressFlag = "gate-address"
    accessKeyIDFlag = "access-key-id"
)

const (

@@ -38,10 +36,12 @@ func initObtainSecretCmd() {
    obtainSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
    obtainSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
    obtainSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be obtained")
    obtainSecretCmd.Flags().String(containerIDFlag, "", "CID or NNS name of auth container that contains provided credential (must be provided if custom access key id is used)")
    obtainSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
    obtainSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
    obtainSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
    obtainSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
    obtainSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address (must be provided if container-id is nns name)")

    _ = obtainSecretCmd.MarkFlagRequired(walletFlag)
    _ = obtainSecretCmd.MarkFlagRequired(peerFlag)

@@ -81,8 +81,14 @@ func runObtainSecretCmd(cmd *cobra.Command, _ []string) error {
        return wrapFrostFSInitError(cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2))
    }

    accessBox, accessKeyID, _, err := getAccessBoxID()
    if err != nil {
        return wrapPreparationError(err)
    }

    obtainSecretOptions := &authmate.ObtainSecretOptions{
        SecretAddress:  strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1),
        Container:      accessBox,
        AccessKeyID:    accessKeyID,
        GatePrivateKey: gateKey,
    }
@@ -7,6 +7,7 @@ import (
    "os"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-contract/commonclient"
    "git.frostfs.info/TrueCloudLab/frostfs-contract/policy"
    ffsidContract "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/frostfsid/contract"
    policyContact "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/policy/contract"

@@ -116,6 +117,10 @@ func initFrostFSIDContract(ctx context.Context, log *zap.Logger, key *keys.Priva
        Contract:      viper.GetString(frostfsIDContractFlag),
        ProxyContract: viper.GetString(proxyContractFlag),
        Key:           key,
        Waiter: commonclient.WaiterOptions{
            IgnoreAlreadyExistsError: false,
            VerifyExecResults:        true,
        },
    }

    cli, err := ffsidContract.New(ctx, cfg)
@@ -68,4 +68,7 @@ GoVersion: {{ runtimeVersion }}

    rootCmd.AddCommand(registerUserCmd)
    initRegisterUserCmd()

    rootCmd.AddCommand(signCmd)
    initSignCmd()
}
cmd/s3-authmate/modules/sign.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package modules

import (
    "errors"
    "fmt"
    "os"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
)

var signCmd = &cobra.Command{
    Use:   "sign",
    Short: "Sign arbitrary data using AWS Signature Version 4",
    Long: `Generate signature for provided data using AWS credentials. Credentials must be placed in ~/.aws/credentials.
You can provide profile to load using --profile flag or explicitly provide credentials and region using
--aws-access-key-id, --aws-secret-access-key, --region.
Note to override credentials you must provide both access key and secret key.`,
    Example: `frostfs-s3-authmate sign --data some-data
frostfs-s3-authmate sign --data file://data.txt
frostfs-s3-authmate sign --data file://data.txt --profile my-profile --time 2024-09-27
frostfs-s3-authmate sign --data some-data --region ru --service s3 --time 2024-09-27 --aws-access-key-id ETaA2CadPcA7bAkLsML2PbTudXY8uRt2PDjCCwkvRv9s0FDCxWDXYc1SA1vKv8KbyCNsLY2AmAjJ92Vz5rgvsFCy --aws-secret-access-key c2d65ef2980f03f4f495bdebedeeae760496697880d61d106bb9a4e5cd2e0607`,
    RunE: runSignCmd,
}

const (
    serviceFlag = "service"
    timeFlag    = "time"
    dataFlag    = "data"
)

func initSignCmd() {
    signCmd.Flags().StringP(dataFlag, "d", "", "Data to sign. Can be provided as string or as a file ('file://path-to-file')")
    signCmd.Flags().String(profileFlag, "", "AWS profile to load")
    signCmd.Flags().String(serviceFlag, "s3", "AWS service name to form signature")
    signCmd.Flags().String(timeFlag, "", "Signing time in '2006-01-02' format (default is current UTC time)")
    signCmd.Flags().String(regionFlag, "", "AWS region to use in signature (default is taken from ~/.aws/config)")
    signCmd.Flags().String(awsAccessKeyIDFlag, "", "AWS access key id to sign data (default is taken from ~/.aws/credentials)")
    signCmd.Flags().String(awsSecretAccessKeyFlag, "", "AWS secret access key to sign data (default is taken from ~/.aws/credentials)")

    _ = signCmd.MarkFlagRequired(dataFlag)
}

func runSignCmd(cmd *cobra.Command, _ []string) error {
    var cfg aws.Config

    if region := viper.GetString(regionFlag); region != "" {
        cfg.Region = &region
    }
    accessKeyID := viper.GetString(awsAccessKeyIDFlag)
    secretAccessKey := viper.GetString(awsSecretAccessKeyFlag)

    if accessKeyID != "" && secretAccessKey != "" {
        cfg.Credentials = credentials.NewStaticCredentialsFromCreds(credentials.Value{
            AccessKeyID:     accessKeyID,
            SecretAccessKey: secretAccessKey,
        })
    } else if accessKeyID != "" || secretAccessKey != "" {
        return wrapPreparationError(fmt.Errorf("both flags '%s' and '%s' must be provided", awsAccessKeyIDFlag, awsSecretAccessKeyFlag))
    }

    sess, err := session.NewSessionWithOptions(session.Options{
        Config:            cfg,
        Profile:           viper.GetString(profileFlag),
        SharedConfigState: session.SharedConfigEnable,
    })
    if err != nil {
        return wrapPreparationError(fmt.Errorf("couldn't get aws credentials: %w", err))
    }

    data := viper.GetString(dataFlag)
    if strings.HasPrefix(data, "file://") {
        dataToSign, err := os.ReadFile(data[7:])
        if err != nil {
            return wrapPreparationError(fmt.Errorf("read data file: %w", err))
        }
        data = string(dataToSign)
    }

    creds, err := sess.Config.Credentials.Get()
    if err != nil {
        return wrapPreparationError(fmt.Errorf("get creds: %w", err))
    }

    if sess.Config.Region == nil || *sess.Config.Region == "" {
        return wrapPreparationError(errors.New("missing region"))
    }

    service := viper.GetString(serviceFlag)
    if service == "" {
        return wrapPreparationError(errors.New("missing service"))
    }

    signTime := viper.GetTime(timeFlag)
    if signTime.IsZero() {
        signTime = time.Now()
    }

    signature := auth.SignStr(creds.SecretAccessKey, service, *sess.Config.Region, signTime, data)

    cmd.Println("service:", service)
    cmd.Println("region:", *sess.Config.Region)
    cmd.Println("time:", signTime.UTC().Format("20060102"))
    cmd.Println("accessKeyId:", creds.AccessKeyID)
    cmd.Printf("secretAccessKey: [****************%s]\n", creds.SecretAccessKey[max(0, len(creds.SecretAccessKey)-4):])
    cmd.Println("signature:", signature)

    return nil
}
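auth.SignStr is the gateway's own helper from the api/auth package; its exact behavior is defined there. For intuition only, a generic AWS Signature Version 4 signing chain (date, region, service, terminator, then the data) looks roughly like this sketch, which is not the gateway's implementation:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "time"
)

func hmacSHA256(key, data []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(data)
    return h.Sum(nil)
}

// signStr derives a SigV4-style signing key and HMACs the payload with it.
func signStr(secret, service, region string, t time.Time, data string) string {
    kDate := hmacSHA256([]byte("AWS4"+secret), []byte(t.UTC().Format("20060102")))
    kRegion := hmacSHA256(kDate, []byte(region))
    kService := hmacSHA256(kRegion, []byte(service))
    kSigning := hmacSHA256(kService, []byte("aws4_request"))
    return hex.EncodeToString(hmacSHA256(kSigning, []byte(data)))
}

func main() {
    fmt.Println(signStr("my-secret-key", "s3", "us-east-1", time.Now(), "some-data"))
}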
@@ -4,11 +4,9 @@ import (
    "context"
    "fmt"
    "os"
    "strings"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/wallet"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"

@@ -33,13 +31,15 @@ func initUpdateSecretCmd() {
    updateSecretCmd.Flags().String(peerFlag, "", "Address of a frostfs peer to connect to")
    updateSecretCmd.Flags().String(gateWalletFlag, "", "Path to the s3 gateway wallet to decrypt accessbox")
    updateSecretCmd.Flags().String(gateAddressFlag, "", "Address of the s3 gateway wallet account")
    updateSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be obtained")
    updateSecretCmd.Flags().String(accessKeyIDFlag, "", "Access key id of s3 credential for which secret must be updated")
    updateSecretCmd.Flags().String(containerIDFlag, "", "CID or NNS name of auth container that contains provided credential (must be provided if custom access key id is used)")
    updateSecretCmd.Flags().StringSlice(gatePublicKeyFlag, nil, "Public 256r1 key of a gate (use flags repeatedly for multiple gates or separate them by comma)")
    updateSecretCmd.Flags().Duration(poolDialTimeoutFlag, defaultPoolDialTimeout, "Timeout for connection to the node in pool to be established")
    updateSecretCmd.Flags().Duration(poolHealthcheckTimeoutFlag, defaultPoolHealthcheckTimeout, "Timeout for request to node to decide if it is alive")
    updateSecretCmd.Flags().Duration(poolRebalanceIntervalFlag, defaultPoolRebalanceInterval, "Interval for updating nodes health status")
    updateSecretCmd.Flags().Duration(poolStreamTimeoutFlag, defaultPoolStreamTimeout, "Timeout for individual operation in streaming RPC")
    updateSecretCmd.Flags().String(attributesFlag, "", "User attributes in form of Key1=Value1,Key2=Value2 (note: you cannot override system attributes)")
    updateSecretCmd.Flags().String(rpcEndpointFlag, "", "NEO node RPC address (must be provided if container-id is nns name)")

    _ = updateSecretCmd.MarkFlagRequired(walletFlag)
    _ = updateSecretCmd.MarkFlagRequired(peerFlag)

@@ -66,10 +66,9 @@ func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
        return wrapPreparationError(fmt.Errorf("failed to load s3 gate private key: %s", err))
    }

    var accessBoxAddress oid.Address
    credAddr := strings.Replace(viper.GetString(accessKeyIDFlag), "0", "/", 1)
    if err = accessBoxAddress.DecodeString(credAddr); err != nil {
        return wrapPreparationError(fmt.Errorf("failed to parse creds address: %w", err))
    accessBox, accessKeyID, isCustom, err := getAccessBoxID()
    if err != nil {
        return wrapPreparationError(err)
    }

    var gatesPublicKeys []*keys.PublicKey

@@ -101,7 +100,9 @@ func runUpdateSecretCmd(cmd *cobra.Command, _ []string) error {
    }

    updateSecretOptions := &authmate.UpdateSecretOptions{
        Address:         accessBoxAddress,
        ContainerID:     accessBox,
        AccessKeyID:     accessKeyID,
        IsCustom:        isCustom,
        FrostFSKey:      key,
        GatesPublicKeys: gatesPublicKeys,
        GatePrivateKey:  gateKey,
@@ -3,6 +3,7 @@ package modules

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "strings"

@@ -11,8 +12,11 @@ import (
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/authmate"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
    cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
    oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
    "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool"
    "github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    "github.com/spf13/viper"

@@ -163,3 +167,23 @@ func parseObjectAttrs(attributes string) ([]object.Attribute, error) {

    return attrs, nil
}

func getAccessBoxID() (cid.ID, string, bool, error) {
    accessKeyID := viper.GetString(accessKeyIDFlag)

    var accessBoxAddress oid.Address
    if err := accessBoxAddress.DecodeString(strings.Replace(accessKeyID, "0", "/", 1)); err == nil {
        return accessBoxAddress.Container(), accessKeyID, false, nil
    }

    if !viper.IsSet(containerIDFlag) {
        return cid.ID{}, "", false, errors.New("accessbox parameter must be set when custom access key id is used")
    }

    accessBox, err := util.ResolveContainerID(viper.GetString(containerIDFlag), viper.GetString(rpcEndpointFlag))
    if err != nil {
        return cid.ID{}, "", false, fmt.Errorf("resolve accessbox container id (make sure you provided %s): %w", rpcEndpointFlag, err)
    }

    return accessBox, accessKeyID, true, nil
}
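getAccessBoxID makes a three-way decision: an access key id that decodes back to an object address wins (container taken from the address, isCustom=false); otherwise an explicit container-id flag is required (isCustom=true); otherwise it is an error. A toy model with the FrostFS types stubbed out as strings — every name here is invented for illustration:

package main

import (
    "errors"
    "fmt"
    "strings"
)

// resolveBox mirrors the order of checks in getAccessBoxID. Standard access
// key ids are "<cid>0<oid>" (the "/" of the address replaced by "0", which
// never occurs in base58), so a failed decode marks the id as custom.
func resolveBox(accessKeyID, containerID string) (box string, custom bool, err error) {
    if cand := strings.Replace(accessKeyID, "0", "/", 1); strings.Count(cand, "/") == 1 {
        return strings.SplitN(cand, "/", 2)[0], false, nil // container part of the address
    }
    if containerID != "" {
        return containerID, true, nil
    }
    return "", false, errors.New("accessbox parameter must be set when custom access key id is used")
}

func main() {
    fmt.Println(resolveBox("Cnr0Obj", ""))             // Cnr false <nil>
    fmt.Println(resolveBox("my-key", "cnr-from-flag")) // cnr-from-flag true <nil>
    _, _, err := resolveBox("my-key", "")
    fmt.Println(err)
}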
cmd/s3-gw/app.go (333 lines changed)

@@ -2,6 +2,7 @@ package main

import (
    "context"
    "crypto/x509"
    "encoding/hex"
    "encoding/xml"
    "errors"

@@ -11,11 +12,13 @@ import (
    "os"
    "os/signal"
    "runtime/debug"
    "strings"
    "sync"
    "syscall"
    "time"

    v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
    "git.frostfs.info/TrueCloudLab/frostfs-contract/commonclient"
    "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
    grpctracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"

@@ -86,12 +89,14 @@ type (

    appSettings struct {
        logLevel            zap.AtomicLevel
        httpLogging         s3middleware.LogHTTPConfig
        maxClient           maxClientsConfig
        defaultMaxAge       int
        reconnectInterval   time.Duration
        resolveZoneList     []string
        isResolveListAllow  bool // True if ResolveZoneList contains allowed zones
        frostfsidValidation bool
        accessbox           *cid.ID

        mu         sync.RWMutex
        namespaces Namespaces

@@ -105,9 +110,13 @@ type (
        policyDenyByDefault  bool
        sourceIPHeader       string
        retryMaxAttempts     int
        domains              []string
        vhsEnabled           bool
        vhsHeader            string
        servernameHeader     string
        vhsNamespacesEnabled map[string]bool
        retryMaxBackoff      time.Duration
        retryStrategy        handler.RetryStrategy
        domains              []string
    }

    maxClientsConfig struct {

@@ -124,18 +133,7 @@ type (
func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
    objPool, treePool, key := getPools(ctx, log.logger, v)

    cfg := tokens.Config{
        FrostFS:                     frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(objPool, key), log.logger),
        Key:                         key,
        CacheConfig:                 getAccessBoxCacheConfig(v, log.logger),
        RemovingCheckAfterDurations: fetchRemovingCheckInterval(v, log.logger),
    }

    // prepare auth center
    ctr := auth.New(tokens.New(cfg), v.GetStringSlice(cfgAllowedAccessKeyIDPrefixes))

    app := &App{
        ctr:  ctr,
        log:  log.logger,
        cfg:  v,
        pool: objPool,

@@ -154,6 +152,8 @@ func newApp(ctx context.Context, log *Logger, v *viper.Viper) *App {
}

func (a *App) init(ctx context.Context) {
    a.initResolver()
    a.initAuthCenter(ctx)
    a.setRuntimeParameters()
    a.initFrostfsID(ctx)
    a.initPolicyStorage(ctx)

@@ -163,9 +163,26 @@ func (a *App) init(ctx context.Context) {
    a.initTracing(ctx)
}

func (a *App) initLayer(ctx context.Context) {
    a.initResolver()
func (a *App) initAuthCenter(ctx context.Context) {
    if a.cfg.IsSet(cfgContainersAccessBox) {
        cnrID, err := a.resolveContainerID(ctx, cfgContainersAccessBox)
        if err != nil {
            a.log.Fatal(logs.CouldNotFetchAccessBoxContainerInfo, zap.Error(err))
        }
        a.settings.accessbox = &cnrID
    }

    cfg := tokens.Config{
        FrostFS:                     frostfs.NewAuthmateFrostFS(frostfs.NewFrostFS(a.pool, a.key), a.log),
        Key:                         a.key,
        CacheConfig:                 getAccessBoxCacheConfig(a.cfg, a.log),
        RemovingCheckAfterDurations: fetchRemovingCheckInterval(a.cfg, a.log),
    }

    a.ctr = auth.New(tokens.New(cfg), a.cfg.GetStringSlice(cfgAllowedAccessKeyIDPrefixes), a.settings)
}

func (a *App) initLayer(ctx context.Context) {
    // prepare random key for anonymous requests
    randomKey, err := keys.NewPrivateKey()
    if err != nil {

@@ -183,17 +200,26 @@ func (a *App) initLayer(ctx context.Context) {
        }
    }

    var lifecycleCnrInfo *data.BucketInfo
    if a.cfg.IsSet(cfgContainersLifecycle) {
        lifecycleCnrInfo, err = a.fetchContainerInfo(ctx, cfgContainersLifecycle)
        if err != nil {
            a.log.Fatal(logs.CouldNotFetchLifecycleContainerInfo, zap.Error(err))
        }
    }

    layerCfg := &layer.Config{
        Cache: layer.NewCache(getCacheOptions(a.cfg, a.log)),
        AnonKey: layer.AnonymousKey{
            Key: randomKey,
        },
        GateOwner:        gateOwner,
        Resolver:         a.bucketResolver,
        TreeService:      tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
        Features:         a.settings,
        GateKey:          a.key,
        CORSCnrInfo:      corsCnrInfo,
        LifecycleCnrInfo: lifecycleCnrInfo,
    }

    // prepare object layer

@@ -203,6 +229,7 @@ func (a *App) initLayer(ctx context.Context) {
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
    settings := &appSettings{
        logLevel:          log.lvl,
        httpLogging:       s3middleware.LogHTTPConfig{},
        maxClient:         newMaxClients(v),
        defaultMaxAge:     fetchDefaultMaxAge(v, log.logger),
        reconnectInterval: fetchReconnectInterval(v),

@@ -221,39 +248,100 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
}

func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
    s.updateNamespacesSettings(v, log)
    s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
    s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
    s.setClientCut(v.GetBool(cfgClientCut))
    s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
    s.setMD5Enabled(v.GetBool(cfgMD5Enabled))
    s.setPolicyDenyByDefault(v.GetBool(cfgPolicyDenyByDefault))
    s.setSourceIPHeader(v.GetString(cfgSourceIPHeader))
    s.setRetryMaxAttempts(fetchRetryMaxAttempts(v))
    s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
    s.setRetryStrategy(fetchRetryStrategy(v))
    s.setVHSSettings(v, log)
}

func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
    nsHeader := v.GetString(cfgResolveNamespaceHeader)
    namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
    nsConfig, defaultNamespaces := fetchNamespacesConfig(log, v)
    vhsNamespacesEnabled := s.prepareVHSNamespaces(v, log, defaultNamespaces)
    defaultXMLNS := v.GetBool(cfgKludgeUseDefaultXMLNS)
    bypassContentEncodingInChunks := v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks)
    clientCut := v.GetBool(cfgClientCut)
    maxBufferSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
    md5Enabled := v.GetBool(cfgMD5Enabled)
    policyDenyByDefault := v.GetBool(cfgPolicyDenyByDefault)
    sourceIPHeader := v.GetString(cfgSourceIPHeader)
    retryMaxAttempts := fetchRetryMaxAttempts(v)
    retryMaxBackoff := fetchRetryMaxBackoff(v)
    retryStrategy := fetchRetryStrategy(v)
    domains := fetchDomains(v, log)
    vhsEnabled := v.GetBool(cfgVHSEnabled)
    vhsHeader := v.GetString(cfgVHSHeader)
    servernameHeader := v.GetString(cfgServernameHeader)
    httpLoggingEnabled := v.GetBool(cfgHTTPLoggingEnabled)
    httpLoggingMaxBody := v.GetInt64(cfgHTTPLoggingMaxBody)
    httpLoggingMaxLogSize := v.GetInt(cfgHTTPLoggingMaxLogSize)
    httpLoggingOutputPath := v.GetString(cfgHTTPLoggingDestination)
    httpLoggingUseGzip := v.GetBool(cfgHTTPLoggingGzip)

    s.mu.Lock()
    defer s.mu.Unlock()

    s.namespaceHeader = nsHeader
    s.httpLogging.Enabled = httpLoggingEnabled
    s.httpLogging.MaxBody = httpLoggingMaxBody
    s.httpLogging.MaxLogSize = httpLoggingMaxLogSize
    s.httpLogging.OutputPath = httpLoggingOutputPath
    s.httpLogging.UseGzip = httpLoggingUseGzip
    s.httpLogging.InitHTTPLogger(log)

    s.namespaceHeader = namespaceHeader
    s.defaultNamespaces = defaultNamespaces
    s.namespaces = nsConfig.Namespaces
    s.defaultXMLNS = defaultXMLNS
    s.bypassContentEncodingInChunks = bypassContentEncodingInChunks
    s.clientCut = clientCut
    s.maxBufferSizeForPut = maxBufferSizeForPut
    s.md5Enabled = md5Enabled
    s.policyDenyByDefault = policyDenyByDefault
    s.sourceIPHeader = sourceIPHeader
    s.retryMaxAttempts = retryMaxAttempts
    s.retryMaxBackoff = retryMaxBackoff
    s.retryStrategy = retryStrategy
    s.domains = domains
    s.vhsEnabled = vhsEnabled
    s.vhsHeader = vhsHeader
    s.servernameHeader = servernameHeader
    s.vhsNamespacesEnabled = vhsNamespacesEnabled
}
|
||||
|
||||
func (s *appSettings) setVHSSettings(v *viper.Viper, _ *zap.Logger) {
|
||||
domains := v.GetStringSlice(cfgListenDomains)
|
||||
func (s *appSettings) prepareVHSNamespaces(v *viper.Viper, log *zap.Logger, defaultNamespaces []string) map[string]bool {
|
||||
nsMap := fetchVHSNamespaces(v, log)
|
||||
vhsNamespaces := make(map[string]bool, len(nsMap))
|
||||
for ns, flag := range nsMap {
|
||||
if slices.Contains(defaultNamespaces, ns) {
|
||||
ns = defaultNamespace
|
||||
}
|
||||
vhsNamespaces[ns] = flag
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return vhsNamespaces
|
||||
}
|
||||
|
||||
s.domains = domains
|
||||
func (s *appSettings) Domains() []string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.domains
|
||||
}
|
||||
|
||||
func (s *appSettings) GlobalVHS() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.vhsEnabled
|
||||
}
|
||||
|
||||
func (s *appSettings) VHSHeader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.vhsHeader
|
||||
}
|
||||
|
||||
func (s *appSettings) ServernameHeader() string {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.servernameHeader
|
||||
}
|
||||
|
||||
func (s *appSettings) VHSNamespacesEnabled() map[string]bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return s.vhsNamespacesEnabled
|
||||
}
|
||||
|
||||
func (s *appSettings) BypassContentEncodingInChunks() bool {
|
||||
|
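The hunk above wires virtual hosted style (VHS) addressing into the runtime-reloadable settings. For orientation, a minimal sketch of how these options might be written in the gateway config; the key names come from the cfgVHS* constants added later in this diff, but the concrete values are illustrative assumptions, not taken from the project documentation:

vhs:
  enabled: false                           # cfgVHSEnabled: global VHS toggle
  vhs_header: X-Frostfs-S3-VHS             # cfgVHSHeader: per-request VHS override header
  servername_header: X-Frostfs-Servername  # cfgServernameHeader: server name used for VHS resolution
  namespaces:                              # cfgVHSNamespaces: per-namespace overrides of the global toggle
    ns1: true
    ns2: false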
@ -262,36 +350,18 @@ func (s *appSettings) BypassContentEncodingInChunks() bool {
    return s.bypassContentEncodingInChunks
}

func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
    s.mu.Lock()
    s.bypassContentEncodingInChunks = bypass
    s.mu.Unlock()
}

func (s *appSettings) ClientCut() bool {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.clientCut
}

func (s *appSettings) setClientCut(clientCut bool) {
    s.mu.Lock()
    s.clientCut = clientCut
    s.mu.Unlock()
}

func (s *appSettings) BufferMaxSizeForPut() uint64 {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.maxBufferSizeForPut
}

func (s *appSettings) setBufferMaxSizeForPut(size uint64) {
    s.mu.Lock()
    s.maxBufferSizeForPut = size
    s.mu.Unlock()
}

func (s *appSettings) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
    s.mu.RLock()
    defer s.mu.RUnlock()
@ -320,6 +390,13 @@ func (s *appSettings) DefaultCopiesNumbers(namespace string) []uint32 {
    return s.namespaces[namespace].CopiesNumbers[defaultConstraintName]
}

func (s *appSettings) LogHTTPConfig() s3middleware.LogHTTPConfig {
    s.mu.RLock()
    defer s.mu.RUnlock()

    return s.httpLogging
}

func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
    dec := xml.NewDecoder(r)
    dec.CharsetReader = func(charset string, reader io.Reader) (io.Reader, error) {
@ -339,12 +416,6 @@ func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
    return dec
}

func (s *appSettings) useDefaultXMLNamespace(useDefaultNamespace bool) {
    s.mu.Lock()
    s.defaultXMLNS = useDefaultNamespace
    s.mu.Unlock()
}

func (s *appSettings) DefaultMaxAge() int {
    return s.defaultMaxAge
}
@ -363,24 +434,18 @@ func (s *appSettings) MD5Enabled() bool {
    return s.md5Enabled
}

func (s *appSettings) setMD5Enabled(md5Enabled bool) {
    s.mu.Lock()
    s.md5Enabled = md5Enabled
    s.mu.Unlock()
}

func (s *appSettings) NamespaceHeader() string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.namespaceHeader
}

func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
func (s *appSettings) FormContainerZone(ns string) string {
    if len(ns) == 0 {
        return v2container.SysAttributeZoneDefault, true
        return v2container.SysAttributeZoneDefault
    }

    return ns + ".ns", false
    return ns + ".ns"
}

func (s *appSettings) isDefaultNamespace(ns string) bool {
@ -404,64 +469,36 @@ func (s *appSettings) PolicyDenyByDefault() bool {
    return s.policyDenyByDefault
}

func (s *appSettings) setPolicyDenyByDefault(policyDenyByDefault bool) {
    s.mu.Lock()
    s.policyDenyByDefault = policyDenyByDefault
    s.mu.Unlock()
}

func (s *appSettings) setSourceIPHeader(header string) {
    s.mu.Lock()
    s.sourceIPHeader = header
    s.mu.Unlock()
}

func (s *appSettings) SourceIPHeader() string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.sourceIPHeader
}

func (s *appSettings) setRetryMaxAttempts(maxAttempts int) {
    s.mu.Lock()
    s.retryMaxAttempts = maxAttempts
    s.mu.Unlock()
}

func (s *appSettings) RetryMaxAttempts() int {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.retryMaxAttempts
}

func (s *appSettings) setRetryMaxBackoff(maxBackoff time.Duration) {
    s.mu.Lock()
    s.retryMaxBackoff = maxBackoff
    s.mu.Unlock()
}

func (s *appSettings) RetryMaxBackoff() time.Duration {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.retryMaxBackoff
}

func (s *appSettings) setRetryStrategy(strategy handler.RetryStrategy) {
    s.mu.Lock()
    s.retryStrategy = strategy
    s.mu.Unlock()
}

func (s *appSettings) RetryStrategy() handler.RetryStrategy {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.retryStrategy
}

func (s *appSettings) Domains() []string {
    s.mu.RLock()
    defer s.mu.RUnlock()
    return s.domains
func (s *appSettings) AccessBoxContainer() (cid.ID, bool) {
    if s.accessbox != nil {
        return *s.accessbox, true
    }

    return cid.ID{}, false
}

func (a *App) initAPI(ctx context.Context) {
@ -473,6 +510,7 @@ func (a *App) initMetrics() {
    cfg := metrics.AppMetricsConfig{
        Logger:         a.log,
        PoolStatistics: frostfs.NewPoolStatistic(a.pool),
        TreeStatistic:  a.treePool,
        Enabled:        a.cfg.GetBool(cfgPrometheusEnabled),
    }
@ -486,6 +524,10 @@ func (a *App) initFrostfsID(ctx context.Context) {
        Contract:      a.cfg.GetString(cfgFrostfsIDContract),
        ProxyContract: a.cfg.GetString(cfgProxyContract),
        Key:           a.key,
        Waiter: commonclient.WaiterOptions{
            IgnoreAlreadyExistsError: false,
            VerifyExecResults:        true,
        },
    })
    if err != nil {
        a.log.Fatal(logs.InitFrostfsIDContractFailed, zap.Error(err))
@ -507,6 +549,10 @@ func (a *App) initPolicyStorage(ctx context.Context) {
        Contract:      a.cfg.GetString(cfgPolicyContract),
        ProxyContract: a.cfg.GetString(cfgProxyContract),
        Key:           a.key,
        Waiter: commonclient.WaiterOptions{
            IgnoreAlreadyExistsError: false,
            VerifyExecResults:        true,
        },
    })
    if err != nil {
        a.log.Fatal(logs.InitPolicyContractFailed, zap.Error(err))
@ -531,7 +577,6 @@ func (a *App) getResolverConfig() *resolver.Config {
    return &resolver.Config{
        FrostFS:    frostfs.NewResolverFrostFS(a.pool),
        RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
        Settings:   a.settings,
    }
}
@ -562,6 +607,22 @@ func (a *App) initTracing(ctx context.Context) {
        InstanceID: instanceID,
        Version:    version.Version,
    }

    if trustedCa := a.cfg.GetString(cfgTracingTrustedCa); trustedCa != "" {
        caBytes, err := os.ReadFile(trustedCa)
        if err != nil {
            a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
            return
        }
        certPool := x509.NewCertPool()
        ok := certPool.AppendCertsFromPEM(caBytes)
        if !ok {
            a.log.Warn(logs.FailedToInitializeTracing, zap.String("error", "can't fill cert pool by ca cert"))
            return
        }
        cfg.ServerCaCertPool = certPool
    }

    updated, err := tracing.Setup(ctx, cfg)
    if err != nil {
        a.log.Warn(logs.FailedToInitializeTracing, zap.Error(err))
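The new tracing.trusted_ca option lets the exporter verify the tracing endpoint against a custom CA bundle. A hedged config sketch; the file path and the exporter/endpoint values are made-up examples, only the trusted_ca key is introduced by this diff:

tracing:
  enabled: true
  exporter: "otlp_grpc"
  endpoint: "localhost:4317"
  trusted_ca: "/etc/frostfs/s3/tracing-ca.pem"  # PEM file whose certs are appended to the server cert pool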
@ -605,7 +666,8 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
    prmTree.SetKey(key)
    logger.Info(logs.UsingCredentials, zap.String("FrostFS", hex.EncodeToString(key.PublicKey().Bytes())))

    for _, peer := range fetchPeers(logger, cfg) {
    peers := fetchPeers(logger, cfg)
    for _, peer := range peers {
        prm.AddNode(peer)
        prmTree.AddNode(peer)
    }
|||
|
||||
errorThreshold := fetchErrorThreshold(cfg)
|
||||
prm.SetErrorThreshold(errorThreshold)
|
||||
|
||||
prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(cfg))
|
||||
|
||||
prm.SetLogger(logger)
|
||||
prmTree.SetLogger(logger)
|
||||
|
||||
prmTree.SetMaxRequestAttempts(cfg.GetInt(cfgTreePoolMaxAttempts))
|
||||
|
||||
var apiGRPCDialOpts []grpc.DialOption
|
||||
var treeGRPCDialOpts []grpc.DialOption
|
||||
if cfg.GetBool(cfgTracingEnabled) {
|
||||
interceptors := []grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||
}
|
||||
treeGRPCDialOpts = append(treeGRPCDialOpts, interceptors...)
|
||||
apiGRPCDialOpts = append(apiGRPCDialOpts, interceptors...)
|
||||
interceptors := []grpc.DialOption{
|
||||
grpc.WithUnaryInterceptor(grpctracing.NewUnaryClientInteceptor()),
|
||||
grpc.WithStreamInterceptor(grpctracing.NewStreamClientInterceptor()),
|
||||
}
|
||||
prm.SetGRPCDialOptions(apiGRPCDialOpts...)
|
||||
prmTree.SetGRPCDialOptions(treeGRPCDialOpts...)
|
||||
prm.SetGRPCDialOptions(interceptors...)
|
||||
prmTree.SetGRPCDialOptions(interceptors...)
|
||||
|
||||
p, err := pool.NewPool(prm)
|
||||
if err != nil {
|
||||
|
@ -655,6 +714,8 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
        logger.Fatal(logs.FailedToDialConnectionPool, zap.Error(err))
    }

    prmTree.SetBootstrapAddress(peers[0].Address())

    treePool, err := treepool.NewPool(prmTree)
    if err != nil {
        logger.Fatal(logs.FailedToCreateTreePool, zap.Error(err))
|||
|
||||
// Serve runs HTTP server to handle S3 API requests.
|
||||
func (a *App) Serve(ctx context.Context) {
|
||||
// Attach S3 API:
|
||||
a.log.Info(logs.FetchDomainsPrepareToUseAPI, zap.Strings("domains", a.settings.Domains()))
|
||||
|
||||
cfg := api.Config{
|
||||
Throttle: middleware.ThrottleOpts{
|
||||
Limit: a.settings.maxClient.count,
|
||||
|
@ -712,7 +770,6 @@ func (a *App) Serve(ctx context.Context) {
        Center:  a.ctr,
        Log:     a.log,
        Metrics: a.metrics,
        Domains: a.settings.Domains(),

        MiddlewareSettings: a.settings,
        PolicyChecker:      a.policyStorage,
@ -927,6 +984,8 @@ func getCacheOptions(v *viper.Viper, l *zap.Logger) *layer.CachesConfig {
    cacheCfg.AccessControl.Lifetime = fetchCacheLifetime(v, l, cfgAccessControlCacheLifetime, cacheCfg.AccessControl.Lifetime)
    cacheCfg.AccessControl.Size = fetchCacheSize(v, l, cfgAccessControlCacheSize, cacheCfg.AccessControl.Size)

    cacheCfg.NetworkInfo.Lifetime = fetchCacheLifetime(v, l, cfgNetworkInfoCacheLifetime, cacheCfg.NetworkInfo.Lifetime)

    return cacheCfg
}
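This hunk adds a lifetime-configurable network info cache. A sketch of the corresponding knob, using the cache.network_info.lifetime key defined later in this diff; the value shown is an assumed example, not a documented default:

cache:
  network_info:
    lifetime: 1m   # read by fetchCacheLifetime above; falls back to the built-in default when unset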
@ -1065,16 +1124,30 @@ func (a *App) tryReconnect(ctx context.Context, sr *http.Server) bool {
}

func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data.BucketInfo, err error) {
    cnrID, err := a.resolveContainerID(ctx, cfgKey)
    if err != nil {
        return nil, err
    }

    return getContainerInfo(ctx, cnrID, a.pool)
}

func (a *App) resolveContainerID(ctx context.Context, cfgKey string) (cid.ID, error) {
    containerString := a.cfg.GetString(cfgKey)

    var id cid.ID
    if err = id.DecodeString(containerString); err != nil {
        if id, err = a.bucketResolver.Resolve(ctx, containerString); err != nil {
            return nil, fmt.Errorf("resolve container name %s: %w", containerString, err)
    if err := id.DecodeString(containerString); err != nil {
        i := strings.Index(containerString, ".")
        if i < 0 {
            return cid.ID{}, fmt.Errorf("invalid container address: %s", containerString)
        }

        if id, err = a.bucketResolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
            return cid.ID{}, fmt.Errorf("resolve container address %s: %w", containerString, err)
        }
    }

    return getContainerInfo(ctx, id, a.pool)
    return id, nil
}

func getContainerInfo(ctx context.Context, id cid.ID, frostFSPool *pool.Pool) (*data.BucketInfo, error) {
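resolveContainerID accepts either a raw container ID or a dotted container address, which it splits on the first "." before handing both parts to the bucket resolver. A sketch of the related settings section; the placeholder values are illustrative, and the dotted-name form is an assumption inferred from the split logic above rather than from documentation:

containers:
  cors: <cid-or-dotted-name>        # existing CORS container
  lifecycle: <cid-or-dotted-name>   # new: container for lifecycle configurations
  accessbox: <cid-or-dotted-name>   # new: container for access boxes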
@ -30,6 +30,8 @@ import (
const (
    destinationStdout   = "stdout"
    destinationJournald = "journald"

    wildcardPlaceholder = "<wildcard>"
)

const (
@ -39,6 +41,10 @@ const (
    defaultStreamTimeout   = 10 * time.Second
    defaultShutdownTimeout = 15 * time.Second

    defaultLoggerSamplerInterval = 1 * time.Second

    defaultGracefulCloseOnSwitchTimeout = 10 * time.Second

    defaultPoolErrorThreshold uint32 = 100
    defaultPlacementPolicy           = "REP 3"
|||
|
||||
defaultAccessBoxCacheRemovingCheckInterval = 5 * time.Minute
|
||||
|
||||
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||
defaultNamespaceHeader = "X-Frostfs-Namespace"
|
||||
defaultVHSHeader = "X-Frostfs-S3-VHS"
|
||||
defaultServernameHeader = "X-Frostfs-Servername"
|
||||
|
||||
defaultConstraintName = "default"
|
||||
|
||||
|
@ -75,6 +83,19 @@ const ( // Settings.
    cfgLoggerLevel       = "logger.level"
    cfgLoggerDestination = "logger.destination"

    cfgLoggerSamplingEnabled    = "logger.sampling.enabled"
    cfgLoggerSamplingInitial    = "logger.sampling.initial"
    cfgLoggerSamplingThereafter = "logger.sampling.thereafter"
    cfgLoggerSamplingInterval   = "logger.sampling.interval"

    // HttpLogging.
    cfgHTTPLoggingEnabled     = "http_logging.enabled"
    cfgHTTPLoggingMaxBody     = "http_logging.max_body"
    cfgHTTPLoggingMaxLogSize  = "http_logging.max_log_size"
    cfgHTTPLoggingDestination = "http_logging.destination"
    cfgHTTPLoggingGzip        = "http_logging.gzip"
    cfgHTTPLoggingLogResponse = "http_logging.log_response"

    // Wallet.
    cfgWalletPath    = "wallet.path"
    cfgWalletAddress = "wallet.address"
@ -116,6 +137,7 @@ const ( // Settings.
    cfgMorphPolicyCacheSize     = "cache.morph_policy.size"
    cfgFrostfsIDCacheLifetime   = "cache.frostfsid.lifetime"
    cfgFrostfsIDCacheSize       = "cache.frostfsid.size"
    cfgNetworkInfoCacheLifetime = "cache.network_info.lifetime"

    cfgAccessBoxCacheRemovingCheckInterval = "cache.accessbox.removing_check_interval"
@ -138,12 +160,19 @@ const ( // Settings.
    cfgPProfAddress = "pprof.address"

    // Tracing.
    cfgTracingEnabled  = "tracing.enabled"
    cfgTracingExporter = "tracing.exporter"
    cfgTracingEndpoint = "tracing.endpoint"
    cfgTracingEnabled   = "tracing.enabled"
    cfgTracingExporter  = "tracing.exporter"
    cfgTracingEndpoint  = "tracing.endpoint"
    cfgTracingTrustedCa = "tracing.trusted_ca"

    cfgListenDomains = "listen_domains"

    // VHS.
    cfgVHSEnabled       = "vhs.enabled"
    cfgVHSHeader        = "vhs.vhs_header"
    cfgServernameHeader = "vhs.servername_header"
    cfgVHSNamespaces    = "vhs.namespaces"

    // Peers.
    cfgPeers = "peers"
@ -177,7 +206,9 @@ const ( // Settings.
    cfgSourceIPHeader = "source_ip_header"

    // Containers.
    cfgContainersCORS = "containers.cors"
    cfgContainersCORS      = "containers.cors"
    cfgContainersLifecycle = "containers.lifecycle"
    cfgContainersAccessBox = "containers.accessbox"

    // Command line args.
    cmdHelp = "help"
@ -199,6 +230,10 @@ const ( // Settings.
    // Sets the max number of attempts to make a successful tree request.
    cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"

    // Specifies the timeout after which an unhealthy client is closed during rebalancing
    // if it has become healthy again.
    cfgGracefulCloseOnSwitchTimeout = "frostfs.graceful_close_on_switch_timeout"

    // List of allowed AccessKeyID prefixes.
    cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
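This is the frostfs.graceful_close_on_switch_timeout parameter announced in the changelog. A minimal sketch of how it might be set; the value shown is simply the compiled-in default from this diff:

frostfs:
  graceful_close_on_switch_timeout: 10s  # non-positive values fall back to defaultGracefulCloseOnSwitchTimeout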
@ -285,6 +320,15 @@ func fetchRebalanceInterval(cfg *viper.Viper) time.Duration {
    return rebalanceInterval
}

func fetchSetGracefulCloseOnSwitchTimeout(cfg *viper.Viper) time.Duration {
    val := cfg.GetDuration(cfgGracefulCloseOnSwitchTimeout)
    if val <= 0 {
        val = defaultGracefulCloseOnSwitchTimeout
    }

    return val
}

func fetchErrorThreshold(cfg *viper.Viper) uint32 {
    errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
    if errorThreshold <= 0 {
@ -667,6 +711,41 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
    return servers
}

func fetchDomains(v *viper.Viper, log *zap.Logger) []string {
    domains := validateDomains(v.GetStringSlice(cfgListenDomains), log)

    countParts := func(domain string) int {
        return strings.Count(domain, ".")
    }

    sort.Slice(domains, func(i, j int) bool {
        return countParts(domains[i]) > countParts(domains[j])
    })

    return domains
}

func fetchVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
    vhsNamespacesEnabled := make(map[string]bool)
    nsMap := v.GetStringMap(cfgVHSNamespaces)
    for ns, val := range nsMap {
        if _, ok := vhsNamespacesEnabled[ns]; ok {
            log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns))
            continue
        }

        enabledFlag, ok := val.(bool)
        if !ok {
            log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns))
            continue
        }

        vhsNamespacesEnabled[ns] = enabledFlag
    }

    return vhsNamespacesEnabled
}

func newSettings() *viper.Viper {
    v := viper.New()
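fetchDomains sorts the configured domains so that the most specific entry (the one with the most dot-separated parts) is matched first, and validateDomains drops entries containing malformed placeholders. A sketch of a listen_domains list using the literal <wildcard> placeholder; the domain names are illustrative:

listen_domains:
  - s3.frostfs.devenv
  - <wildcard>.s3.frostfs.devenv   # a label may be exactly "<wildcard>"; any other use of < or > is rejected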
@ -717,6 +796,18 @@ func newSettings() *viper.Viper {
    // logger:
    v.SetDefault(cfgLoggerLevel, "debug")
    v.SetDefault(cfgLoggerDestination, "stdout")
    v.SetDefault(cfgLoggerSamplingEnabled, false)
    v.SetDefault(cfgLoggerSamplingThereafter, 100)
    v.SetDefault(cfgLoggerSamplingInitial, 100)
    v.SetDefault(cfgLoggerSamplingInterval, defaultLoggerSamplerInterval)

    // http logger
    v.SetDefault(cfgHTTPLoggingEnabled, false)
    v.SetDefault(cfgHTTPLoggingMaxBody, 1024)
    v.SetDefault(cfgHTTPLoggingMaxLogSize, 50)
    v.SetDefault(cfgHTTPLoggingDestination, "stdout")
    v.SetDefault(cfgHTTPLoggingGzip, false)
    v.SetDefault(cfgHTTPLoggingLogResponse, true)

    // pool:
    v.SetDefault(cfgPoolErrorThreshold, defaultPoolErrorThreshold)
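The defaults above translate into config along these lines. This is a sketch assembled from the keys and defaults visible in this diff, not copied from the project docs:

logger:
  level: debug
  destination: stdout
  sampling:
    enabled: false
    initial: 100       # log the first 100 entries with the same level and message per interval
    thereafter: 100    # then log only every 100th matching entry within the interval
    interval: 1s

http_logging:
  enabled: false
  max_body: 1024
  max_log_size: 50
  destination: stdout
  gzip: false
  log_response: true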
@ -753,6 +844,10 @@ func newSettings() *viper.Viper {
    v.SetDefault(cfgRetryMaxAttempts, defaultRetryMaxAttempts)
    v.SetDefault(cfgRetryMaxBackoff, defaultRetryMaxBackoff)

    // vhs
    v.SetDefault(cfgVHSHeader, defaultVHSHeader)
    v.SetDefault(cfgServernameHeader, defaultServernameHeader)

    // Bind flags
    if err := bindFlags(v, flags); err != nil {
        panic(fmt.Errorf("bind flags: %w", err))
|||
|
||||
switch dest {
|
||||
case destinationStdout:
|
||||
return newStdoutLogger(lvl)
|
||||
return newStdoutLogger(v, lvl)
|
||||
case destinationJournald:
|
||||
return newJournaldLogger(lvl)
|
||||
return newJournaldLogger(v, lvl)
|
||||
default:
|
||||
panic(fmt.Sprintf("wrong destination for logger: %s", dest))
|
||||
}
|
||||
|
@ -963,53 +1058,74 @@ func pickLogger(v *viper.Viper) *Logger {
// - parameterized level (debug by default)
// - console encoding
// - ISO8601 time encoding
// - sampling intervals
//
// and atomic log level to dynamically change it.
//
// Logger records a stack trace for all messages at or above fatal level.
//
// See also zapcore.Level, zap.NewProductionConfig, zap.AddStacktrace.
func newStdoutLogger(lvl zapcore.Level) *Logger {
    c := zap.NewProductionConfig()
    c.Level = zap.NewAtomicLevelAt(lvl)
    c.Encoding = "console"
    c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
func newStdoutLogger(v *viper.Viper, lvl zapcore.Level) *Logger {
    stdout := zapcore.AddSync(os.Stderr)
    level := zap.NewAtomicLevelAt(lvl)

    l, err := c.Build(
        zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)),
    )
    if err != nil {
        panic(fmt.Sprintf("build zap logger instance: %v", err))
    }
    consoleOutCore := zapcore.NewCore(newLogEncoder(), stdout, level)

    consoleOutCore = samplingEnabling(v, consoleOutCore)

    return &Logger{
        logger: l,
        lvl:    c.Level,
        logger: zap.New(consoleOutCore, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel))),
        lvl:    level,
    }
}

func newJournaldLogger(lvl zapcore.Level) *Logger {
    c := zap.NewProductionConfig()
    c.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
    c.Level = zap.NewAtomicLevelAt(lvl)
func newJournaldLogger(v *viper.Viper, lvl zapcore.Level) *Logger {
    level := zap.NewAtomicLevelAt(lvl)

    encoder := zapjournald.NewPartialEncoder(zapcore.NewConsoleEncoder(c.EncoderConfig), zapjournald.SyslogFields)
    encoder := zapjournald.NewPartialEncoder(newLogEncoder(), zapjournald.SyslogFields)

    core := zapjournald.NewCore(c.Level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
    core := zapjournald.NewCore(level, encoder, &journald.Journal{}, zapjournald.SyslogFields)
    coreWithContext := core.With([]zapcore.Field{
        zapjournald.SyslogFacility(zapjournald.LogDaemon),
        zapjournald.SyslogIdentifier(),
        zapjournald.SyslogPid(),
    })

    coreWithContext = samplingEnabling(v, coreWithContext)

    l := zap.New(coreWithContext, zap.AddStacktrace(zap.NewAtomicLevelAt(zap.FatalLevel)))

    return &Logger{
        logger: l,
        lvl:    c.Level,
        lvl:    level,
    }
}

func newLogEncoder() zapcore.Encoder {
    c := zap.NewProductionEncoderConfig()
    c.EncodeTime = zapcore.ISO8601TimeEncoder

    return zapcore.NewConsoleEncoder(c)
}

func samplingEnabling(v *viper.Viper, core zapcore.Core) zapcore.Core {
    // Zap samples by logging the first cfgLoggerSamplingInitial entries with a given level
    // and message within the specified time interval.
    // With this config, only the first cfgLoggerSamplingInitial log entries with the same level and message
    // are recorded per cfgLoggerSamplingInterval; after that, only every cfgLoggerSamplingThereafter-th
    // matching entry is logged within the interval.
    if v.GetBool(cfgLoggerSamplingEnabled) {
        core = zapcore.NewSamplerWithOptions(
            core,
            v.GetDuration(cfgLoggerSamplingInterval),
            v.GetInt(cfgLoggerSamplingInitial),
            v.GetInt(cfgLoggerSamplingThereafter),
        )
    }

    return core
}

func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
    var lvl zapcore.Level
    lvlStr := v.GetString(cfgLoggerLevel)
@ -1028,3 +1144,19 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
    }
    return lvl, nil
}

func validateDomains(domains []string, log *zap.Logger) []string {
    validDomains := make([]string, 0, len(domains))
LOOP:
    for _, domain := range domains {
        domainParts := strings.Split(domain, ".")
        for _, part := range domainParts {
            if strings.ContainsAny(part, "<>") && part != wildcardPlaceholder {
                log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain))
                continue LOOP
            }
        }
        validDomains = append(validDomains, domain)
    }
    return validDomains
}
34 cmd/s3-gw/validate_test.go Normal file
@ -0,0 +1,34 @@
package main

import (
    "testing"

    "github.com/stretchr/testify/require"
    "go.uber.org/zap/zaptest"
)

func TestValidateDomains(t *testing.T) {
    inputDomains := []string{
        "s3dev.frostfs.devenv",
        "s3dev.<invalid>.frostfs.devenv",
        "s3dev.<wildcard>.frostfs.devenv",
        "s3dev.<wildcard.frostfs.devenv",
        "s3dev.wildcard>.frostfs.devenv",
        "s3dev.<wild.card>.frostfs.devenv",
        "<invalid>.frostfs.devenv",
        "<wildcard>.frostfs.devenv>",
        "<wildcard>.frostfs.devenv",
        "s3dev.fro<stfs.devenv",
        "<wildcard>.dev.<wildcard>.frostfs.devenv",
        "<wildcard>.dev.<wildc>ard>.frostfs.devenv",
    }
    expectedDomains := []string{
        "s3dev.frostfs.devenv",
        "s3dev.<wildcard>.frostfs.devenv",
        "<wildcard>.frostfs.devenv",
        "<wildcard>.dev.<wildcard>.frostfs.devenv",
    }

    actualDomains := validateDomains(inputDomains, zaptest.NewLogger(t))
    require.Equal(t, expectedDomains, actualDomains)
}
50 cmd/s3-playback/internal/playback/multipart.go Normal file
@ -0,0 +1,50 @@
package playback

import (
    "encoding/xml"
    "fmt"
    "net/http"
)

type MultipartUpload struct {
    XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"`
    Bucket   string   `json:"bucket" xml:"Bucket"`
    Key      string   `json:"key" xml:"Key"`
    UploadID string   `json:"uploadId" xml:"UploadId"`
}

func HandleResponse(r *http.Request, mparts map[string]MultipartUpload, resp []byte, logResponse []byte) error {
    var mpart, mpartOld MultipartUpload
    if r.Method != "POST" || !r.URL.Query().Has("uploads") {
        return nil
    }
    // get new uploadId from response
    err := xml.Unmarshal(resp, &mpart)
    if err != nil {
        return fmt.Errorf("xml unmarshal error: %w", err)
    }
    // get old uploadId from logs
    err = xml.Unmarshal(logResponse, &mpartOld)
    if err != nil {
        return fmt.Errorf("xml unmarshal error: %w", err)
    }
    if mpartOld.UploadID != "" {
        mparts[mpartOld.UploadID] = mpart
    }

    return nil
}

func SwapUploadID(r *http.Request, settings *Settings) error {
    var uploadID string
    query := r.URL.Query()
    uploadID = query.Get("uploadId")
    mpart, ok := settings.Multiparts[uploadID]
    if !ok {
        return fmt.Errorf("no multipart upload with specified uploadId")
    }
    query.Set("uploadId", mpart.UploadID)
    r.URL.RawQuery = query.Encode()

    return nil
}
97 cmd/s3-playback/internal/playback/request.go Normal file
@ -0,0 +1,97 @@
package playback

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/detector"
    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/xmlutils"
    v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
    "github.com/aws/aws-sdk-go-v2/credentials"
)

type (
    httpBody []byte
    LoggedRequest struct {
        From     string      `json:"from"`
        URI      string      `json:"URI"`
        Method   string      `json:"method"`
        Payload  httpBody    `json:"payload,omitempty"`
        Response httpBody    `json:"response,omitempty"`
        Query    url.Values  `json:"query"`
        Header   http.Header `json:"headers"`
    }
    Credentials struct {
        AccessKey string
        SecretKey string
    }
    Settings struct {
        Endpoint   string
        Creds      Credentials
        Multiparts map[string]MultipartUpload
        Client     *http.Client
    }
)

func (h *httpBody) UnmarshalJSON(data []byte) error {
    unquoted, err := strconv.Unquote(string(data))
    if err != nil {
        return fmt.Errorf("failed to unquote data: %w", err)
    }
    detect := detector.NewDetector(strings.NewReader(unquoted), xmlutils.DetectXML)
    dataType, err := detect.Detect()
    if err != nil {
        return fmt.Errorf("failed to detect data: %w", err)
    }
    reader := xmlutils.ChooseReader(dataType, detect.RestoredReader())
    *h, err = io.ReadAll(reader)
    if err != nil {
        return fmt.Errorf("failed to unmarshal httpbody: %w", err)
    }

    return nil
}

// Sign replaces the Authorization header with new access key id and signature values.
func Sign(ctx context.Context, r *http.Request, creds Credentials) error {
    credProvider := credentials.NewStaticCredentialsProvider(creds.AccessKey, creds.SecretKey, "")
    awsCred, err := credProvider.Retrieve(ctx)
    if err != nil {
        return err
    }

    authHdr := r.Header.Get(auth.AuthorizationHdr)
    authInfo, err := parseAuthHeader(authHdr)
    if err != nil {
        return err
    }
    newHeader := strings.Replace(authHdr, authInfo["access_key_id"], creds.AccessKey, 1)
    r.Header.Set(auth.AuthorizationHdr, newHeader)

    signer := v4.NewSigner()
    signatureDateTimeStr := r.Header.Get(api.AmzDate)
    signatureDateTime, err := time.Parse("20060102T150405Z", signatureDateTimeStr)
    if err != nil {
        return err
    }

    return signer.SignHTTP(ctx, awsCred, r, r.Header.Get(api.AmzContentSha256), authInfo["service"], authInfo["region"], signatureDateTime)
}

func parseAuthHeader(authHeader string) (map[string]string, error) {
    authInfo := auth.NewRegexpMatcher(auth.AuthorizationFieldRegexp).GetSubmatches(authHeader)
    if len(authInfo) == 0 {
        return nil, errors.New("no matches found")
    }

    return authInfo, nil
}
98 cmd/s3-playback/internal/playback/request_test.go Normal file
@ -0,0 +1,98 @@
package playback

import (
    "errors"
    "strings"
    "testing"

    "github.com/stretchr/testify/require"
)

var errNoMatches = errors.New("no matches found")

func withoutValue(data map[string]string, field string) map[string]string {
    result := make(map[string]string)
    for k, v := range data {
        result[k] = v
    }
    result[field] = ""

    return result
}

func TestParseAuthHeader(t *testing.T) {
    defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"

    defaultAuthInfo := map[string]string{
        "access_key_id":        "oid0cid",
        "service":              "s3",
        "region":               "us-east-1",
        "v4_signature":         "2811ccb9e242f41426738fb1f",
        "signed_header_fields": "host;x-amz-content-sha256;x-amz-date",
        "date":                 "20210809",
    }
    for _, tc := range []struct {
        title    string
        header   string
        err      error
        expected map[string]string
    }{
        {
            title:    "correct full header",
            header:   defaultHeader,
            err:      nil,
            expected: defaultAuthInfo,
        },
        {
            title:    "correct with empty region",
            header:   strings.Replace(defaultHeader, "/us-east-1/", "//", 1),
            err:      nil,
            expected: withoutValue(defaultAuthInfo, "region"),
        },
        {
            title:    "empty access key",
            header:   strings.Replace(defaultHeader, "oid0cid", "", 1),
            err:      errNoMatches,
            expected: nil,
        },
        {
            title:    "empty service",
            header:   strings.Replace(defaultHeader, "/s3/", "//", 1),
            err:      errNoMatches,
            expected: nil,
        },
        {
            title:    "empty date",
            header:   strings.Replace(defaultHeader, "/20210809/", "//", 1),
            err:      errNoMatches,
            expected: nil,
        },
        {
            title: "empty v4_signature",
            header: strings.Replace(defaultHeader, "Signature=2811ccb9e242f41426738fb1f",
                "Signature=", 1),
            err:      errNoMatches,
            expected: nil,
        },
        {
            title: "empty signed_fields",
            header: strings.Replace(defaultHeader, "SignedHeaders=host;x-amz-content-sha256;x-amz-date",
                "SignedHeaders=", 1),
            err:      errNoMatches,
            expected: nil,
        },
    } {
        t.Run(tc.title, func(t *testing.T) {
            authInfo, err := parseAuthHeader(tc.header)
            require.Equal(t, err, tc.err, tc.header)
            require.Equal(t, tc.expected, authInfo, tc.header)
        })
    }
}
20 cmd/s3-playback/main.go Normal file
@ -0,0 +1,20 @@
package main

import (
    "context"
    "os"
    "os/signal"
    "syscall"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/cmd/s3-playback/modules"
)

func main() {
    ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP)

    if cmd, err := modules.Execute(ctx); err != nil {
        cmd.PrintErrln("Error:", err.Error())
        cmd.PrintErrf("Run '%v --help' for usage.\n", cmd.CommandPath())
        os.Exit(1)
    }
}
67 cmd/s3-playback/modules/root.go Normal file
@ -0,0 +1,67 @@
package modules

import (
    "context"
    "os"
    "strings"
    "time"

    "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
    "github.com/spf13/cobra"
    "github.com/spf13/pflag"
    "github.com/spf13/viper"
)

const (
    defaultPrintResponseLimit = 1024
    cfgConfigPath             = "config"
    cfgHTTPTimeoutFlag        = "http-timeout"
    cfgSkipVerifyTLS          = "skip-verify-tls"
)

var (
    cfgFile string
    rootCmd = &cobra.Command{
        Use:     "frostfs-s3-playback",
        Version: version.Version,
        Short:   "FrostFS S3 Traffic Playback",
        Long:    "Helps to reproduce s3 commands from log files",
        Example: "frostfs-s3-playback [--skip-verify-tls] [--http-timeout <timeout>] " +
            "[--version] --config <config_path> <command>",
        SilenceUsage:  true,
        SilenceErrors: true,
        PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
            return viper.BindPFlags(cmd.Flags())
        },
        RunE: func(cmd *cobra.Command, _ []string) error {
            return cmd.Help()
        },
    }
)

func Execute(ctx context.Context) (*cobra.Command, error) {
    return rootCmd.ExecuteContextC(ctx)
}

func initConfig() {
    viper.SetConfigFile(cfgFile)
    _ = viper.ReadInConfig()
}

func init() {
    cobra.OnInitialize(initConfig)
    cobra.EnableTraverseRunHooks = true
    rootCmd.SetGlobalNormalizationFunc(func(_ *pflag.FlagSet, name string) pflag.NormalizedName {
        return pflag.NormalizedName(strings.ReplaceAll(name, "_", "-"))
    })

    rootCmd.PersistentFlags().StringVar(&cfgFile, cfgConfigPath, "", "configuration filepath")
    _ = rootCmd.MarkPersistentFlagRequired(cfgConfigPath)
    _ = rootCmd.MarkPersistentFlagFilename(cfgConfigPath)
    rootCmd.PersistentFlags().Duration(cfgHTTPTimeoutFlag, time.Minute, "http request timeout")
    rootCmd.PersistentFlags().Bool(cfgSkipVerifyTLS, false, "skip TLS certificate verification")
    rootCmd.SetOut(os.Stdout)

    initRunCmd()
    rootCmd.AddCommand(runCmd)
}
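The playback tool requires a config file (the --config flag is mandatory), but the schema lives in the run command, which this diff does not show. The sketch below is therefore a hypothetical example inferred from the Settings and Credentials structs in request.go; every key name and value here is an assumption:

endpoint: http://localhost:8080   # gateway to replay logged S3 traffic against (Settings.Endpoint)
credentials:
  access_key: <access-key>        # hypothetical key; maps to Credentials.AccessKey used by Sign
  secret_key: <secret-key>        # hypothetical key; maps to Credentials.SecretKey
http_timeout: 1m                  # mirrors the --http-timeout flag default
skip_verify_tls: false            # mirrors the --skip-verify-tls flag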
Some files were not shown because too many files have changed in this diff.