Compare commits


3 commits

fea911c417 [#449] Add support headers for vhs and servername
All checks were successful:
  DCO (pull_request): successful in 52s
  Vulncheck (pull_request): successful in 1m9s
  Builds (1.21) (pull_request): successful in 1m29s
  Builds (1.22) (pull_request): successful in 1m28s
  Lint (pull_request): successful in 2m18s
  Tests (1.21) (pull_request): successful in 1m28s
  Tests (1.22) (pull_request): successful in 1m29s
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-19 14:46:15 +03:00

c0d624df68 [#446] Add tests for address style middleware
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-19 14:46:15 +03:00

271f64170d [#446] Add support virtual-hosted-style
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-19 14:46:10 +03:00
163 changed files with 2236 additions and 10122 deletions
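The commits above add virtual-hosted-style request addressing to the gateway. For orientation, here is a minimal sketch of the idea, assuming a configured base domain; the function and names are illustrative, not the gateway's actual middleware API:

```go
package main

import (
	"fmt"
	"strings"
)

// bucketFromHost recovers the bucket name from the Host header when a client
// uses virtual-hosted-style addressing (my-bucket.s3.example.com/object)
// instead of path-style addressing (s3.example.com/my-bucket/object).
func bucketFromHost(host, baseDomain string) (string, bool) {
	host = strings.ToLower(strings.Split(host, ":")[0]) // drop the port, if any
	suffix := "." + strings.ToLower(baseDomain)
	if !strings.HasSuffix(host, suffix) {
		return "", false // bare endpoint: fall back to path-style parsing
	}
	return strings.TrimSuffix(host, suffix), true
}

func main() {
	fmt.Println(bucketFromHost("my-bucket.s3.example.com:8084", "s3.example.com")) // my-bucket true
	fmt.Println(bucketFromHost("s3.example.com", "s3.example.com"))                // "" false
}
```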

@@ -1,14 +1,13 @@
-FROM golang:1.22 AS builder
+FROM golang:1.21 AS builder
 ARG BUILD=now
 ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw
 ARG VERSION=dev
-ARG GOFLAGS=""
 WORKDIR /src
 COPY . /src

-RUN make GOFLAGS=${GOFLAGS}
+RUN make

 # Executable image
 FROM alpine AS frostfs-s3-gw

@@ -1,3 +1,3 @@
 .git
 .cache
-.forgejo
+.github

@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   builds:
@@ -10,7 +6,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.21', '1.22' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

@@ -12,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.22'

       - name: Run commit format checker
         uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3

@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   lint:
@@ -14,7 +10,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.22'
           cache: true

       - name: Install linters
@@ -28,7 +24,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        go_versions: [ '1.22', '1.23' ]
+        go_versions: [ '1.21', '1.22' ]
       fail-fast: false
     steps:
       - uses: actions/checkout@v3

@@ -1,8 +1,4 @@
-on:
-  pull_request:
-  push:
-    branches:
-      - master
+on: [pull_request]

 jobs:
   vulncheck:
@@ -16,7 +12,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v3
         with:
-          go-version: '1.23'
+          go-version: '1.22'

       - name: Install govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest

.github/CODEOWNERS (new file)
@@ -0,0 +1 @@
+* @alexvanin @dkirillov

Binary image changed (5.5 KiB before and after).

@@ -12,8 +12,7 @@ run:
 # output configuration options
 output:
   # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
-  formats:
-    - format: tab
+  format: tab

 # all available settings of specific linters
 linters-settings:

@@ -4,105 +4,8 @@ This document outlines major changes between releases.

 ## [Unreleased]

-## [0.31.0] - Rongbuk - 2024-11-20
-
-### Fixed
-- Docker warnings during image build (#421)
-- `PartNumberMarker` in ListMultipart response (#451)
-- PostObject handling (#456)
-- Tag logging errors (#452)
-- Removing of duplicated parts in tree service during split brain (#448)
-- Container resolving (#482)
-- FrostFS to S3 error transformation (#488)
-- Default bucket routing (#507)
-- encoding-type in ListBucketObjectVersions (#404)
-- SIGHUP support for `tracing.enabled` config parameter (#520)
-- `trace_id` parameter in logs (#501)
-- Listing marker processing (#539)
-- Content-MD5 header check (#540)
-- Precondition check (#538)
-- Bucket name check during all S3 operations (#556)
-
 ### Added
-- Support for separate container for all CORS settings (#422)
-- `X-Amz-Force-Delete-Bucket` header for forced bucket removal (#31)
-- `Location` support in CompleteMultipart response (#451)
-- Tree pool request duration metric (#447)
-- Expiration lifecycle configuration support (#42, #412, #459, #460, #516, #536)
-- Add support for virtual hosted style addressing (#446, #449, #493)
-- Support `frostfs.graceful_close_on_switch_timeout` (#475)
-- Vulnerability report document (#413)
-- Support patch object method (#462, #473, #466, #479)
-- Enhanced logging and request reproducer (#369)
-- Root CA configuration for tracing (#484)
-- Log sampling policy configuration (#461)
-- `sign` command to `frostfs-s3-authmate` (#467)
-- Support custom aws credentials (#509)
-- Source IP binding configuration for FrostFS requests (#521)
-- Tracing attributes (#549)
+- Add support for virtual hosted style addressing (#446, #449)
-
-### Changed
-- Split `FrostFS` interface into separate read methods (#427)
-- golangci-lint v1.60 support (#474)
-- Updated Go version to 1.22 (#470)
-- Container removal after failed bucket creation (#434)
-- Explicit check for `.` symbol in bucket name (#506)
-- Transaction waiter in contract clients (#522)
-- Avoid maintenance mode storage node during object operations (#524)
-- Content-Type does not include in Presigned URL of s3-authmate (#505)
-- Check owner ID before deleting bucket (#528)
-- S3-Authmate now uses APE instead basic-ACL (#553)
-
-### Removed
-- Reduce using mutex when update app settings (#329)
-
-## [0.30.8] - 2024-10-18
-### Fixed
-- Error handling for correct connection switch in SDK Pool (#517)
-
-## [0.30.7] - 2024-10-03
-### Fixed
-- Correct aws-chunk encoding size handling (#511)
-
-## [0.30.6] - 2024-09-17
-### Fixed
-- Object size of objects upload with aws-chunked encoding (#450)
-- Object size of objects upload with negative Content-Length (#486)
-
-## [0.30.5] - 2024-09-16
-### Fixed
-- Panic catchers for fuzzing tests (#492)
-
-## [0.30.4] - 2024-09-03
-### Added
-- Fuzzing tests (#480)
-
-## [0.30.3] - 2024-08-27
-### Fixed
-- Empty listing when multipart upload contains more than 1000 parts (#471)
-
-## [0.30.2] - 2024-08-20
-### Fixed
-- Error counting in pool component before connection switch (#468)
-### Added
-- Log of endpoint address during tree pool errors (#468)
-
-## [0.30.1] - 2024-07-25
-### Fixed
-- Redundant system node removal in tree service (#437)
-### Added
-- Log details on SDK Pool health status change (#439)
-
 ## [0.30.0] - Kangshung -2024-07-19
@@ -333,13 +236,4 @@ To see CHANGELOG for older versions, refer to https://github.com/nspcc-dev/neofs
 [0.29.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.1...v0.29.2
 [0.29.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.2...v0.29.3
 [0.30.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.29.3...v0.30.0
-[0.30.1]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...v0.30.1
-[0.30.2]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.1...v0.30.2
-[0.30.3]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.2...v0.30.3
-[0.30.4]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.3...v0.30.4
-[0.30.5]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.4...v0.30.5
-[0.30.6]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.5...v0.30.6
-[0.30.7]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.6...v0.30.7
-[0.30.8]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.7...v0.30.8
-[0.31.0]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.8...v0.31.0
-[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.31.0...master
+[Unreleased]: https://git.frostfs.info/TrueCloudLab/frostfs-s3-gw/compare/v0.30.0...master

@@ -1 +0,0 @@
-.* @alexvanin @dkirillov

@@ -4,8 +4,8 @@
 REPO ?= $(shell go list -m)
 VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
 GO_VERSION ?= 1.22
-LINT_VERSION ?= 1.60.1
-TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
+LINT_VERSION ?= 1.56.1
+TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
 BINDIR = bin

 METRICS_DUMP_OUT ?= ./metrics-dump.json
@@ -14,8 +14,6 @@ METRICS_DUMP_OUT ?= ./metrics-dump.json
 CMDS = $(addprefix frostfs-, $(notdir $(wildcard cmd/*)))
 BINS = $(addprefix $(BINDIR)/, $(CMDS))

-GOFLAGS ?=
-
 # Variables for docker
 REPO_BASENAME = $(shell basename `go list -m`)
 HUB_IMAGE ?= "truecloudlab/$(REPO_BASENAME)"
@@ -25,12 +23,6 @@ OUTPUT_LINT_DIR ?= $(shell pwd)/bin
 LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
 TMP_DIR := .cache

-# Variables for fuzzing
-FUZZ_NGFUZZ_DIR ?= ""
-FUZZ_TIMEOUT ?= 30
-FUZZ_FUNCTIONS ?= "all"
-FUZZ_AUX ?= ""
-
 .PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc

 # .deb package versioning
@@ -46,7 +38,6 @@ all: $(BINS)
 $(BINS): $(BINDIR) dep
 	@echo "⇒ Build $@"
 	CGO_ENABLED=0 \
-	GOFLAGS=$(GOFLAGS) \
 	go build -v -trimpath \
 	-ldflags "-X $(REPO)/internal/version.Version=$(VERSION)" \
 	-o $@ ./cmd/$(subst frostfs-,,$(notdir $@))
@@ -73,7 +64,7 @@ docker/%:
 		-w /src \
 		-u `stat -c "%u:%g" .` \
 		--env HOME=/src \
-		golang:$(GO_VERSION) make GOFLAGS=$(GOFLAGS) $*,\
+		golang:$(GO_VERSION) make $*,\
 	@echo "supported docker targets: all $(BINS) lint")

 # Run tests
@@ -85,34 +76,6 @@ cover:
 	@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
 	@go tool cover -html=coverage.txt -o coverage.html

-# Run fuzzing
-CLANG := $(shell which clang-17 2>/dev/null)
-
-.PHONY: check-clang all
-check-clang:
-ifeq ($(CLANG),)
-	@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
-	@exit 1
-endif
-
-.PHONY: check-ngfuzz all
-check-ngfuzz:
-	@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
-		echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
-		exit 1; \
-	fi
-
-.PHONY: install-fuzzing-deps
-install-fuzzing-deps: check-clang check-ngfuzz
-
-.PHONY: fuzz
-fuzz: install-fuzzing-deps
-	@START_PATH=$$(pwd); \
-	ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
-	cd $(FUZZ_NGFUZZ_DIR) && \
-	./ngfuzz -clean && \
-	./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
-	./ngfuzz -report
-
 # Reformat code
 format:
 	@echo "⇒ Processing gofmt check"
@@ -124,7 +87,6 @@ image:
 	@docker build \
 		--build-arg REPO=$(REPO) \
 		--build-arg VERSION=$(VERSION) \
-		--build-arg GOFLAGS=$(GOFLAGS) \
 		--rm \
 		-f .docker/Dockerfile \
 		-t $(HUB_IMAGE):$(HUB_TAG) .

@@ -1,5 +1,5 @@
 <p align="center">
-<img src="./.forgejo/logo.svg" width="500px" alt="FrostFS logo">
+<img src="./.github/logo.svg" width="500px" alt="FrostFS logo">
 </p>
 <p align="center">
   <a href="https://frostfs.info">FrostFS</a> is a decentralized distributed object storage integrated with the <a href="https://neo.org">NEO Blockchain</a>.
@@ -93,24 +93,6 @@ HTTP/1.1 200 OK
 Also, you can configure domains using `.env` variables or `yaml` file.

-## Fuzzing
-To run fuzzing tests use the following command:
-
-```shell
-$ make fuzz
-```
-
-This command will install dependencies for the fuzzing process and run existing fuzzing tests.
-
-You can also use the following arguments:
-
-```
-FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
-FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
-FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
-FUZZ_NGFUZZ_DIR - path to ngfuzz tool
-```
-
 ## Documentation
 - [Configuration](./docs/configuration.md)

@@ -1,26 +0,0 @@
-# Security Policy
-
-## How To Report a Vulnerability
-
-If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
-
-**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
-
-Instead, you can report it using one of the following ways:
-
-* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
-
-Please include as much of the information listed below as you can to help us better understand and resolve the issue:
-
-* The type of issue (e.g., buffer overflow, or cross-site scripting)
-* Affected version(s)
-* Impact of the issue, including how an attacker might exploit the issue
-* Step-by-step instructions to reproduce the issue
-* The location of the affected source code (tag/branch/commit or direct URL)
-* Full paths of source file(s) related to the manifestation of the issue
-* Any special configuration required to reproduce the issue
-* Any log files that are related to this issue (if possible)
-* Proof-of-concept or exploit code (if possible)
-
-This information will help us triage your report more quickly.

@@ -1 +1 @@
-v0.31.0
+v0.30.0

@@ -14,17 +14,16 @@ import (
 	"time"

 	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 )

-// AuthorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
-var AuthorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)
+// authorizationFieldRegexp -- is regexp for credentials with Base58 encoded cid and oid and '0' (zero) as delimiter.
+var authorizationFieldRegexp = regexp.MustCompile(`AWS4-HMAC-SHA256 Credential=(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request,\s*SignedHeaders=(?P<signed_header_fields>.+),\s*Signature=(?P<v4_signature>.+)`)

 // postPolicyCredentialRegexp -- is regexp for credentials when uploading file using POST with policy.
 var postPolicyCredentialRegexp = regexp.MustCompile(`(?P<access_key_id>[^/]+)/(?P<date>[^/]+)/(?P<region>[^/]*)/(?P<service>[^/]+)/aws4_request`)
@@ -35,11 +34,6 @@ type (
 		postReg                    *RegexpSubmatcher
 		cli                        tokens.Credentials
 		allowedAccessKeyIDPrefixes []string // empty slice means all access key ids are allowed
-		settings                   CenterSettings
-	}
-
-	CenterSettings interface {
-		AccessBoxContainer() (cid.ID, bool)
 	}

 	//nolint:revive
@@ -56,6 +50,7 @@ type (
 )

 const (
+	accessKeyPartsNum  = 2
 	authHeaderPartsNum = 6
 	maxFormSizeMemory  = 50 * 1048576 // 50 MB
@@ -87,20 +82,24 @@ var ContentSHA256HeaderStandardValue = map[string]struct{}{
 }

 // New creates an instance of AuthCenter.
-func New(creds tokens.Credentials, prefixes []string, settings CenterSettings) *Center {
+func New(creds tokens.Credentials, prefixes []string) *Center {
 	return &Center{
 		cli:                        creds,
-		reg:                        NewRegexpMatcher(AuthorizationFieldRegexp),
+		reg:                        NewRegexpMatcher(authorizationFieldRegexp),
 		postReg:                    NewRegexpMatcher(postPolicyCredentialRegexp),
 		allowedAccessKeyIDPrefixes: prefixes,
-		settings:                   settings,
 	}
 }

 func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
 	submatches := c.reg.GetSubmatches(header)
 	if len(submatches) != authHeaderPartsNum {
-		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), header)
+		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), header)
+	}
+
+	accessKey := strings.Split(submatches["access_key_id"], "0")
+	if len(accessKey) != accessKeyPartsNum {
+		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKey)
 	}

 	signedFields := strings.Split(submatches["signed_header_fields"], ";")
@@ -115,6 +114,15 @@ func (c *Center) parseAuthHeader(header string) (*AuthHeader, error) {
 	}, nil
 }

+func getAddress(accessKeyID string) (oid.Address, error) {
+	var addr oid.Address
+	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err != nil {
+		return addr, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrInvalidAccessKeyID), accessKeyID)
+	}
+
+	return addr, nil
+}
+
 func IsStandardContentSHA256(key string) bool {
 	_, ok := ContentSHA256HeaderStandardValue[key]
 	return ok
@@ -173,14 +181,14 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
 		return nil, err
 	}

-	cnrID, err := c.getAccessBoxContainer(authHdr.AccessKeyID)
+	addr, err := getAddress(authHdr.AccessKeyID)
 	if err != nil {
 		return nil, err
 	}

-	box, attrs, err := c.cli.GetBox(r.Context(), cnrID, authHdr.AccessKeyID)
+	box, attrs, err := c.cli.GetBox(r.Context(), addr)
 	if err != nil {
-		return nil, fmt.Errorf("get box by access key '%s': %w", authHdr.AccessKeyID, err)
+		return nil, fmt.Errorf("get box '%s': %w", addr, err)
 	}

 	if err = checkFormatHashContentSHA256(r.Header.Get(AmzContentSHA256)); err != nil {
@@ -208,29 +216,15 @@ func (c *Center) Authenticate(r *http.Request) (*middleware.Box, error) {
 	return result, nil
 }

-func (c *Center) getAccessBoxContainer(accessKeyID string) (cid.ID, error) {
-	var addr oid.Address
-	if err := addr.DecodeString(strings.ReplaceAll(accessKeyID, "0", "/")); err == nil {
-		return addr.Container(), nil
-	}
-
-	cnrID, ok := c.settings.AccessBoxContainer()
-	if ok {
-		return cnrID, nil
-	}
-
-	return cid.ID{}, fmt.Errorf("%w: unknown container for creds '%s'", apierr.GetAPIError(apierr.ErrInvalidAccessKeyID), accessKeyID)
-}
-
 func checkFormatHashContentSHA256(hash string) error {
 	if !IsStandardContentSHA256(hash) {
 		hashBinary, err := hex.DecodeString(hash)
 		if err != nil {
-			return fmt.Errorf("%w: decode hash: %s: %s", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch),
+			return fmt.Errorf("%w: decode hash: %s: %s", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch),
 				hash, err.Error())
 		}
 		if len(hashBinary) != sha256.Size && len(hash) != 0 {
-			return fmt.Errorf("%w: invalid hash size %d", apierr.GetAPIError(apierr.ErrContentSHA256Mismatch), len(hashBinary))
+			return fmt.Errorf("%w: invalid hash size %d", apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch), len(hashBinary))
 		}
 	}

@@ -248,12 +242,12 @@ func (c Center) checkAccessKeyID(accessKeyID string) error {
 		}
 	}

-	return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apierr.GetAPIError(apierr.ErrAccessDenied))
+	return fmt.Errorf("%w: accesskeyID prefix isn't allowed", apiErrors.GetAPIError(apiErrors.ErrAccessDenied))
 }

 func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	if err := r.ParseMultipartForm(maxFormSizeMemory); err != nil {
-		return nil, fmt.Errorf("%w: parse multipart form with max size %d", apierr.GetAPIError(apierr.ErrInvalidArgument), maxFormSizeMemory)
+		return nil, fmt.Errorf("%w: parse multipart form with max size %d", apiErrors.GetAPIError(apiErrors.ErrInvalidArgument), maxFormSizeMemory)
 	}

 	if err := prepareForm(r.MultipartForm); err != nil {
@@ -268,7 +262,7 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	creds := MultipartFormValue(r, "x-amz-credential")
 	submatches := c.postReg.GetSubmatches(creds)
 	if len(submatches) != 4 {
-		return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrAuthorizationHeaderMalformed), creds)
+		return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrAuthorizationHeaderMalformed), creds)
 	}

 	signatureDateTime, err := time.Parse("20060102T150405Z", MultipartFormValue(r, "x-amz-date"))
@@ -278,14 +272,14 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	accessKeyID := submatches["access_key_id"]

-	cnrID, err := c.getAccessBoxContainer(accessKeyID)
+	addr, err := getAddress(accessKeyID)
 	if err != nil {
 		return nil, err
 	}

-	box, attrs, err := c.cli.GetBox(r.Context(), cnrID, accessKeyID)
+	box, attrs, err := c.cli.GetBox(r.Context(), addr)
 	if err != nil {
-		return nil, fmt.Errorf("get box by accessKeyID '%s': %w", accessKeyID, err)
+		return nil, fmt.Errorf("get box '%s': %w", addr, err)
 	}

 	secret := box.Gate.SecretKey
@@ -294,7 +288,7 @@ func (c *Center) checkFormData(r *http.Request) (*middleware.Box, error) {
 	signature := SignStr(secret, service, region, signatureDateTime, policy)
 	reqSignature := MultipartFormValue(r, "x-amz-signature")
 	if signature != reqSignature {
-		return nil, fmt.Errorf("%w: %s != %s", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
+		return nil, fmt.Errorf("%w: %s != %s", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
 			reqSignature, signature)
 	}

@@ -339,11 +333,11 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 	if authHeader.IsPresigned {
 		now := time.Now()
 		if signatureDateTime.Add(authHeader.Expiration).Before(now) {
-			return fmt.Errorf("%w: expired: now %s, signature %s", apierr.GetAPIError(apierr.ErrExpiredPresignRequest),
+			return fmt.Errorf("%w: expired: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrExpiredPresignRequest),
 				now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
 		}
 		if now.Before(signatureDateTime) {
-			return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apierr.GetAPIError(apierr.ErrBadRequest),
+			return fmt.Errorf("%w: signature time from the future: now %s, signature %s", apiErrors.GetAPIError(apiErrors.ErrBadRequest),
 				now.Format(time.RFC3339), signatureDateTime.Format(time.RFC3339))
 		}
 		if _, err := signer.Presign(request, nil, authHeader.Service, authHeader.Region, authHeader.Expiration, signatureDateTime); err != nil {
@@ -358,7 +352,7 @@ func (c *Center) checkSign(authHeader *AuthHeader, box *accessbox.Box, request *
 	}

 	if authHeader.SignatureV4 != signature {
-		return fmt.Errorf("%w: %s != %s: headers %v", apierr.GetAPIError(apierr.ErrSignatureDoesNotMatch),
+		return fmt.Errorf("%w: %s != %s: headers %v", apiErrors.GetAPIError(apiErrors.ErrSignatureDoesNotMatch),
 			authHeader.SignatureV4, signature, authHeader.SignedFields)
 	}

@@ -1,88 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-package auth
-
-import (
-	"strings"
-	"testing"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	utils "github.com/trailofbits/go-fuzz-utils"
-)
-
-const (
-	fuzzSuccessExitCode = 0
-	fuzzFailExitCode    = -1
-)
-
-func InitFuzzAuthenticate() {
-}
-
-func DoFuzzAuthenticate(input []byte) int {
-	// FUZZER INIT
-	if len(input) < 100 {
-		return fuzzFailExitCode
-	}
-
-	tp, err := utils.NewTypeProvider(input)
-	if err != nil {
-		return fuzzFailExitCode
-	}
-
-	var accessKeyAddr oid.Address
-	err = tp.Fill(accessKeyAddr)
-	if err != nil {
-		return fuzzFailExitCode
-	}
-
-	accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
-	secretKey, err := tp.GetString()
-	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
-
-	reqData := RequestData{
-		Method:   "GET",
-		Endpoint: "http://localhost:8084",
-		Bucket:   "my-bucket",
-		Object:   "@obj/name",
-	}
-	presignData := PresignData{
-		Service:  "s3",
-		Region:   "spb",
-		Lifetime: 10 * time.Minute,
-		SignTime: time.Now().UTC(),
-	}
-
-	req, err := PresignRequest(awsCreds, reqData, presignData)
-	if req == nil {
-		return fuzzFailExitCode
-	}
-
-	expBox := &accessbox.Box{
-		Gate: &accessbox.GateData{
-			SecretKey: secretKey,
-		},
-	}
-
-	mock := newTokensFrostfsMock()
-	mock.addBox(accessKeyAddr, expBox)
-
-	c := &Center{
-		cli:     mock,
-		reg:     NewRegexpMatcher(AuthorizationFieldRegexp),
-		postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
-	}
-
-	_, _ = c.Authenticate(req)
-
-	return fuzzSuccessExitCode
-}
-
-func FuzzAuthenticate(f *testing.F) {
-	f.Fuzz(func(t *testing.T, data []byte) {
-		DoFuzzAuthenticate(data)
-	})
-}

@@ -17,9 +17,8 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/tokens"
-	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
-	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
@@ -29,23 +28,11 @@ import (
 	"go.uber.org/zap/zaptest"
 )

-type centerSettingsMock struct {
-	accessBoxContainer *cid.ID
-}
-
-func (c *centerSettingsMock) AccessBoxContainer() (cid.ID, bool) {
-	if c.accessBoxContainer == nil {
-		return cid.ID{}, false
-	}
-	return *c.accessBoxContainer, true
-}
-
 func TestAuthHeaderParse(t *testing.T) {
 	defaultHeader := "AWS4-HMAC-SHA256 Credential=oid0cid/20210809/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=2811ccb9e242f41426738fb1f"

 	center := &Center{
-		reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
-		settings: &centerSettingsMock{},
+		reg: NewRegexpMatcher(authorizationFieldRegexp),
 	}

 	for _, tc := range []struct {
@@ -70,6 +57,11 @@ func TestAuthHeaderParse(t *testing.T) {
 			err:      errors.GetAPIError(errors.ErrAuthorizationHeaderMalformed),
 			expected: nil,
 		},
+		{
+			header:   strings.ReplaceAll(defaultHeader, "oid0cid", "oidcid"),
+			err:      errors.GetAPIError(errors.ErrInvalidAccessKeyID),
+			expected: nil,
+		},
 	} {
 		authHeader, err := center.parseAuthHeader(tc.header)
 		require.ErrorIs(t, err, tc.err, tc.header)
@@ -77,6 +69,43 @@ func TestAuthHeaderParse(t *testing.T) {
 	}
 }

+func TestAuthHeaderGetAddress(t *testing.T) {
+	defaulErr := errors.GetAPIError(errors.ErrInvalidAccessKeyID)
+
+	for _, tc := range []struct {
+		authHeader *AuthHeader
+		err        error
+	}{
+		{
+			authHeader: &AuthHeader{
+				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJM0HrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
+			},
+			err: nil,
+		},
+		{
+			authHeader: &AuthHeader{
+				AccessKeyID: "vWqF8cMDRbJcvnPLALoQGnABPPhw8NyYMcGsfDPfZJMHrgjonN8CgFvCZ3kh9BUXw4W2tJ5E7EAGhueSF122HB",
+			},
+			err: defaulErr,
+		},
+		{
+			authHeader: &AuthHeader{
+				AccessKeyID: "oid0cid",
+			},
+			err: defaulErr,
+		},
+		{
+			authHeader: &AuthHeader{
+				AccessKeyID: "oidcid",
+			},
+			err: defaulErr,
+		},
+	} {
+		_, err := getAddress(tc.authHeader.AccessKeyID)
+		require.ErrorIs(t, err, tc.err, tc.authHeader.AccessKeyID)
+	}
+}
+
 func TestSignature(t *testing.T) {
 	secret := "66be461c3cd429941c55daf42fad2b8153e5a2016ba89c9494d97677cc9d3872"
 	strToSign := "eyAiZXhwaXJhdGlvbiI6ICIyMDE1LTEyLTMwVDEyOjAwOjAwLjAwMFoiLAogICJjb25kaXRpb25zIjogWwogICAgeyJidWNrZXQiOiAiYWNsIn0sCiAgICBbInN0YXJ0cy13aXRoIiwgIiRrZXkiLCAidXNlci91c2VyMS8iXSwKICAgIHsic3VjY2Vzc19hY3Rpb25fcmVkaXJlY3QiOiAiaHR0cDovL2xvY2FsaG9zdDo4MDg0L2FjbCJ9LAogICAgWyJzdGFydHMtd2l0aCIsICIkQ29udGVudC1UeXBlIiwgImltYWdlLyJdLAogICAgeyJ4LWFtei1tZXRhLXV1aWQiOiAiMTQzNjUxMjM2NTEyNzQifSwKICAgIFsic3RhcnRzLXdpdGgiLCAiJHgtYW16LW1ldGEtdGFnIiwgIiJdLAoKICAgIHsiWC1BbXotQ3JlZGVudGlhbCI6ICI4Vmk0MVBIbjVGMXNzY2J4OUhqMXdmMUU2aERUYURpNndxOGhxTU05NllKdTA1QzVDeUVkVlFoV1E2aVZGekFpTkxXaTlFc3BiUTE5ZDRuR3pTYnZVZm10TS8yMDE1MTIyOS91cy1lYXN0LTEvczMvYXdzNF9yZXF1ZXN0In0sCiAgICB7IngtYW16LWFsZ29yaXRobSI6ICJBV1M0LUhNQUMtU0hBMjU2In0sCiAgICB7IlgtQW16LURhdGUiOiAiMjAxNTEyMjlUMDAwMDAwWiIgfSwKICAgIHsieC1pZ25vcmUtdG1wIjogInNvbWV0aGluZyIgfQogIF0KfQ=="
@@ -142,17 +171,17 @@ func TestCheckFormatContentSHA256(t *testing.T) {
 }

 type frostFSMock struct {
-	objects map[string]*object.Object
+	objects map[oid.Address]*object.Object
 }

 func newFrostFSMock() *frostFSMock {
 	return &frostFSMock{
-		objects: map[string]*object.Object{},
+		objects: map[oid.Address]*object.Object{},
 	}
 }

-func (f *frostFSMock) GetCredsObject(_ context.Context, prm tokens.PrmGetCredsObject) (*object.Object, error) {
-	obj, ok := f.objects[prm.AccessKeyID]
+func (f *frostFSMock) GetCredsObject(_ context.Context, address oid.Address) (*object.Object, error) {
+	obj, ok := f.objects[address]
 	if !ok {
 		return nil, fmt.Errorf("not found")
 	}
@@ -179,7 +208,7 @@ func TestAuthenticate(t *testing.T) {
 		GateKey: key.PublicKey(),
 	}}

-	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
+	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"))
 	require.NoError(t, err)
 	data, err := accessBox.Marshal()
 	require.NoError(t, err)
@@ -190,10 +219,10 @@ func TestAuthenticate(t *testing.T) {
 	obj.SetContainerID(addr.Container())
 	obj.SetID(addr.Object())

-	accessKeyID := getAccessKeyID(addr)
-
 	frostfs := newFrostFSMock()
-	frostfs.objects[accessKeyID] = &obj
+	frostfs.objects[addr] = &obj

+	accessKeyID := addr.Container().String() + "0" + addr.Object().String()
 	awsCreds := credentials.NewStaticCredentials(accessKeyID, secret.SecretKey, "")
 	defaultSigner := v4.NewSigner(awsCreds)
@@ -384,13 +413,13 @@ func TestAuthenticate(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			creds := tokens.New(bigConfig)
-			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
+			cntr := New(creds, tc.prefixes)
 			box, err := cntr.Authenticate(tc.request)

 			if tc.err {
 				require.Error(t, err)
 				if tc.errCode > 0 {
-					err = frosterr.UnwrapErr(err)
+					err = frostfsErrors.UnwrapErr(err)
 					require.Equal(t, errors.GetAPIError(tc.errCode), err)
 				}
 			} else {
@@ -426,7 +455,7 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 		GateKey: key.PublicKey(),
 	}}

-	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"), false)
+	accessBox, secret, err := accessbox.PackTokens(gateData, []byte("secret"))
 	require.NoError(t, err)
 	data, err := accessBox.Marshal()
 	require.NoError(t, err)
@@ -437,11 +466,10 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 	obj.SetContainerID(addr.Container())
 	obj.SetID(addr.Object())

-	accessKeyID := getAccessKeyID(addr)
-
 	frostfs := newFrostFSMock()
-	frostfs.objects[accessKeyID] = &obj
+	frostfs.objects[addr] = &obj

+	accessKeyID := addr.Container().String() + "0" + addr.Object().String()
 	invalidAccessKeyID := oidtest.Address().String() + "0" + oidtest.Address().Object().String()

 	timeToSign := time.Now()
@@ -562,13 +590,13 @@ func TestHTTPPostAuthenticate(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			creds := tokens.New(bigConfig)
-			cntr := New(creds, tc.prefixes, &centerSettingsMock{})
+			cntr := New(creds, tc.prefixes)
 			box, err := cntr.Authenticate(tc.request)

 			if tc.err {
 				require.Error(t, err)
 				if tc.errCode > 0 {
-					err = frosterr.UnwrapErr(err)
+					err = frostfsErrors.UnwrapErr(err)
 					require.Equal(t, errors.GetAPIError(tc.errCode), err)
 				}
 			} else {
@@ -605,7 +633,3 @@ func getRequestWithMultipartForm(t *testing.T, policy, creds, date, sign, fieldN
 	return req
 }
-
-func getAccessKeyID(addr oid.Address) string {
-	return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
-}

@@ -23,7 +23,6 @@ type PresignData struct {
 	Region   string
 	Lifetime time.Duration
 	SignTime time.Time
-	Headers  map[string]string
 }

 // PresignRequest forms pre-signed request to access objects without aws credentials.
@@ -35,10 +34,7 @@ func PresignRequest(creds *credentials.Credentials, reqData RequestData, presign
 	}

 	req.Header.Set(AmzDate, presignData.SignTime.Format("20060102T150405Z"))
-
-	for k, v := range presignData.Headers {
-		req.Header.Set(k, v)
-	}
+	req.Header.Set(ContentTypeHdr, "text/plain")

 	signer := v4.NewSigner(creds)
 	signer.DisableURIPathEscaping = true
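For context, a hedged usage sketch of PresignRequest as it looks on the right-hand side of this hunk, with field values borrowed from the fuzz harness above; the endpoint, bucket, and region are illustrative:

```go
package auth

import (
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// presignExample builds a pre-signed GET request; accessKeyID must be the
// "<cid>0<oid>" token and secretKey the matching gate secret.
func presignExample(accessKeyID, secretKey string) (*http.Request, error) {
	awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
	return PresignRequest(awsCreds, RequestData{
		Method:   "GET",
		Endpoint: "http://localhost:8084",
		Bucket:   "my-bucket",
		Object:   "my-object",
	}, PresignData{
		Service:  "s3",
		Region:   "us-east-1",
		Lifetime: 10 * time.Minute,
		SignTime: time.Now().UTC(),
	})
}
```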

@@ -29,11 +29,11 @@ func newTokensFrostfsMock() *credentialsMock {
 }

 func (m credentialsMock) addBox(addr oid.Address, box *accessbox.Box) {
-	m.boxes[getAccessKeyID(addr)] = box
+	m.boxes[addr.String()] = box
 }

-func (m credentialsMock) GetBox(_ context.Context, _ cid.ID, accessKeyID string) (*accessbox.Box, []object.Attribute, error) {
-	box, ok := m.boxes[accessKeyID]
+func (m credentialsMock) GetBox(_ context.Context, addr oid.Address) (*accessbox.Box, []object.Attribute, error) {
+	box, ok := m.boxes[addr.String()]
 	if !ok {
 		return nil, nil, &apistatus.ObjectNotFound{}
 	}
@@ -41,11 +41,11 @@ func (m credentialsMock) GetBox
 	return box, nil, nil
 }

-func (m credentialsMock) Put(context.Context, tokens.CredentialsParam) (oid.Address, error) {
+func (m credentialsMock) Put(context.Context, cid.ID, tokens.CredentialsParam) (oid.Address, error) {
 	return oid.Address{}, nil
 }

-func (m credentialsMock) Update(context.Context, tokens.CredentialsParam) (oid.Address, error) {
+func (m credentialsMock) Update(context.Context, oid.Address, tokens.CredentialsParam) (oid.Address, error) {
 	return oid.Address{}, nil
 }
@@ -85,9 +85,8 @@ func TestCheckSign(t *testing.T) {
 	c := &Center{
 		cli:      mock,
-		reg:      NewRegexpMatcher(AuthorizationFieldRegexp),
+		reg:      NewRegexpMatcher(authorizationFieldRegexp),
 		postReg:  NewRegexpMatcher(postPolicyCredentialRegexp),
-		settings: &centerSettingsMock{},
 	}

 	box, err := c.Authenticate(req)
 	require.NoError(t, err)

@@ -30,7 +30,6 @@ type (
 		Box        *accessbox.Box
 		Attributes []object.Attribute
 		PutTime    time.Time
-		Address    *oid.Address
 	}
 )

@@ -58,8 +57,8 @@ func NewAccessBoxCache(config *Config) *AccessBoxCache {
 }

 // Get returns a cached accessbox.
-func (o *AccessBoxCache) Get(accessKeyID string) *AccessBoxCacheValue {
-	entry, err := o.cache.Get(accessKeyID)
+func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
+	entry, err := o.cache.Get(address)
 	if err != nil {
 		return nil
 	}
@@ -75,11 +74,16 @@ func (o *AccessBoxCache) Get(address oid.Address) *AccessBoxCacheValue {
 }

 // Put stores an accessbox to cache.
-func (o *AccessBoxCache) Put(accessKeyID string, val *AccessBoxCacheValue) error {
-	return o.cache.Set(accessKeyID, val)
+func (o *AccessBoxCache) Put(address oid.Address, box *accessbox.Box, attrs []object.Attribute) error {
+	val := &AccessBoxCacheValue{
+		Box:        box,
+		Attributes: attrs,
+		PutTime:    time.Now(),
+	}
+
+	return o.cache.Set(address, val)
 }

 // Delete removes an accessbox from cache.
-func (o *AccessBoxCache) Delete(accessKeyID string) {
-	o.cache.Remove(accessKeyID)
+func (o *AccessBoxCache) Delete(address oid.Address) {
+	o.cache.Remove(address)
 }
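A sketch of the address-keyed API after this change, written against the signatures shown above; construction of the cache itself is omitted since Config is not part of this hunk:

```go
package cache

import (
	"fmt"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// demoAccessBoxCache exercises Put/Get/Delete keyed by the accessbox address.
func demoAccessBoxCache(c *AccessBoxCache, addr oid.Address, box *accessbox.Box) (*accessbox.Box, error) {
	if err := c.Put(addr, box, nil); err != nil { // attrs may be nil
		return nil, err
	}
	val := c.Get(addr) // nil on miss or on an entry of the wrong type
	if val == nil {
		return nil, fmt.Errorf("cache miss for %s", addr)
	}
	c.Delete(addr) // evict explicitly, e.g. on credential rotation
	return val.Box, nil
}
```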

@@ -1,14 +1,13 @@
 package cache

 import (
-	"strings"
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-contract/frostfsid/client"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
 	cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
-	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	"github.com/nspcc-dev/neo-go/pkg/util"
@@ -23,21 +22,18 @@ func TestAccessBoxCacheType(t *testing.T) {
 	addr := oidtest.Address()

 	box := &accessbox.Box{}
-	val := &AccessBoxCacheValue{
-		Box: box,
-	}
+	var attrs []object.Attribute

-	accessKeyID := getAccessKeyID(addr)
-
-	err := cache.Put(accessKeyID, val)
+	err := cache.Put(addr, box, attrs)
 	require.NoError(t, err)
-	resVal := cache.Get(accessKeyID)
-	require.Equal(t, box, resVal.Box)
+	val := cache.Get(addr)
+	require.Equal(t, box, val.Box)
+	require.Equal(t, attrs, val.Attributes)
 	require.Equal(t, 0, observedLog.Len())

-	err = cache.cache.Set(accessKeyID, "tmp")
+	err = cache.cache.Set(addr, "tmp")
 	require.NoError(t, err)
-	assertInvalidCacheEntry(t, cache.Get(accessKeyID), observedLog)
+	assertInvalidCacheEntry(t, cache.Get(addr), observedLog)
 }

 func TestBucketsCacheType(t *testing.T) {
@@ -234,7 +230,3 @@ func getObservedLogger() (*zap.Logger, *observer.ObservedLogs) {
 	loggerCore, observedLog := observer.New(zap.WarnLevel)
 	return zap.New(loggerCore), observedLog
 }
-
-func getAccessKeyID(addr oid.Address) string {
-	return strings.ReplaceAll(addr.EncodeToString(), "/", "0")
-}

@@ -1,65 +0,0 @@
-package cache
-
-import (
-	"fmt"
-	"time"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
-	"github.com/bluele/gcache"
-	"go.uber.org/zap"
-)
-
-type (
-	// NetworkInfoCache provides cache for network info.
-	NetworkInfoCache struct {
-		cache  gcache.Cache
-		logger *zap.Logger
-	}
-
-	// NetworkInfoCacheConfig stores expiration params for cache.
-	NetworkInfoCacheConfig struct {
-		Lifetime time.Duration
-		Logger   *zap.Logger
-	}
-)
-
-const (
-	DefaultNetworkInfoCacheLifetime = 1 * time.Minute
-	networkInfoCacheSize            = 1
-	networkInfoKey                  = "network_info"
-)
-
-// DefaultNetworkInfoConfig returns new default cache expiration values.
-func DefaultNetworkInfoConfig(logger *zap.Logger) *NetworkInfoCacheConfig {
-	return &NetworkInfoCacheConfig{
-		Lifetime: DefaultNetworkInfoCacheLifetime,
-		Logger:   logger,
-	}
-}
-
-// NewNetworkInfoCache creates an object of NetworkInfoCache.
-func NewNetworkInfoCache(config *NetworkInfoCacheConfig) *NetworkInfoCache {
-	gc := gcache.New(networkInfoCacheSize).LRU().Expiration(config.Lifetime).Build()
-	return &NetworkInfoCache{cache: gc, logger: config.Logger}
-}
-
-func (c *NetworkInfoCache) Get() *netmap.NetworkInfo {
-	entry, err := c.cache.Get(networkInfoKey)
-	if err != nil {
-		return nil
-	}
-
-	result, ok := entry.(netmap.NetworkInfo)
-	if !ok {
-		c.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)))
-		return nil
-	}
-
-	return &result
-}
-
-func (c *NetworkInfoCache) Put(info netmap.NetworkInfo) error {
-	return c.cache.Set(networkInfoKey, info)
-}

api/cache/system.go
@@ -88,22 +88,6 @@ func (o *SystemCache) GetCORS(key string) *data.CORSConfiguration {
 	return result
 }

-func (o *SystemCache) GetLifecycleConfiguration(key string) *data.LifecycleConfiguration {
-	entry, err := o.cache.Get(key)
-	if err != nil {
-		return nil
-	}
-
-	result, ok := entry.(*data.LifecycleConfiguration)
-	if !ok {
-		o.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
-			zap.String("expected", fmt.Sprintf("%T", result)))
-		return nil
-	}
-
-	return result
-}
-
 func (o *SystemCache) GetSettings(key string) *data.BucketSettings {
 	entry, err := o.cache.Get(key)
 	if err != nil {
@@ -149,10 +133,6 @@ func (o *SystemCache) PutCORS(key string, obj *data.CORSConfiguration) error {
 	return o.cache.Set(key, obj)
 }

-func (o *SystemCache) PutLifecycleConfiguration(key string, obj *data.LifecycleConfiguration) error {
-	return o.cache.Set(key, obj)
-}
-
 func (o *SystemCache) PutSettings(key string, settings *data.BucketSettings) error {
 	return o.cache.Set(key, settings)
 }
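The getters above, including the deleted lifecycle pair, follow one idiom: fetch from gcache, then guard the type assertion and log a warning on mismatch. A generic sketch of that idiom (the function is mine, not part of the package's API):

```go
package cache

import (
	"fmt"

	"github.com/bluele/gcache"
	"go.uber.org/zap"
)

// getTyped fetches key from the underlying gcache and returns the zero value
// on a miss or when the stored entry has an unexpected dynamic type.
func getTyped[T any](c gcache.Cache, log *zap.Logger, key string) (T, bool) {
	var zero T
	entry, err := c.Get(key)
	if err != nil {
		return zero, false // miss or expired entry
	}
	result, ok := entry.(T)
	if !ok {
		log.Warn("invalid cache entry type",
			zap.String("actual", fmt.Sprintf("%T", entry)),
			zap.String("expected", fmt.Sprintf("%T", zero)))
		return zero, false
	}
	return result, true
}
```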

@@ -14,7 +14,6 @@ import (
 const (
 	bktSettingsObject          = ".s3-settings"
 	bktCORSConfigurationObject = ".s3-cors"
-	bktLifecycleConfigurationObject = ".s3-lifecycle"

 	VersioningUnversioned = "Unversioned"
 	VersioningEnabled     = "Enabled"
@@ -82,15 +81,6 @@ type (
 		VersionID             string
 		NoErrorOnDeleteMarker bool
 	}
-
-	// CreatedObjectInfo stores created object info.
-	CreatedObjectInfo struct {
-		ID            oid.ID
-		Size          uint64
-		HashSum       []byte
-		MD5Sum        []byte
-		CreationEpoch uint64
-	}
 )

 // SettingsObjectName is a system name for a bucket settings file.
@@ -101,10 +91,6 @@ func (b *BucketInfo) CORSObjectName() string {
 	return b.CID.EncodeToString() + bktCORSConfigurationObject
 }

-func (b *BucketInfo) LifecycleConfigurationObjectName() string {
-	return b.CID.EncodeToString() + bktLifecycleConfigurationObject
-}
-
 // VersionID returns object version from ObjectInfo.
 func (o *ObjectInfo) VersionID() string { return o.ID.EncodeToString() }

@@ -1,56 +0,0 @@
-package data
-
-import "encoding/xml"
-
-const (
-	LifecycleStatusEnabled  = "Enabled"
-	LifecycleStatusDisabled = "Disabled"
-)
-
-type (
-	LifecycleConfiguration struct {
-		XMLName xml.Name        `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LifecycleConfiguration" json:"-"`
-		Rules   []LifecycleRule `xml:"Rule"`
-	}
-
-	LifecycleRule struct {
-		Status                         string                          `xml:"Status,omitempty"`
-		AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
-		Expiration                     *LifecycleExpiration            `xml:"Expiration,omitempty"`
-		Filter                         *LifecycleRuleFilter            `xml:"Filter,omitempty"`
-		ID                             string                          `xml:"ID,omitempty"`
-		NonCurrentVersionExpiration    *NonCurrentVersionExpiration    `xml:"NoncurrentVersionExpiration,omitempty"`
-		Prefix                         string                          `xml:"Prefix,omitempty"`
-	}
-
-	AbortIncompleteMultipartUpload struct {
-		DaysAfterInitiation *int `xml:"DaysAfterInitiation,omitempty"`
-	}
-
-	LifecycleExpiration struct {
-		Date                      string  `xml:"Date,omitempty"`
-		Days                      *int    `xml:"Days,omitempty"`
-		Epoch                     *uint64 `xml:"Epoch,omitempty"`
-		ExpiredObjectDeleteMarker *bool   `xml:"ExpiredObjectDeleteMarker,omitempty"`
-	}
-
-	LifecycleRuleFilter struct {
-		And                   *LifecycleRuleAndOperator `xml:"And,omitempty"`
-		ObjectSizeGreaterThan *uint64                   `xml:"ObjectSizeGreaterThan,omitempty"`
-		ObjectSizeLessThan    *uint64                   `xml:"ObjectSizeLessThan,omitempty"`
-		Prefix                string                    `xml:"Prefix,omitempty"`
-		Tag                   *Tag                      `xml:"Tag,omitempty"`
-	}
-
-	LifecycleRuleAndOperator struct {
-		ObjectSizeGreaterThan *uint64 `xml:"ObjectSizeGreaterThan,omitempty"`
-		ObjectSizeLessThan    *uint64 `xml:"ObjectSizeLessThan,omitempty"`
-		Prefix                string  `xml:"Prefix,omitempty"`
-		Tags                  []Tag   `xml:"Tag"`
-	}
-
-	NonCurrentVersionExpiration struct {
-		NewerNonCurrentVersions *int `xml:"NewerNoncurrentVersions,omitempty"`
-		NonCurrentDays          *int `xml:"NoncurrentDays,omitempty"`
-	}
-)
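The deleted types model the S3 lifecycle XML schema. An illustrative round-trip for a minimal rule, assuming the LifecycleConfiguration types above are in scope; the rule contents are made up:

```go
package data

import "encoding/xml"

// parseLifecycleExample unmarshals a one-rule configuration: expire objects
// under tmp/ after 7 days.
func parseLifecycleExample() (*LifecycleConfiguration, error) {
	raw := []byte(`<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Rule>
    <ID>expire-tmp</ID>
    <Status>Enabled</Status>
    <Filter><Prefix>tmp/</Prefix></Filter>
    <Expiration><Days>7</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`)

	var cfg LifecycleConfiguration
	if err := xml.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
```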

@@ -72,7 +72,6 @@ type BaseNodeVersion struct {
 	Created        *time.Time
 	Owner          *user.ID
 	IsDeleteMarker bool
-	CreationEpoch  uint64
 }

 func (v *BaseNodeVersion) GetETag(md5Enabled bool) string {
@@ -111,7 +110,6 @@ type MultipartInfo struct {
 	Meta          map[string]string
 	CopiesNumbers []uint32
 	Finished      bool
-	CreationEpoch uint64
 }

 // PartInfo is upload information about part.
@@ -126,14 +124,6 @@ type PartInfo struct {
 	Created  time.Time `json:"created"`
 }

-type PartInfoExtended struct {
-	PartInfo
-
-	// Timestamp is used to find the latest version of part info in case of tree split
-	// when there are multiple nodes for the same part.
-	Timestamp uint64
-}
-
 // ToHeaderString form short part representation to use in S3-Completed-Parts header.
 func (p *PartInfo) ToHeaderString() string {
 	// ETag value contains SHA256 checksum which is used while getting object parts attributes.

@@ -1,13 +1,10 @@
 package errors

 import (
-	"errors"
 	"fmt"
 	"net/http"

-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
-	frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
+	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 )

 type (
@@ -190,9 +187,6 @@ const (
 	ErrInvalidRequestLargeCopy
 	ErrInvalidStorageClass
 	VersionIDMarkerWithoutKeyMarker
-	ErrInvalidRangeLength
-	ErrRangeOutOfBounds
-	ErrMissingContentRange

 	ErrMalformedJSON
 	ErrInsecureClientRequest
@@ -1745,30 +1739,12 @@ var errorCodes = errorCodeMap{
 		Description:    "Part number must be an integer between 1 and 10000, inclusive",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	ErrInvalidRangeLength: {
-		ErrCode:        ErrInvalidRangeLength,
-		Code:           "InvalidRange",
-		Description:    "Provided range length must be equal to content length",
-		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
-	},
-	ErrRangeOutOfBounds: {
-		ErrCode:        ErrRangeOutOfBounds,
-		Code:           "InvalidRange",
-		Description:    "Provided range is outside of object bounds",
-		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
-	},
-	ErrMissingContentRange: {
-		ErrCode:        ErrMissingContentRange,
-		Code:           "MissingContentRange",
-		Description:    "Content-Range header is mandatory for this type of request",
-		HTTPStatusCode: http.StatusBadRequest,
-	},

 	// Add your error structure here.
 }

 // IsS3Error checks if the provided error is a specific s3 error.
 func IsS3Error(err error, code ErrorCode) bool {
-	err = frosterr.UnwrapErr(err)
+	err = frosterrors.UnwrapErr(err)
 	e, ok := err.(Error)
 	return ok && e.ErrCode == code
 }
@@ -1805,26 +1781,6 @@ func GetAPIErrorWithError(code ErrorCode, err error) Error {
 	return errorCodes.toAPIErrWithErr(code, err)
 }

-// TransformToS3Error converts FrostFS error to the corresponding S3 error type.
-func TransformToS3Error(err error) error {
-	err = frosterr.UnwrapErr(err) // this wouldn't work with errors.Join
-	var s3err Error
-	if errors.As(err, &s3err) {
-		return err
-	}
-
-	if errors.Is(err, frostfs.ErrAccessDenied) ||
-		errors.Is(err, tree.ErrNodeAccessDenied) {
-		return GetAPIError(ErrAccessDenied)
-	}
-
-	if errors.Is(err, frostfs.ErrGatewayTimeout) {
-		return GetAPIError(ErrGatewayTimeout)
-	}
-
-	return GetAPIError(ErrInternalError)
-}
-
 // ObjectError -- error that is linked to a specific object.
 type ObjectError struct {
 	Err error
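A hedged sketch of how a caller might use the removed TransformToS3Error before writing a response; the helper name is mine, and it relies on the Error fields visible in the errorCodes map above:

```go
package errors

import (
	stderrors "errors"
	"net/http"
)

// httpStatusFor maps any layer error onto the HTTP status of the
// corresponding S3 error, falling back to 500.
func httpStatusFor(err error) int {
	err = TransformToS3Error(err) // always yields an s3 Error value
	var s3err Error
	if stderrors.As(err, &s3err) {
		return s3err.HTTPStatusCode
	}
	return http.StatusInternalServerError
}
```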

@@ -2,12 +2,7 @@ package errors

 import (
 	"errors"
-	"fmt"
 	"testing"
-
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
-	"github.com/stretchr/testify/require"
 )

 func BenchmarkErrCode(b *testing.B) {
@@ -29,56 +24,3 @@ func BenchmarkErrorsIs(b *testing.B) {
 		}
 	}
 }
-
-func TestTransformS3Errors(t *testing.T) {
-	for _, tc := range []struct {
-		name     string
-		err      error
-		expected ErrorCode
-	}{
-		{
-			name:     "simple std error to internal error",
-			err:      errors.New("some error"),
-			expected: ErrInternalError,
-		},
-		{
-			name:     "layer access denied error to s3 access denied error",
-			err:      frostfs.ErrAccessDenied,
-			expected: ErrAccessDenied,
-		},
-		{
-			name:     "wrapped layer access denied error to s3 access denied error",
-			err:      fmt.Errorf("wrap: %w", frostfs.ErrAccessDenied),
-			expected: ErrAccessDenied,
-		},
-		{
-			name:     "layer node access denied error to s3 access denied error",
-			err:      tree.ErrNodeAccessDenied,
-			expected: ErrAccessDenied,
-		},
-		{
-			name:     "layer gateway timeout error to s3 gateway timeout error",
-			err:      frostfs.ErrGatewayTimeout,
-			expected: ErrGatewayTimeout,
-		},
-		{
-			name:     "s3 error to s3 error",
-			err:      GetAPIError(ErrInvalidPart),
-			expected: ErrInvalidPart,
-		},
-		{
-			name:     "wrapped s3 error to s3 error",
-			err:      fmt.Errorf("wrap: %w", GetAPIError(ErrInvalidPart)),
-			expected: ErrInvalidPart,
-		},
-	} {
-		t.Run(tc.name, func(t *testing.T) {
-			err := TransformToS3Error(tc.err)
-			s3err, ok := err.(Error)
-			require.True(t, ok, "error must be s3 error")
-			require.Equalf(t, tc.expected, s3err.ErrCode,
-				"expected: '%s', got: '%s'",
-				GetAPIError(tc.expected).Code, GetAPIError(s3err.ErrCode).Code)
-		})
-	}
-}

View file

@ -52,18 +52,18 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err) h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, h.encodeBucketCannedACL(ctx, bktInfo, settings)); err != nil { if err = middleware.EncodeToResponse(w, h.encodeBucketCannedACL(ctx, bktInfo, settings)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
return return
} }
} }
@ -127,18 +127,17 @@ func getTokenIssuerKey(box *accessbox.Box) (*keys.PublicKey, error) {
} }
func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err) h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
return return
} }
@ -150,30 +149,30 @@ func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request,
defer func() { defer func() {
if errBody := r.Body.Close(); errBody != nil { if errBody := r.Body.Close(); errBody != nil {
h.reqLogger(ctx).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody)) h.reqLogger(r.Context()).Warn(logs.CouldNotCloseRequestBody, zap.Error(errBody))
} }
}() }()
written, err := io.Copy(io.Discard, r.Body) written, err := io.Copy(io.Discard, r.Body)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't read request body", reqInfo, err) h.logAndSendError(w, "couldn't read request body", reqInfo, err)
return return
} }
if written != 0 || len(r.Header.Get(api.AmzACL)) == 0 { if written != 0 || len(r.Header.Get(api.AmzACL)) == 0 {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return return
} }
cannedACL, err := parseCannedACL(r.Header) cannedACL, err := parseCannedACL(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse canned ACL", reqInfo, err) h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
return return
} }
chainRules := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID) chainRules := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID)
if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chainRules); err != nil { if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chainRules); err != nil {
h.logAndSendError(ctx, w, "failed to add morph rule chains", reqInfo, err) h.logAndSendError(w, "failed to add morph rule chains", reqInfo, err)
return return
} }
@ -185,7 +184,7 @@ func (h *handler) putBucketACLAPEHandler(w http.ResponseWriter, r *http.Request,
} }
if err = h.obj.PutBucketSettings(ctx, sp); err != nil { if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
h.logAndSendError(ctx, w, "couldn't save bucket settings", reqInfo, err, h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
zap.String("container_id", bktInfo.CID.EncodeToString())) zap.String("container_id", bktInfo.CID.EncodeToString()))
return return
} }
@ -199,18 +198,18 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err) h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, h.encodePrivateCannedACL(ctx, bktInfo, settings)); err != nil { if err = middleware.EncodeToResponse(w, h.encodePrivateCannedACL(ctx, bktInfo, settings)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
return return
} }
} }
@ -220,20 +219,19 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
reqInfo := middleware.GetReqInfo(ctx) reqInfo := middleware.GetReqInfo(ctx)
if _, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil { if _, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
} }
func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -242,13 +240,13 @@ func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Re
if strings.Contains(err.Error(), "not found") { if strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error()) err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
} }
h.logAndSendError(ctx, w, "failed to get policy from storage", reqInfo, err) h.logAndSendError(w, "failed to get policy from storage", reqInfo, err)
return return
} }
var bktPolicy engineiam.Policy var bktPolicy engineiam.Policy
if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil { if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
h.logAndSendError(ctx, w, "could not parse bucket policy", reqInfo, err) h.logAndSendError(w, "could not parse bucket policy", reqInfo, err)
return return
} }
@ -265,18 +263,17 @@ func (h *handler) GetBucketPolicyStatusHandler(w http.ResponseWriter, r *http.Re
} }
if err = middleware.EncodeToResponse(w, policyStatus); err != nil { if err = middleware.EncodeToResponse(w, policyStatus); err != nil {
h.logAndSendError(ctx, w, "encode and write response", reqInfo, err) h.logAndSendError(w, "encode and write response", reqInfo, err)
return return
} }
} }
func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -285,7 +282,7 @@ func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
if strings.Contains(err.Error(), "not found") { if strings.Contains(err.Error(), "not found") {
err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error()) err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucketPolicy), err.Error())
} }
h.logAndSendError(ctx, w, "failed to get policy from storage", reqInfo, err) h.logAndSendError(w, "failed to get policy from storage", reqInfo, err)
return return
} }
@ -293,23 +290,22 @@ func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
if _, err = w.Write(jsonPolicy); err != nil { if _, err = w.Write(jsonPolicy); err != nil {
h.logAndSendError(ctx, w, "write json policy to client", reqInfo, err) h.logAndSendError(w, "write json policy to client", reqInfo, err)
} }
} }
func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
chainIDs := []chain.ID{getBucketChainID(chain.S3, bktInfo), getBucketChainID(chain.Ingress, bktInfo)} chainIDs := []chain.ID{getBucketChainID(chain.S3, bktInfo), getBucketChainID(chain.Ingress, bktInfo)}
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil { if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err) h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
return return
} }
} }
@ -328,41 +324,40 @@ func checkOwner(info *data.BucketInfo, owner string) error {
} }
func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
jsonPolicy, err := io.ReadAll(r.Body) jsonPolicy, err := io.ReadAll(r.Body)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "read body", reqInfo, err) h.logAndSendError(w, "read body", reqInfo, err)
return return
} }
var bktPolicy engineiam.Policy var bktPolicy engineiam.Policy
if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil { if err = json.Unmarshal(jsonPolicy, &bktPolicy); err != nil {
h.logAndSendError(ctx, w, "could not parse bucket policy", reqInfo, err) h.logAndSendError(w, "could not parse bucket policy", reqInfo, err)
return return
} }
for _, stat := range bktPolicy.Statement { for _, stat := range bktPolicy.Statement {
if len(stat.NotResource) != 0 { if len(stat.NotResource) != 0 {
h.logAndSendError(ctx, w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy)) h.logAndSendError(w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
return return
} }
if len(stat.NotPrincipal) != 0 && stat.Effect == engineiam.AllowEffect { if len(stat.NotPrincipal) != 0 && stat.Effect == engineiam.AllowEffect {
h.logAndSendError(ctx, w, "invalid NotPrincipal", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicyNotPrincipal)) h.logAndSendError(w, "invalid NotPrincipal", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicyNotPrincipal))
return return
} }
for _, resource := range stat.Resource { for _, resource := range stat.Resource {
if reqInfo.BucketName != strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0] { if reqInfo.BucketName != strings.Split(strings.TrimPrefix(resource, arnAwsPrefix), "/")[0] {
h.logAndSendError(ctx, w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy)) h.logAndSendError(w, "policy resource mismatched bucket", reqInfo, errors.GetAPIError(errors.ErrMalformedPolicy))
return return
} }
} }
@ -370,7 +365,7 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
s3Chain, err := engineiam.ConvertToS3Chain(bktPolicy, h.frostfsid) s3Chain, err := engineiam.ConvertToS3Chain(bktPolicy, h.frostfsid)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not convert s3 policy to chain policy", reqInfo, err) h.logAndSendError(w, "could not convert s3 policy to chain policy", reqInfo, err)
return return
} }
s3Chain.ID = getBucketChainID(chain.S3, bktInfo) s3Chain.ID = getBucketChainID(chain.S3, bktInfo)
@ -379,10 +374,10 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
if err == nil { if err == nil {
nativeChain.ID = getBucketChainID(chain.Ingress, bktInfo) nativeChain.ID = getBucketChainID(chain.Ingress, bktInfo)
} else if !stderrors.Is(err, engineiam.ErrActionsNotApplicable) { } else if !stderrors.Is(err, engineiam.ErrActionsNotApplicable) {
h.logAndSendError(ctx, w, "could not convert s3 policy to native chain policy", reqInfo, err) h.logAndSendError(w, "could not convert s3 policy to native chain policy", reqInfo, err)
return return
} else { } else {
h.reqLogger(ctx).Warn(logs.PolicyCouldntBeConvertedToNativeRules) h.reqLogger(r.Context()).Warn(logs.PolicyCouldntBeConvertedToNativeRules)
} }
chainsToSave := []*chain.Chain{s3Chain} chainsToSave := []*chain.Chain{s3Chain}
@ -391,7 +386,7 @@ func (h *handler) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
} }
if err = h.ape.PutBucketPolicy(reqInfo.Namespace, bktInfo.CID, jsonPolicy, chainsToSave); err != nil { if err = h.ape.PutBucketPolicy(reqInfo.Namespace, bktInfo.CID, jsonPolicy, chainsToSave); err != nil {
h.logAndSendError(ctx, w, "failed to update policy in contract", reqInfo, err) h.logAndSendError(w, "failed to update policy in contract", reqInfo, err)
return return
} }
} }

View file
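
Every hunk in this file applies the same refactor: capture ctx := r.Context() once at the top of the handler and pass it explicitly into logAndSendError, instead of re-deriving the context at each call site. A minimal self-contained sketch of that shape; the helper here is a stub that only logs and writes a plain error, whereas the real one renders an S3 XML error response.

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
)

type ctxKey string

const reqIDKey ctxKey = "req-id"

// logAndSendError mirrors the refactored helper's shape: it takes the request
// context explicitly so log records can carry request-scoped values.
func logAndSendError(ctx context.Context, w http.ResponseWriter, msg string, err error) {
	log.Printf("reqID=%v: %s: %v", ctx.Value(reqIDKey), msg, err)
	http.Error(w, msg, http.StatusInternalServerError)
}

func handler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context() // captured once per handler, as in the hunks above
	if err := doWork(ctx); err != nil {
		logAndSendError(ctx, w, "could not do work", err)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func doWork(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return errors.New("not implemented") // placeholder failure
	}
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
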

@ -11,7 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
@ -28,12 +28,12 @@ func TestPutObjectACLErrorAPE(t *testing.T) {
info := createBucket(hc, bktName) info := createBucket(hc, bktName)
putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported) putObjectWithHeadersAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) // only `private` canned acl is allowed, which is actually ignored putObjectWithHeaders(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate})
putObjectWithHeaders(hc, bktName, objName, nil) putObjectWithHeaders(hc, bktName, objName, nil)
aclBody := &AccessControlPolicy{} aclBody := &AccessControlPolicy{}
putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported) putObjectACLAssertS3Error(hc, bktName, objName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
aclRes := getObjectACL(hc, bktName, objName) aclRes := getObjectACL(hc, bktName, objName)
checkPrivateACL(t, aclRes, info.Key.PublicKey()) checkPrivateACL(t, aclRes, info.Key.PublicKey())
@ -49,7 +49,7 @@ func TestCreateObjectACLErrorAPE(t *testing.T) {
copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPublic}}, http.StatusBadRequest) copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPublic}}, http.StatusBadRequest)
copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPrivate}}, http.StatusOK) copyObject(hc, bktName, objName, objNameCopy, CopyMeta{Headers: map[string]string{api.AmzACL: basicACLPrivate}}, http.StatusOK)
createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, apierr.ErrAccessControlListNotSupported) createMultipartUploadAssertS3Error(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPublic}, s3errors.ErrAccessControlListNotSupported)
createMultipartUpload(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate}) createMultipartUpload(hc, bktName, objName, map[string]string{api.AmzACL: basicACLPrivate})
} }
@ -60,7 +60,7 @@ func TestBucketACLAPE(t *testing.T) {
info := createBucket(hc, bktName) info := createBucket(hc, bktName)
aclBody := &AccessControlPolicy{} aclBody := &AccessControlPolicy{}
putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, apierr.ErrAccessControlListNotSupported) putBucketACLAssertS3Error(hc, bktName, info.Box, nil, aclBody, s3errors.ErrAccessControlListNotSupported)
aclRes := getBucketACL(hc, bktName) aclRes := getBucketACL(hc, bktName)
checkPrivateACL(t, aclRes, info.Key.PublicKey()) checkPrivateACL(t, aclRes, info.Key.PublicKey())
@ -113,7 +113,7 @@ func TestBucketPolicy(t *testing.T) {
createTestBucket(hc, bktName) createTestBucket(hc, bktName)
getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy) getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
newPolicy := engineiam.Policy{ newPolicy := engineiam.Policy{
Version: "2012-10-17", Version: "2012-10-17",
@ -125,7 +125,7 @@ func TestBucketPolicy(t *testing.T) {
}}, }},
} }
putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicy) putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicy)
newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*" newPolicy.Statement[0].Resource[0] = arnAwsPrefix + bktName + "/*"
putBucketPolicy(hc, bktName, newPolicy) putBucketPolicy(hc, bktName, newPolicy)
@ -140,7 +140,7 @@ func TestBucketPolicyStatus(t *testing.T) {
createTestBucket(hc, bktName) createTestBucket(hc, bktName)
getBucketPolicy(hc, bktName, apierr.ErrNoSuchBucketPolicy) getBucketPolicy(hc, bktName, s3errors.ErrNoSuchBucketPolicy)
newPolicy := engineiam.Policy{ newPolicy := engineiam.Policy{
Version: "2012-10-17", Version: "2012-10-17",
@ -152,7 +152,7 @@ func TestBucketPolicyStatus(t *testing.T) {
}}, }},
} }
putBucketPolicy(hc, bktName, newPolicy, apierr.ErrMalformedPolicyNotPrincipal) putBucketPolicy(hc, bktName, newPolicy, s3errors.ErrMalformedPolicyNotPrincipal)
newPolicy.Statement[0].NotPrincipal = nil newPolicy.Statement[0].NotPrincipal = nil
newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}} newPolicy.Statement[0].Principal = map[engineiam.PrincipalType][]string{engineiam.Wildcard: {}}
@ -191,7 +191,6 @@ func TestDeleteBucketWithPolicy(t *testing.T) {
require.Len(t, hc.h.ape.(*apeMock).policyMap, 1) require.Len(t, hc.h.ape.(*apeMock).policyMap, 1)
require.Len(t, hc.h.ape.(*apeMock).chainMap[engine.ContainerTarget(bi.CID.EncodeToString())], 4) require.Len(t, hc.h.ape.(*apeMock).chainMap[engine.ContainerTarget(bi.CID.EncodeToString())], 4)
hc.owner = bi.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent) deleteBucket(t, hc, bktName, http.StatusNoContent)
require.Empty(t, hc.h.ape.(*apeMock).policyMap) require.Empty(t, hc.h.ape.(*apeMock).policyMap)
@ -222,7 +221,7 @@ func TestPutBucketPolicy(t *testing.T) {
assertStatus(hc.t, w, http.StatusOK) assertStatus(hc.t, w, http.StatusOK)
} }
func getBucketPolicy(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) engineiam.Policy { func getBucketPolicy(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) engineiam.Policy {
w, r := prepareTestRequest(hc, bktName, "", nil) w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().GetBucketPolicyHandler(w, r) hc.Handler().GetBucketPolicyHandler(w, r)
@ -232,13 +231,13 @@ func getBucketPolicy(hc *handlerContext, bktName string, errCode ...apierr.Error
err := json.NewDecoder(w.Result().Body).Decode(&policy) err := json.NewDecoder(w.Result().Body).Decode(&policy)
require.NoError(hc.t, err) require.NoError(hc.t, err)
} else { } else {
assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0])) assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
} }
return policy return policy
} }
func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...apierr.ErrorCode) PolicyStatus { func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...s3errors.ErrorCode) PolicyStatus {
w, r := prepareTestRequest(hc, bktName, "", nil) w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().GetBucketPolicyStatusHandler(w, r) hc.Handler().GetBucketPolicyStatusHandler(w, r)
@ -248,13 +247,13 @@ func getBucketPolicyStatus(hc *handlerContext, bktName string, errCode ...apierr
err := xml.NewDecoder(w.Result().Body).Decode(&policyStatus) err := xml.NewDecoder(w.Result().Body).Decode(&policyStatus)
require.NoError(hc.t, err) require.NoError(hc.t, err)
} else { } else {
assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0])) assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
} }
return policyStatus return policyStatus
} }
func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...apierr.ErrorCode) { func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Policy, errCode ...s3errors.ErrorCode) {
body, err := json.Marshal(bktPolicy) body, err := json.Marshal(bktPolicy)
require.NoError(hc.t, err) require.NoError(hc.t, err)
@ -264,7 +263,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy engineiam.Pol
if len(errCode) == 0 { if len(errCode) == 0 {
assertStatus(hc.t, w, http.StatusOK) assertStatus(hc.t, w, http.StatusOK)
} else { } else {
assertS3Error(hc.t, w, apierr.GetAPIError(errCode[0])) assertS3Error(hc.t, w, s3errors.GetAPIError(errCode[0]))
} }
} }
@ -313,9 +312,9 @@ func createBucket(hc *handlerContext, bktName string) *createBucketInfo {
} }
} }
func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code apierr.ErrorCode) { func createBucketAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, code s3errors.ErrorCode) {
w := createBucketBase(hc, bktName, box) w := createBucketBase(hc, bktName, box)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, s3errors.GetAPIError(code))
} }
func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder { func createBucketBase(hc *handlerContext, bktName string, box *accessbox.Box) *httptest.ResponseRecorder {
@ -331,9 +330,9 @@ func putBucketACL(hc *handlerContext, bktName string, box *accessbox.Box, header
assertStatus(hc.t, w, http.StatusOK) assertStatus(hc.t, w, http.StatusOK)
} }
func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) { func putBucketACLAssertS3Error(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
w := putBucketACLBase(hc, bktName, box, header, body) w := putBucketACLBase(hc, bktName, box, header, body)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, s3errors.GetAPIError(code))
} }
func putBucketACLBase(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder { func putBucketACLBase(hc *handlerContext, bktName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
@ -361,9 +360,9 @@ func getBucketACLBase(hc *handlerContext, bktName string) *httptest.ResponseReco
return w return w
} }
func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code apierr.ErrorCode) { func putObjectACLAssertS3Error(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy, code s3errors.ErrorCode) {
w := putObjectACLBase(hc, bktName, objName, box, header, body) w := putObjectACLBase(hc, bktName, objName, box, header, body)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, s3errors.GetAPIError(code))
} }
func putObjectACLBase(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder { func putObjectACLBase(hc *handlerContext, bktName, objName string, box *accessbox.Box, header map[string]string, body *AccessControlPolicy) *httptest.ResponseRecorder {
@ -397,9 +396,9 @@ func putObjectWithHeaders(hc *handlerContext, bktName, objName string, headers m
return w.Header() return w.Header()
} }
func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code apierr.ErrorCode) { func putObjectWithHeadersAssertS3Error(hc *handlerContext, bktName, objName string, headers map[string]string, code s3errors.ErrorCode) {
w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil) w := putObjectWithHeadersBase(hc, bktName, objName, headers, nil, nil)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, s3errors.GetAPIError(code))
} }
func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, headers map[string]string, box *accessbox.Box, data []byte) *httptest.ResponseRecorder { func putObjectWithHeadersBase(hc *handlerContext, bktName, objName string, headers map[string]string, box *accessbox.Box, data []byte) *httptest.ResponseRecorder {

View file
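
The test helpers above (getBucketPolicy, putBucketPolicy, putBucketACLAssertS3Error, and friends) share one convention: a trailing variadic error code switches a helper from asserting success to asserting a specific S3 error. A generic sketch of that pattern with hypothetical names and string codes, not the suite's actual helpers:

package handler_test

import (
	"net/http"
	"testing"
)

// assertResponse follows the same convention: no code means expect success,
// one code means expect that specific error.
func assertResponse(t *testing.T, gotStatus int, gotCode string, errCode ...string) {
	t.Helper()
	if len(errCode) == 0 {
		if gotStatus != http.StatusOK {
			t.Fatalf("expected success, got status %d", gotStatus)
		}
		return
	}
	if gotCode != errCode[0] {
		t.Fatalf("expected error code %q, got %q", errCode[0], gotCode)
	}
}
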

@ -70,18 +70,17 @@ var validAttributes = map[string]struct{}{
} }
func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
params, err := parseGetObjectAttributeArgs(r) params, err := parseGetObjectAttributeArgs(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid request", reqInfo, err) h.logAndSendError(w, "invalid request", reqInfo, err)
return return
} }
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -91,44 +90,44 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
VersionID: params.VersionID, VersionID: params.VersionID,
} }
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p) extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not fetch object info", reqInfo, err) h.logAndSendError(w, "could not fetch object info", reqInfo, err)
return return
} }
info := extendedInfo.ObjectInfo info := extendedInfo.ObjectInfo
encryptionParams, err := formEncryptionParams(r) encryptionParams, err := formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil { if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err)) h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return return
} }
if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil { if err = checkPreconditions(info, params.Conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err) h.logAndSendError(w, "precondition failed", reqInfo, err)
return return
} }
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo) bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled()) response, err := encodeToObjectAttributesResponse(info, params, h.cfg.MD5Enabled())
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't encode object info to response", reqInfo, err) h.logAndSendError(w, "couldn't encode object info to response", reqInfo, err)
return return
} }
writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned()) writeAttributesHeaders(w.Header(), extendedInfo, bktSettings.Unversioned())
if err = middleware.EncodeToResponse(w, response); err != nil { if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }

View file
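
checkPreconditions above evaluates conditional request headers against the object before any attributes are returned. A minimal sketch of ETag-based preconditions in the If-Match / If-None-Match style; the conditional type and field names are illustrative, not the gateway's actual ones:

package main

import (
	"errors"
	"fmt"
)

type conditional struct {
	IfMatch     string // fail unless the stored ETag matches
	IfNoneMatch string // fail when the stored ETag matches
}

func checkPreconditions(etag string, c conditional) error {
	if c.IfMatch != "" && c.IfMatch != etag {
		return errors.New("precondition failed")
	}
	if c.IfNoneMatch != "" && c.IfNoneMatch == etag {
		return errors.New("precondition failed")
	}
	return nil
}

func main() {
	fmt.Println(checkPreconditions("abc", conditional{IfMatch: "abc"})) // <nil>
}
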

@ -65,7 +65,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
srcBucket, srcObject, err := path2BucketObject(src) srcBucket, srcObject, err := path2BucketObject(src)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err) h.logAndSendError(w, "invalid source copy", reqInfo, err)
return return
} }
@ -75,74 +75,74 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
} }
if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil { if srcObjPrm.BktInfo, err = h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner); err != nil {
h.logAndSendError(ctx, w, "couldn't get source bucket", reqInfo, err) h.logAndSendError(w, "couldn't get source bucket", reqInfo, err)
return return
} }
dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) dstBktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't get target bucket", reqInfo, err) h.logAndSendError(w, "couldn't get target bucket", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo) settings, err := h.obj.GetBucketSettings(ctx, dstBktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
if cannedACLStatus == aclStatusYes { if cannedACLStatus == aclStatusYes {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return return
} }
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm) extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not find object", reqInfo, err) h.logAndSendError(w, "could not find object", reqInfo, err)
return return
} }
srcObjInfo := extendedSrcObjInfo.ObjectInfo srcObjInfo := extendedSrcObjInfo.ObjectInfo
srcEncryptionParams, err := formCopySourceEncryptionParams(r) srcEncryptionParams, err := formCopySourceEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
dstEncryptionParams, err := formEncryptionParams(r) dstEncryptionParams, err := formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil { if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) || if errors.IsS3Error(err, errors.ErrInvalidEncryptionParameters) || errors.IsS3Error(err, errors.ErrSSEEncryptedObject) ||
errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) { errors.IsS3Error(err, errors.ErrInvalidSSECustomerParameters) {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, err, zap.Error(err)) h.logAndSendError(w, "encryption doesn't match object", reqInfo, err, zap.Error(err))
return return
} }
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err)) h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return return
} }
var dstSize uint64 var dstSize uint64
srcSize, err := layer.GetObjectSize(srcObjInfo) srcSize, err := layer.GetObjectSize(srcObjInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err) h.logAndSendError(w, "failed to get source object size", reqInfo, err)
return return
} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html } else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
h.logAndSendError(ctx, w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy)) h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
return return
} }
dstSize = srcSize dstSize = srcSize
args, err := parseCopyObjectArgs(r.Header) args, err := parseCopyObjectArgs(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err) h.logAndSendError(w, "could not parse request params", reqInfo, err)
return return
} }
if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) { if isCopyingToItselfForbidden(reqInfo, srcBucket, srcObject, settings, args) {
h.logAndSendError(ctx, w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest)) h.logAndSendError(w, "copying to itself without changing anything", reqInfo, errors.GetAPIError(errors.ErrInvalidCopyDest))
return return
} }
@ -153,7 +153,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
if args.TaggingDirective == replaceDirective { if args.TaggingDirective == replaceDirective {
tagSet, err = parseTaggingHeader(r.Header) tagSet, err = parseTaggingHeader(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err) h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
return return
} }
} else { } else {
@ -168,13 +168,13 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
_, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm) _, tagSet, err = h.obj.GetObjectTagging(ctx, tagPrm)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err) h.logAndSendError(w, "could not get object tagging", reqInfo, err)
return return
} }
} }
if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil { if err = checkPreconditions(srcObjInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed)) h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed))
return return
} }
@ -202,20 +202,20 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint) params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, dstBktInfo.LocationConstraint)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err) h.logAndSendError(w, "invalid copies number", reqInfo, err)
return return
} }
params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header) params.Lock, err = formObjectLock(ctx, dstBktInfo, settings.LockConfiguration, r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err) h.logAndSendError(w, "could not form object lock", reqInfo, err)
return return
} }
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)} additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
extendedDstObjInfo, err := h.obj.CopyObject(ctx, params) extendedDstObjInfo, err := h.obj.CopyObject(ctx, params)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "couldn't copy object", reqInfo, err, additional...) h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
return return
} }
dstObjInfo := extendedDstObjInfo.ObjectInfo dstObjInfo := extendedDstObjInfo.ObjectInfo
@ -224,7 +224,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339), LastModified: dstObjInfo.Created.UTC().Format(time.RFC3339),
ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())), ETag: data.Quote(dstObjInfo.ETag(h.cfg.MD5Enabled())),
}); err != nil { }); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...) h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
return return
} }
@ -239,7 +239,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
NodeVersion: extendedDstObjInfo.NodeVersion, NodeVersion: extendedDstObjInfo.NodeVersion,
} }
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil { if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err) h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
return return
} }
} }

View file
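
The size guard in the hunk above rejects single-operation copies of sources larger than layer.UploadMaxSize and points clients at multipart upload copy instead. A tiny decision sketch; the 5 GiB constant is the documented AWS cap for one CopyObject call and is assumed here to match the gateway's limit:

package main

import "fmt"

// 5 GiB: assumed value of the single-copy limit for this sketch.
const uploadMaxSize = 5 << 30

func useMultipartCopy(srcSize uint64) bool {
	return srcSize > uploadMaxSize
}

func main() {
	fmt.Println(useMultipartCopy(6 << 30)) // true: single copy would be rejected
}
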

@ -20,34 +20,32 @@ const (
) )
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
cors, err := h.obj.GetBucketCORS(ctx, bktInfo) cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err) h.logAndSendError(w, "could not get cors", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, cors); err != nil { if err = middleware.EncodeToResponse(w, cors); err != nil {
h.logAndSendError(ctx, w, "could not encode cors to response", reqInfo, err) h.logAndSendError(w, "could not encode cors to response", reqInfo, err)
return return
} }
} }
func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -59,33 +57,32 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint) p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err) h.logAndSendError(w, "invalid copies number", reqInfo, err)
return return
} }
if err = h.obj.PutBucketCORS(ctx, p); err != nil { if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
h.logAndSendError(ctx, w, "could not put cors configuration", reqInfo, err) h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
return return
} }
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
} }
func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
if err = h.obj.DeleteBucketCORS(ctx, bktInfo); err != nil { if err = h.obj.DeleteBucketCORS(r.Context(), bktInfo); err != nil {
h.logAndSendError(ctx, w, "could not delete cors", reqInfo, err) h.logAndSendError(w, "could not delete cors", reqInfo, err)
} }
w.WriteHeader(http.StatusNoContent) w.WriteHeader(http.StatusNoContent)
@ -105,7 +102,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
if reqInfo.BucketName == "" { if reqInfo.BucketName == "" {
return return
} }
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName) bktInfo, err := h.obj.GetBucketInfo(ctx, reqInfo.BucketName)
if err != nil { if err != nil {
h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err)) h.reqLogger(ctx).Warn(logs.GetBucketInfo, zap.Error(err))
return return
@ -152,22 +149,21 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
} }
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) { func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx) bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
bktInfo, err := h.getBucketInfo(ctx, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
origin := r.Header.Get(api.Origin) origin := r.Header.Get(api.Origin)
if origin == "" { if origin == "" {
h.logAndSendError(ctx, w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest)) h.logAndSendError(w, "origin request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
} }
method := r.Header.Get(api.AccessControlRequestMethod) method := r.Header.Get(api.AccessControlRequestMethod)
if method == "" { if method == "" {
h.logAndSendError(ctx, w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest)) h.logAndSendError(w, "Access-Control-Request-Method request header needed", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return return
} }
@ -177,9 +173,9 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
headers = strings.Split(requestHeaders, ", ") headers = strings.Split(requestHeaders, ", ")
} }
cors, err := h.obj.GetBucketCORS(ctx, bktInfo) cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get cors", reqInfo, err) h.logAndSendError(w, "could not get cors", reqInfo, err)
return return
} }
@ -208,7 +204,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
w.Header().Set(api.AccessControlAllowCredentials, "true") w.Header().Set(api.AccessControlAllowCredentials, "true")
} }
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
return return
@ -217,7 +213,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
} }
} }
} }
h.logAndSendError(ctx, w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied)) h.logAndSendError(w, "Forbidden", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} }
func checkSubslice(slice []string, subSlice []string) bool { func checkSubslice(slice []string, subSlice []string) bool {

View file
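
The Preflight flow above matches the request's origin, method, and headers against the stored CORS rules; checkSubslice is cut off at the end of the hunk. A plausible body, assuming plain containment semantics (every requested header must appear in the rule's allowed list); the actual implementation may differ, for instance in case handling:

package main

import "fmt"

func checkSubslice(slice []string, subSlice []string) bool {
	allowed := make(map[string]struct{}, len(slice))
	for _, s := range slice {
		allowed[s] = struct{}{}
	}
	for _, s := range subSlice {
		if _, ok := allowed[s]; !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(checkSubslice([]string{"a", "b", "c"}, []string{"a", "c"})) // true
}
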

@ -71,19 +71,13 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo) bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return
}
networkInfo, err := h.obj.GetNetworkInfo(ctx)
if err != nil {
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
return return
} }
@ -91,15 +85,14 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
BktInfo: bktInfo, BktInfo: bktInfo,
Objects: versionedObject, Objects: versionedObject,
Settings: bktSettings, Settings: bktSettings,
NetworkInfo: networkInfo,
} }
deletedObjects := h.obj.DeleteObjects(ctx, p) deletedObjects := h.obj.DeleteObjects(ctx, p)
deletedObject := deletedObjects[0] deletedObject := deletedObjects[0]
if deletedObject.Error != nil { if deletedObject.Error != nil {
if isErrObjectLocked(deletedObject.Error) { if isErrObjectLocked(deletedObject.Error) {
h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied)) h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} else { } else {
h.logAndSendError(ctx, w, "could not delete object", reqInfo, deletedObject.Error) h.logAndSendError(w, "could not delete object", reqInfo, deletedObject.Error)
} }
return return
} }
@ -134,26 +127,26 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
// Content-Md5 is required and should be set // Content-Md5 is required and should be set
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if _, ok := r.Header[api.ContentMD5]; !ok { if _, ok := r.Header[api.ContentMD5]; !ok {
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5)) h.logAndSendError(w, "missing Content-MD5", reqInfo, errors.GetAPIError(errors.ErrMissingContentMD5))
return return
} }
// Content-Length is required and should be non-zero // Content-Length is required and should be non-zero
// http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
if r.ContentLength <= 0 { if r.ContentLength <= 0 {
h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength)) h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
return return
} }
// Unmarshal list of keys to be deleted. // Unmarshal list of keys to be deleted.
requested := &DeleteObjectsRequest{} requested := &DeleteObjectsRequest{}
if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil { if err := h.cfg.NewXMLDecoder(r.Body).Decode(requested); err != nil {
h.logAndSendError(ctx, w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error())) h.logAndSendError(w, "couldn't decode body", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
return return
} }
if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete { if len(requested.Objects) == 0 || len(requested.Objects) > maxObjectsToDelete {
h.logAndSendError(ctx, w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML)) h.logAndSendError(w, "number of objects to delete must be greater than 0 and less or equal to 1000", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
return return
} }
@ -178,19 +171,13 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo) bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return
}
networkInfo, err := h.obj.GetNetworkInfo(ctx)
if err != nil {
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
return return
} }
@ -198,7 +185,6 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
BktInfo: bktInfo, BktInfo: bktInfo,
Objects: toRemove, Objects: toRemove,
Settings: bktSettings, Settings: bktSettings,
NetworkInfo: networkInfo,
IsMultiple: true, IsMultiple: true,
} }
deletedObjects := h.obj.DeleteObjects(ctx, p) deletedObjects := h.obj.DeleteObjects(ctx, p)
@ -231,28 +217,22 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
} }
if err = middleware.EncodeToResponse(w, response); err != nil { if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "could not write response", reqInfo, err) h.logAndSendError(w, "could not write response", reqInfo, err)
return return
} }
} }
func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}
if err = checkOwner(bktInfo, reqInfo.User); err != nil {
h.logAndSendError(ctx, w, "request owner id does not match bucket owner id", reqInfo, err)
return return
} }
var sessionToken *session.Container var sessionToken *session.Container
boxData, err := middleware.GetBoxData(ctx) boxData, err := middleware.GetBoxData(r.Context())
if err == nil { if err == nil {
sessionToken = boxData.Gate.SessionTokenForDelete() sessionToken = boxData.Gate.SessionTokenForDelete()
} }
@ -265,12 +245,12 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
} }
} }
if err = h.obj.DeleteBucket(ctx, &layer.DeleteBucketParams{ if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
BktInfo: bktInfo, BktInfo: bktInfo,
SessionToken: sessionToken, SessionToken: sessionToken,
SkipCheck: skipObjCheck, SkipCheck: skipObjCheck,
}); err != nil { }); err != nil {
h.logAndSendError(ctx, w, "couldn't delete bucket", reqInfo, err) h.logAndSendError(w, "couldn't delete bucket", reqInfo, err)
return return
} }
@ -281,7 +261,7 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
getBucketCannedChainID(chain.Ingress, bktInfo.CID), getBucketCannedChainID(chain.Ingress, bktInfo.CID),
} }
if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil { if err = h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
h.logAndSendError(ctx, w, "failed to delete policy from storage", reqInfo, err) h.logAndSendError(w, "failed to delete policy from storage", reqInfo, err)
return return
} }

View file
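
The multi-object delete hunk above enforces the standard S3 requirement that the request carries a Content-MD5 header, computed as the base64-encoded MD5 digest of the request body. A minimal client-side sketch of producing that header value (general S3 behavior, nothing gateway-specific):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// contentMD5 returns the Content-MD5 header value for the given body.
func contentMD5(body []byte) string {
	sum := md5.Sum(body)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	body := []byte(`<Delete><Object><Key>obj-1</Key></Object></Delete>`)
	fmt.Println(contentMD5(body)) // value for the Content-MD5 request header
}
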

@ -10,7 +10,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -37,7 +37,6 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}}) deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent) deleteBucket(t, hc, bktName, http.StatusNoContent)
} }
@ -54,12 +53,11 @@ func TestDeleteBucket(t *testing.T) {
tc := prepareHandlerContext(t) tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-removal", "object-to-delete" bktName, objName := "bucket-for-removal", "object-to-delete"
bktInfo, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName) _, objInfo := createVersionedBucketAndObject(t, tc, bktName, objName)
deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion) deleteMarkerVersion, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
require.True(t, isDeleteMarker) require.True(t, isDeleteMarker)
tc.owner = bktInfo.Owner
deleteBucket(t, tc, bktName, http.StatusConflict) deleteBucket(t, tc, bktName, http.StatusConflict)
deleteObject(t, tc, bktName, objName, objInfo.VersionID()) deleteObject(t, tc, bktName, objName, objInfo.VersionID())
deleteBucket(t, tc, bktName, http.StatusConflict) deleteBucket(t, tc, bktName, http.StatusConflict)
@ -84,7 +82,6 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {
deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}}) deleteObjects(t, hc, bktName, [][2]string{{objName, emptyVersion}})
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent) deleteBucket(t, hc, bktName, http.StatusNoContent)
} }
@ -102,7 +99,6 @@ func TestForceDeleteBucket(t *testing.T) {
addr.SetContainer(bktInfo.CID) addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID) addr.SetObject(nodeVersion.OID)
hc.owner = bktInfo.Owner
deleteBucketForce(t, hc, bktName, http.StatusConflict, "false") deleteBucketForce(t, hc, bktName, http.StatusConflict, "false")
deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true") deleteBucketForce(t, hc, bktName, http.StatusNoContent, "true")
} }
@ -135,7 +131,7 @@ func TestDeleteObjectsError(t *testing.T) {
addr.SetContainer(bktInfo.CID) addr.SetContainer(bktInfo.CID)
addr.SetObject(nodeVersion.OID) addr.SetObject(nodeVersion.OID)
expectedError := apierr.GetAPIError(apierr.ErrAccessDenied) expectedError := apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
hc.tp.SetObjectError(addr, expectedError) hc.tp.SetObjectError(addr, expectedError)
w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}}) w := deleteObjectsBase(hc, bktName, [][2]string{{objName, nodeVersion.OID.EncodeToString()}})
@ -461,17 +457,6 @@ func TestDeleteObjectCheckMarkerReturn(t *testing.T) {
require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2) require.Equal(t, deleteMarkerVersion, deleteMarkerVersion2)
} }
func TestDeleteBucketByNotOwner(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-name"
bktInfo := createTestBucket(hc, bktName)
deleteBucket(t, hc, bktName, http.StatusForbidden)
hc.owner = bktInfo.Owner
deleteBucket(t, hc, bktName, http.StatusNoContent)
}
func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) { func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
bktInfo := createTestBucket(tc, bktName) bktInfo := createTestBucket(tc, bktName)
@ -568,9 +553,9 @@ func checkNotFound(t *testing.T, hc *handlerContext, bktName, objName, version s
assertStatus(t, w, http.StatusNotFound) assertStatus(t, w, http.StatusNotFound)
} }
func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) { func headObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apiErrors.ErrorCode) {
w := headObjectBase(hc, bktName, objName, version) w := headObjectBase(hc, bktName, objName, version)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, apiErrors.GetAPIError(code))
} }
func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) { func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version string) {
@ -578,18 +563,6 @@ func checkFound(t *testing.T, hc *handlerContext, bktName, objName, version stri
assertStatus(t, w, http.StatusOK) assertStatus(t, w, http.StatusOK)
} }
func headObjectWithHeaders(hc *handlerContext, bktName, objName, version string, headers map[string]string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Add(api.QueryVersionID, version)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
for k, v := range headers {
r.Header.Set(k, v)
}
hc.Handler().HeadObjectHandler(w, r)
return w
}
func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder { func headObjectBase(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {
query := make(url.Values) query := make(url.Values)
query.Add(api.QueryVersionID, version) query.Add(api.QueryVersionID, version)

View file

@ -16,7 +16,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -38,7 +37,7 @@ func TestSimpleGetEncrypted(t *testing.T) {
objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName}) objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
require.NoError(t, err) require.NoError(t, err)
obj, err := tc.MockedPool().GetObject(tc.Context(), frostfs.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID}) obj, err := tc.MockedPool().GetObject(tc.Context(), layer.PrmObjectGet{Container: bktInfo.CID, Object: objInfo.ID})
require.NoError(t, err) require.NoError(t, err)
encryptedContent, err := io.ReadAll(obj.Payload) encryptedContent, err := io.ReadAll(obj.Payload)
require.NoError(t, err) require.NoError(t, err)
@ -48,25 +47,6 @@ func TestSimpleGetEncrypted(t *testing.T) {
require.Equal(t, content, string(response)) require.Equal(t, content, string(response))
} }
func TestMD5HeaderBadOrEmpty(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-sse-c", "object-to-encrypt"
createTestBucket(tc, bktName)
content := "content"
headers := map[string]string{
api.ContentMD5: "",
}
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrInvalidDigest)
headers = map[string]string{
api.ContentMD5: "YWJjMTIzIT8kKiYoKSctPUB+",
}
putEncryptedObjectWithHeadersErr(t, tc, bktName, objName, content, headers, errors.ErrBadDigest)
}
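The removed test separates two Content-MD5 failure modes: an empty header value is rejected as ErrInvalidDigest, while a syntactically valid base64 value that does not match the payload digest is rejected as ErrBadDigest. For reference, a valid header value is the standard base64 encoding of the raw 16-byte MD5 sum, not a hex string; a hedged helper sketch (computeContentMD5 is a hypothetical name):

import (
    "crypto/md5"
    "encoding/base64"
)

// computeContentMD5 returns a Content-MD5 value the digest checks accept:
// base64 of the raw 16-byte MD5 of the request body.
func computeContentMD5(body []byte) string {
    sum := md5.Sum(body) // [16]byte
    return base64.StdEncoding.EncodeToString(sum[:])
}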
func TestGetEncryptedRange(t *testing.T) { func TestGetEncryptedRange(t *testing.T) {
tc := prepareHandlerContext(t) tc := prepareHandlerContext(t)
@ -308,21 +288,6 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID
return w return w
} }
func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
assertStatus(hc.t, w, http.StatusNoContent)
}
func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().AbortMultipartUploadHandler(w, r)
return w
}
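These removed helpers drive the S3 AbortMultipartUpload operation, which on the wire is a DELETE on the object key carrying the upload ID as a query parameter and answers 204 No Content on success. A condensed sketch of the request the helper assembles (uploadIDQuery is the handler package's constant for the uploadId parameter):

// DELETE /{bucket}/{key}?uploadId={uploadID}  ->  204 No Content
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().AbortMultipartUploadHandler(w, r)
assertStatus(hc.t, w, http.StatusNoContent)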
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) { func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
return uploadPartBase(hc, bktName, objName, true, uploadID, num, size) return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
} }
@ -379,15 +344,6 @@ func putEncryptedObject(t *testing.T, tc *handlerContext, bktName, objName, cont
assertStatus(t, w, http.StatusOK) assertStatus(t, w, http.StatusOK)
} }
func putEncryptedObjectWithHeadersErr(t *testing.T, tc *handlerContext, bktName, objName, content string, headers map[string]string, code errors.ErrorCode) {
body := bytes.NewReader([]byte(content))
w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
setHeaders(r, headers)
tc.Handler().PutObjectHandler(w, r)
assertS3Error(t, w, errors.GetAPIError(code))
}
func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) { func getEncryptedObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header) {
w, r := prepareTestRequest(hc, bktName, objName, nil) w, r := prepareTestRequest(hc, bktName, objName, nil)
setEncryptHeaders(r) setEncryptHeaders(r)
@ -399,15 +355,6 @@ func getObject(hc *handlerContext, bktName, objName string) ([]byte, http.Header
return getObjectBase(hc, w, r) return getObjectBase(hc, w, r)
} }
func getObjectWithHeaders(hc *handlerContext, bktName, objName string, headers map[string]string) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, objName, nil)
for k, v := range headers {
r.Header.Set(k, v)
}
hc.Handler().GetObjectHandler(w, r)
return w
}
func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) { func getObjectBase(hc *handlerContext, w *httptest.ResponseRecorder, r *http.Request) ([]byte, http.Header) {
hc.Handler().GetObjectHandler(w, r) hc.Handler().GetObjectHandler(w, r)
assertStatus(hc.t, w, http.StatusOK) assertStatus(hc.t, w, http.StatusOK)

View file

@ -78,27 +78,6 @@ func addSSECHeaders(responseHeader http.Header, requestHeader http.Header) {
responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5)) responseHeader.Set(api.AmzServerSideEncryptionCustomerKeyMD5, requestHeader.Get(api.AmzServerSideEncryptionCustomerKeyMD5))
} }
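addSSECHeaders mirrors S3's SSE-C behaviour of echoing the customer-key headers back on the response. For context, an SSE-C request carries three headers; a hedged example (the Algorithm and Key constant names are assumed to follow the same api.AmzServerSideEncryptionCustomer* pattern as the KeyMD5 constant above):

r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, "AES256")
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, keyB64)    // base64 of the raw 256-bit key
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, md5B64) // base64 of the MD5 of the raw key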
func writeNotModifiedHeaders(h http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, isBucketUnversioned, md5Enabled bool) {
h.Set(api.ETag, data.Quote(extendedInfo.ObjectInfo.ETag(md5Enabled)))
h.Set(api.LastModified, extendedInfo.ObjectInfo.Created.UTC().Format(http.TimeFormat))
h.Set(api.AmzTaggingCount, strconv.Itoa(tagSetLength))
if !isBucketUnversioned {
h.Set(api.AmzVersionID, extendedInfo.Version())
}
if cacheControl := extendedInfo.ObjectInfo.Headers[api.CacheControl]; cacheControl != "" {
h.Set(api.CacheControl, cacheControl)
}
for key, val := range extendedInfo.ObjectInfo.Headers {
if layer.IsSystemHeader(key) {
continue
}
h[api.MetadataPrefix+key] = []string{val}
}
}
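writeNotModifiedHeaders backfills the headers S3 still returns on a 304: ETag, Last-Modified, x-amz-tagging-count, the version ID on versioned buckets, Cache-Control, and user metadata. The tests later in this diff show the observable effect; a condensed usage sketch grounded in those tests:

w := getObjectWithHeaders(hc, bktName, objName, map[string]string{api.IfNoneMatch: etag})
// 304 Not Modified, yet the object headers are present:
require.Equal(t, http.StatusNotModified, w.Code)
require.Equal(t, etag, w.Header().Get(api.ETag))
require.NotEmpty(t, w.Header().Get(api.LastModified))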
func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int, func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.ExtendedObjectInfo, tagSetLength int,
isBucketUnversioned, md5Enabled bool) { isBucketUnversioned, md5Enabled bool) {
info := extendedInfo.ObjectInfo info := extendedInfo.ObjectInfo
@ -150,19 +129,18 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
var ( var (
params *layer.RangeParams params *layer.RangeParams
ctx = r.Context() reqInfo = middleware.GetReqInfo(r.Context())
reqInfo = middleware.GetReqInfo(ctx)
) )
conditional, err := parseConditionalHeaders(r.Header) conditional, err := parseConditionalHeaders(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err) h.logAndSendError(w, "could not parse request params", reqInfo, err)
return return
} }
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -172,16 +150,37 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID), VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
} }
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p) extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not find object", reqInfo, err) h.logAndSendError(w, "could not find object", reqInfo, err)
return return
} }
info := extendedInfo.ObjectInfo info := extendedInfo.ObjectInfo
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo) if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, err)
return
}
encryptionParams, err := formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
fullSize, err := layer.GetObjectSize(info)
if err != nil {
h.logAndSendError(w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
h.logAndSendError(w, "could not parse range header", reqInfo, err)
return return
} }
@ -191,48 +190,24 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
VersionID: info.VersionID(), VersionID: info.VersionID(),
} }
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion) tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) { if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err) h.logAndSendError(w, "could not get object meta data", reqInfo, err)
return return
} }
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil { if layer.IsAuthenticatedRequest(r.Context()) {
if errors.IsS3Error(err, errors.ErrNotModified) {
writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
}
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
return
}
encryptionParams, err := formEncryptionParams(r)
if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err)
return
}
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return
}
fullSize, err := layer.GetObjectSize(info)
if err != nil {
h.logAndSendError(ctx, w, "invalid size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
return
}
if params, err = fetchRangeHeader(r.Header, fullSize); err != nil {
h.logAndSendError(ctx, w, "could not parse range header", reqInfo, err)
return
}
if layer.IsAuthenticatedRequest(ctx) {
overrideResponseHeaders(w.Header(), reqInfo.URL.Query()) overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
} }
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil { if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
h.logAndSendError(ctx, w, "could not get locking info", reqInfo, err) h.logAndSendError(w, "could not get locking info", reqInfo, err)
return
}
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
@ -244,9 +219,9 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
Encryption: encryptionParams, Encryption: encryptionParams,
} }
objPayload, err := h.obj.GetObject(ctx, getPayloadParams) objPayload, err := h.obj.GetObject(r.Context(), getPayloadParams)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get object payload", reqInfo, err) h.logAndSendError(w, "could not get object payload", reqInfo, err)
return return
} }
@ -258,7 +233,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
} }
if err = objPayload.StreamTo(w); err != nil { if err = objPayload.StreamTo(w); err != nil {
h.logAndSendError(ctx, w, "could not stream object payload", reqInfo, err) h.logAndSendError(w, "could not stream object payload", reqInfo, err)
return return
} }
} }
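Within GetObjectHandler, header overrides are limited to authenticated requests: S3 lets a signed GET rewrite selected response headers through query parameters. A hedged sketch of the convention (parameter names per the S3 GetObject API; overrideResponseHeaders internals are not shown in this diff):

q := url.Values{}
q.Set("response-content-type", "application/json")
q.Set("response-cache-control", "no-cache")
// on a signed request, overrideResponseHeaders maps these query values
// onto the reply's Content-Type and Cache-Control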

View file

@ -2,7 +2,7 @@ package handler
import ( import (
"bytes" "bytes"
"errors" stderrors "errors"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
@ -13,7 +13,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -89,7 +89,7 @@ func TestPreconditions(t *testing.T) {
name: "IfMatch false", name: "IfMatch false",
info: newInfo(etag, today), info: newInfo(etag, today),
args: &conditionalArgs{IfMatch: etag2}, args: &conditionalArgs{IfMatch: etag2},
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)}, expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
{ {
name: "IfNoneMatch true", name: "IfNoneMatch true",
info: newInfo(etag, today), info: newInfo(etag, today),
@ -99,7 +99,7 @@ func TestPreconditions(t *testing.T) {
name: "IfNoneMatch false", name: "IfNoneMatch false",
info: newInfo(etag, today), info: newInfo(etag, today),
args: &conditionalArgs{IfNoneMatch: etag}, args: &conditionalArgs{IfNoneMatch: etag},
expected: apierr.GetAPIError(apierr.ErrNotModified)}, expected: errors.GetAPIError(errors.ErrNotModified)},
{ {
name: "IfModifiedSince true", name: "IfModifiedSince true",
info: newInfo(etag, today), info: newInfo(etag, today),
@ -109,7 +109,7 @@ func TestPreconditions(t *testing.T) {
name: "IfModifiedSince false", name: "IfModifiedSince false",
info: newInfo(etag, yesterday), info: newInfo(etag, yesterday),
args: &conditionalArgs{IfModifiedSince: &today}, args: &conditionalArgs{IfModifiedSince: &today},
expected: apierr.GetAPIError(apierr.ErrNotModified)}, expected: errors.GetAPIError(errors.ErrNotModified)},
{ {
name: "IfUnmodifiedSince true", name: "IfUnmodifiedSince true",
info: newInfo(etag, yesterday), info: newInfo(etag, yesterday),
@ -119,7 +119,7 @@ func TestPreconditions(t *testing.T) {
name: "IfUnmodifiedSince false", name: "IfUnmodifiedSince false",
info: newInfo(etag, today), info: newInfo(etag, today),
args: &conditionalArgs{IfUnmodifiedSince: &yesterday}, args: &conditionalArgs{IfUnmodifiedSince: &yesterday},
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed)}, expected: errors.GetAPIError(errors.ErrPreconditionFailed)},
{ {
name: "IfMatch true, IfUnmodifiedSince false", name: "IfMatch true, IfUnmodifiedSince false",
@ -131,19 +131,19 @@ func TestPreconditions(t *testing.T) {
name: "IfMatch false, IfUnmodifiedSince true", name: "IfMatch false, IfUnmodifiedSince true",
info: newInfo(etag, yesterday), info: newInfo(etag, yesterday),
args: &conditionalArgs{IfMatch: etag2, IfUnmodifiedSince: &today}, args: &conditionalArgs{IfMatch: etag2, IfUnmodifiedSince: &today},
expected: apierr.GetAPIError(apierr.ErrPreconditionFailed), expected: errors.GetAPIError(errors.ErrPreconditionFailed),
}, },
{ {
name: "IfNoneMatch false, IfModifiedSince true", name: "IfNoneMatch false, IfModifiedSince true",
info: newInfo(etag, today), info: newInfo(etag, today),
args: &conditionalArgs{IfNoneMatch: etag, IfModifiedSince: &yesterday}, args: &conditionalArgs{IfNoneMatch: etag, IfModifiedSince: &yesterday},
expected: apierr.GetAPIError(apierr.ErrNotModified), expected: errors.GetAPIError(errors.ErrNotModified),
}, },
{ {
name: "IfNoneMatch true, IfModifiedSince false", name: "IfNoneMatch true, IfModifiedSince false",
info: newInfo(etag, yesterday), info: newInfo(etag, yesterday),
args: &conditionalArgs{IfNoneMatch: etag2, IfModifiedSince: &today}, args: &conditionalArgs{IfNoneMatch: etag2, IfModifiedSince: &today},
expected: apierr.GetAPIError(apierr.ErrNotModified), expected: errors.GetAPIError(errors.ErrNotModified),
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
@ -151,7 +151,7 @@ func TestPreconditions(t *testing.T) {
if tc.expected == nil { if tc.expected == nil {
require.NoError(t, actual) require.NoError(t, actual)
} else { } else {
require.True(t, errors.Is(actual, tc.expected), tc.expected, actual) require.True(t, stderrors.Is(actual, tc.expected), tc.expected, actual)
} }
}) })
} }
@ -193,8 +193,8 @@ func TestGetObject(t *testing.T) {
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{}) hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{}) hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion) getObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), errors.ErrNoSuchVersion)
getObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey) getObjectAssertS3Error(hc, bktName, objName, emptyVersion, errors.ErrNoSuchKey)
} }
func TestGetObjectEnabledMD5(t *testing.T) { func TestGetObjectEnabledMD5(t *testing.T) {
@ -210,27 +210,6 @@ func TestGetObjectEnabledMD5(t *testing.T) {
require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag)) require.Equal(t, data.Quote(objInfo.MD5Sum), headers.Get(api.ETag))
} }
func TestGetObjectNotModifiedHeaders(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
createVersionedBucket(hc, bktName)
header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
require.NotEmpty(t, etag)
require.NotEmpty(t, versionID)
putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
w := getObjectWithHeaders(hc, bktName, objName, map[string]string{api.IfNoneMatch: etag})
require.Equal(t, http.StatusNotModified, w.Code)
require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
require.Equal(t, etag, w.Header().Get(api.ETag))
require.NotEmpty(t, w.Header().Get(api.LastModified))
require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
require.Equal(t, "value", w.Header().Get(api.CacheControl))
require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
}
func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header { func putObjectContent(hc *handlerContext, bktName, objName, content string) http.Header {
body := bytes.NewReader([]byte(content)) body := bytes.NewReader([]byte(content))
w, r := prepareTestPayloadRequest(hc, bktName, objName, body) w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
@ -249,17 +228,9 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
return content return content
} }
func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte { func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
w := getObjectBaseResponse(tc, bktName, objName, version)
assertStatus(tc.t, w, http.StatusOK)
content, err := io.ReadAll(w.Result().Body)
require.NoError(tc.t, err)
return content
}
func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code apierr.ErrorCode) {
w := getObjectBaseResponse(hc, bktName, objName, version) w := getObjectBaseResponse(hc, bktName, objName, version)
assertS3Error(hc.t, w, apierr.GetAPIError(code)) assertS3Error(hc.t, w, errors.GetAPIError(code))
} }
func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder { func getObjectBaseResponse(hc *handlerContext, bktName, objName, version string) *httptest.ResponseRecorder {

File diff suppressed because it is too large

View file

@ -20,7 +20,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/resolver"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/service/tree"
@ -38,12 +37,8 @@ import (
) )
type handlerContext struct { type handlerContext struct {
*handlerContextBase
t *testing.T
}
type handlerContextBase struct {
owner user.ID owner user.ID
t *testing.T
h *handler h *handler
tp *layer.TestFrostFS tp *layer.TestFrostFS
tree *tree.Tree tree *tree.Tree
@ -55,19 +50,19 @@ type handlerContextBase struct {
cache *layer.Cache cache *layer.Cache
} }
func (hc *handlerContextBase) Handler() *handler { func (hc *handlerContext) Handler() *handler {
return hc.h return hc.h
} }
func (hc *handlerContextBase) MockedPool() *layer.TestFrostFS { func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
return hc.tp return hc.tp
} }
func (hc *handlerContextBase) Layer() *layer.Layer { func (hc *handlerContext) Layer() *layer.Layer {
return hc.h.obj return hc.h.obj
} }
func (hc *handlerContextBase) Context() context.Context { func (hc *handlerContext) Context() context.Context {
return hc.context return hc.context
} }
@ -141,34 +136,22 @@ func (c *configMock) RetryStrategy() RetryStrategy {
} }
func prepareHandlerContext(t *testing.T) *handlerContext { func prepareHandlerContext(t *testing.T) *handlerContext {
hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample())) return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(zap.NewExample()))
require.NoError(t, err)
return &handlerContext{
handlerContextBase: hc,
t: t,
}
} }
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext { func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
hc, err := prepareHandlerContextBase(getMinCacheConfig(zap.NewExample())) return prepareHandlerContextBase(t, getMinCacheConfig(zap.NewExample()))
require.NoError(t, err)
return &handlerContext{
handlerContextBase: hc,
t: t,
}
} }
func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBase, error) { func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig) *handlerContext {
key, err := keys.NewPrivateKey() key, err := keys.NewPrivateKey()
if err != nil { require.NoError(t, err)
return nil, err
}
log := zap.NewExample() l := zap.NewExample()
tp := layer.NewTestFrostFS(key) tp := layer.NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"} testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (cid.ID, error) { testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
return tp.ContainerID(name) return tp.ContainerID(name)
}) })
@ -176,9 +159,7 @@ func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBas
user.IDFromKey(&owner, key.PrivateKey.PublicKey) user.IDFromKey(&owner, key.PrivateKey.PublicKey)
memCli, err := tree.NewTreeServiceClientMemory() memCli, err := tree.NewTreeServiceClientMemory()
if err != nil { require.NoError(t, err)
return nil, err
}
treeMock := tree.NewTree(memCli, zap.NewExample()) treeMock := tree.NewTree(memCli, zap.NewExample())
@ -195,38 +176,32 @@ func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBas
var pp netmap.PlacementPolicy var pp netmap.PlacementPolicy
err = pp.DecodeString("REP 1") err = pp.DecodeString("REP 1")
if err != nil { require.NoError(t, err)
return nil, err
}
cfg := &configMock{ cfg := &configMock{
defaultPolicy: pp, defaultPolicy: pp,
} }
h := &handler{ h := &handler{
log: log, log: l,
obj: layer.NewLayer(log, tp, layerCfg), obj: layer.NewLayer(l, tp, layerCfg),
cfg: cfg, cfg: cfg,
ape: newAPEMock(), ape: newAPEMock(),
frostfsid: newFrostfsIDMock(), frostfsid: newFrostfsIDMock(),
} }
accessBox, err := newTestAccessBox(key) return &handlerContext{
if err != nil {
return nil, err
}
return &handlerContextBase{
owner: owner, owner: owner,
t: t,
h: h, h: h,
tp: tp, tp: tp,
tree: treeMock, tree: treeMock,
context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: accessBox}), context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: newTestAccessBox(t, key)}),
config: cfg, config: cfg,
layerFeatures: features, layerFeatures: features,
treeMock: memCli, treeMock: memCli,
cache: layerCfg.Cache, cache: layerCfg.Cache,
}, nil }
} }
func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig { func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
@ -244,14 +219,12 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
Buckets: minCacheCfg, Buckets: minCacheCfg,
System: minCacheCfg, System: minCacheCfg,
AccessControl: minCacheCfg, AccessControl: minCacheCfg,
NetworkInfo: &cache.NetworkInfoCacheConfig{Lifetime: minCacheCfg.Lifetime},
} }
} }
type apeMock struct { type apeMock struct {
chainMap map[engine.Target][]*chain.Chain chainMap map[engine.Target][]*chain.Chain
policyMap map[string][]byte policyMap map[string][]byte
err error
} }
func newAPEMock() *apeMock { func newAPEMock() *apeMock {
@ -295,10 +268,6 @@ func (a *apeMock) DeletePolicy(namespace string, cnrID cid.ID) error {
} }
func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error { func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain []*chain.Chain) error {
if a.err != nil {
return a.err
}
if err := a.PutPolicy(ns, cnrID, policy); err != nil { if err := a.PutPolicy(ns, cnrID, policy); err != nil {
return err return err
} }
@ -313,10 +282,6 @@ func (a *apeMock) PutBucketPolicy(ns string, cnrID cid.ID, policy []byte, chain
} }
func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error { func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.ID) error {
if a.err != nil {
return a.err
}
if err := a.DeletePolicy(ns, cnrID); err != nil { if err := a.DeletePolicy(ns, cnrID); err != nil {
return err return err
} }
@ -330,10 +295,6 @@ func (a *apeMock) DeleteBucketPolicy(ns string, cnrID cid.ID, chainIDs []chain.I
} }
func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) { func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
if a.err != nil {
return nil, a.err
}
policy, ok := a.policyMap[ns+cnrID.EncodeToString()] policy, ok := a.policyMap[ns+cnrID.EncodeToString()]
if !ok { if !ok {
return nil, errors.New("not found") return nil, errors.New("not found")
@ -343,10 +304,6 @@ func (a *apeMock) GetBucketPolicy(ns string, cnrID cid.ID) ([]byte, error) {
} }
func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error { func (a *apeMock) SaveACLChains(cid string, chains []*chain.Chain) error {
if a.err != nil {
return a.err
}
for i := range chains { for i := range chains {
if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil { if err := a.AddChain(engine.ContainerTarget(cid), chains[i]); err != nil {
return err return err
@ -388,7 +345,7 @@ func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
} }
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo { func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
res, err := hc.MockedPool().CreateContainer(hc.Context(), frostfs.PrmContainerCreate{ res, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
Creator: hc.owner, Creator: hc.owner,
Name: bktName, Name: bktName,
AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}}, AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
@ -435,7 +392,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{ extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
BktInfo: bktInfo, BktInfo: bktInfo,
Object: objName, Object: objName,
Size: ptr(uint64(len(content))), Size: uint64(len(content)),
Reader: bytes.NewReader(content), Reader: bytes.NewReader(content),
Header: header, Header: header,
Encryption: encryption, Encryption: encryption,
@ -462,7 +419,6 @@ func prepareTestRequestWithQuery(hc *handlerContext, bktName, objName string, qu
r.URL.RawQuery = query.Encode() r.URL.RawQuery = query.Encode()
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "") reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
reqInfo.User = hc.owner.String()
r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo)) r = r.WithContext(middleware.SetReqInfo(hc.Context(), reqInfo))
return w, r return w, r
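The fixture refactor above splits handlerContext into handlerContextBase plus a thin per-test wrapper carrying *testing.T, and prepareHandlerContextBase now returns errors instead of asserting, so the same setup can be reused where no testing handle is available. The wrapper pattern, as it appears in the diff:

func prepareHandlerContext(t *testing.T) *handlerContext {
    hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
    require.NoError(t, err) // assertions stay in the wrapper, not the constructor
    return &handlerContext{handlerContextBase: hc, t: t}
}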

View file

@ -27,18 +27,17 @@ func getRangeToDetectContentType(maxSize uint64) *layer.RangeParams {
} }
func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
conditional, err := parseConditionalHeaders(r.Header) conditional, err := parseConditionalHeaders(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse request params", reqInfo, err) h.logAndSendError(w, "could not parse request params", reqInfo, err)
return return
} }
@ -48,27 +47,26 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID), VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
} }
extendedInfo, err := h.obj.GetExtendedObjectInfo(ctx, p) extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not find object", reqInfo, err) h.logAndSendError(w, "could not find object", reqInfo, err)
return return
} }
info := extendedInfo.ObjectInfo info := extendedInfo.ObjectInfo
encryptionParams, err := formEncryptionParams(r) encryptionParams, err := formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil { if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err)) h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
return return
} }
bktSettings, err := h.obj.GetBucketSettings(ctx, bktInfo) if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
if err != nil { h.logAndSendError(w, "precondition failed", reqInfo, err)
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return return
} }
@ -78,17 +76,9 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
VersionID: info.VersionID(), VersionID: info.VersionID(),
} }
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(ctx, t, extendedInfo.NodeVersion) tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) { if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
h.logAndSendError(ctx, w, "could not get object meta data", reqInfo, err) h.logAndSendError(w, "could not get object meta data", reqInfo, err)
return
}
if err = checkPreconditions(info, conditional, h.cfg.MD5Enabled()); err != nil {
if errors.IsS3Error(err, errors.ErrNotModified) {
writeNotModifiedHeaders(w.Header(), extendedInfo, len(tagSet), bktSettings.Unversioned(), h.cfg.MD5Enabled())
}
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
return return
} }
@ -101,15 +91,15 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
BucketInfo: bktInfo, BucketInfo: bktInfo,
} }
objPayload, err := h.obj.GetObject(ctx, getParams) objPayload, err := h.obj.GetObject(r.Context(), getParams)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID)) h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
return return
} }
buffer, err := io.ReadAll(objPayload) buffer, err := io.ReadAll(objPayload)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID)) h.logAndSendError(w, "could not partly read payload to detect content type", reqInfo, err, zap.Stringer("oid", info.ID))
return return
} }
@ -118,7 +108,13 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
} }
if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil { if err = h.setLockingHeaders(bktInfo, lockInfo, w.Header()); err != nil {
h.logAndSendError(ctx, w, "could not get locking info", reqInfo, err) h.logAndSendError(w, "could not get locking info", reqInfo, err)
return
}
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
@ -127,12 +123,11 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
} }
func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -146,7 +141,7 @@ func (h *handler) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
} }
if err = middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone); err != nil { if err = middleware.WriteResponse(w, http.StatusOK, nil, middleware.MimeNone); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
} }
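HeadObjectHandler reads a bounded prefix of the payload to sniff a Content-Type when the object carries none; getRangeToDetectContentType above limits how much is fetched. A hedged sketch of the sniffing step (assuming standard-library detection; the gateway's actual detector is not shown here):

buffer, err := io.ReadAll(objPayload) // bounded by getRangeToDetectContentType
if err != nil {
    return
}
contentType := http.DetectContentType(buffer) // inspects at most the first 512 bytes
_ = contentType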

View file

@ -6,7 +6,7 @@ import (
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status" apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
@ -95,29 +95,8 @@ func TestHeadObject(t *testing.T) {
hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{}) hc.tp.SetObjectError(addr, &apistatus.ObjectNotFound{})
hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{}) hc.tp.SetObjectError(objInfo.Address(), &apistatus.ObjectNotFound{})
headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), apierr.ErrNoSuchVersion) headObjectAssertS3Error(hc, bktName, objName, objInfo.VersionID(), s3errors.ErrNoSuchVersion)
headObjectAssertS3Error(hc, bktName, objName, emptyVersion, apierr.ErrNoSuchKey) headObjectAssertS3Error(hc, bktName, objName, emptyVersion, s3errors.ErrNoSuchKey)
}
func TestHeadObjectNotModifiedHeaders(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName, metadataHeader := "bucket", "obj", api.MetadataPrefix+"header"
createVersionedBucket(hc, bktName)
header := putObjectWithHeaders(hc, bktName, objName, map[string]string{api.CacheControl: "value", metadataHeader: "value"})
etag, versionID := header.Get(api.ETag), header.Get(api.AmzVersionID)
require.NotEmpty(t, etag)
require.NotEmpty(t, versionID)
putObjectTagging(t, hc, bktName, objName, map[string]string{"key": "value"})
w := headObjectWithHeaders(hc, bktName, objName, emptyVersion, map[string]string{api.IfNoneMatch: etag})
require.Equal(t, http.StatusNotModified, w.Code)
require.Equal(t, "1", w.Header().Get(api.AmzTaggingCount))
require.Equal(t, etag, w.Header().Get(api.ETag))
require.NotEmpty(t, w.Header().Get(api.LastModified))
require.Equal(t, versionID, w.Header().Get(api.AmzVersionID))
require.Equal(t, "value", w.Header().Get(api.CacheControl))
require.Equal(t, []string{"value"}, w.Header()[metadataHeader])
} }
func TestIsAvailableToResolve(t *testing.T) { func TestIsAvailableToResolve(t *testing.T) {
@ -140,25 +119,21 @@ func TestIsAvailableToResolve(t *testing.T) {
} }
} }
func newTestAccessBox(key *keys.PrivateKey) (*accessbox.Box, error) { func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
var err error var err error
if key == nil { if key == nil {
key, err = keys.NewPrivateKey() key, err = keys.NewPrivateKey()
if err != nil { require.NoError(t, err)
return nil, err
}
} }
var btoken bearer.Token var btoken bearer.Token
btoken.SetImpersonate(true) btoken.SetImpersonate(true)
err = btoken.Sign(key.PrivateKey) err = btoken.Sign(key.PrivateKey)
if err != nil { require.NoError(t, err)
return nil, err
}
return &accessbox.Box{ return &accessbox.Box{
Gate: &accessbox.GateData{ Gate: &accessbox.GateData{
BearerToken: &btoken, BearerToken: &btoken,
}, },
}, nil }
} }

View file

@ -7,16 +7,15 @@ import (
) )
func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil { if err = middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint}); err != nil {
h.logAndSendError(ctx, w, "couldn't encode bucket location response", reqInfo, err) h.logAndSendError(w, "couldn't encode bucket location response", reqInfo, err)
} }
} }
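GetBucketLocationHandler encodes the bucket's LocationConstraint as the S3 GetBucketLocation body. A hedged illustration of the serialized form (the exact XML schema is assumed to follow the S3 API):

// middleware.EncodeToResponse(w, LocationResponse{Location: bktInfo.LocationConstraint})
// produces a body of the shape:
//   <LocationConstraint>REGION-OR-CONSTRAINT-NAME</LocationConstraint>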

View file

@ -1,309 +0,0 @@
package handler
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"fmt"
"io"
"net/http"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/util"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"github.com/google/uuid"
)
const (
maxRules = 1000
maxRuleIDLen = 255
maxNewerNoncurrentVersions = 100
)
func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
cfg, err := h.obj.GetBucketLifecycleConfiguration(ctx, bktInfo)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket lifecycle configuration", reqInfo, err)
return
}
if err = middleware.EncodeToResponse(w, cfg); err != nil {
h.logAndSendError(ctx, w, "could not encode GetBucketLifecycle response", reqInfo, err)
return
}
}
func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
var buf bytes.Buffer
tee := io.TeeReader(r.Body, &buf)
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
// Content-Md5 is required and should be set
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html
if _, ok := r.Header[api.ContentMD5]; !ok {
h.logAndSendError(ctx, w, "missing Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrMissingContentMD5))
return
}
headerMD5, err := base64.StdEncoding.DecodeString(r.Header.Get(api.ContentMD5))
if err != nil {
h.logAndSendError(ctx, w, "invalid Content-MD5", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
return
}
cfg := new(data.LifecycleConfiguration)
if err = h.cfg.NewXMLDecoder(tee).Decode(cfg); err != nil {
h.logAndSendError(ctx, w, "could not decode body", reqInfo, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()))
return
}
bodyMD5, err := getContentMD5(&buf)
if err != nil {
h.logAndSendError(ctx, w, "could not get content md5", reqInfo, err)
return
}
if !bytes.Equal(headerMD5, bodyMD5) {
h.logAndSendError(ctx, w, "Content-MD5 does not match", reqInfo, apierr.GetAPIError(apierr.ErrInvalidDigest))
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
networkInfo, err := h.obj.GetNetworkInfo(ctx)
if err != nil {
h.logAndSendError(ctx, w, "could not get network info", reqInfo, err)
return
}
if err = checkLifecycleConfiguration(ctx, cfg, &networkInfo); err != nil {
h.logAndSendError(ctx, w, "invalid lifecycle configuration", reqInfo, err)
return
}
params := &layer.PutBucketLifecycleParams{
BktInfo: bktInfo,
LifecycleCfg: cfg,
}
params.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
}
if err = h.obj.PutBucketLifecycleConfiguration(ctx, params); err != nil {
h.logAndSendError(ctx, w, "could not put bucket lifecycle configuration", reqInfo, err)
return
}
}
func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
if err = h.obj.DeleteBucketLifecycleConfiguration(ctx, bktInfo); err != nil {
h.logAndSendError(ctx, w, "could not delete bucket lifecycle configuration", reqInfo, err)
return
}
w.WriteHeader(http.StatusNoContent)
}
func checkLifecycleConfiguration(ctx context.Context, cfg *data.LifecycleConfiguration, ni *netmap.NetworkInfo) error {
now := layer.TimeNow(ctx)
if len(cfg.Rules) > maxRules {
return fmt.Errorf("%w: number of rules cannot be greater than %d", apierr.GetAPIError(apierr.ErrInvalidRequest), maxRules)
}
ids := make(map[string]struct{}, len(cfg.Rules))
for i, rule := range cfg.Rules {
if rule.ID == "" {
id, err := uuid.NewRandom()
if err != nil {
return fmt.Errorf("generate uuid: %w", err)
}
cfg.Rules[i].ID = id.String()
rule.ID = id.String()
}
if _, ok := ids[rule.ID]; ok {
return fmt.Errorf("%w: duplicate 'ID': %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.ID)
}
ids[rule.ID] = struct{}{}
if len(rule.ID) > maxRuleIDLen {
return fmt.Errorf("%w: 'ID' value cannot be longer than %d characters", apierr.GetAPIError(apierr.ErrInvalidArgument), maxRuleIDLen)
}
if rule.Status != data.LifecycleStatusEnabled && rule.Status != data.LifecycleStatusDisabled {
return fmt.Errorf("%w: invalid lifecycle status: %s", apierr.GetAPIError(apierr.ErrMalformedXML), rule.Status)
}
if rule.AbortIncompleteMultipartUpload == nil && rule.Expiration == nil && rule.NonCurrentVersionExpiration == nil {
return fmt.Errorf("%w: at least one action needs to be specified in a rule", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
if rule.AbortIncompleteMultipartUpload != nil {
if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil &&
*rule.AbortIncompleteMultipartUpload.DaysAfterInitiation <= 0 {
return fmt.Errorf("%w: days after initiation must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
}
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
return fmt.Errorf("%w: abort incomplete multipart upload cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
}
if rule.Expiration != nil {
if rule.Expiration.ExpiredObjectDeleteMarker != nil {
if rule.Expiration.Days != nil || rule.Expiration.Date != "" {
return fmt.Errorf("%w: expired object delete marker cannot be specified with days or date", apierr.GetAPIError(apierr.ErrMalformedXML))
}
if rule.Filter != nil && (rule.Filter.Tag != nil || (rule.Filter.And != nil && len(rule.Filter.And.Tags) > 0)) {
return fmt.Errorf("%w: expired object delete marker cannot be specified with tags", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
}
if rule.Expiration.Days != nil && *rule.Expiration.Days <= 0 {
return fmt.Errorf("%w: expiration days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
}
if rule.Expiration.Date != "" {
parsedTime, err := time.Parse("2006-01-02T15:04:05Z", rule.Expiration.Date)
if err != nil {
return fmt.Errorf("%w: invalid value of expiration date: %s", apierr.GetAPIError(apierr.ErrInvalidArgument), rule.Expiration.Date)
}
epoch, err := util.TimeToEpoch(ni, now, parsedTime)
if err != nil {
return fmt.Errorf("convert time to epoch: %w", err)
}
cfg.Rules[i].Expiration.Epoch = &epoch
}
}
if rule.NonCurrentVersionExpiration != nil {
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil && rule.NonCurrentVersionExpiration.NonCurrentDays == nil {
return fmt.Errorf("%w: newer noncurrent versions cannot be specified without noncurrent days", apierr.GetAPIError(apierr.ErrMalformedXML))
}
if rule.NonCurrentVersionExpiration.NewerNonCurrentVersions != nil &&
(*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions > maxNewerNoncurrentVersions ||
*rule.NonCurrentVersionExpiration.NewerNonCurrentVersions <= 0) {
return fmt.Errorf("%w: newer noncurrent versions must be a positive integer up to %d", apierr.GetAPIError(apierr.ErrInvalidArgument),
maxNewerNoncurrentVersions)
}
if rule.NonCurrentVersionExpiration.NonCurrentDays != nil && *rule.NonCurrentVersionExpiration.NonCurrentDays <= 0 {
return fmt.Errorf("%w: noncurrent days must be a positive integer", apierr.GetAPIError(apierr.ErrInvalidArgument))
}
}
if err := checkLifecycleRuleFilter(rule.Filter); err != nil {
return err
}
if rule.Filter != nil && rule.Filter.Prefix != "" && rule.Prefix != "" {
return fmt.Errorf("%w: rule cannot have two prefixes", apierr.GetAPIError(apierr.ErrMalformedXML))
}
}
return nil
}
func checkLifecycleRuleFilter(filter *data.LifecycleRuleFilter) error {
if filter == nil {
return nil
}
var fields int
if filter.And != nil {
fields++
for _, tag := range filter.And.Tags {
err := checkTag(tag)
if err != nil {
return err
}
}
if filter.And.ObjectSizeLessThan != nil {
if *filter.And.ObjectSizeLessThan == 0 {
return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
if filter.And.ObjectSizeGreaterThan != nil && *filter.And.ObjectSizeLessThan <= *filter.And.ObjectSizeGreaterThan {
return fmt.Errorf("%w: the maximum object size must be larger than the minimum object size", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
}
}
if filter.ObjectSizeGreaterThan != nil {
fields++
}
if filter.ObjectSizeLessThan != nil {
if *filter.ObjectSizeLessThan == 0 {
return fmt.Errorf("%w: the maximum object size must be more than 0", apierr.GetAPIError(apierr.ErrInvalidRequest))
}
fields++
}
if filter.Prefix != "" {
fields++
}
if filter.Tag != nil {
fields++
err := checkTag(*filter.Tag)
if err != nil {
return err
}
}
if fields > 1 {
return fmt.Errorf("%w: filter cannot have more than one field", apierr.GetAPIError(apierr.ErrMalformedXML))
}
return nil
}
func getContentMD5(reader io.Reader) ([]byte, error) {
hash := md5.New()
_, err := io.Copy(hash, reader)
if err != nil {
return nil, err
}
return hash.Sum(nil), nil
}
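For reference, a configuration that passes every branch of checkLifecycleConfiguration, written as the data.LifecycleConfiguration literal used by the tests below (values illustrative; ptr is the test helper visible elsewhere in this diff):

cfg := &data.LifecycleConfiguration{
    Rules: []data.LifecycleRule{{
        ID:         "expire-logs", // unique, at most maxRuleIDLen characters
        Status:     data.LifecycleStatusEnabled,
        Expiration: &data.LifecycleExpiration{Days: ptr(30)},   // at least one action per rule
        Filter:     &data.LifecycleRuleFilter{Prefix: "logs/"}, // at most one filter field
    }},
}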

View file

@ -1,563 +0,0 @@
package handler
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/base64"
"encoding/xml"
"io"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/mr-tron/base58"
"github.com/stretchr/testify/require"
)
func TestPutBucketLifecycleConfiguration(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName := "bucket-lifecycle"
createBucket(hc, bktName)
for _, tc := range []struct {
name string
body *data.LifecycleConfiguration
errorCode apierr.ErrorCode
}{
{
name: "correct configuration",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
ID: "rule-1",
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
Date: time.Now().Format("2006-01-02T15:04:05.000Z"),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
Prefix: "prefix/",
Tags: []data.Tag{{Key: "key", Value: "value"}, {Key: "tag", Value: ""}},
ObjectSizeGreaterThan: ptr(uint64(100)),
},
},
},
{
ID: "rule-2",
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(14),
},
Expiration: &data.LifecycleExpiration{
ExpiredObjectDeleteMarker: ptr(true),
},
Filter: &data.LifecycleRuleFilter{
ObjectSizeLessThan: ptr(uint64(100)),
},
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NewerNonCurrentVersions: ptr(1),
NonCurrentDays: ptr(21),
},
},
},
},
},
{
name: "too many rules",
body: func() *data.LifecycleConfiguration {
lifecycle := new(data.LifecycleConfiguration)
for i := 0; i <= maxRules; i++ {
lifecycle.Rules = append(lifecycle.Rules, data.LifecycleRule{
ID: "Rule" + strconv.Itoa(i),
})
}
return lifecycle
}(),
errorCode: apierr.ErrInvalidRequest,
},
{
name: "duplicate rule ID",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
ID: "Rule",
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
{
ID: "Rule",
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "too long rule ID",
body: func() *data.LifecycleConfiguration {
id := make([]byte, maxRuleIDLen+1)
_, err := io.ReadFull(rand.Reader, id)
require.NoError(t, err)
return &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
ID: base58.Encode(id)[:maxRuleIDLen+1],
},
},
}
}(),
errorCode: apierr.ErrInvalidArgument,
},
{
name: "invalid status",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: "invalid",
},
},
},
errorCode: apierr.ErrMalformedXML,
},
{
name: "no actions",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Filter: &data.LifecycleRuleFilter{
Prefix: "prefix/",
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
{
name: "invalid days after initiation",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(0),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "invalid expired object delete marker declaration",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
ExpiredObjectDeleteMarker: ptr(false),
},
},
},
},
errorCode: apierr.ErrMalformedXML,
},
{
name: "invalid expiration days",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(0),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "invalid expiration date",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Date: "invalid",
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "newer noncurrent versions is too small",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NonCurrentDays: ptr(1),
NewerNonCurrentVersions: ptr(0),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "newer noncurrent versions is too large",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NonCurrentDays: ptr(1),
NewerNonCurrentVersions: ptr(101),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "invalid noncurrent days",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NonCurrentDays: ptr(0),
},
},
},
},
errorCode: apierr.ErrInvalidArgument,
},
{
name: "more than one filter field",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
Prefix: "prefix/",
ObjectSizeGreaterThan: ptr(uint64(100)),
},
},
},
},
errorCode: apierr.ErrMalformedXML,
},
{
name: "invalid tag in filter",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
Tag: &data.Tag{},
},
},
},
},
errorCode: apierr.ErrInvalidTagKey,
},
{
name: "abort incomplete multipart upload with tag",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(14),
},
Filter: &data.LifecycleRuleFilter{
Tag: &data.Tag{Key: "key", Value: "value"},
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
{
name: "expired object delete marker with tag",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
ExpiredObjectDeleteMarker: ptr(true),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
Tags: []data.Tag{{Key: "key", Value: "value"}},
},
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
{
name: "invalid size range",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
ObjectSizeGreaterThan: ptr(uint64(100)),
ObjectSizeLessThan: ptr(uint64(100)),
},
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
{
name: "two prefixes",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
Prefix: "prefix-1/",
},
Prefix: "prefix-2/",
},
},
},
errorCode: apierr.ErrMalformedXML,
},
{
name: "newer noncurrent versions without noncurrent days",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
NonCurrentVersionExpiration: &data.NonCurrentVersionExpiration{
NewerNonCurrentVersions: ptr(10),
},
},
},
},
errorCode: apierr.ErrMalformedXML,
},
{
name: "invalid maximum object size in filter",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
ObjectSizeLessThan: ptr(uint64(0)),
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
{
name: "invalid maximum object size in filter and",
body: &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
Filter: &data.LifecycleRuleFilter{
And: &data.LifecycleRuleAndOperator{
Prefix: "prefix/",
ObjectSizeLessThan: ptr(uint64(0)),
},
},
},
},
},
errorCode: apierr.ErrInvalidRequest,
},
} {
t.Run(tc.name, func(t *testing.T) {
if tc.errorCode > 0 {
putBucketLifecycleConfigurationErr(hc, bktName, tc.body, apierr.GetAPIError(tc.errorCode))
return
}
putBucketLifecycleConfiguration(hc, bktName, tc.body)
cfg := getBucketLifecycleConfiguration(hc, bktName)
require.Equal(t, tc.body.Rules, cfg.Rules)
deleteBucketLifecycleConfiguration(hc, bktName)
getBucketLifecycleConfigurationErr(hc, bktName, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration))
})
}
}
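
For reference, the "correct configuration" case above corresponds to a request body along these lines; this XML is hand-written from the test data (values illustrative), not captured gateway traffic:

    <LifecycleConfiguration>
      <Rule>
        <ID>rule-1</ID>
        <Status>Enabled</Status>
        <Expiration>
          <Days>21</Days>
        </Expiration>
        <Filter>
          <And>
            <Prefix>prefix/</Prefix>
            <Tag><Key>key</Key><Value>value</Value></Tag>
            <ObjectSizeGreaterThan>100</ObjectSizeGreaterThan>
          </And>
        </Filter>
      </Rule>
    </LifecycleConfiguration>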
func TestPutBucketLifecycleIDGeneration(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-lifecycle-id"
createBucket(hc, bktName)
lifecycle := &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
{
Status: data.LifecycleStatusEnabled,
AbortIncompleteMultipartUpload: &data.AbortIncompleteMultipartUpload{
DaysAfterInitiation: ptr(14),
},
},
},
}
putBucketLifecycleConfiguration(hc, bktName, lifecycle)
cfg := getBucketLifecycleConfiguration(hc, bktName)
require.Len(t, cfg.Rules, 2)
require.NotEmpty(t, cfg.Rules[0].ID)
require.NotEmpty(t, cfg.Rules[1].ID)
}
func TestPutBucketLifecycleInvalidMD5(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-lifecycle-md5"
createBucket(hc, bktName)
lifecycle := &data.LifecycleConfiguration{
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
}
w, r := prepareTestRequest(hc, bktName, "", lifecycle)
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMissingContentMD5))
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
r.Header.Set(api.ContentMD5, "")
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
w, r = prepareTestRequest(hc, bktName, "", lifecycle)
r.Header.Set(api.ContentMD5, "some-hash")
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
}
func TestPutBucketLifecycleInvalidXML(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-lifecycle-invalid-xml"
createBucket(hc, bktName)
cfg := &data.CORSConfiguration{}
body, err := xml.Marshal(cfg)
require.NoError(t, err)
contentMD5, err := getContentMD5(bytes.NewReader(body))
require.NoError(t, err)
w, r := prepareTestRequest(hc, bktName, "", cfg)
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(contentMD5))
hc.Handler().PutBucketLifecycleHandler(w, r)
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrMalformedXML))
}
func putBucketLifecycleConfiguration(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) {
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
assertStatus(hc.t, w, http.StatusOK)
}
func putBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration, err apierr.Error) {
w := putBucketLifecycleConfigurationBase(hc, bktName, cfg)
assertS3Error(hc.t, w, err)
}
func putBucketLifecycleConfigurationBase(hc *handlerContext, bktName string, cfg *data.LifecycleConfiguration) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", cfg)
rawBody, err := xml.Marshal(cfg)
require.NoError(hc.t, err)
hash := md5.New()
hash.Write(rawBody)
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString(hash.Sum(nil)))
hc.Handler().PutBucketLifecycleHandler(w, r)
return w
}
func getBucketLifecycleConfiguration(hc *handlerContext, bktName string) *data.LifecycleConfiguration {
w := getBucketLifecycleConfigurationBase(hc, bktName)
assertStatus(hc.t, w, http.StatusOK)
res := &data.LifecycleConfiguration{}
parseTestResponse(hc.t, w, res)
return res
}
func getBucketLifecycleConfigurationErr(hc *handlerContext, bktName string, err apierr.Error) {
w := getBucketLifecycleConfigurationBase(hc, bktName)
assertS3Error(hc.t, w, err)
}
func getBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().GetBucketLifecycleHandler(w, r)
return w
}
func deleteBucketLifecycleConfiguration(hc *handlerContext, bktName string) {
w := deleteBucketLifecycleConfigurationBase(hc, bktName)
assertStatus(hc.t, w, http.StatusNoContent)
}
func deleteBucketLifecycleConfigurationBase(hc *handlerContext, bktName string) *httptest.ResponseRecorder {
w, r := prepareTestRequest(hc, bktName, "", nil)
hc.Handler().DeleteBucketLifecycleHandler(w, r)
return w
}
func ptr[T any](t T) *T {
return &t
}
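
The generic ptr helper exists because Go does not allow taking the address of a literal directly (&21 is invalid), which table-driven tests need for pointer fields. A standalone usage sketch:

    package main

    import "fmt"

    // ptr mirrors the test helper above: it returns a pointer to any value.
    func ptr[T any](t T) *T { return &t }

    func main() {
        days := ptr(21)          // *int
        size := ptr(uint64(100)) // *uint64
        fmt.Println(*days, *size)
    }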

View file

@ -15,13 +15,12 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
 	var (
 		own     user.ID
 		res     *ListBucketsResponse
-		ctx     = r.Context()
-		reqInfo = middleware.GetReqInfo(ctx)
+		reqInfo = middleware.GetReqInfo(r.Context())
 	)

-	list, err := h.obj.ListBuckets(ctx)
+	list, err := h.obj.ListBuckets(r.Context())
 	if err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 		return
 	}
@ -44,6 +43,6 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
 	}

 	if err = middleware.EncodeToResponse(w, res); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }
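
A recurring change across these handler diffs is threading the request context into logAndSendError (the left-hand, newer side). A hypothetical, self-contained sketch of why that helps: a ctx-aware helper can pull request-scoped values, such as a trace ID, into the log record. Names and wiring here are assumptions, not the gateway's actual helper:

    package main

    import (
        "context"
        "errors"
        "log/slog"
        "net/http"
        "net/http/httptest"
    )

    type traceKey struct{}

    // logAndSendError illustrates the pattern: with ctx available, the log
    // record can carry request-scoped fields instead of losing them.
    func logAndSendError(ctx context.Context, w http.ResponseWriter, msg string, err error) {
        traceID, _ := ctx.Value(traceKey{}).(string)
        slog.Error(msg, "err", err, "traceID", traceID)
        http.Error(w, msg, http.StatusInternalServerError)
    }

    func main() {
        ctx := context.WithValue(context.Background(), traceKey{}, "abc123")
        w := httptest.NewRecorder()
        logAndSendError(ctx, w, "something went wrong", errors.New("boom"))
    }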

View file

@ -9,7 +9,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 )
@ -26,35 +26,34 @@
 )

 func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "couldn't put object locking configuration", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed))
+		h.logAndSendError(w, "couldn't put object locking configuration", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed))
 		return
 	}

 	lockingConf := &data.ObjectLockConfiguration{}
 	if err = h.cfg.NewXMLDecoder(r.Body).Decode(lockingConf); err != nil {
-		h.logAndSendError(ctx, w, "couldn't parse locking configuration", reqInfo, err)
+		h.logAndSendError(w, "couldn't parse locking configuration", reqInfo, err)
 		return
 	}

 	if err = checkLockConfiguration(lockingConf); err != nil {
-		h.logAndSendError(ctx, w, "invalid lock configuration", reqInfo, err)
+		h.logAndSendError(w, "invalid lock configuration", reqInfo, err)
 		return
 	}

-	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
+		h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
 		return
 	}
@ -67,31 +66,30 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
 		Settings: &newSettings,
 	}

-	if err = h.obj.PutBucketSettings(ctx, sp); err != nil {
-		h.logAndSendError(ctx, w, "couldn't put bucket settings", reqInfo, err)
+	if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
+		h.logAndSendError(w, "couldn't put bucket settings", reqInfo, err)
 		return
 	}
 }

 func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
+		h.logAndSendError(w, "object lock disabled", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
 		return
 	}

-	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
+		h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
 		return
 	}
@ -103,34 +101,33 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
 	}

 	if err = middleware.EncodeToResponse(w, settings.LockConfiguration); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

 func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
+		h.logAndSendError(w, "object lock disabled", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
 		return
 	}

 	legalHold := &data.LegalHold{}
 	if err = h.cfg.NewXMLDecoder(r.Body).Decode(legalHold); err != nil {
-		h.logAndSendError(ctx, w, "couldn't parse legal hold configuration", reqInfo, err)
+		h.logAndSendError(w, "couldn't parse legal hold configuration", reqInfo, err)
 		return
 	}

 	if legalHold.Status != legalHoldOn && legalHold.Status != legalHoldOff {
-		h.logAndSendError(ctx, w, "invalid legal hold status", reqInfo,
+		h.logAndSendError(w, "invalid legal hold status", reqInfo,
 			fmt.Errorf("invalid status %s", legalHold.Status))
 		return
 	}
@ -150,29 +147,28 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
 	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
 	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
+		h.logAndSendError(w, "invalid copies number", reqInfo, err)
 		return
 	}

-	if err = h.obj.PutLockInfo(ctx, p); err != nil {
-		h.logAndSendError(ctx, w, "couldn't head put legal hold", reqInfo, err)
+	if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
+		h.logAndSendError(w, "couldn't head put legal hold", reqInfo, err)
 		return
 	}
 }

 func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
+		h.logAndSendError(w, "object lock disabled", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
 		return
 	}
@ -182,9 +178,9 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
 		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
 	}

-	lockInfo, err := h.obj.GetLockInfo(ctx, p)
+	lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't head lock object", reqInfo, err)
+		h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
 		return
 	}
@ -194,34 +190,33 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
 	}

 	if err = middleware.EncodeToResponse(w, legalHold); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

 func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
+		h.logAndSendError(w, "object lock disabled", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
 		return
 	}

 	retention := &data.Retention{}
 	if err = h.cfg.NewXMLDecoder(r.Body).Decode(retention); err != nil {
-		h.logAndSendError(ctx, w, "couldn't parse object retention", reqInfo, err)
+		h.logAndSendError(w, "couldn't parse object retention", reqInfo, err)
 		return
 	}

-	lock, err := formObjectLockFromRetention(ctx, retention, r.Header)
+	lock, err := formObjectLockFromRetention(r.Context(), retention, r.Header)
 	if err != nil {
-		h.logAndSendError(ctx, w, "invalid retention configuration", reqInfo, err)
+		h.logAndSendError(w, "invalid retention configuration", reqInfo, err)
 		return
 	}
@ -236,29 +231,28 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
 	p.CopiesNumbers, err = h.pickCopiesNumbers(parseMetadata(r), reqInfo.Namespace, bktInfo.LocationConstraint)
 	if err != nil {
-		h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
+		h.logAndSendError(w, "invalid copies number", reqInfo, err)
 		return
 	}

-	if err = h.obj.PutLockInfo(ctx, p); err != nil {
-		h.logAndSendError(ctx, w, "couldn't put legal hold", reqInfo, err)
+	if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
+		h.logAndSendError(w, "couldn't put legal hold", reqInfo, err)
 		return
 	}
 }

 func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

 	if !bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "object lock disabled", reqInfo,
-			apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound))
+		h.logAndSendError(w, "object lock disabled", reqInfo,
+			apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound))
 		return
 	}
@ -268,14 +262,14 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
 		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
 	}

-	lockInfo, err := h.obj.GetLockInfo(ctx, p)
+	lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't head lock object", reqInfo, err)
+		h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
 		return
 	}

 	if !lockInfo.IsRetentionSet() {
-		h.logAndSendError(ctx, w, "retention lock isn't set", reqInfo, apierr.GetAPIError(apierr.ErrNoSuchKey))
+		h.logAndSendError(w, "retention lock isn't set", reqInfo, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey))
 		return
 	}
@ -288,7 +282,7 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
 	}

 	if err = middleware.EncodeToResponse(w, retention); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }
@ -320,7 +314,7 @@ func checkLockConfiguration(conf *data.ObjectLockConfiguration) error {
 func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig *data.ObjectLockConfiguration, header http.Header) (*data.ObjectLock, error) {
 	if !bktInfo.ObjectLockEnabled {
 		if existLockHeaders(header) {
-			return nil, apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound)
+			return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound)
 		}
 		return nil, nil
 	}
@ -352,7 +346,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
 	until := header.Get(api.AmzObjectLockRetainUntilDate)

 	if mode != "" && until == "" || mode == "" && until != "" {
-		return nil, apierr.GetAPIError(apierr.ErrObjectLockInvalidHeaders)
+		return nil, apiErrors.GetAPIError(apiErrors.ErrObjectLockInvalidHeaders)
 	}

 	if mode != "" {
@ -361,7 +355,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
 		}

 		if mode != complianceMode && mode != governanceMode {
-			return nil, apierr.GetAPIError(apierr.ErrUnknownWORMModeDirective)
+			return nil, apiErrors.GetAPIError(apiErrors.ErrUnknownWORMModeDirective)
 		}

 		objectLock.Retention.IsCompliance = mode == complianceMode
@ -370,7 +364,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
 	if until != "" {
 		retentionDate, err := time.Parse(time.RFC3339, until)
 		if err != nil {
-			return nil, apierr.GetAPIError(apierr.ErrInvalidRetentionDate)
+			return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidRetentionDate)
 		}
 		if objectLock.Retention == nil {
 			objectLock.Retention = &data.RetentionLock{}
@ -388,7 +382,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
 		}

 		if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
-			return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
+			return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
 		}
 	}
@ -403,16 +397,16 @@ func existLockHeaders(header http.Header) bool {
 func formObjectLockFromRetention(ctx context.Context, retention *data.Retention, header http.Header) (*data.ObjectLock, error) {
 	if retention.Mode != governanceMode && retention.Mode != complianceMode {
-		return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
+		return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
 	}

 	retentionDate, err := time.Parse(time.RFC3339, retention.RetainUntilDate)
 	if err != nil {
-		return nil, apierr.GetAPIError(apierr.ErrMalformedXML)
+		return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
 	}

 	if retentionDate.Before(layer.TimeNow(ctx)) {
-		return nil, apierr.GetAPIError(apierr.ErrPastObjectLockRetainDate)
+		return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
 	}

 	var bypass bool
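
The retention code above enforces two checks on the retain-until value, whether it arrives via the X-Amz-Object-Lock-Retain-Until-Date header or the Retention XML body: it must parse as RFC 3339 and must not lie in the past. A standalone sketch of the same logic (error wording is ours, not the gateway's):

    package main

    import (
        "fmt"
        "time"
    )

    // validateRetainUntil mirrors the two checks seen in the diff above.
    func validateRetainUntil(now time.Time, raw string) (time.Time, error) {
        t, err := time.Parse(time.RFC3339, raw)
        if err != nil {
            return time.Time{}, fmt.Errorf("malformed date %q: %w", raw, err)
        }
        if t.Before(now) {
            return time.Time{}, fmt.Errorf("retain-until date %s is in the past", t)
        }
        return t, nil
    }

    func main() {
        now := time.Now()
        fmt.Println(validateRetainUntil(now, now.Add(time.Minute).UTC().Format(time.RFC3339)))
        fmt.Println(validateRetainUntil(now, "dummy"))
    }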

View file

@ -12,7 +12,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"github.com/stretchr/testify/require"
@ -270,24 +270,23 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {
 	for _, tc := range []struct {
 		name          string
 		bucket        string
-		expectedError apierr.Error
+		expectedError apiErrors.Error
 		noError       bool
 		configuration *data.ObjectLockConfiguration
 	}{
 		{
-			name:          "bkt not found",
-			bucket:        "not-found-bucket",
-			expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
+			name:          "bkt not found",
+			expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
 		},
 		{
 			name:          "bkt lock disabled",
 			bucket:        bktLockDisabled,
-			expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotAllowed),
+			expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotAllowed),
 		},
 		{
 			name:          "invalid configuration",
 			bucket:        bktLockEnabled,
-			expectedError: apierr.GetAPIError(apierr.ErrInternalError),
+			expectedError: apiErrors.GetAPIError(apiErrors.ErrInternalError),
 			configuration: &data.ObjectLockConfiguration{ObjectLockEnabled: "dummy"},
 		},
 		{
@ -360,19 +359,18 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
 	for _, tc := range []struct {
 		name          string
 		bucket        string
-		expectedError apierr.Error
+		expectedError apiErrors.Error
 		noError       bool
 		expectedConf  *data.ObjectLockConfiguration
 	}{
 		{
-			name:          "bkt not found",
-			bucket:        "not-found-bucket",
-			expectedError: apierr.GetAPIError(apierr.ErrNoSuchBucket),
+			name:          "bkt not found",
+			expectedError: apiErrors.GetAPIError(apiErrors.ErrNoSuchBucket),
 		},
 		{
 			name:          "bkt lock disabled",
 			bucket:        bktLockDisabled,
-			expectedError: apierr.GetAPIError(apierr.ErrObjectLockConfigurationNotFound),
+			expectedError: apiErrors.GetAPIError(apiErrors.ErrObjectLockConfigurationNotFound),
 		},
 		{
 			name: "bkt lock enabled empty default",
@ -409,7 +407,7 @@ func TestGetBucketLockConfigurationHandler(t *testing.T) {
 	}
 }

-func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apierr.Error) {
+func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError apiErrors.Error) {
 	actualErrorResponse := &middleware.ErrorResponse{}
 	err := xml.NewDecoder(w.Result().Body).Decode(actualErrorResponse)
 	require.NoError(t, err)
@ -417,7 +415,7 @@ func assertS3Error(t *testing.T, w *httptest.ResponseRecorder, expectedError api
 	require.Equal(t, expectedError.HTTPStatusCode, w.Code)
 	require.Equal(t, expectedError.Code, actualErrorResponse.Code)
-	if expectedError.ErrCode != apierr.ErrInternalError {
+	if expectedError.ErrCode != apiErrors.ErrInternalError {
 		require.Equal(t, expectedError.Description, actualErrorResponse.Message)
 	}
 }
@ -475,33 +473,33 @@ func TestObjectRetention(t *testing.T) {
 	objName := "obj-for-retention"
 	createTestObject(hc, bktInfo, objName, encryption.Params{})

-	getObjectRetention(hc, bktName, objName, nil, apierr.ErrNoSuchKey)
+	getObjectRetention(hc, bktName, objName, nil, apiErrors.ErrNoSuchKey)

 	retention := &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
 	putObjectRetention(hc, bktName, objName, retention, false, 0)
 	getObjectRetention(hc, bktName, objName, retention, 0)

 	retention = &data.Retention{Mode: governanceMode, RetainUntilDate: time.Now().UTC().Add(time.Minute).Format(time.RFC3339)}
-	putObjectRetention(hc, bktName, objName, retention, false, apierr.ErrInternalError)
+	putObjectRetention(hc, bktName, objName, retention, false, apiErrors.ErrInternalError)

 	retention = &data.Retention{Mode: complianceMode, RetainUntilDate: time.Now().Add(time.Minute).UTC().Format(time.RFC3339)}
 	putObjectRetention(hc, bktName, objName, retention, true, 0)
 	getObjectRetention(hc, bktName, objName, retention, 0)

-	putObjectRetention(hc, bktName, objName, retention, true, apierr.ErrInternalError)
+	putObjectRetention(hc, bktName, objName, retention, true, apiErrors.ErrInternalError)
 }

-func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
+func getObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, nil)
 	hc.Handler().GetObjectRetentionHandler(w, r)
 	if errCode == 0 {
 		assertRetention(hc.t, w, retention)
 	} else {
-		assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
+		assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
 	}
 }

-func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apierr.ErrorCode) {
+func putObjectRetention(hc *handlerContext, bktName, objName string, retention *data.Retention, byPass bool, errCode apiErrors.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, retention)
 	if byPass {
 		r.Header.Set(api.AmzBypassGovernanceRetention, strconv.FormatBool(true))
@ -510,7 +508,7 @@ func putObjectRetention(hc *handlerContext, bktName, objName string, retention *
 	if errCode == 0 {
 		assertStatus(hc.t, w, http.StatusOK)
 	} else {
-		assertS3Error(hc.t, w, apierr.GetAPIError(errCode))
+		assertS3Error(hc.t, w, apiErrors.GetAPIError(errCode))
 	}
 }
@ -574,37 +572,37 @@ func TestPutLockErrors(t *testing.T) {
 	createTestBucketWithLock(hc, bktName, nil)

 	headers := map[string]string{api.AmzObjectLockMode: complianceMode}
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)

 	delete(headers, api.AmzObjectLockMode)
 	headers[api.AmzObjectLockRetainUntilDate] = time.Now().Add(time.Minute).Format(time.RFC3339)
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrObjectLockInvalidHeaders)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrObjectLockInvalidHeaders)

 	headers[api.AmzObjectLockMode] = "dummy"
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrUnknownWORMModeDirective)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrUnknownWORMModeDirective)

 	headers[api.AmzObjectLockMode] = complianceMode
 	headers[api.AmzObjectLockRetainUntilDate] = time.Now().Format(time.RFC3339)
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrPastObjectLockRetainDate)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrPastObjectLockRetainDate)

 	headers[api.AmzObjectLockRetainUntilDate] = "dummy"
-	putObjectWithLockFailed(t, hc, bktName, objName, headers, apierr.ErrInvalidRetentionDate)
+	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrInvalidRetentionDate)

 	putObject(hc, bktName, objName)

 	retention := &data.Retention{Mode: governanceMode}
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)

 	retention.Mode = "dummy"
 	retention.RetainUntilDate = time.Now().Add(time.Minute).UTC().Format(time.RFC3339)
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrMalformedXML)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)

 	retention.Mode = governanceMode
 	retention.RetainUntilDate = time.Now().UTC().Format(time.RFC3339)
-	putObjectRetentionFailed(t, hc, bktName, objName, retention, apierr.ErrPastObjectLockRetainDate)
+	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrPastObjectLockRetainDate)
 }

-func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apierr.ErrorCode) {
+func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName string, headers map[string]string, errCode apiErrors.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, nil)

 	for key, val := range headers {
@ -612,13 +610,13 @@ func putObjectWithLockFailed(t *testing.T, hc *handlerContext, bktName, objName
 	}

 	hc.Handler().PutObjectHandler(w, r)
-	assertS3Error(t, w, apierr.GetAPIError(errCode))
+	assertS3Error(t, w, apiErrors.GetAPIError(errCode))
 }

-func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apierr.ErrorCode) {
+func putObjectRetentionFailed(t *testing.T, hc *handlerContext, bktName, objName string, retention *data.Retention, errCode apiErrors.ErrorCode) {
 	w, r := prepareTestRequest(hc, bktName, objName, retention)
 	hc.Handler().PutObjectRetentionHandler(w, r)
-	assertS3Error(t, w, apierr.GetAPIError(errCode))
+	assertS3Error(t, w, apiErrors.GetAPIError(errCode))
 }

 func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, retention *data.Retention, delta float64) {
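
The putObjectRetention helper above drives the governance-bypass branch through the X-Amz-Bypass-Governance-Retention header. For orientation, a client-side sketch of such a request; the endpoint is a placeholder and a real request would also carry SigV4 authentication:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // Hand-written PutObjectRetention body; the date is illustrative.
        body := []byte(`<Retention><Mode>GOVERNANCE</Mode><RetainUntilDate>2030-01-01T00:00:00Z</RetainUntilDate></Retention>`)
        req, err := http.NewRequest(http.MethodPut,
            "http://localhost:8080/bucket/object?retention", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        // Allows shortening or removing a GOVERNANCE-mode lock, if permitted.
        req.Header.Set("x-amz-bypass-governance-retention", "true")
        fmt.Println(req.Method, req.URL, req.Header)
    }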

View file

@ -7,7 +7,6 @@ import (
"net/url" "net/url"
"path" "path"
"strconv" "strconv"
"strings"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
@ -104,20 +103,19 @@ const (
) )
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
uploadID := uuid.New() uploadID := uuid.New()
cannedACLStatus := aclHeadersStatus(r) cannedACLStatus := aclHeadersStatus(r)
additional := []zap.Field{zap.String("uploadID", uploadID.String())} additional := []zap.Field{zap.String("uploadID", uploadID.String())}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
if cannedACLStatus == aclStatusYes { if cannedACLStatus == aclStatusYes {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return return
} }
@ -133,14 +131,14 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
if len(r.Header.Get(api.AmzTagging)) > 0 { if len(r.Header.Get(api.AmzTagging)) > 0 {
p.Data.TagSet, err = parseTaggingHeader(r.Header) p.Data.TagSet, err = parseTaggingHeader(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse tagging", reqInfo, err, additional...) h.logAndSendError(w, "could not parse tagging", reqInfo, err, additional...)
return return
} }
} }
p.Info.Encryption, err = formEncryptionParams(r) p.Info.Encryption, err = formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...) h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return return
} }
@ -154,12 +152,12 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint) p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err, additional...) h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
return return
} }
if err = h.obj.CreateMultipartUpload(ctx, p); err != nil { if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
h.logAndSendError(ctx, w, "could create multipart upload", reqInfo, err, additional...) h.logAndSendError(w, "could create multipart upload", reqInfo, err, additional...)
return return
} }
@ -174,18 +172,17 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
} }
if err = middleware.EncodeToResponse(w, resp); err != nil { if err = middleware.EncodeToResponse(w, resp); err != nil {
h.logAndSendError(ctx, w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...) h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
return return
} }
} }
func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -198,17 +195,20 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
partNumber, err := strconv.Atoi(partNumStr) partNumber, err := strconv.Atoi(partNumStr)
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber { if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(ctx, w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...) h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
return return
} }
body, err := h.getBodyReader(r) body, err := h.getBodyReader(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to get body reader", reqInfo, err, additional...) h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
return return
} }
size := h.getPutPayloadSize(r) var size uint64
if r.ContentLength > 0 {
size = uint64(r.ContentLength)
}
p := &layer.UploadPartParams{ p := &layer.UploadPartParams{
Info: &layer.UploadInfoParams{ Info: &layer.UploadInfoParams{
@ -225,13 +225,13 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
p.Info.Encryption, err = formEncryptionParams(r) p.Info.Encryption, err = formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...) h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return return
} }
hash, err := h.obj.UploadPart(ctx, p) hash, err := h.obj.UploadPart(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not upload a part", reqInfo, err, additional...) h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
return return
} }
@ -241,7 +241,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(api.ETag, data.Quote(hash)) w.Header().Set(api.ETag, data.Quote(hash))
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
} }
@ -259,7 +259,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
partNumber, err := strconv.Atoi(partNumStr) partNumber, err := strconv.Atoi(partNumStr)
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber { if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
h.logAndSendError(ctx, w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...) h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
return return
} }
@ -270,26 +270,26 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
} }
srcBucket, srcObject, err := path2BucketObject(src) srcBucket, srcObject, err := path2BucketObject(src)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid source copy", reqInfo, err, additional...) h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
return return
} }
srcRange, err := parseRange(r.Header.Get(api.AmzCopySourceRange)) srcRange, err := parseRange(r.Header.Get(api.AmzCopySourceRange))
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse copy range", reqInfo, h.logAndSendError(w, "could not parse copy range", reqInfo,
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...) errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
return return
} }
srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner) srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get source bucket info", reqInfo, err, additional...) h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
return return
} }
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get target bucket info", reqInfo, err, additional...) h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
return return
} }
@ -302,35 +302,35 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm) srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
if err != nil { if err != nil {
if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" { if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
h.logAndSendError(ctx, w, "could not head source object version", reqInfo, h.logAndSendError(w, "could not head source object version", reqInfo,
errors.GetAPIError(errors.ErrBadRequest), additional...) errors.GetAPIError(errors.ErrBadRequest), additional...)
return return
} }
h.logAndSendError(ctx, w, "could not head source object", reqInfo, err, additional...) h.logAndSendError(w, "could not head source object", reqInfo, err, additional...)
return return
} }
args, err := parseCopyObjectArgs(r.Header) args, err := parseCopyObjectArgs(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse copy object args", reqInfo, h.logAndSendError(w, "could not parse copy object args", reqInfo,
errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...) errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
return return
} }
if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil { if err = checkPreconditions(srcInfo, args.Conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed), h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
additional...) additional...)
return return
} }
srcEncryptionParams, err := formCopySourceEncryptionParams(r) srcEncryptionParams, err := formCopySourceEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil { if err = srcEncryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
h.logAndSendError(ctx, w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...) h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
return return
} }
@ -350,13 +350,13 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
p.Info.Encryption, err = formEncryptionParams(r) p.Info.Encryption, err = formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err, additional...) h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
return return
} }
info, err := h.obj.UploadPartCopy(ctx, p) info, err := h.obj.UploadPartCopy(ctx, p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not upload part copy", reqInfo, err, additional...) h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
return return
} }
@ -370,23 +370,22 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
} }
if err = middleware.EncodeToResponse(w, response); err != nil { if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...) h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
} }
} }
func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
@ -402,12 +401,12 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
reqBody := new(CompleteMultipartUpload) reqBody := new(CompleteMultipartUpload)
if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil { if err = h.cfg.NewXMLDecoder(r.Body).Decode(reqBody); err != nil {
h.logAndSendError(ctx, w, "could not read complete multipart upload xml", reqInfo, h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()), additional...) fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()), additional...)
return return
} }
if len(reqBody.Parts) == 0 { if len(reqBody.Parts) == 0 {
h.logAndSendError(ctx, w, "invalid xml with parts", reqInfo, errors.GetAPIError(errors.ErrMalformedXML), additional...) h.logAndSendError(w, "invalid xml with parts", reqInfo, errors.GetAPIError(errors.ErrMalformedXML), additional...)
return return
} }
@ -421,7 +420,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
objInfo, err := h.completeMultipartUpload(r, c, bktInfo) objInfo, err := h.completeMultipartUpload(r, c, bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "complete multipart error", reqInfo, err, additional...) h.logAndSendError(w, "complete multipart error", reqInfo, err, additional...)
return return
} }
@ -437,7 +436,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
} }
if err = middleware.EncodeToResponse(w, response); err != nil { if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err, additional...) h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
} }
} }
@ -496,12 +495,11 @@ func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMult
} }
func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -514,7 +512,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
if maxUploadsStr != "" { if maxUploadsStr != "" {
val, err := strconv.Atoi(maxUploadsStr) val, err := strconv.Atoi(maxUploadsStr)
if err != nil || val < 1 || val > 1000 { if err != nil || val < 1 || val > 1000 {
h.logAndSendError(ctx, w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads)) h.logAndSendError(w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
return return
} }
maxUploads = val maxUploads = val
@ -530,29 +528,23 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName), UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
} }
if p.EncodingType != "" && strings.ToLower(p.EncodingType) != urlEncodingType { list, err := h.obj.ListMultipartUploads(r.Context(), p)
h.logAndSendError(ctx, w, "invalid encoding type", reqInfo, errors.GetAPIError(errors.ErrInvalidEncodingMethod))
return
}
list, err := h.obj.ListMultipartUploads(ctx, p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not list multipart uploads", reqInfo, err) h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil { if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }
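
The check added above rejects any encoding-type other than url, matching S3 semantics. A minimal standalone sketch of the same rule (the helper name validateEncodingType is illustrative, not part of this change):

package main

import (
	"fmt"
	"strings"
)

const urlEncodingType = "url"

// validateEncodingType mirrors the validation added to
// ListMultipartUploadsHandler: an empty value is allowed, and any
// non-empty value must be "url" (case-insensitive).
func validateEncodingType(encoding string) error {
	if encoding != "" && strings.ToLower(encoding) != urlEncodingType {
		return fmt.Errorf("invalid encoding method: %q", encoding)
	}
	return nil
}

func main() {
	fmt.Println(validateEncodingType(""))        // <nil>
	fmt.Println(validateEncodingType("URL"))     // <nil>
	fmt.Println(validateEncodingType("invalid")) // invalid encoding method: "invalid"
}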
func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -561,14 +553,14 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
queryValues = reqInfo.URL.Query() queryValues = reqInfo.URL.Query()
uploadID = queryValues.Get(uploadIDHeaderName) uploadID = queryValues.Get(uploadIDHeaderName)
additional = []zap.Field{zap.String("uploadID", uploadID)} additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
maxParts = layer.MaxSizePartsList maxParts = layer.MaxSizePartsList
) )
if queryValues.Get("max-parts") != "" { if queryValues.Get("max-parts") != "" {
val, err := strconv.Atoi(queryValues.Get("max-parts")) val, err := strconv.Atoi(queryValues.Get("max-parts"))
if err != nil || val < 0 { if err != nil || val < 0 {
h.logAndSendError(ctx, w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...) h.logAndSendError(w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
return return
} }
if val < layer.MaxSizePartsList { if val < layer.MaxSizePartsList {
@ -578,7 +570,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
if queryValues.Get("part-number-marker") != "" { if queryValues.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker < 0 { if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker < 0 {
h.logAndSendError(ctx, w, "invalid PartNumberMarker", reqInfo, err, additional...) h.logAndSendError(w, "invalid PartNumberMarker", reqInfo, err, additional...)
return return
} }
} }
@ -595,33 +587,32 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
p.Info.Encryption, err = formEncryptionParams(r) p.Info.Encryption, err = formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
list, err := h.obj.ListParts(ctx, p) list, err := h.obj.ListParts(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not list parts", reqInfo, err, additional...) h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)
return return
} }
if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil { if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }
func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName) uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
additional := []zap.Field{zap.String("uploadID", uploadID)} additional := []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
p := &layer.UploadInfoParams{ p := &layer.UploadInfoParams{
UploadID: uploadID, UploadID: uploadID,
@ -631,12 +622,12 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
p.Encryption, err = formEncryptionParams(r) p.Encryption, err = formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
if err = h.obj.AbortMultipartUpload(ctx, p); err != nil { if err = h.obj.AbortMultipartUpload(r.Context(), p); err != nil {
h.logAndSendError(ctx, w, "could not abort multipart upload", reqInfo, err, additional...) h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
return return
} }
@ -647,14 +638,14 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
res := ListMultipartUploadsResponse{ res := ListMultipartUploadsResponse{
Bucket: params.Bkt.Name, Bucket: params.Bkt.Name,
CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType), CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType),
Delimiter: s3PathEncode(params.Delimiter, params.EncodingType), Delimiter: params.Delimiter,
EncodingType: params.EncodingType, EncodingType: params.EncodingType,
IsTruncated: info.IsTruncated, IsTruncated: info.IsTruncated,
KeyMarker: s3PathEncode(params.KeyMarker, params.EncodingType), KeyMarker: params.KeyMarker,
MaxUploads: params.MaxUploads, MaxUploads: params.MaxUploads,
NextKeyMarker: s3PathEncode(info.NextKeyMarker, params.EncodingType), NextKeyMarker: info.NextKeyMarker,
NextUploadIDMarker: info.NextUploadIDMarker, NextUploadIDMarker: info.NextUploadIDMarker,
Prefix: s3PathEncode(params.Prefix, params.EncodingType), Prefix: params.Prefix,
UploadIDMarker: params.UploadIDMarker, UploadIDMarker: params.UploadIDMarker,
} }
@ -666,7 +657,7 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
ID: u.Owner.String(), ID: u.Owner.String(),
DisplayName: u.Owner.String(), DisplayName: u.Owner.String(),
}, },
Key: s3PathEncode(u.Key, params.EncodingType), Key: u.Key,
Owner: Owner{ Owner: Owner{
ID: u.Owner.String(), ID: u.Owner.String(),
DisplayName: u.Owner.String(), DisplayName: u.Owner.String(),
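
s3PathEncode, used above with a func(string, string) string shape, applies URL path encoding only when the client asked for encoding-type=url. A rough standard-library approximation, consistent with the expectations in TestListMultipartUploadsEncoding below (an assumption about the helper's behavior, not its actual code):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// pathEncode approximates s3PathEncode: keys are query-escaped, but "/"
// is kept literal so prefixes and delimiters stay readable.
func pathEncode(s, encodingType string) string {
	if encodingType != "url" {
		return s // no encoding requested
	}
	return strings.ReplaceAll(url.QueryEscape(s), "%2F", "/")
}

func main() {
	fmt.Println(pathEncode("foo()/bar", "url")) // foo%28%29/bar
	fmt.Println(pathEncode("asdf+b", "url"))    // asdf%2Bb
	fmt.Println(pathEncode("foo()/bar", ""))    // foo()/bar
}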

View file

@ -2,27 +2,21 @@ package handler
import ( import (
"crypto/md5" "crypto/md5"
"crypto/rand"
"crypto/tls" "crypto/tls"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptest"
"net/url" "net/url"
"strconv" "strconv"
"testing" "testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -41,7 +35,7 @@ func TestMultipartUploadInvalidPart(t *testing.T) {
etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize) etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize) etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2}) w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall)) assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
} }
func TestDeleteMultipartAllParts(t *testing.T) { func TestDeleteMultipartAllParts(t *testing.T) {
@ -105,7 +99,7 @@ func TestMultipartReUploadPart(t *testing.T) {
require.Equal(t, etag2, list.Parts[1].ETag) require.Equal(t, etag2, list.Parts[1].ETag)
w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2}) w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall)) assertS3Error(hc.t, w, s3Errors.GetAPIError(s3Errors.ErrEntityTooSmall))
etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst) etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast) etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)
@ -128,108 +122,6 @@ func TestMultipartReUploadPart(t *testing.T) {
equalDataSlices(t, append(data1, data2...), data) equalDataSlices(t, append(data1, data2...), data)
} }
func TestMultipartRemovePartsSplit(t *testing.T) {
bktName, objName := "bucket-to-upload-part", "object-multipart"
partSize := 8
t.Run("reupload part", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
require.Len(t, list.Parts, 1)
require.Equal(t, `"etag"`, list.Parts[0].ETag)
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
require.Len(t, list.Parts, 1)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Len(t, hc.tp.Objects(), 1)
})
t.Run("abort multipart", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
require.Empty(t, hc.tp.Objects())
})
t.Run("complete multipart", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
})
}
func containsOID(objects []*object.Object, objID oid.ID) bool {
for _, o := range objects {
oID, _ := o.ID()
if oID.Equals(objID) {
return true
}
}
return false
}
func TestListMultipartUploads(t *testing.T) { func TestListMultipartUploads(t *testing.T) {
hc := prepareHandlerContext(t) hc := prepareHandlerContext(t)
@ -253,14 +145,14 @@ func TestListMultipartUploads(t *testing.T) {
}) })
t.Run("check max uploads", func(t *testing.T) { t.Run("check max uploads", func(t *testing.T) {
listUploads := listMultipartUploads(hc, bktName, "", "", "", "", 2) listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", "", 2)
require.Len(t, listUploads.Uploads, 2) require.Len(t, listUploads.Uploads, 2)
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID) require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID) require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
}) })
t.Run("check prefix", func(t *testing.T) { t.Run("check prefix", func(t *testing.T) {
listUploads := listMultipartUploads(hc, bktName, "/my", "", "", "", -1) listUploads := listMultipartUploadsBase(hc, bktName, "/my", "", "", "", -1)
require.Len(t, listUploads.Uploads, 2) require.Len(t, listUploads.Uploads, 2)
require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID) require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID) require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
@ -268,7 +160,7 @@ func TestListMultipartUploads(t *testing.T) {
t.Run("check markers", func(t *testing.T) { t.Run("check markers", func(t *testing.T) {
t.Run("check only key-marker", func(t *testing.T) { t.Run("check only key-marker", func(t *testing.T) {
listUploads := listMultipartUploads(hc, bktName, "", "", "", objName2, -1) listUploads := listMultipartUploadsBase(hc, bktName, "", "", "", objName2, -1)
require.Len(t, listUploads.Uploads, 1) require.Len(t, listUploads.Uploads, 1)
// If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list. // If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID) require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
@ -279,7 +171,7 @@ func TestListMultipartUploads(t *testing.T) {
if uploadIDMarker > uploadInfo2.UploadID { if uploadIDMarker > uploadInfo2.UploadID {
uploadIDMarker = uploadInfo2.UploadID uploadIDMarker = uploadInfo2.UploadID
} }
listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, "", -1) listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, "", -1)
// If key-marker is not specified, the upload-id-marker parameter is ignored. // If key-marker is not specified, the upload-id-marker parameter is ignored.
require.Len(t, listUploads.Uploads, 3) require.Len(t, listUploads.Uploads, 3)
}) })
@ -287,7 +179,7 @@ func TestListMultipartUploads(t *testing.T) {
t.Run("check key-marker along with upload-id-marker", func(t *testing.T) { t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
uploadIDMarker := "00000000-0000-0000-0000-000000000000" uploadIDMarker := "00000000-0000-0000-0000-000000000000"
listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, objName3, -1) listUploads := listMultipartUploadsBase(hc, bktName, "", "", uploadIDMarker, objName3, -1)
require.Len(t, listUploads.Uploads, 1) require.Len(t, listUploads.Uploads, 1)
// If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included, // If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
// provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker. // provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
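
The marker comments in the test above can be summarized as one predicate. A sketch of the assumed semantics, matching the AWS ListMultipartUploads documentation rather than code from this repository:

package s3sketch

// uploadIncluded reports whether an upload survives the marker filters:
// without key-marker, everything is listed and upload-id-marker is ignored;
// without upload-id-marker, only keys strictly greater than key-marker pass;
// with both markers, uploads for the key equal to key-marker pass when
// their upload ID is lexicographically greater than upload-id-marker.
func uploadIncluded(key, uploadID, keyMarker, uploadIDMarker string) bool {
	if keyMarker == "" {
		return true
	}
	if uploadIDMarker == "" {
		return key > keyMarker
	}
	return key > keyMarker || (key == keyMarker && uploadID > uploadIDMarker)
}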
@ -522,7 +414,7 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
r.Header.Set(api.AmzContentSha256, tc.hash) r.Header.Set(api.AmzContentSha256, tc.hash)
hc.Handler().UploadPartHandler(w, r) hc.Handler().UploadPartHandler(w, r)
if tc.error { if tc.error {
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)) assertS3Error(t, w, s3Errors.GetAPIError(s3Errors.ErrContentSHA256Mismatch))
list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK) list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
require.Len(t, list.Parts, 1) require.Len(t, list.Parts, 1)
@ -623,73 +515,6 @@ func TestMultipartObjectLocation(t *testing.T) {
} }
} }
func TestUploadPartWithNegativeContentLength(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "bucket-to-upload-part", "object-multipart"
createTestBucket(hc, bktName)
partSize := 5 * 1024 * 1024
multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
partBody := make([]byte, partSize)
_, err := rand.Read(partBody)
require.NoError(hc.t, err)
query := make(url.Values)
query.Set(uploadIDQuery, multipartUpload.UploadID)
query.Set(partNumberQuery, "1")
w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
r.ContentLength = -1
hc.Handler().UploadPartHandler(w, r)
assertStatus(hc.t, w, http.StatusOK)
completeMultipartUpload(hc, bktName, objName, multipartUpload.UploadID, []string{w.Header().Get(api.ETag)})
res, _ := getObject(hc, bktName, objName)
equalDataSlices(t, partBody, res)
resp := getObjectAttributes(hc, bktName, objName, objectParts)
require.Len(t, resp.ObjectParts.Parts, 1)
require.Equal(t, partSize, resp.ObjectParts.Parts[0].Size)
}
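
The test above leans on a net/http convention: Request.ContentLength == -1 means the length is unknown, so the handler must read the body to EOF instead of trusting the header. A tiny illustration:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	r, _ := http.NewRequest(http.MethodPut, "http://example.local/obj", strings.NewReader("payload"))
	r.ContentLength = -1       // "length unknown", as in the test above
	b, _ := io.ReadAll(r.Body) // the full payload is still available
	fmt.Println(len(b))        // 7
}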
func TestListMultipartUploadsEncoding(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-to-list-uploads-encoding"
createTestBucket(hc, bktName)
listAllMultipartUploadsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
objects := []string{"foo()/bar", "foo()/bar/xyzzy", "asdf+b"}
for _, objName := range objects {
createMultipartUpload(hc, bktName, objName, nil)
}
listResponse := listMultipartUploadsURL(hc, bktName, "foo(", ")", "", "", -1)
require.Len(t, listResponse.CommonPrefixes, 1)
require.Equal(t, "foo%28%29", listResponse.CommonPrefixes[0].Prefix)
require.Equal(t, "foo%28", listResponse.Prefix)
require.Equal(t, "%29", listResponse.Delimiter)
require.Equal(t, "url", listResponse.EncodingType)
require.Equal(t, maxObjectList, listResponse.MaxUploads)
listResponse = listMultipartUploads(hc, bktName, "", "", "", "", 1)
require.Empty(t, listResponse.EncodingType)
listResponse = listMultipartUploadsURL(hc, bktName, "", "", "", listResponse.NextKeyMarker, 1)
require.Len(t, listResponse.CommonPrefixes, 0)
require.Len(t, listResponse.Uploads, 1)
require.Equal(t, "foo%28%29/bar", listResponse.Uploads[0].Key)
require.Equal(t, "asdf%2Bb", listResponse.KeyMarker)
require.Equal(t, "foo%28%29/bar", listResponse.NextKeyMarker)
require.Equal(t, "url", listResponse.EncodingType)
require.Equal(t, 1, listResponse.MaxUploads)
}
func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse { func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end) return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
} }
@ -715,42 +540,16 @@ func uploadPartCopyBase(hc *handlerContext, bktName, objName string, encrypted b
return uploadPartCopyResponse return uploadPartCopyResponse
} }
func listMultipartUploads(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, "", maxUploads)
assertStatus(hc.t, w, http.StatusOK)
res := &ListMultipartUploadsResponse{}
parseTestResponse(hc.t, w, res)
return res
}
func listMultipartUploadsURL(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, urlEncodingType, maxUploads)
assertStatus(hc.t, w, http.StatusOK)
res := &ListMultipartUploadsResponse{}
parseTestResponse(hc.t, w, res)
return res
}
func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse { func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
w := listMultipartUploadsBase(hc, bktName, "", "", "", "", "", -1) return listMultipartUploadsBase(hc, bktName, "", "", "", "", -1)
assertStatus(hc.t, w, http.StatusOK)
res := &ListMultipartUploadsResponse{}
parseTestResponse(hc.t, w, res)
return res
} }
func listAllMultipartUploadsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) { func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
w := listMultipartUploadsBase(hc, bktName, "", "", "", "", encoding, -1)
assertS3Error(hc.t, w, err)
}
func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker, encoding string, maxUploads int) *httptest.ResponseRecorder {
query := make(url.Values) query := make(url.Values)
query.Set(prefixQueryName, prefix) query.Set(prefixQueryName, prefix)
query.Set(delimiterQueryName, delimiter) query.Set(delimiterQueryName, delimiter)
query.Set(uploadIDMarkerQueryName, uploadIDMarker) query.Set(uploadIDMarkerQueryName, uploadIDMarker)
query.Set(keyMarkerQueryName, keyMarker) query.Set(keyMarkerQueryName, keyMarker)
query.Set(encodingTypeQueryName, encoding)
if maxUploads != -1 { if maxUploads != -1 {
query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads)) query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
} }
@ -758,7 +557,10 @@ func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, up
w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil) w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)
hc.Handler().ListMultipartUploadsHandler(w, r) hc.Handler().ListMultipartUploadsHandler(w, r)
return w listPartsResponse := &ListMultipartUploadsResponse{}
readResponse(hc.t, w, http.StatusOK, listPartsResponse)
return listPartsResponse
} }
func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse { func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {

View file

@ -7,6 +7,10 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
) )
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(r.Context(), w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported)) h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
}
func (h *handler) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
h.logAndSendError(w, "not supported", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotSupported))
} }

View file

@ -4,7 +4,6 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
"strings"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
@ -17,27 +16,26 @@ import (
// ListObjectsV1Handler handles objects listing requests for API version 1. // ListObjectsV1Handler handles objects listing requests for API version 1.
func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
params, err := parseListObjectsArgsV1(reqInfo) params, err := parseListObjectsArgsV1(reqInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to parse arguments", reqInfo, err) h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
return return
} }
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil { if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
list, err := h.obj.ListObjectsV1(ctx, params) list, err := h.obj.ListObjectsV1(r.Context(), params)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil { if err = middleware.EncodeToResponse(w, h.encodeV1(params, list)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }
@ -62,27 +60,26 @@ func (h *handler) encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjects
// ListObjectsV2Handler handles objects listing requests for API version 2. // ListObjectsV2Handler handles objects listing requests for API version 2.
func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
params, err := parseListObjectsArgsV2(reqInfo) params, err := parseListObjectsArgsV2(reqInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to parse arguments", reqInfo, err) h.logAndSendError(w, "failed to parse arguments", reqInfo, err)
return return
} }
if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil { if params.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
list, err := h.obj.ListObjectsV2(ctx, params) list, err := h.obj.ListObjectsV2(r.Context(), params)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
return return
} }
if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil { if err = middleware.EncodeToResponse(w, h.encodeV2(params, list)); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }
@ -156,10 +153,6 @@ func parseListObjectArgs(reqInfo *middleware.ReqInfo) (*layer.ListObjectsParamsC
res.Delimiter = queryValues.Get("delimiter") res.Delimiter = queryValues.Get("delimiter")
res.Encode = queryValues.Get("encoding-type") res.Encode = queryValues.Get("encoding-type")
if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
}
if queryValues.Get("max-keys") == "" { if queryValues.Get("max-keys") == "" {
res.MaxKeys = maxObjectList res.MaxKeys = maxObjectList
} else if res.MaxKeys, err = strconv.Atoi(queryValues.Get("max-keys")); err != nil || res.MaxKeys < 0 { } else if res.MaxKeys, err = strconv.Atoi(queryValues.Get("max-keys")); err != nil || res.MaxKeys < 0 {
@ -221,28 +214,27 @@ func fillContents(src []*data.ExtendedNodeVersion, encode string, fetchOwner, md
} }
func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
reqInfo := middleware.GetReqInfo(ctx)
p, err := parseListObjectVersionsRequest(reqInfo) p, err := parseListObjectVersionsRequest(reqInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to parse request", reqInfo, err) h.logAndSendError(w, "failed to parse request", reqInfo, err)
return return
} }
if p.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil { if p.BktInfo, err = h.getBucketAndCheckOwner(r, reqInfo.BucketName); err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
info, err := h.obj.ListObjectVersions(ctx, p) info, err := h.obj.ListObjectVersions(r.Context(), p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
return return
} }
response := encodeListObjectVersionsToResponse(p, info, p.BktInfo.Name, h.cfg.MD5Enabled()) response := encodeListObjectVersionsToResponse(p, info, p.BktInfo.Name, h.cfg.MD5Enabled())
if err = middleware.EncodeToResponse(w, response); err != nil { if err = middleware.EncodeToResponse(w, response); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
} }
@ -265,10 +257,6 @@ func parseListObjectVersionsRequest(reqInfo *middleware.ReqInfo) (*layer.ListObj
res.Encode = queryValues.Get("encoding-type") res.Encode = queryValues.Get("encoding-type")
res.VersionIDMarker = queryValues.Get("version-id-marker") res.VersionIDMarker = queryValues.Get("version-id-marker")
if res.Encode != "" && strings.ToLower(res.Encode) != urlEncodingType {
return nil, errors.GetAPIError(errors.ErrInvalidEncodingMethod)
}
if res.VersionIDMarker != "" && res.KeyMarker == "" { if res.VersionIDMarker != "" && res.KeyMarker == "" {
return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker) return nil, errors.GetAPIError(errors.VersionIDMarkerWithoutKeyMarker)
} }

View file

@ -4,25 +4,19 @@ import (
"context" "context"
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptest"
"net/url" "net/url"
"sort" "sort"
"strconv" "strconv"
"strings"
"testing" "testing"
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest" "go.uber.org/zap/zaptest"
"go.uber.org/zap/zaptest/observer"
) )
func TestParseContinuationToken(t *testing.T) { func TestParseContinuationToken(t *testing.T) {
@ -99,35 +93,8 @@ func TestListObjectsWithOldTreeNodes(t *testing.T) {
checkListVersionsOldNodes(hc, listVers.Version, objInfos) checkListVersionsOldNodes(hc, listVers.Version, objInfos)
} }
func TestListObjectsVersionsSkipLogTaggingNodesError(t *testing.T) {
loggerCore, observedLog := observer.New(zap.DebugLevel)
log := zap.New(loggerCore)
hcBase, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
require.NoError(t, err)
hc := &handlerContext{
handlerContextBase: hcBase,
t: t,
}
bktName, objName := "bucket-versioning-enabled", "versions/object"
bktInfo := createTestBucket(hc, bktName)
createTestObject(hc, bktInfo, objName, encryption.Params{})
createTestObject(hc, bktInfo, objName, encryption.Params{})
putObjectTagging(hc.t, hc, bktName, objName, map[string]string{"tag1": "val1"})
listObjectsVersions(hc, bktName, "", "", "", "", -1)
filtered := observedLog.Filter(func(entry observer.LoggedEntry) bool {
return strings.Contains(entry.Message, logs.ParseTreeNode)
})
require.Empty(t, filtered)
}
func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) { func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0, true) nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0)
require.NoError(hc.t, err) require.NoError(hc.t, err)
for _, node := range nodes { for _, node := range nodes {
@ -171,17 +138,11 @@ func checkListVersionsOldNodes(hc *handlerContext, list []ObjectVersionResponse,
} }
func TestListObjectsContextCanceled(t *testing.T) { func TestListObjectsContextCanceled(t *testing.T) {
log := zaptest.NewLogger(t) layerCfg := layer.DefaultCachesConfigs(zaptest.NewLogger(t))
layerCfg := layer.DefaultCachesConfigs(log)
layerCfg.SessionList.Lifetime = time.Hour layerCfg.SessionList.Lifetime = time.Hour
layerCfg.SessionList.Size = 1 layerCfg.SessionList.Size = 1
hcBase, err := prepareHandlerContextBase(layerCfg) hc := prepareHandlerContextBase(t, layerCfg)
require.NoError(t, err)
hc := &handlerContext{
handlerContextBase: hcBase,
t: t,
}
bktName := "bucket-versioning-enabled" bktName := "bucket-versioning-enabled"
bktInfo := createTestBucket(hc, bktName) bktInfo := createTestBucket(hc, bktName)
@ -681,80 +642,6 @@ func TestS3BucketListDelimiterNotSkipSpecial(t *testing.T) {
} }
} }
func TestS3BucketListMarkerUnreadable(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-for-listing"
bktInfo := createTestBucket(hc, bktName)
objects := []string{"bar", "baz", "foo", "quxx"}
for _, objName := range objects {
createTestObject(hc, bktInfo, objName, encryption.Params{})
}
list := listObjectsV1(hc, bktName, "", "", "\x0a", -1)
require.Equal(t, "\x0a", list.Marker)
require.False(t, list.IsTruncated)
require.Len(t, list.Contents, len(objects))
for i := 0; i < len(list.Contents); i++ {
require.Equal(t, objects[i], list.Contents[i].Key)
}
}
func TestS3BucketListMarkerNotInList(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-for-listing"
bktInfo := createTestBucket(hc, bktName)
objects := []string{"bar", "baz", "foo", "quxx"}
for _, objName := range objects {
createTestObject(hc, bktInfo, objName, encryption.Params{})
}
list := listObjectsV1(hc, bktName, "", "", "blah", -1)
require.Equal(t, "blah", list.Marker)
expected := []string{"foo", "quxx"}
require.Len(t, list.Contents, len(expected))
for i := 0; i < len(list.Contents); i++ {
require.Equal(t, expected[i], list.Contents[i].Key)
}
}
func TestListTruncatedCacheHit(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-for-listing"
bktInfo := createTestBucket(hc, bktName)
objects := []string{"bar", "baz", "foo", "quxx"}
for _, objName := range objects {
createTestObject(hc, bktInfo, objName, encryption.Params{})
}
list := listObjectsV1(hc, bktName, "", "", "", 2)
require.True(t, list.IsTruncated)
require.Len(t, list.Contents, 2)
for i := 0; i < len(list.Contents); i++ {
require.Equal(t, objects[i], list.Contents[i].Key)
}
cacheKey := cache.CreateListSessionCacheKey(bktInfo.CID, "", list.NextMarker)
list = listObjectsV1(hc, bktName, "", "", list.NextMarker, 2)
require.Nil(t, hc.cache.GetListSession(hc.owner, cacheKey))
require.False(t, list.IsTruncated)
require.Len(t, list.Contents, 2)
for i := 0; i < len(list.Contents); i++ {
require.Equal(t, objects[i+2], list.Contents[i].Key)
}
}
func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) { func TestMintVersioningListObjectVersionsVersionIDContinuation(t *testing.T) {
hc := prepareHandlerContext(t) hc := prepareHandlerContext(t)
@ -831,16 +718,6 @@ func TestListObjectVersionsEncoding(t *testing.T) {
require.Equal(t, 3, listResponse.MaxKeys) require.Equal(t, 3, listResponse.MaxKeys)
} }
func TestListingsWithInvalidEncodingType(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bucket-for-listing-invalid-encoding"
createTestBucket(hc, bktName)
listObjectsVersionsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
listObjectsV2Err(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
listObjectsV1Err(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))
}
func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) { func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, names []string) {
for i, v := range versions.Version { for i, v := range versions.Version {
require.Equal(t, names[i], v.Key) require.Equal(t, names[i], v.Key)
@ -848,19 +725,10 @@ func checkVersionsNames(t *testing.T, versions *ListObjectsVersionsResponse, nam
} }
func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response { func listObjectsV2(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken string, maxKeys int) *ListObjectsV2Response {
w := listObjectsV2Base(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys) return listObjectsV2Ext(hc, bktName, prefix, delimiter, startAfter, continuationToken, "", maxKeys)
assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsV2Response{}
parseTestResponse(hc.t, w, res)
return res
} }
func listObjectsV2Err(hc *handlerContext, bktName, encoding string, err apierr.Error) { func listObjectsV2Ext(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *ListObjectsV2Response {
w := listObjectsV2Base(hc, bktName, "", "", "", "", encoding, -1)
assertS3Error(hc.t, w, err)
}
func listObjectsV2Base(hc *handlerContext, bktName, prefix, delimiter, startAfter, continuationToken, encodingType string, maxKeys int) *httptest.ResponseRecorder {
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys) query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
query.Add("fetch-owner", "true") query.Add("fetch-owner", "true")
if len(startAfter) != 0 { if len(startAfter) != 0 {
@ -875,7 +743,10 @@ func listObjectsV2Base(hc *handlerContext, bktName, prefix, delimiter, startAfte
w, r := prepareTestFullRequest(hc, bktName, "", query, nil) w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
hc.Handler().ListObjectsV2Handler(w, r) hc.Handler().ListObjectsV2Handler(w, r)
return w assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsV2Response{}
parseTestResponse(hc.t, w, res)
return res
} }
func validateListV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int, func validateListV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int,
@ -935,54 +806,28 @@ func prepareCommonListObjectsQuery(prefix, delimiter string, maxKeys int) url.Va
} }
func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response { func listObjectsV1(hc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
w := listObjectsV1Base(hc, bktName, prefix, delimiter, marker, "", maxKeys) query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
if len(marker) != 0 {
query.Add("marker", marker)
}
w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
hc.Handler().ListObjectsV1Handler(w, r)
assertStatus(hc.t, w, http.StatusOK) assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsV1Response{} res := &ListObjectsV1Response{}
parseTestResponse(hc.t, w, res) parseTestResponse(hc.t, w, res)
return res return res
} }
func listObjectsV1Err(hc *handlerContext, bktName, encoding string, err apierr.Error) {
w := listObjectsV1Base(hc, bktName, "", "", "", encoding, -1)
assertS3Error(hc.t, w, err)
}
func listObjectsV1Base(hc *handlerContext, bktName, prefix, delimiter, marker, encoding string, maxKeys int) *httptest.ResponseRecorder {
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
if len(marker) != 0 {
query.Add("marker", marker)
}
if len(encoding) != 0 {
query.Add("encoding-type", encoding)
}
w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
hc.Handler().ListObjectsV1Handler(w, r)
return w
}
func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse { func listObjectsVersions(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
w := listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, "", maxKeys) return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, false)
assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsVersionsResponse{}
parseTestResponse(hc.t, w, res)
return res
} }
func listObjectsVersionsURL(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse { func listObjectsVersionsURL(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int) *ListObjectsVersionsResponse {
w := listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, urlEncodingType, maxKeys) return listObjectsVersionsBase(hc, bktName, prefix, delimiter, keyMarker, versionIDMarker, maxKeys, true)
assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsVersionsResponse{}
parseTestResponse(hc.t, w, res)
return res
} }
func listObjectsVersionsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) { func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker string, maxKeys int, encode bool) *ListObjectsVersionsResponse {
w := listObjectsVersionsBase(hc, bktName, "", "", "", "", encoding, -1)
assertS3Error(hc.t, w, err)
}
func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, keyMarker, versionIDMarker, encoding string, maxKeys int) *httptest.ResponseRecorder {
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys) query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
if len(keyMarker) != 0 { if len(keyMarker) != 0 {
query.Add("key-marker", keyMarker) query.Add("key-marker", keyMarker)
@ -990,11 +835,14 @@ func listObjectsVersionsBase(hc *handlerContext, bktName, prefix, delimiter, key
if len(versionIDMarker) != 0 { if len(versionIDMarker) != 0 {
query.Add("version-id-marker", versionIDMarker) query.Add("version-id-marker", versionIDMarker)
} }
if len(encoding) != 0 { if encode {
query.Add("encoding-type", encoding) query.Add("encoding-type", "url")
} }
w, r := prepareTestFullRequest(hc, bktName, "", query, nil) w, r := prepareTestFullRequest(hc, bktName, "", query, nil)
hc.Handler().ListBucketObjectVersionsHandler(w, r) hc.Handler().ListBucketObjectVersionsHandler(w, r)
return w assertStatus(hc.t, w, http.StatusOK)
res := &ListObjectsVersionsResponse{}
parseTestResponse(hc.t, w, res)
return res
} }

View file

@ -1,195 +0,0 @@
package handler
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
const maxPatchSize = 5 * 1024 * 1024 * 1024 // 5GB
func (h *handler) PatchObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
if _, ok := r.Header[api.ContentRange]; !ok {
h.logAndSendError(ctx, w, "missing Content-Range", reqInfo, errors.GetAPIError(errors.ErrMissingContentRange))
return
}
if _, ok := r.Header[api.ContentLength]; !ok {
h.logAndSendError(ctx, w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
return
}
conditional, err := parsePatchConditionalHeaders(r.Header)
if err != nil {
h.logAndSendError(ctx, w, "could not parse conditional headers", reqInfo, err)
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
return
}
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
return
}
srcObjPrm := &layer.HeadObjectParams{
Object: reqInfo.ObjectName,
BktInfo: bktInfo,
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
}
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
if err != nil {
h.logAndSendError(ctx, w, "could not find object", reqInfo, err)
return
}
srcObjInfo := extendedSrcObjInfo.ObjectInfo
if err = checkPreconditions(srcObjInfo, conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(ctx, w, "precondition failed", reqInfo, err)
return
}
srcSize, err := layer.GetObjectSize(srcObjInfo)
if err != nil {
h.logAndSendError(ctx, w, "failed to get source object size", reqInfo, err)
return
}
byteRange, err := parsePatchByteRange(r.Header.Get(api.ContentRange), srcSize)
if err != nil {
h.logAndSendError(ctx, w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if maxPatchSize < byteRange.End-byteRange.Start+1 {
h.logAndSendError(ctx, w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if uint64(r.ContentLength) != (byteRange.End - byteRange.Start + 1) {
h.logAndSendError(ctx, w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength))
return
}
if byteRange.Start > srcSize {
h.logAndSendError(ctx, w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
return
}
params := &layer.PatchObjectParams{
Object: extendedSrcObjInfo,
BktInfo: bktInfo,
NewBytes: r.Body,
Range: byteRange,
VersioningEnabled: settings.VersioningEnabled(),
}
params.CopiesNumbers, err = h.pickCopiesNumbers(nil, reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err)
return
}
extendedObjInfo, err := h.obj.PatchObject(ctx, params)
if err != nil {
if isErrObjectLocked(err) {
h.logAndSendError(ctx, w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} else {
h.logAndSendError(ctx, w, "could not patch object", reqInfo, err)
}
return
}
if settings.VersioningEnabled() {
w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
}
w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
resp := PatchObjectResult{
Object: PatchObject{
LastModified: extendedObjInfo.ObjectInfo.Created.UTC().Format(time.RFC3339),
ETag: data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())),
},
}
if err = middleware.EncodeToResponse(w, resp); err != nil {
h.logAndSendError(ctx, w, "could not encode PatchObjectResult to response", reqInfo, err)
return
}
}
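
Taken together, the handler above requires a Content-Range of the form bytes start-end/*, a Content-Length equal to the range length, a range no longer than 5 GiB, and a start offset within the object. A client-side sketch of a well-formed request (the endpoint URL is illustrative, and real requests also need S3 authentication):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("new") // replaces bytes 0..2 of the object
	req, _ := http.NewRequest(http.MethodPatch,
		"http://gateway.local/bucket-for-patch/object-for-patch",
		bytes.NewReader(payload))
	// The value after "/" is ignored by the parser, so "*" is fine.
	req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(payload)-1))
	req.ContentLength = int64(len(payload)) // must equal the range length
	// http.DefaultClient.Do(req) would apply the patch on a live gateway.
}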
func parsePatchConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
var err error
args := &conditionalArgs{
IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
}
if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
return nil, err
}
return args, nil
}
func parsePatchByteRange(rangeStr string, objSize uint64) (*layer.RangeParams, error) {
const prefix = "bytes "
if rangeStr == "" {
return nil, fmt.Errorf("empty range")
}
if !strings.HasPrefix(rangeStr, prefix) {
return nil, fmt.Errorf("unknown unit in range header")
}
rangeStr, _, found := strings.Cut(strings.TrimPrefix(rangeStr, prefix), "/") // value after / is ignored
if !found {
return nil, fmt.Errorf("invalid range: %s", rangeStr)
}
startStr, endStr, found := strings.Cut(rangeStr, "-")
if !found {
return nil, fmt.Errorf("invalid range: %s", rangeStr)
}
start, err := strconv.ParseUint(startStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid start byte: %s", startStr)
}
end := objSize - 1
if len(endStr) > 0 {
end, err = strconv.ParseUint(endStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid end byte: %s", endStr)
}
}
if start > end {
return nil, fmt.Errorf("start byte is greater than end byte")
}
return &layer.RangeParams{
Start: start,
End: end,
}, nil
}
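
Worked examples for the parser above, assuming objSize = 18 (the length of "old object content" used in TestPatch below); a sketch in the same package, not a test from this change:

func ExampleParsePatchByteRange() {
	for _, rng := range []string{
		"bytes 0-2/*",  // -> Start 0, End 2
		"bytes 4-/*",   // -> Start 4, End 17 (end defaults to objSize-1)
		"bytes 0-2",    // -> error: missing "/..." part
		"bytes 5-2/*",  // -> error: start byte is greater than end byte
		"octets 0-2/*", // -> error: unknown unit in range header
	} {
		p, err := parsePatchByteRange(rng, 18)
		fmt.Println(rng, p, err)
	}
}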

View file

@ -1,532 +0,0 @@
package handler
import (
"bytes"
"crypto/md5"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"github.com/stretchr/testify/require"
)
func TestPatch(t *testing.T) {
tc := prepareHandlerContext(t)
tc.config.md5Enabled = true
bktName, objName := "bucket-for-patch", "object-for-patch"
createTestBucket(tc, bktName)
content := []byte("old object content")
md5Hash := md5.New()
md5Hash.Write(content)
etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
created := time.Now()
tc.Handler().PutObjectHandler(w, r)
require.Equal(t, etag, w.Header().Get(api.ETag))
patchPayload := []byte("new")
sha256Hash := sha256.New()
sha256Hash.Write(patchPayload)
sha256Hash.Write(content[len(patchPayload):])
hash := hex.EncodeToString(sha256Hash.Sum(nil))
for _, tt := range []struct {
name string
rng string
headers map[string]string
code apierr.ErrorCode
}{
{
name: "success",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfUnmodifiedSince: created.Format(http.TimeFormat),
api.IfMatch: etag,
},
},
{
name: "invalid range syntax",
rng: "bytes 0-2",
code: apierr.ErrInvalidRange,
},
{
name: "invalid range length",
rng: "bytes 0-5/*",
code: apierr.ErrInvalidRangeLength,
},
{
name: "invalid range start",
rng: "bytes 20-22/*",
code: apierr.ErrRangeOutOfBounds,
},
{
name: "range is too long",
rng: "bytes 0-5368709120/*",
code: apierr.ErrInvalidRange,
},
{
name: "If-Unmodified-Since precondition are not satisfied",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
},
code: apierr.ErrPreconditionFailed,
},
{
name: "If-Match precondition are not satisfied",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfMatch: "etag",
},
code: apierr.ErrPreconditionFailed,
},
} {
t.Run(tt.name, func(t *testing.T) {
if tt.code == 0 {
res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
require.Equal(t, data.Quote(hash), res.Object.ETag)
} else {
patchObjectErr(tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
}
})
}
}
func TestPatchMultipartObject(t *testing.T) {
tc := prepareHandlerContextWithMinCache(t)
tc.config.md5Enabled = true
bktName, objName, partSize := "bucket-for-multipart-patch", "object-for-multipart-patch", 5*1024*1024
createTestBucket(tc, bktName)
t.Run("patch beginning of the first part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(patchSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{patchBody, data1[patchSize:], data2, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch middle of the first part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/4)+"-"+strconv.Itoa(partSize*3/4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/4], patchBody, data1[partSize*3/4:], data2, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch first and second parts", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3/4)+"-"+strconv.Itoa(partSize*5/4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize*3/4], patchBody, data2[partSize/4:], data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch all parts", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize * 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2-1)+"-"+strconv.Itoa(partSize/2+patchSize-2)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2-1], patchBody, data3[partSize/2-1:]}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch all parts and append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize * 3
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2)+"-"+strconv.Itoa(partSize/2+patchSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2], patchBody}, []byte("")), object)
require.Equal(t, partSize*7/2, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch second part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize)+"-"+strconv.Itoa(partSize*2-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, patchBody, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part, equal size", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part, increase size", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize+1)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
require.Equal(t, partSize*3+1, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part with offset and append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2+3)+"-"+strconv.Itoa(partSize*3+2)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3[:3], patchBody}, []byte("")), object)
require.Equal(t, partSize*3+3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3)+"-"+strconv.Itoa(partSize*4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3, patchBody}, []byte("")), object)
require.Equal(t, partSize*4, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch empty multipart", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, 0)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(partSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, patchBody, object)
require.Equal(t, partSize, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-1"))
})
}
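
// TestPatchWithVersion checks that patching an explicit versionId derives a
// new latest version from that version, while both pre-existing versions
// remain readable with their original content.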
func TestPatchWithVersion(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName := "bucket", "obj"
createVersionedBucket(hc, bktName)
objHeader := putObjectContent(hc, bktName, objName, "content")
putObjectContent(hc, bktName, objName, "some content")
patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))
res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
require.False(t, res.IsTruncated)
require.Len(t, res.Version, 3)
for _, version := range res.Version {
content := getObjectVersion(hc, bktName, objName, version.VersionID)
if version.IsLatest {
require.Equal(t, []byte("content updated"), content)
continue
}
if version.VersionID == objHeader.Get(api.AmzVersionID) {
require.Equal(t, []byte("content"), content)
continue
}
require.Equal(t, []byte("some content"), content)
}
}

func TestPatchEncryptedObject(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
createTestBucket(tc, bktName)
w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
setEncryptHeaders(r)
tc.Handler().PutObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
patchObjectErr(tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, apierr.ErrInternalError)
}

func TestPatchMissingHeaders(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
createTestBucket(tc, bktName)
w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
setEncryptHeaders(r)
tc.Handler().PutObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
w = httptest.NewRecorder()
r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
tc.Handler().PatchObjectHandler(w, r)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentRange))
w = httptest.NewRecorder()
r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
r.Header.Set(api.ContentRange, "bytes 0-2/*")
tc.Handler().PatchObjectHandler(w, r)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentLength))
}

func TestPatchInvalidBucketName(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket", "object"
createTestBucket(tc, bktName)
patchObjectErr(tc, "bkt_name", objName, "bytes 2-4/*", []byte("new"), nil, apierr.ErrInvalidBucketName)
}

func TestParsePatchByteRange(t *testing.T) {
for _, tt := range []struct {
rng string
size uint64
expected *layer.RangeParams
err bool
}{
{
rng: "bytes 2-7/*",
expected: &layer.RangeParams{
Start: 2,
End: 7,
},
},
{
rng: "bytes 2-7/3",
expected: &layer.RangeParams{
Start: 2,
End: 7,
},
},
{
rng: "bytes 2-/*",
size: 9,
expected: &layer.RangeParams{
Start: 2,
End: 8,
},
},
{
rng: "bytes 2-/3",
size: 9,
expected: &layer.RangeParams{
Start: 2,
End: 8,
},
},
{
rng: "",
err: true,
},
{
rng: "2-7/*",
err: true,
},
{
rng: "bytes 7-2/*",
err: true,
},
{
rng: "bytes 2-7",
err: true,
},
{
rng: "bytes 2/*",
err: true,
},
{
rng: "bytes a-7/*",
err: true,
},
{
rng: "bytes 2-a/*",
err: true,
},
} {
t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
rng, err := parsePatchByteRange(tt.rng, tt.size)
if tt.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.expected.Start, rng.Start)
require.Equal(t, tt.expected.End, rng.End)
}
})
}
}
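
// parsePatchByteRangeSketch is a minimal re-implementation consistent with the
// table above, included for readability only; the gateway's actual parser is
// parsePatchByteRange. Open-ended ranges ("bytes N-/...") resolve against the
// current object size, and the total after '/' is accepted but ignored. The
// behavior for an open-ended range on a zero-size object is an assumption.
func parsePatchByteRangeSketch(rng string, size uint64) (*layer.RangeParams, error) {
	const prefix = "bytes "
	if !strings.HasPrefix(rng, prefix) {
		return nil, fmt.Errorf("invalid Content-Range: %q", rng)
	}
	rangePart, _, found := strings.Cut(strings.TrimPrefix(rng, prefix), "/")
	if !found {
		return nil, fmt.Errorf("missing total part: %q", rng)
	}
	startStr, endStr, found := strings.Cut(rangePart, "-")
	if !found {
		return nil, fmt.Errorf("missing '-' separator: %q", rangePart)
	}
	start, err := strconv.ParseUint(startStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid start byte: %w", err)
	}
	end := uint64(0)
	if endStr == "" {
		if size == 0 {
			return nil, fmt.Errorf("open-ended range on empty object")
		}
		end = size - 1 // e.g. "bytes 2-/*" with size 9 yields End == 8
	} else if end, err = strconv.ParseUint(endStr, 10, 64); err != nil {
		return nil, fmt.Errorf("invalid end byte: %w", err)
	}
	if start > end {
		return nil, fmt.Errorf("range start %d greater than end %d", start, end)
	}
	return &layer.RangeParams{Start: start, End: end}, nil
}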
func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
assertStatus(t, w, http.StatusOK)
result := &PatchObjectResult{}
err := xml.NewDecoder(w.Result().Body).Decode(result)
require.NoError(t, err)
return result
}

func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
assertStatus(t, w, http.StatusOK)
result := &PatchObjectResult{}
err := xml.NewDecoder(w.Result().Body).Decode(result)
require.NoError(t, err)
return result
}

func patchObjectErr(tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code apierr.ErrorCode) {
w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
assertS3Error(tc.t, w, apierr.GetAPIError(code))
}

func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
query := make(url.Values)
if len(version) > 0 {
query.Add(api.QueryVersionID, version)
}
w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
r.Header.Set(api.ContentRange, rng)
r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
for k, v := range headers {
r.Header.Set(k, v)
}
tc.Handler().PatchObjectHandler(w, r)
return w
}
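
// newPatchRequestSketch shows, for reference, how a real client could form the
// same request that patchObjectBase builds above. This is an illustrative
// sketch, not gateway or SDK code; it assumes a non-empty payload and a plain
// HTTP endpoint URL.
func newPatchRequestSketch(endpoint, bktName, objName string, off uint64, payload []byte) (*http.Request, error) {
	r, err := http.NewRequest(http.MethodPatch, endpoint+"/"+bktName+"/"+objName, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// "/*" leaves the total size unasserted, so a range starting exactly at
	// the current object size appends (see the "append bytes" subtest above).
	r.Header.Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/*", off, off+uint64(len(payload))-1))
	r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
	return r, nil
}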

View file

@ -2,12 +2,11 @@ package handler
import ( import (
"bytes" "bytes"
"context"
"crypto/md5" "crypto/md5"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"errors" stderrors "errors"
"fmt" "fmt"
"io" "io"
"mime/multipart" "mime/multipart"
@ -21,7 +20,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
@ -92,11 +91,11 @@ func (p *postPolicy) CheckField(key string, value string) error {
} }
cond := p.condition(key) cond := p.condition(key)
if cond == nil { if cond == nil {
return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat) return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
} }
if !cond.match(value) { if !cond.match(value) {
return apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat) return errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
} }
return nil return nil
@ -193,24 +192,24 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket objInfo", reqInfo, err) h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
if cannedACLStatus == aclStatusYes { if cannedACLStatus == aclStatusYes {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return return
} }
tagSet, err := parseTaggingHeader(r.Header) tagSet, err := parseTaggingHeader(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse tagging header", reqInfo, err) h.logAndSendError(w, "could not parse tagging header", reqInfo, err)
return return
} }
@ -230,44 +229,44 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
encryptionParams, err := formEncryptionParams(r) encryptionParams, err := formEncryptionParams(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid sse headers", reqInfo, err) h.logAndSendError(w, "invalid sse headers", reqInfo, err)
return return
} }
body, err := h.getBodyReader(r) body, err := h.getBodyReader(r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed to get body reader", reqInfo, err) h.logAndSendError(w, "failed to get body reader", reqInfo, err)
return return
} }
if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 { if encodings := r.Header.Get(api.ContentEncoding); len(encodings) > 0 {
metadata[api.ContentEncoding] = encodings metadata[api.ContentEncoding] = encodings
} }
size := h.getPutPayloadSize(r) var size uint64
if r.ContentLength > 0 {
size = uint64(r.ContentLength)
}
params := &layer.PutObjectParams{ params := &layer.PutObjectParams{
BktInfo: bktInfo, BktInfo: bktInfo,
Object: reqInfo.ObjectName, Object: reqInfo.ObjectName,
Reader: body, Reader: body,
Size: size,
Header: metadata, Header: metadata,
Encryption: encryptionParams, Encryption: encryptionParams,
ContentMD5: getMD5Header(r), ContentMD5: r.Header.Get(api.ContentMD5),
ContentSHA256Hash: r.Header.Get(api.AmzContentSha256), ContentSHA256Hash: r.Header.Get(api.AmzContentSha256),
} }
if size > 0 {
params.Size = &size
}
params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint) params.CopiesNumbers, err = h.pickCopiesNumbers(metadata, reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "invalid copies number", reqInfo, err) h.logAndSendError(w, "invalid copies number", reqInfo, err)
return return
} }
params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header) params.Lock, err = formObjectLock(ctx, bktInfo, settings.LockConfiguration, r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not form object lock", reqInfo, err) h.logAndSendError(w, "could not form object lock", reqInfo, err)
return return
} }
@ -275,7 +274,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
if err != nil { if err != nil {
_, err2 := io.Copy(io.Discard, body) _, err2 := io.Copy(io.Discard, body)
err3 := body.Close() err3 := body.Close()
h.logAndSendError(ctx, w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3})) h.logAndSendError(w, "could not upload object", reqInfo, err, zap.Errors("body close errors", []error{err2, err3}))
return return
} }
objInfo := extendedObjInfo.ObjectInfo objInfo := extendedObjInfo.ObjectInfo
@ -290,8 +289,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
TagSet: tagSet, TagSet: tagSet,
NodeVersion: extendedObjInfo.NodeVersion, NodeVersion: extendedObjInfo.NodeVersion,
} }
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil { if err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err) h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
return return
} }
} }
@ -306,7 +305,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled()))) w.Header().Set(api.ETag, data.Quote(objInfo.ETag(h.cfg.MD5Enabled())))
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
} }
@ -333,16 +332,16 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() { if !chunkedEncoding && !h.cfg.BypassContentEncodingInChunks() {
return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'", return nil, fmt.Errorf("%w: request is not chunk encoded, encodings '%s'",
apierr.GetAPIError(apierr.ErrInvalidEncodingMethod), strings.Join(encodings, ",")) errors.GetAPIError(errors.ErrInvalidEncodingMethod), strings.Join(encodings, ","))
} }
decodeContentSize := r.Header.Get(api.AmzDecodedContentLength) decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
if len(decodeContentSize) == 0 { if len(decodeContentSize) == 0 {
return nil, apierr.GetAPIError(apierr.ErrMissingContentLength) return nil, errors.GetAPIError(errors.ErrMissingContentLength)
} }
if _, err := strconv.Atoi(decodeContentSize); err != nil { if _, err := strconv.Atoi(decodeContentSize); err != nil {
return nil, fmt.Errorf("%w: parse decoded content length: %s", apierr.GetAPIError(apierr.ErrMissingContentLength), err.Error()) return nil, fmt.Errorf("%w: parse decoded content length: %s", errors.GetAPIError(errors.ErrMissingContentLength), err.Error())
} }
chunkReader, err := newSignV4ChunkedReader(r) chunkReader, err := newSignV4ChunkedReader(r)
@ -378,43 +377,43 @@ func formEncryptionParamsBase(r *http.Request, isCopySource bool) (enc encryptio
} }
if r.TLS == nil { if r.TLS == nil {
return enc, apierr.GetAPIError(apierr.ErrInsecureSSECustomerRequest) return enc, errors.GetAPIError(errors.ErrInsecureSSECustomerRequest)
} }
if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 { if len(sseCustomerKey) > 0 && len(sseCustomerAlgorithm) == 0 {
return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerAlgorithm) return enc, errors.GetAPIError(errors.ErrMissingSSECustomerAlgorithm)
} }
if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 { if len(sseCustomerAlgorithm) > 0 && len(sseCustomerKey) == 0 {
return enc, apierr.GetAPIError(apierr.ErrMissingSSECustomerKey) return enc, errors.GetAPIError(errors.ErrMissingSSECustomerKey)
} }
if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm { if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
return enc, apierr.GetAPIError(apierr.ErrInvalidEncryptionAlgorithm) return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
} }
key, err := base64.StdEncoding.DecodeString(sseCustomerKey) key, err := base64.StdEncoding.DecodeString(sseCustomerKey)
if err != nil { if err != nil {
if isCopySource { if isCopySource {
return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters) return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
} }
return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey) return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
} }
if len(key) != layer.AESKeySize { if len(key) != layer.AESKeySize {
if isCopySource { if isCopySource {
return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerParameters) return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerParameters)
} }
return enc, apierr.GetAPIError(apierr.ErrInvalidSSECustomerKey) return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
} }
keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5) keyMD5, err := base64.StdEncoding.DecodeString(sseCustomerKeyMD5)
if err != nil { if err != nil {
return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch) return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
} }
md5Sum := md5.Sum(key) md5Sum := md5.Sum(key)
if !bytes.Equal(md5Sum[:], keyMD5) { if !bytes.Equal(md5Sum[:], keyMD5) {
return enc, apierr.GetAPIError(apierr.ErrSSECustomerKeyMD5Mismatch) return enc, errors.GetAPIError(errors.ErrSSECustomerKeyMD5Mismatch)
} }
params, err := encryption.NewParams(key) params, err := encryption.NewParams(key)
@ -435,7 +434,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
policy, err := checkPostPolicy(r, reqInfo, metadata) policy, err := checkPostPolicy(r, reqInfo, metadata)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "failed check policy", reqInfo, err) h.logAndSendError(w, "failed check policy", reqInfo, err)
return return
} }
@ -443,31 +442,31 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
buffer := bytes.NewBufferString(tagging) buffer := bytes.NewBufferString(tagging)
tags := new(data.Tagging) tags := new(data.Tagging)
if err = h.cfg.NewXMLDecoder(buffer).Decode(tags); err != nil { if err = h.cfg.NewXMLDecoder(buffer).Decode(tags); err != nil {
h.logAndSendError(ctx, w, "could not decode tag set", reqInfo, h.logAndSendError(w, "could not decode tag set", reqInfo,
fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error())) fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error()))
return return
} }
tagSet, err = h.readTagSet(tags) tagSet, err = h.readTagSet(tags)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not read tag set", reqInfo, err) h.logAndSendError(w, "could not read tag set", reqInfo, err)
return return
} }
} }
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket objInfo", reqInfo, err) h.logAndSendError(w, "could not get bucket objInfo", reqInfo, err)
return return
} }
settings, err := h.obj.GetBucketSettings(ctx, bktInfo) settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err) h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return return
} }
if acl := auth.MultipartFormValue(r, "acl"); acl != "" && acl != basicACLPrivate { if acl := auth.MultipartFormValue(r, "acl"); acl != "" && acl != basicACLPrivate {
h.logAndSendError(ctx, w, "acl not supported for this bucket", reqInfo, apierr.GetAPIError(apierr.ErrAccessControlListNotSupported)) h.logAndSendError(w, "acl not supported for this bucket", reqInfo, errors.GetAPIError(errors.ErrAccessControlListNotSupported))
return return
} }
@ -485,7 +484,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
if reqInfo.ObjectName == "" || strings.Contains(reqInfo.ObjectName, "${filename}") { if reqInfo.ObjectName == "" || strings.Contains(reqInfo.ObjectName, "${filename}") {
_, head, err := r.FormFile("file") _, head, err := r.FormFile("file")
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse file field", reqInfo, err) h.logAndSendError(w, "could not parse file field", reqInfo, err)
return return
} }
filename = head.Filename filename = head.Filename
@ -494,7 +493,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
var head *multipart.FileHeader var head *multipart.FileHeader
contentReader, head, err = r.FormFile("file") contentReader, head, err = r.FormFile("file")
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse file field", reqInfo, err) h.logAndSendError(w, "could not parse file field", reqInfo, err)
return return
} }
size = uint64(head.Size) size = uint64(head.Size)
@ -508,12 +507,12 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
} }
if reqInfo.ObjectName == "" { if reqInfo.ObjectName == "" {
h.logAndSendError(ctx, w, "missing object name", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument)) h.logAndSendError(w, "missing object name", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
return return
} }
if !policy.CheckContentLength(size) { if !policy.CheckContentLength(size) {
h.logAndSendError(ctx, w, "invalid content-length", reqInfo, apierr.GetAPIError(apierr.ErrInvalidArgument)) h.logAndSendError(w, "invalid content-length", reqInfo, errors.GetAPIError(errors.ErrInvalidArgument))
return return
} }
@ -521,13 +520,13 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
BktInfo: bktInfo, BktInfo: bktInfo,
Object: reqInfo.ObjectName, Object: reqInfo.ObjectName,
Reader: contentReader, Reader: contentReader,
Size: &size, Size: size,
Header: metadata, Header: metadata,
} }
extendedObjInfo, err := h.obj.PutObject(ctx, params) extendedObjInfo, err := h.obj.PutObject(ctx, params)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not upload object", reqInfo, err) h.logAndSendError(w, "could not upload object", reqInfo, err)
return return
} }
objInfo := extendedObjInfo.ObjectInfo objInfo := extendedObjInfo.ObjectInfo
@ -543,7 +542,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
} }
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil { if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
h.logAndSendError(ctx, w, "could not upload object tagging", reqInfo, err) h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
return return
} }
} }
@ -571,10 +570,10 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(status) w.WriteHeader(status)
respData, err := middleware.EncodeResponse(resp) respData, err := middleware.EncodeResponse(resp)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "encode response", reqInfo, err) h.logAndSendError(w, "encode response", reqInfo, err)
} }
if _, err = w.Write(respData); err != nil { if _, err = w.Write(respData); err != nil {
h.logAndSendError(ctx, w, "something went wrong", reqInfo, err) h.logAndSendError(w, "something went wrong", reqInfo, err)
} }
return return
} }
@ -595,13 +594,13 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
return nil, fmt.Errorf("could not unmarshal policy: %w", err) return nil, fmt.Errorf("could not unmarshal policy: %w", err)
} }
if policy.Expiration.Before(time.Now()) { if policy.Expiration.Before(time.Now()) {
return nil, fmt.Errorf("policy is expired: %w", apierr.GetAPIError(apierr.ErrInvalidArgument)) return nil, fmt.Errorf("policy is expired: %w", errors.GetAPIError(errors.ErrInvalidArgument))
} }
policy.empty = false policy.empty = false
} }
if r.MultipartForm == nil { if r.MultipartForm == nil {
return nil, errors.New("empty multipart form") return nil, stderrors.New("empty multipart form")
} }
for key, v := range r.MultipartForm.Value { for key, v := range r.MultipartForm.Value {
@ -632,7 +631,7 @@ func checkPostPolicy(r *http.Request, reqInfo *middleware.ReqInfo, metadata map[
for _, cond := range policy.Conditions { for _, cond := range policy.Conditions {
if cond.Key == "bucket" { if cond.Key == "bucket" {
if !cond.match(reqInfo.BucketName) { if !cond.match(reqInfo.BucketName) {
return nil, apierr.GetAPIError(apierr.ErrPostPolicyConditionInvalidFormat) return nil, errors.GetAPIError(errors.ErrPostPolicyConditionInvalidFormat)
} }
} }
} }
@ -674,10 +673,10 @@ func parseTaggingHeader(header http.Header) (map[string]string, error) {
if tagging := header.Get(api.AmzTagging); len(tagging) > 0 { if tagging := header.Get(api.AmzTagging); len(tagging) > 0 {
queries, err := url.ParseQuery(tagging) queries, err := url.ParseQuery(tagging)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrInvalidArgument) return nil, errors.GetAPIError(errors.ErrInvalidArgument)
} }
if len(queries) > maxTags { if len(queries) > maxTags {
return nil, apierr.GetAPIError(apierr.ErrInvalidTagsSizeExceed) return nil, errors.GetAPIError(errors.ErrInvalidTagsSizeExceed)
} }
tagSet = make(map[string]string, len(queries)) tagSet = make(map[string]string, len(queries))
for k, v := range queries { for k, v := range queries {
@ -727,7 +726,7 @@ func (h *handler) parseCommonCreateBucketParams(reqInfo *middleware.ReqInfo, box
} }
if p.SessionContainerCreation == nil { if p.SessionContainerCreation == nil {
return nil, nil, fmt.Errorf("%w: couldn't find session token for put", apierr.GetAPIError(apierr.ErrAccessDenied)) return nil, nil, fmt.Errorf("%w: couldn't find session token for put", errors.GetAPIError(errors.ErrAccessDenied))
} }
if err := checkBucketName(reqInfo.BucketName); err != nil { if err := checkBucketName(reqInfo.BucketName); err != nil {
@ -759,33 +758,32 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
boxData, err := middleware.GetBoxData(ctx) boxData, err := middleware.GetBoxData(ctx)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "get access box from request", reqInfo, err) h.logAndSendError(w, "get access box from request", reqInfo, err)
return return
} }
key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r) key, p, err := h.parseCommonCreateBucketParams(reqInfo, boxData, r)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "parse create bucket params", reqInfo, err) h.logAndSendError(w, "parse create bucket params", reqInfo, err)
return return
} }
cannedACL, err := parseCannedACL(r.Header) cannedACL, err := parseCannedACL(r.Header)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not parse canned ACL", reqInfo, err) h.logAndSendError(w, "could not parse canned ACL", reqInfo, err)
return return
} }
bktInfo, err := h.obj.CreateBucket(ctx, p) bktInfo, err := h.obj.CreateBucket(ctx, p)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not create bucket", reqInfo, err) h.logAndSendError(w, "could not create bucket", reqInfo, err)
return return
} }
h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID)) h.reqLogger(ctx).Info(logs.BucketIsCreated, zap.Stringer("container_id", bktInfo.CID))
chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID) chains := bucketCannedACLToAPERules(cannedACL, reqInfo, bktInfo.CID)
if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil { if err = h.ape.SaveACLChains(bktInfo.CID.EncodeToString(), chains); err != nil {
cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains) h.logAndSendError(w, "failed to add morph rule chain", reqInfo, err)
h.logAndSendError(ctx, w, "failed to add morph rule chain", reqInfo, err, zap.NamedError("cleanup_error", cleanErr))
return return
} }
@ -806,40 +804,17 @@ func (h *handler) createBucketHandlerPolicy(w http.ResponseWriter, r *http.Reque
return h.obj.PutBucketSettings(ctx, sp) return h.obj.PutBucketSettings(ctx, sp)
}, h.putBucketSettingsRetryer()) }, h.putBucketSettingsRetryer())
if err != nil { if err != nil {
cleanErr := h.cleanupBucketCreation(ctx, reqInfo, bktInfo, boxData, chains) h.logAndSendError(w, "couldn't save bucket settings", reqInfo, err,
h.logAndSendError(ctx, w, "couldn't save bucket settings", reqInfo, err, zap.String("container_id", bktInfo.CID.EncodeToString()))
zap.String("container_id", bktInfo.CID.EncodeToString()), zap.NamedError("cleanup_error", cleanErr))
return return
} }
if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil { if err = middleware.WriteSuccessResponseHeadersOnly(w); err != nil {
h.logAndSendError(ctx, w, "write response", reqInfo, err) h.logAndSendError(w, "write response", reqInfo, err)
return return
} }
} }
func (h *handler) cleanupBucketCreation(ctx context.Context, reqInfo *middleware.ReqInfo, bktInfo *data.BucketInfo, boxData *accessbox.Box, chains []*chain.Chain) error {
prm := &layer.DeleteBucketParams{
BktInfo: bktInfo,
SessionToken: boxData.Gate.SessionTokenForDelete(),
}
if err := h.obj.DeleteContainer(ctx, prm); err != nil {
return err
}
chainIDs := make([]chain.ID, len(chains))
for i, c := range chains {
chainIDs[i] = c.ID
}
if err := h.ape.DeleteBucketPolicy(reqInfo.Namespace, bktInfo.CID, chainIDs); err != nil {
return fmt.Errorf("delete bucket acl policy: %w", err)
}
return nil
}
func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 { func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 {
return retry.NewStandard(func(options *retry.StandardOptions) { return retry.NewStandard(func(options *retry.StandardOptions) {
options.MaxAttempts = h.cfg.RetryMaxAttempts() options.MaxAttempts = h.cfg.RetryMaxAttempts()
@ -853,7 +828,7 @@ func (h *handler) putBucketSettingsRetryer() aws.RetryerV2 {
} }
options.Retryables = []retry.IsErrorRetryable{retry.IsErrorRetryableFunc(func(err error) aws.Ternary { options.Retryables = []retry.IsErrorRetryable{retry.IsErrorRetryableFunc(func(err error) aws.Ternary {
if errors.Is(err, tree.ErrNodeAccessDenied) { if stderrors.Is(err, tree.ErrNodeAccessDenied) {
return aws.TrueTernary return aws.TrueTernary
} }
return aws.FalseTernary return aws.FalseTernary
@ -982,7 +957,7 @@ func (h handler) setPlacementPolicy(prm *layer.CreateBucketParams, namespace, lo
return nil return nil
} }
return apierr.GetAPIError(apierr.ErrInvalidLocationConstraint) return errors.GetAPIError(errors.ErrInvalidLocationConstraint)
} }
func isLockEnabled(log *zap.Logger, header http.Header) bool { func isLockEnabled(log *zap.Logger, header http.Header) bool {
@ -1001,22 +976,28 @@ func isLockEnabled(log *zap.Logger, header http.Header) bool {
func checkBucketName(bucketName string) error { func checkBucketName(bucketName string) error {
if len(bucketName) < 3 || len(bucketName) > 63 { if len(bucketName) < 3 || len(bucketName) > 63 {
return apierr.GetAPIError(apierr.ErrInvalidBucketName) return errors.GetAPIError(errors.ErrInvalidBucketName)
} }
if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") { if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") {
return apierr.GetAPIError(apierr.ErrInvalidBucketName) return errors.GetAPIError(errors.ErrInvalidBucketName)
} }
if net.ParseIP(bucketName) != nil { if net.ParseIP(bucketName) != nil {
return apierr.GetAPIError(apierr.ErrInvalidBucketName) return errors.GetAPIError(errors.ErrInvalidBucketName)
} }
for i, r := range bucketName { labels := strings.Split(bucketName, ".")
if r == '.' || (!isAlphaNum(r) && r != '-') { for _, label := range labels {
return apierr.GetAPIError(apierr.ErrInvalidBucketName) if len(label) == 0 {
return errors.GetAPIError(errors.ErrInvalidBucketName)
}
for i, r := range label {
if !isAlphaNum(r) && r != '-' {
return errors.GetAPIError(errors.ErrInvalidBucketName)
}
if (i == 0 || i == len(label)-1) && r == '-' {
return errors.GetAPIError(errors.ErrInvalidBucketName)
} }
if (i == 0 || i == len(bucketName)-1) && r == '-' {
return apierr.GetAPIError(apierr.ErrInvalidBucketName)
} }
} }
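
For readability, the label-based variant of checkBucketName shown in this hunk, reconstructed as plain Go (a sketch assembled from the diff; isAlphaNum and the error helpers come from the surrounding package):

func checkBucketName(bucketName string) error {
	if len(bucketName) < 3 || len(bucketName) > 63 {
		return errors.GetAPIError(errors.ErrInvalidBucketName)
	}
	if strings.HasPrefix(bucketName, "xn--") || strings.HasSuffix(bucketName, "-s3alias") {
		return errors.GetAPIError(errors.ErrInvalidBucketName)
	}
	if net.ParseIP(bucketName) != nil {
		return errors.GetAPIError(errors.ErrInvalidBucketName)
	}
	// Dots separate DNS labels: each label must be non-empty, contain only
	// alphanumerics and hyphens, and must not begin or end with a hyphen.
	for _, label := range strings.Split(bucketName, ".") {
		if len(label) == 0 {
			return errors.GetAPIError(errors.ErrInvalidBucketName)
		}
		for i, r := range label {
			if !isAlphaNum(r) && r != '-' {
				return errors.GetAPIError(errors.ErrInvalidBucketName)
			}
			if (i == 0 || i == len(label)-1) && r == '-' {
				return errors.GetAPIError(errors.ErrInvalidBucketName)
			}
		}
	}
	return nil
}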
@ -1034,17 +1015,7 @@ func (h *handler) parseLocationConstraint(r *http.Request) (*createBucketParams,
params := new(createBucketParams) params := new(createBucketParams)
if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil { if err := h.cfg.NewXMLDecoder(r.Body).Decode(params); err != nil {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()) return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrMalformedXML), err.Error())
} }
return params, nil return params, nil
} }
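
// getMD5Header returns nil when the Content-MD5 header is absent, letting
// callers distinguish a missing header from an explicitly empty value (the
// latter is rejected as ErrInvalidDigest in the handler tests below).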
func getMD5Header(r *http.Request) *string {
var md5Hdr *string
if len(r.Header.Values(api.ContentMD5)) != 0 {
hdr := r.Header.Get(api.ContentMD5)
md5Hdr = &hdr
}
return md5Hdr
}

View file

@ -7,7 +7,6 @@ import (
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"io" "io"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
@ -21,7 +20,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4" v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
@ -30,11 +29,6 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
const (
awsChunkedRequestExampleDecodedContentLength = 66560
awsChunkedRequestExampleContentLength = 66824
)
func TestCheckBucketName(t *testing.T) { func TestCheckBucketName(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
name string name string
@ -42,10 +36,10 @@ func TestCheckBucketName(t *testing.T) {
}{ }{
{name: "bucket"}, {name: "bucket"},
{name: "2bucket"}, {name: "2bucket"},
{name: "buc.ket"},
{name: "buc-ket"}, {name: "buc-ket"},
{name: "abc"}, {name: "abc"},
{name: "63aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, {name: "63aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
{name: "buc.ket", err: true},
{name: "buc.-ket", err: true}, {name: "buc.-ket", err: true},
{name: "bucket.", err: true}, {name: "bucket.", err: true},
{name: ".bucket", err: true}, {name: ".bucket", err: true},
@ -205,7 +199,7 @@ func TestPostObject(t *testing.T) {
t.Run(tc.key+";"+tc.filename, func(t *testing.T) { t.Run(tc.key+";"+tc.filename, func(t *testing.T) {
w := postObjectBase(hc, ns, bktName, tc.key, tc.filename, tc.content) w := postObjectBase(hc, ns, bktName, tc.key, tc.filename, tc.content)
if tc.err { if tc.err {
assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrInternalError)) assertS3Error(hc.t, w, s3errors.GetAPIError(s3errors.ErrInternalError))
return return
} }
assertStatus(hc.t, w, http.StatusNoContent) assertStatus(hc.t, w, http.StatusNoContent)
@ -251,10 +245,6 @@ func TestPutObjectWithNegativeContentLength(t *testing.T) {
tc.Handler().HeadObjectHandler(w, r) tc.Handler().HeadObjectHandler(w, r)
assertStatus(t, w, http.StatusOK) assertStatus(t, w, http.StatusOK)
require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength)) require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
result := listVersions(t, tc, bktName)
require.Len(t, result.Version, 1)
require.EqualValues(t, len(content), result.Version[0].Size)
} }
func TestPutObjectWithStreamBodyError(t *testing.T) { func TestPutObjectWithStreamBodyError(t *testing.T) {
@ -268,7 +258,7 @@ func TestPutObjectWithStreamBodyError(t *testing.T) {
r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256) r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
r.Header.Set(api.ContentEncoding, api.AwsChunked) r.Header.Set(api.ContentEncoding, api.AwsChunked)
tc.Handler().PutObjectHandler(w, r) tc.Handler().PutObjectHandler(w, r)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrMissingContentLength)) assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
checkNotFound(t, tc, bktName, objName, emptyVersion) checkNotFound(t, tc, bktName, objName, emptyVersion)
} }
@ -284,13 +274,7 @@ func TestPutObjectWithInvalidContentMD5(t *testing.T) {
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content)) w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid"))) r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("invalid")))
tc.Handler().PutObjectHandler(w, r) tc.Handler().PutObjectHandler(w, r)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrBadDigest)) assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidDigest))
content = []byte("content")
w, r = prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
r.Header.Set(api.ContentMD5, base64.StdEncoding.EncodeToString([]byte("")))
tc.Handler().PutObjectHandler(w, r)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidDigest))
checkNotFound(t, tc, bktName, objName, emptyVersion) checkNotFound(t, tc, bktName, objName, emptyVersion)
} }
@ -353,7 +337,7 @@ func TestPutObjectCheckContentSHA256(t *testing.T) {
hc.Handler().PutObjectHandler(w, r) hc.Handler().PutObjectHandler(w, r)
if tc.error { if tc.error {
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch)) assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch))
w, r := prepareTestRequest(hc, bktName, objName, nil) w, r := prepareTestRequest(hc, bktName, objName, nil)
hc.Handler().GetObjectHandler(w, r) hc.Handler().GetObjectHandler(w, r)
@ -377,37 +361,12 @@ func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
hc.Handler().PutObjectHandler(w, req) hc.Handler().PutObjectHandler(w, req)
assertStatus(t, w, http.StatusOK) assertStatus(t, w, http.StatusOK)
w, req = prepareTestRequest(hc, bktName, objName, nil) data := getObjectRange(t, hc, bktName, objName, 0, 66824)
hc.Handler().HeadObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))
data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
for i := range chunk { for i := range chunk {
require.Equal(t, chunk[i], data[i]) require.Equal(t, chunk[i], data[i])
} }
} }
func TestPutObjectWithStreamEmptyBodyAWSExample(t *testing.T) {
hc := prepareHandlerContext(t)
bktName, objName := "dkirillov", "tmp"
createTestBucket(hc, bktName)
w, req := getEmptyChunkedRequest(hc.context, t, bktName, objName)
hc.Handler().PutObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
w, req = prepareTestRequest(hc, bktName, objName, nil)
hc.Handler().HeadObjectHandler(w, req)
assertStatus(t, w, http.StatusOK)
require.Equal(t, "0", w.Header().Get(api.ContentLength))
res := listObjectsV1(hc, bktName, "", "", "", -1)
require.Len(t, res.Contents, 1)
require.Empty(t, res.Contents[0].Size)
}
func TestPutChunkedTestContentEncoding(t *testing.T) { func TestPutChunkedTestContentEncoding(t *testing.T) {
hc := prepareHandlerContext(t) hc := prepareHandlerContext(t)
@ -426,7 +385,7 @@ func TestPutChunkedTestContentEncoding(t *testing.T) {
w, req, _ = getChunkedRequest(hc.context, t, bktName, objName) w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
req.Header.Set(api.ContentEncoding, "gzip") req.Header.Set(api.ContentEncoding, "gzip")
hc.Handler().PutObjectHandler(w, req) hc.Handler().PutObjectHandler(w, req)
assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidEncodingMethod)) assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))
hc.config.bypassContentEncodingInChunks = true hc.config.bypassContentEncodingInChunks = true
w, req, _ = getChunkedRequest(hc.context, t, bktName, objName) w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
@ -438,8 +397,6 @@ func TestPutChunkedTestContentEncoding(t *testing.T) {
require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding)) require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
} }
// getChunkedRequest implements request example from
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) { func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
chunk := make([]byte, 65*1024) chunk := make([]byte, 65*1024)
for i := range chunk { for i := range chunk {
@ -467,9 +424,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil) req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
require.NoError(t, err) require.NoError(t, err)
req.Header.Set("content-encoding", "aws-chunked") req.Header.Set("content-encoding", "aws-chunked")
req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength)) req.Header.Set("content-length", "66824")
req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD") req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength)) req.Header.Set("x-amz-decoded-content-length", "66560")
req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z") signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
@ -500,69 +457,15 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
return w, req, chunk return w, req, chunk
} }
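
Between these two helpers, note the aws-chunked body framing they rely on (per the SigV4 streaming format referenced above): each frame carries its payload length in hex plus a per-chunk signature, and a zero-length frame terminates the body, as in getEmptyChunkedRequest below. A sketch of one frame, not repository code:

func chunkFrame(payload []byte, signature string) []byte {
	var buf bytes.Buffer
	// "<hex payload length>;chunk-signature=<sig>\r\n<payload>\r\n"
	fmt.Fprintf(&buf, "%x;chunk-signature=%s\r\n", len(payload), signature)
	buf.Write(payload)
	buf.WriteString("\r\n")
	return buf.Bytes()
}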
func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request) {
AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"
reqBody := bytes.NewBufferString("0;chunk-signature=311a7142c8f3a07972c3aca65c36484b513a8fee48ab7178c7225388f2ae9894\r\n\r\n")
req, err := http.NewRequest("PUT", "http://localhost:8084/"+bktName+"/"+objName, reqBody)
require.NoError(t, err)
req.Header.Set("Amz-Sdk-Invocation-Id", "8a8cd4be-aef8-8034-f08d-a6144ade41f9")
req.Header.Set("Amz-Sdk-Request", "attempt=1; max=2")
req.Header.Set(api.Authorization, "AWS4-HMAC-SHA256 Credential=48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh/20241003/us-east-1/s3/aws4_request, SignedHeaders=amz-sdk-invocation-id;amz-sdk-request;content-encoding;content-length;content-type;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length, Signature=4b530ab4af2381f214941af591266b209968264a2c94337fa1efc048c7dff352")
req.Header.Set(api.ContentEncoding, "aws-chunked")
req.Header.Set(api.ContentLength, "86")
req.Header.Set(api.ContentType, "text/plain; charset=UTF-8")
req.Header.Set(api.AmzDate, "20241003T100055Z")
req.Header.Set(api.AmzContentSha256, "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
req.Header.Set(api.AmzDecodedContentLength, "0")
signTime, err := time.Parse("20060102T150405Z", "20241003T100055Z")
require.NoError(t, err)
w := httptest.NewRecorder()
reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
ClientTime: signTime,
AuthHeaders: &middleware.AuthHeader{
AccessKeyID: AWSAccessKeyID,
SignatureV4: "4b530ab4af2381f214941af591266b209968264a2c94337fa1efc048c7dff352",
Region: "us-east-1",
},
AccessBox: &accessbox.Box{
Gate: &accessbox.GateData{
SecretKey: AWSSecretAccessKey,
},
},
}))
return w, req
}
func TestCreateBucket(t *testing.T) { func TestCreateBucket(t *testing.T) {
hc := prepareHandlerContext(t) hc := prepareHandlerContext(t)
bktName := "bkt-name" bktName := "bkt-name"
info := createBucket(hc, bktName) info := createBucket(hc, bktName)
createBucketAssertS3Error(hc, bktName, info.Box, apierr.ErrBucketAlreadyOwnedByYou) createBucketAssertS3Error(hc, bktName, info.Box, s3errors.ErrBucketAlreadyOwnedByYou)
box2, _ := createAccessBox(t) box2, _ := createAccessBox(t)
createBucketAssertS3Error(hc, bktName, box2, apierr.ErrBucketAlreadyExists) createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
}
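
// TestCreateBucketWithoutPermissions exercises the bucket-creation cleanup
// path: when saving the ACL chains fails, the freshly created container must
// be removed again (see cleanupBucketCreation in the handler diff above).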
func TestCreateBucketWithoutPermissions(t *testing.T) {
hc := prepareHandlerContext(t)
bktName := "bkt-name"
hc.h.ape.(*apeMock).err = errors.New("no permissions")
box, _ := createAccessBox(t)
createBucketAssertS3Error(hc, bktName, box, apierr.ErrInternalError)
_, err := hc.tp.ContainerID(bktName)
require.Errorf(t, err, "container exists after failed creation, but shouldn't")
} }
func TestCreateNamespacedBucket(t *testing.T) { func TestCreateNamespacedBucket(t *testing.T) {

View file

@ -195,15 +195,6 @@ type PostResponse struct {
ETag string `xml:"Etag"` ETag string `xml:"Etag"`
} }
type PatchObjectResult struct {
Object PatchObject `xml:"Object"`
}
type PatchObject struct {
LastModified string `xml:"LastModified"`
ETag string `xml:"ETag"`
}
// MarshalXML -- StringMap marshals into XML. // MarshalXML -- StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
tokens := []xml.Token{start} tokens := []xml.Token{start}

View file

@ -26,13 +26,13 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
tagSet, err := h.readTagSet(reqInfo.Tagging) tagSet, err := h.readTagSet(reqInfo.Tagging)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not read tag set", reqInfo, err) h.logAndSendError(w, "could not read tag set", reqInfo, err)
return return
} }
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName) bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil { if err != nil {
h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err) h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return return
} }
@ -46,7 +46,7 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
} }
if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil { if err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
h.logAndSendError(ctx, w, "could not put object tagging", reqInfo, err) h.logAndSendError(w, "could not put object tagging", reqInfo, err)
return return
} }
@ -54,18 +54,17 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
} }
func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) { func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context() reqInfo := middleware.GetReqInfo(r.Context())
-	reqInfo := middleware.GetReqInfo(ctx)
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}
-	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket settings", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
 		return
 	}

@@ -77,9 +76,9 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
 		},
 	}

-	versionID, tagSet, err := h.obj.GetObjectTagging(ctx, tagPrm)
+	versionID, tagSet, err := h.obj.GetObjectTagging(r.Context(), tagPrm)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
+		h.logAndSendError(w, "could not get object tagging", reqInfo, err)
 		return
 	}

@@ -87,7 +86,7 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
 		w.Header().Set(api.AmzVersionID, versionID)
 	}
 	if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

@@ -97,7 +96,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

@@ -108,7 +107,7 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
 	}
 	if err = h.obj.DeleteObjectTagging(ctx, p); err != nil {
-		h.logAndSendError(ctx, w, "could not delete object tagging", reqInfo, err)
+		h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
 		return
 	}

@@ -116,61 +115,58 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
 }

 func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	tagSet, err := h.readTagSet(reqInfo.Tagging)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not read tag set", reqInfo, err)
+		h.logAndSendError(w, "could not read tag set", reqInfo, err)
 		return
 	}

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

-	if err = h.obj.PutBucketTagging(ctx, bktInfo, tagSet); err != nil {
-		h.logAndSendError(ctx, w, "could not put object tagging", reqInfo, err)
+	if err = h.obj.PutBucketTagging(r.Context(), bktInfo, tagSet); err != nil {
+		h.logAndSendError(w, "could not put object tagging", reqInfo, err)
 		return
 	}
 }

 func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

-	tagSet, err := h.obj.GetBucketTagging(ctx, bktInfo)
+	tagSet, err := h.obj.GetBucketTagging(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get object tagging", reqInfo, err)
+		h.logAndSendError(w, "could not get object tagging", reqInfo, err)
 		return
 	}

 	if err = middleware.EncodeToResponse(w, encodeTagging(tagSet)); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 		return
 	}
 }

 func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

-	if err = h.obj.DeleteBucketTagging(ctx, bktInfo); err != nil {
-		h.logAndSendError(ctx, w, "could not delete bucket tagging", reqInfo, err)
+	if err = h.obj.DeleteBucketTagging(r.Context(), bktInfo); err != nil {
+		h.logAndSendError(w, "could not delete bucket tagging", reqInfo, err)
 		return
 	}
 	w.WriteHeader(http.StatusNoContent)

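On the left-hand side of this diff every handler hoists `ctx := r.Context()` once and passes it into `logAndSendError`, so the helper can derive a request-scoped logger from the context. A stripped-down, self-contained sketch of that shape (the real reqInfo/logger plumbing lives in api/middleware; `slog` here is a stand-in for the zap logger recovered from the context):

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"net/http"
	"net/http/httptest"
)

// logAndSendError receives ctx explicitly, so the log entry can be enriched
// with request-scoped values (request ID, trace ID) pulled from the context.
func logAndSendError(ctx context.Context, w http.ResponseWriter, msg string, err error) {
	slog.ErrorContext(ctx, msg, "error", err)
	http.Error(w, msg, http.StatusInternalServerError)
}

func getBucketTagging(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context() // hoisted once, reused for layer calls and for logging
	if err := ctx.Err(); err != nil {
		logAndSendError(ctx, w, "could not get object tagging", err)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	w := httptest.NewRecorder()
	getBucketTagging(w, httptest.NewRequest(http.MethodGet, "/bucket?tagging", nil))
	fmt.Println(w.Code) // 200
}
```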
View file

@@ -6,7 +6,7 @@ import (
 	"testing"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"github.com/stretchr/testify/require"
 )

@@ -98,7 +98,7 @@ func TestPutObjectTaggingCheckUniqueness(t *testing.T) {
 			middleware.GetReqInfo(r.Context()).Tagging = tc.body
 			hc.Handler().PutObjectTaggingHandler(w, r)
 			if tc.error {
-				assertS3Error(t, w, apierr.GetAPIError(apierr.ErrInvalidTagKeyUniqueness))
+				assertS3Error(t, w, apiErrors.GetAPIError(apiErrors.ErrInvalidTagKeyUniqueness))
 				return
 			}
 			assertStatus(t, w, http.StatusOK)

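The test above is table-driven: each case carries a request body and an expected-error flag. A minimal, self-contained sketch of the same pattern, with `assertS3Error` and the handler replaced by trivial stand-ins (the uniqueness check itself is a simplification of what the gateway enforces):

```go
package handler

import (
	"errors"
	"testing"
)

var errUniqueness = errors.New("InvalidTagKeyUniqueness")

// putTags rejects duplicate tag keys, mirroring the uniqueness rule under test.
func putTags(tags map[string]string, keys ...string) error {
	seen := map[string]bool{}
	for _, k := range keys {
		if seen[k] {
			return errUniqueness
		}
		seen[k] = true
		tags[k] = ""
	}
	return nil
}

func TestTagKeyUniqueness(t *testing.T) {
	for _, tc := range []struct {
		name    string
		keys    []string
		wantErr bool
	}{
		{name: "unique keys", keys: []string{"a", "b"}},
		{name: "duplicate keys", keys: []string{"a", "a"}, wantErr: true},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := putTags(map[string]string{}, tc.keys...)
			if (err != nil) != tc.wantErr {
				t.Fatalf("got err=%v, want error=%v", err, tc.wantErr)
			}
		})
	}
}
```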
View file

@@ -8,53 +8,61 @@ import (
 )

 func (h *handler) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

+func (h *handler) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+}
+
 func (h *handler) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketAccelerateHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketRequestPaymentHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketLoggingHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketReplicationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) DeleteBucketWebsiteHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

+func (h *handler) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+}
+
 func (h *handler) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

 func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
-	h.logAndSendError(r.Context(), w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
+	h.logAndSendError(w, "not implemented", middleware.GetReqInfo(r.Context()), errors.GetAPIError(errors.ErrNotImplemented))
 }

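Every handler in this file has the identical "log and return 501" body. One possible consolidation (a sketch only, not what the gateway does, since its handlers must match a fixed method set on an interface) is a handler factory:

```go
package main

import (
	"log"
	"net/http"
)

// notImplemented returns a handler that rejects the call with 501 and logs
// the S3 operation name.
func notImplemented(op string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Printf("%s: not implemented", op)
		http.Error(w, "NotImplemented", http.StatusNotImplemented)
	}
}

func main() {
	mux := http.NewServeMux()
	mux.Handle("/website", notImplemented("GetBucketWebsite"))
	mux.Handle("/logging", notImplemented("GetBucketLogging"))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}
```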
View file

@@ -10,11 +10,13 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
+	frosterrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"go.opentelemetry.io/otel/trace"
 	"go.uber.org/zap"
 )

@@ -26,14 +28,15 @@ func (h *handler) reqLogger(ctx context.Context) *zap.Logger {
 	return h.log
 }

-func (h *handler) logAndSendError(ctx context.Context, w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
+func (h *handler) logAndSendError(w http.ResponseWriter, logText string, reqInfo *middleware.ReqInfo, err error, additional ...zap.Field) {
 	err = handleDeleteMarker(w, err)
-	if code, wrErr := middleware.WriteErrorResponse(w, reqInfo, apierr.TransformToS3Error(err)); wrErr != nil {
+	if code, wrErr := middleware.WriteErrorResponse(w, reqInfo, transformToS3Error(err)); wrErr != nil {
 		additional = append(additional, zap.NamedError("write_response_error", wrErr))
 	} else {
 		additional = append(additional, zap.Int("status", code))
 	}
 	fields := []zap.Field{
+		zap.String("request_id", reqInfo.RequestID),
 		zap.String("method", reqInfo.API),
 		zap.String("bucket", reqInfo.BucketName),
 		zap.String("object", reqInfo.ObjectName),

@@ -41,7 +44,10 @@ func (h *handler) logAndSendError(ctx context.Context, w http.ResponseWriter, lo
 		zap.String("user", reqInfo.User),
 		zap.Error(err)}
 	fields = append(fields, additional...)
-	h.reqLogger(ctx).Error(logs.RequestFailed, fields...)
+	if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
+		fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
+	}
+	h.log.Error(logs.RequestFailed, fields...) // consider using h.reqLogger (it requires accept context.Context or http.Request)
 }

 func handleDeleteMarker(w http.ResponseWriter, err error) error {

@@ -51,26 +57,37 @@ func handleDeleteMarker(w http.ResponseWriter, err error) error {
 	}

 	w.Header().Set(api.AmzDeleteMarker, "true")
-	return fmt.Errorf("%w: %s", apierr.GetAPIError(target.ErrorCode), err)
+	return fmt.Errorf("%w: %s", s3errors.GetAPIError(target.ErrorCode), err)
 }

+func transformToS3Error(err error) error {
+	err = frosterrors.UnwrapErr(err) // this wouldn't work with errors.Join
+	if _, ok := err.(s3errors.Error); ok {
+		return err
+	}
+
+	if errors.Is(err, layer.ErrAccessDenied) ||
+		errors.Is(err, layer.ErrNodeAccessDenied) {
+		return s3errors.GetAPIError(s3errors.ErrAccessDenied)
+	}
+
+	if errors.Is(err, layer.ErrGatewayTimeout) {
+		return s3errors.GetAPIError(s3errors.ErrGatewayTimeout)
+	}
+
+	return s3errors.GetAPIError(s3errors.ErrInternalError)
+}
+
 func (h *handler) ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error) {
-	return h.getBucketInfo(ctx, bucket)
+	return h.obj.GetBucketInfo(ctx, bucket)
 }

 func (h *handler) ResolveCID(ctx context.Context, bucket string) (cid.ID, error) {
 	return h.obj.ResolveCID(ctx, bucket)
 }

-func (h *handler) getBucketInfo(ctx context.Context, bucket string) (*data.BucketInfo, error) {
-	if err := checkBucketName(bucket); err != nil {
-		return nil, err
-	}
-
-	return h.obj.GetBucketInfo(ctx, bucket)
-}
-
 func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header ...string) (*data.BucketInfo, error) {
-	bktInfo, err := h.getBucketInfo(r.Context(), bucket)
+	bktInfo, err := h.obj.GetBucketInfo(r.Context(), bucket)
 	if err != nil {
 		return nil, err
 	}

@@ -89,19 +106,6 @@ func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header
 	return bktInfo, checkOwner(bktInfo, expected)
 }

-func (h *handler) getPutPayloadSize(r *http.Request) uint64 {
-	decodeContentSize := r.Header.Get(api.AmzDecodedContentLength)
-	decodedSize, err := strconv.Atoi(decodeContentSize)
-	if err != nil {
-		if r.ContentLength >= 0 {
-			return uint64(r.ContentLength)
-		}
-		return 0
-	}
-
-	return uint64(decodedSize)
-}
-
 func parseRange(s string) (*layer.RangeParams, error) {
 	if s == "" {
 		return nil, nil

@@ -110,26 +114,26 @@ func parseRange(s string) (*layer.RangeParams, error) {
 	prefix := "bytes="
 	if !strings.HasPrefix(s, prefix) {
-		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}
 	s = strings.TrimPrefix(s, prefix)

 	valuesStr := strings.Split(s, "-")
 	if len(valuesStr) != 2 {
-		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}

 	values := make([]uint64, 0, len(valuesStr))
 	for _, v := range valuesStr {
 		num, err := strconv.ParseUint(v, 10, 64)
 		if err != nil {
-			return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
+			return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 		}
 		values = append(values, num)
 	}
 	if values[0] > values[1] {
-		return nil, apierr.GetAPIError(apierr.ErrInvalidRange)
+		return nil, s3errors.GetAPIError(s3errors.ErrInvalidRange)
 	}

 	return &layer.RangeParams{

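The `parseRange` logic above accepts only the exact `bytes=<start>-<end>` form with both bounds present and `start <= end`; suffix and open-ended ranges are rejected. A standalone illustration of the same behavior (types and error values are simplified stand-ins):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
)

type rangeParams struct{ Start, End uint64 }

var errInvalidRange = errors.New("InvalidRange")

func parseRange(s string) (*rangeParams, error) {
	if s == "" {
		return nil, nil // absent Range header is not an error
	}
	if !strings.HasPrefix(s, "bytes=") {
		return nil, errInvalidRange
	}
	parts := strings.Split(strings.TrimPrefix(s, "bytes="), "-")
	if len(parts) != 2 {
		return nil, errInvalidRange
	}
	start, err1 := strconv.ParseUint(parts[0], 10, 64)
	end, err2 := strconv.ParseUint(parts[1], 10, 64)
	if err1 != nil || err2 != nil || start > end {
		return nil, errInvalidRange
	}
	return &rangeParams{Start: start, End: end}, nil
}

func main() {
	fmt.Println(parseRange("bytes=0-99")) // &{0 99} <nil>
	fmt.Println(parseRange("bytes=99-0")) // <nil> InvalidRange
	fmt.Println(parseRange("bytes=100-")) // <nil> InvalidRange (open ranges rejected)
}
```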
View file

@@ -1 +1,64 @@
 package handler
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
+	"github.com/stretchr/testify/require"
+)
+
+func TestTransformS3Errors(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		err      error
+		expected s3errors.ErrorCode
+	}{
+		{
+			name:     "simple std error to internal error",
+			err:      errors.New("some error"),
+			expected: s3errors.ErrInternalError,
+		},
+		{
+			name:     "layer access denied error to s3 access denied error",
+			err:      layer.ErrAccessDenied,
+			expected: s3errors.ErrAccessDenied,
+		},
+		{
+			name:     "wrapped layer access denied error to s3 access denied error",
+			err:      fmt.Errorf("wrap: %w", layer.ErrAccessDenied),
+			expected: s3errors.ErrAccessDenied,
+		},
+		{
+			name:     "layer node access denied error to s3 access denied error",
+			err:      layer.ErrNodeAccessDenied,
+			expected: s3errors.ErrAccessDenied,
+		},
+		{
+			name:     "layer gateway timeout error to s3 gateway timeout error",
+			err:      layer.ErrGatewayTimeout,
+			expected: s3errors.ErrGatewayTimeout,
+		},
+		{
+			name:     "s3 error to s3 error",
+			err:      s3errors.GetAPIError(s3errors.ErrInvalidPart),
+			expected: s3errors.ErrInvalidPart,
+		},
+		{
+			name:     "wrapped s3 error to s3 error",
+			err:      fmt.Errorf("wrap: %w", s3errors.GetAPIError(s3errors.ErrInvalidPart)),
+			expected: s3errors.ErrInvalidPart,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			err := transformToS3Error(tc.err)
+			s3err, ok := err.(s3errors.Error)
+			require.True(t, ok, "error must be s3 error")
+			require.Equalf(t, tc.expected, s3err.ErrCode,
+				"expected: '%s', got: '%s'",
+				s3errors.GetAPIError(tc.expected).Code, s3errors.GetAPIError(s3err.ErrCode).Code)
+		})
+	}
+}

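The "this wouldn't work with errors.Join" comment in `transformToS3Error` is worth unpacking: a loop over `Unwrap() error` (the single-result form, which is what an `UnwrapErr`-style helper typically does) never descends into an error produced by `errors.Join`, because joined errors expose `Unwrap() []error` instead. A small demonstration (`unwrapToRoot` is a hypothetical helper for illustration, not the gateway's):

```go
package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("sentinel")

// unwrapToRoot mimics a single-chain unwrap helper.
func unwrapToRoot(err error) error {
	for {
		inner := errors.Unwrap(err)
		if inner == nil {
			return err
		}
		err = inner
	}
}

func main() {
	wrapped := fmt.Errorf("wrap: %w", errSentinel)
	joined := errors.Join(errors.New("other"), errSentinel)

	fmt.Println(unwrapToRoot(wrapped) == errSentinel) // true
	fmt.Println(unwrapToRoot(joined) == errSentinel)  // false: errors.Unwrap returns nil for joined errors
	fmt.Println(errors.Is(joined, errSentinel))       // true: errors.Is does traverse the join
}
```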
View file

@@ -10,29 +10,28 @@ import (
 )

 func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	configuration := new(VersioningConfiguration)
 	if err := h.cfg.NewXMLDecoder(r.Body).Decode(configuration); err != nil {
-		h.logAndSendError(ctx, w, "couldn't decode versioning configuration", reqInfo, errors.GetAPIError(errors.ErrIllegalVersioningConfigurationException))
+		h.logAndSendError(w, "couldn't decode versioning configuration", reqInfo, errors.GetAPIError(errors.ErrIllegalVersioningConfigurationException))
 		return
 	}

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

-	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't get bucket settings", reqInfo, err)
+		h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
 		return
 	}

 	if configuration.Status != data.VersioningEnabled && configuration.Status != data.VersioningSuspended {
-		h.logAndSendError(ctx, w, "invalid versioning configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
+		h.logAndSendError(w, "invalid versioning configuration", reqInfo, errors.GetAPIError(errors.ErrMalformedXML))
 		return
 	}

@@ -46,34 +45,33 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
 	}

 	if p.Settings.VersioningSuspended() && bktInfo.ObjectLockEnabled {
-		h.logAndSendError(ctx, w, "couldn't suspend bucket versioning", reqInfo, errors.GetAPIError(errors.ErrObjectLockConfigurationVersioningCannotBeChanged))
+		h.logAndSendError(w, "couldn't suspend bucket versioning", reqInfo, errors.GetAPIError(errors.ErrObjectLockConfigurationVersioningCannotBeChanged))
 		return
 	}

-	if err = h.obj.PutBucketSettings(ctx, p); err != nil {
-		h.logAndSendError(ctx, w, "couldn't put update versioning settings", reqInfo, err)
+	if err = h.obj.PutBucketSettings(r.Context(), p); err != nil {
+		h.logAndSendError(w, "couldn't put update versioning settings", reqInfo, err)
 	}
 }

 // GetBucketVersioningHandler implements bucket versioning getter handler.
 func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := r.Context()
-	reqInfo := middleware.GetReqInfo(ctx)
+	reqInfo := middleware.GetReqInfo(r.Context())

 	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
 	if err != nil {
-		h.logAndSendError(ctx, w, "could not get bucket info", reqInfo, err)
+		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
 		return
 	}

-	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
+	settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
 	if err != nil {
-		h.logAndSendError(ctx, w, "couldn't get version settings", reqInfo, err)
+		h.logAndSendError(w, "couldn't get version settings", reqInfo, err)
 		return
 	}

 	if err = middleware.EncodeToResponse(w, formVersioningConfiguration(settings)); err != nil {
-		h.logAndSendError(ctx, w, "something went wrong", reqInfo, err)
+		h.logAndSendError(w, "something went wrong", reqInfo, err)
 	}
 }

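PutBucketVersioningHandler decodes the S3 VersioningConfiguration body and accepts only the Status values Enabled and Suspended. A minimal sketch of that decode-and-validate step (the struct tags follow the public S3 XML shape; the gateway's own type may differ in detail):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type VersioningConfiguration struct {
	XMLName xml.Name `xml:"VersioningConfiguration"`
	Status  string   `xml:"Status"`
}

func main() {
	body := `<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>`

	var cfg VersioningConfiguration
	if err := xml.NewDecoder(strings.NewReader(body)).Decode(&cfg); err != nil {
		fmt.Println("IllegalVersioningConfigurationException:", err)
		return
	}
	// Anything other than the two documented states is MalformedXML.
	if cfg.Status != "Enabled" && cfg.Status != "Suspended" {
		fmt.Println("MalformedXML")
		return
	}
	fmt.Println("status:", cfg.Status)
}
```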
View file

@@ -5,7 +5,6 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
 	"go.uber.org/zap"

@@ -20,7 +19,6 @@ type Cache struct {
 		bucketCache *cache.BucketCache
 		systemCache *cache.SystemCache
 		accessCache *cache.AccessControlCache
-		networkInfoCache *cache.NetworkInfoCache
 	}

 	// CachesConfig contains params for caches.

@@ -33,7 +31,6 @@ type CachesConfig struct {
 	Buckets *cache.Config
 	System *cache.Config
 	AccessControl *cache.Config
-	NetworkInfo *cache.NetworkInfoCacheConfig
 }

 // DefaultCachesConfigs returns filled configs.

@@ -47,7 +44,6 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
 		Buckets: cache.DefaultBucketConfig(logger),
 		System: cache.DefaultSystemConfig(logger),
 		AccessControl: cache.DefaultAccessControlConfig(logger),
-		NetworkInfo: cache.DefaultNetworkInfoConfig(logger),
 	}
 }

@@ -61,7 +57,6 @@ func NewCache(cfg *CachesConfig) *Cache {
 		bucketCache: cache.NewBucketCache(cfg.Buckets),
 		systemCache: cache.NewSystemCache(cfg.System),
 		accessCache: cache.NewAccessControlCache(cfg.AccessControl),
-		networkInfoCache: cache.NewNetworkInfoCache(cfg.NetworkInfo),
 	}
 }

@@ -262,39 +257,3 @@ func (c *Cache) PutCORS(owner user.ID, bkt *data.BucketInfo, cors *data.CORSConf
 func (c *Cache) DeleteCORS(bktInfo *data.BucketInfo) {
 	c.systemCache.Delete(bktInfo.CORSObjectName())
 }
-
-func (c *Cache) GetLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo) *data.LifecycleConfiguration {
-	key := bkt.LifecycleConfigurationObjectName()
-
-	if !c.accessCache.Get(owner, key) {
-		return nil
-	}
-
-	return c.systemCache.GetLifecycleConfiguration(key)
-}
-
-func (c *Cache) PutLifecycleConfiguration(owner user.ID, bkt *data.BucketInfo, cfg *data.LifecycleConfiguration) {
-	key := bkt.LifecycleConfigurationObjectName()
-
-	if err := c.systemCache.PutLifecycleConfiguration(key, cfg); err != nil {
-		c.logger.Warn(logs.CouldntCacheLifecycleConfiguration, zap.String("bucket", bkt.Name), zap.Error(err))
-	}
-
-	if err := c.accessCache.Put(owner, key); err != nil {
-		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
-	}
-}
-
-func (c *Cache) DeleteLifecycleConfiguration(bktInfo *data.BucketInfo) {
-	c.systemCache.Delete(bktInfo.LifecycleConfigurationObjectName())
-}
-
-func (c *Cache) GetNetworkInfo() *netmap.NetworkInfo {
-	return c.networkInfoCache.Get()
-}
-
-func (c *Cache) PutNetworkInfo(info netmap.NetworkInfo) {
-	if err := c.networkInfoCache.Put(info); err != nil {
-		c.logger.Warn(logs.CouldntCacheNetworkInfo, zap.Error(err))
-	}
-}

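The removed lifecycle helpers follow the same two-level pattern as the surviving CORS ones: an access-control cache keyed by (owner, key) gates reads from the shared system cache, so one user never sees an entry cached on behalf of another. A simplified, self-contained sketch of that shape:

```go
package main

import "fmt"

type cache struct {
	access map[string]bool   // "owner|key" -> this owner may read the entry
	system map[string]string // key -> cached value, shared across owners
}

func (c *cache) get(owner, key string) (string, bool) {
	if !c.access[owner+"|"+key] {
		return "", false // treat as a miss rather than leak a cached entry
	}
	v, ok := c.system[key]
	return v, ok
}

func (c *cache) put(owner, key, val string) {
	c.system[key] = val
	c.access[owner+"|"+key] = true
}

func main() {
	c := &cache{access: map[string]bool{}, system: map[string]string{}}
	c.put("alice", "bkt/lifecycle.xml", "<LifecycleConfiguration/>")
	fmt.Println(c.get("alice", "bkt/lifecycle.xml")) // hit
	fmt.Println(c.get("bob", "bkt/lifecycle.xml"))   // miss: no access entry for bob
}
```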
View file

@@ -6,8 +6,7 @@ import (
 	"fmt"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 )

 func (n *Layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *data.ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, data.LockInfo, error) {

@@ -30,8 +29,8 @@ func (n *Layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *data.Ob
 	tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
 	if err != nil {
-		if errors.Is(err, tree.ErrNodeNotFound) {
-			return nil, data.LockInfo{}, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error())
+		if errors.Is(err, ErrNodeNotFound) {
+			return nil, data.LockInfo{}, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
 		}
 		return nil, data.LockInfo{}, err
 	}

View file

@@ -7,8 +7,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
+	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"

@@ -21,7 +20,7 @@ const (
 	AttributeLockEnabled = "LockEnabled"
 )

-func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*data.BucketInfo, error) {
+func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.BucketInfo, error) {
 	var (
 		err error
 		res *container.Container

@@ -38,7 +37,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d
 	res, err = n.frostFS.Container(ctx, prm)
 	if err != nil {
 		if client.IsErrContainerNotFound(err) {
-			return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchBucket), err.Error())
+			return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchBucket), err.Error())
 		}
 		return nil, fmt.Errorf("get frostfs container: %w", err)
 	}

@@ -65,7 +64,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d
 		}
 	}

-	zone := n.features.FormContainerZone(reqInfo.Namespace)
+	zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
 	if zone != info.Zone {
 		return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, prm.ContainerID)
 	}

@@ -78,7 +77,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm frostfs.PrmContainer) (*d
 func (n *Layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 	stoken := n.SessionTokenForRead(ctx)

-	prm := frostfs.PrmUserContainers{
+	prm := PrmUserContainers{
 		UserID: n.BearerOwner(ctx),
 		SessionToken: stoken,
 	}

@@ -91,7 +90,7 @@ func (n *Layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
 	list := make([]*data.BucketInfo, 0, len(res))
 	for i := range res {
-		getPrm := frostfs.PrmContainer{
+		getPrm := PrmContainer{
 			ContainerID: res[i],
 			SessionToken: stoken,
 		}

@@ -112,7 +111,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
 	}

-	zone := n.features.FormContainerZone(p.Namespace)
+	zone, _ := n.features.FormContainerZone(p.Namespace)

 	bktInfo := &data.BucketInfo{
 		Name: p.Name,

@@ -133,7 +132,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		})
 	}

-	res, err := n.frostFS.CreateContainer(ctx, frostfs.PrmContainerCreate{
+	res, err := n.frostFS.CreateContainer(ctx, PrmContainerCreate{
 		Creator: bktInfo.Owner,
 		Policy: p.Policy,
 		Name: p.Name,

@@ -141,6 +140,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
 		SessionToken: p.SessionContainerCreation,
 		CreationTime: bktInfo.Created,
 		AdditionalAttributes: attributes,
+		BasicACL: 0, // means APE
 	})
 	if err != nil {
 		return nil, fmt.Errorf("create container: %w", err)

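`containerInfo` refuses to serve a container whose zone attribute does not match the zone derived from the request namespace. A sketch of that check in isolation (`formContainerZone` and the default-zone constant are simplified stand-ins for the gateway's FeatureSettings implementation):

```go
package main

import "fmt"

const defaultZone = "container" // placeholder for the SDK's default-zone attribute value

func formContainerZone(ns string) string {
	if ns == "" {
		return defaultZone
	}
	return ns + ".ns"
}

// checkZone mirrors the mismatch test in containerInfo.
func checkZone(ns, containerZone string) error {
	if zone := formContainerZone(ns); zone != containerZone {
		return fmt.Errorf("ns '%s' and zone '%s' are mismatched", zone, containerZone)
	}
	return nil
}

func main() {
	fmt.Println(checkZone("tenant1", "tenant1.ns")) // <nil>
	fmt.Println(checkZone("tenant1", "container"))  // mismatch error
}
```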
View file

@@ -3,14 +3,12 @@ package layer

 import (
 	"bytes"
 	"context"
-	"errors"
+	errorsStd "errors"
 	"fmt"
 	"io"

 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
-	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"

@@ -33,14 +31,14 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 	}

 	if cors.CORSRules == nil {
-		return apierr.GetAPIError(apierr.ErrMalformedXML)
+		return errors.GetAPIError(errors.ErrMalformedXML)
 	}

 	if err := checkCORS(cors); err != nil {
 		return err
 	}

-	prm := frostfs.PrmObjectCreate{
+	prm := PrmObjectCreate{
 		Payload: &buf,
 		Filepath: p.BktInfo.CORSObjectName(),
 		CreationTime: TimeNow(ctx),

@@ -57,13 +55,13 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
 	prm.Container = corsBkt.CID

-	createdObj, err := n.objectPutAndHash(ctx, prm, corsBkt)
+	_, objID, _, _, err := n.objectPutAndHash(ctx, prm, corsBkt)
 	if err != nil {
 		return fmt.Errorf("put cors object: %w", err)
 	}

-	objsToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, newAddress(corsBkt.CID, createdObj.ID))
-	objToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
+	objsToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, newAddress(corsBkt.CID, objID))
+	objToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
 	if err != nil && !objToDeleteNotFound {
 		return err
 	}

@@ -81,7 +79,7 @@ func (n *Layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {

 // deleteCORSObject removes object and logs in case of error.
 func (n *Layer) deleteCORSObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
-	var prmAuth frostfs.PrmAuth
+	var prmAuth PrmAuth
 	corsBkt := bktInfo
 	if !addr.Container().Equals(bktInfo.CID) && !addr.Container().Equals(cid.ID{}) {
 		corsBkt = &data.BucketInfo{CID: addr.Container()}

@@ -106,7 +104,7 @@ func (n *Layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*d
 func (n *Layer) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error {
 	objs, err := n.treeService.DeleteBucketCORS(ctx, bktInfo)
-	objNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
+	objNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
 	if err != nil && !objNotFound {
 		return err
 	}

@@ -126,12 +124,12 @@ func checkCORS(cors *data.CORSConfiguration) error {
 	for _, r := range cors.CORSRules {
 		for _, m := range r.AllowedMethods {
 			if _, ok := supportedMethods[m]; !ok {
-				return apierr.GetAPIErrorWithError(apierr.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
+				return errors.GetAPIErrorWithError(errors.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
 			}
 		}
 		for _, h := range r.ExposeHeaders {
 			if h == wildcard {
-				return apierr.GetAPIError(apierr.ErrCORSWildcardExposeHeaders)
+				return errors.GetAPIError(errors.ErrCORSWildcardExposeHeaders)
 			}
 		}
 	}

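The `checkCORS` validation above reduces to two rules: every allowed method must be in a fixed set of HTTP verbs, and `*` may not appear in ExposeHeaders. A self-contained version (the method set here is an assumption matching the verbs visible in the diff, not a definitive list):

```go
package main

import "fmt"

type corsRule struct {
	AllowedMethods []string
	ExposeHeaders  []string
}

var supportedMethods = map[string]struct{}{
	"GET": {}, "HEAD": {}, "POST": {}, "PUT": {}, "DELETE": {},
}

func checkCORS(rules []corsRule) error {
	for _, r := range rules {
		for _, m := range r.AllowedMethods {
			if _, ok := supportedMethods[m]; !ok {
				return fmt.Errorf("CORSUnsupportedMethod: unsupported method is %s", m)
			}
		}
		for _, h := range r.ExposeHeaders {
			if h == "*" {
				return fmt.Errorf("CORSWildcardExposeHeaders")
			}
		}
	}
	return nil
}

func main() {
	ok := []corsRule{{AllowedMethods: []string{"GET"}, ExposeHeaders: []string{"ETag"}}}
	bad := []corsRule{{AllowedMethods: []string{"PATCH"}}}
	fmt.Println(checkCORS(ok), checkCORS(bad))
}
```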
View file

@@ -1,15 +1,15 @@
-package detector
+package layer

 import (
 	"io"
+	"net/http"
 )

 type (
-	Detector struct {
+	detector struct {
 		io.Reader
 		err error
 		data []byte
-		detectFunc func([]byte) string
 	}
 	errReader struct {
 		data []byte

@@ -36,24 +36,23 @@ func (r *errReader) Read(b []byte) (int, error) {
 	return n, nil
 }

-func NewDetector(reader io.Reader, detectFunc func([]byte) string) *Detector {
-	return &Detector{
+func newDetector(reader io.Reader) *detector {
+	return &detector{
 		data: make([]byte, contentTypeDetectSize),
 		Reader: reader,
-		detectFunc: detectFunc,
 	}
 }

-func (d *Detector) Detect() (string, error) {
+func (d *detector) Detect() (string, error) {
 	n, err := d.Reader.Read(d.data)
 	if err != nil && err != io.EOF {
 		d.err = err
 		return "", err
 	}

 	d.data = d.data[:n]
-	return d.detectFunc(d.data), nil
+	return http.DetectContentType(d.data), nil
 }

-func (d *Detector) RestoredReader() io.Reader {
+func (d *detector) MultiReader() io.Reader {
 	return io.MultiReader(newReader(d.data, d.err), d.Reader)
 }

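How the detector is meant to be used: sniff the first bytes of a stream to pick a Content-Type without consuming them, then hand the reassembled stream on. A runnable sketch under the assumption that `contentTypeDetectSize` is 512 (the maximum `http.DetectContentType` considers); the stashed-read-error replay via `errReader` is omitted for brevity:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

const contentTypeDetectSize = 512

type detector struct {
	r    io.Reader
	data []byte
}

func newDetector(r io.Reader) *detector {
	return &detector{r: r, data: make([]byte, contentTypeDetectSize)}
}

// Detect reads up to 512 bytes and sniffs a MIME type from them.
func (d *detector) Detect() (string, error) {
	n, err := d.r.Read(d.data)
	if err != nil && err != io.EOF {
		return "", err
	}
	d.data = d.data[:n]
	return http.DetectContentType(d.data), nil
}

// MultiReader re-prepends the sniffed bytes so the consumer still sees the
// payload from its first byte.
func (d *detector) MultiReader() io.Reader {
	return io.MultiReader(bytes.NewReader(d.data), d.r)
}

func main() {
	d := newDetector(strings.NewReader("<html><body>hi</body></html>"))
	ct, _ := d.Detect()
	rest, _ := io.ReadAll(d.MultiReader())
	fmt.Println(ct)           // text/html; charset=utf-8
	fmt.Println(string(rest)) // the full original payload
}
```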
View file

@@ -1,4 +1,4 @@
-package frostfs
+package layer

 import (
 	"context"

@@ -9,13 +9,13 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/acl"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
 )

 // PrmContainerCreate groups parameters of FrostFS.CreateContainer operation.

@@ -38,19 +38,13 @@ type PrmContainerCreate struct {
 	// Token of the container's creation session. Nil means session absence.
 	SessionToken *session.Container

+	// Basic ACL of the container.
+	BasicACL acl.Basic
+
 	// Attributes for optional parameters.
 	AdditionalAttributes [][2]string
 }

-// PrmAddContainerPolicyChain groups parameter of FrostFS.AddContainerPolicyChain operation.
-type PrmAddContainerPolicyChain struct {
-	// ContainerID is a container identifier.
-	ContainerID cid.ID
-
-	// Chain is Access Policy Engine chain that contains rules which provide access to specific actions in container.
-	Chain chain.Chain
-}
-
 // PrmContainer groups parameters of FrostFS.Container operation.
 type PrmContainer struct {
 	// Container identifier.

@@ -172,12 +166,6 @@ type PrmObjectCreate struct {
 	BufferMaxSize uint64
 }

-// CreateObjectResult is a result parameter of FrostFS.CreateObject operation.
-type CreateObjectResult struct {
-	ObjectID oid.ID
-	CreationEpoch uint64
-}
-
 // PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
 type PrmObjectDelete struct {
 	// Authentication parameters.

@@ -206,27 +194,6 @@ type PrmObjectSearch struct {
 	FilePrefix string
 }

-// PrmObjectPatch groups parameters of FrostFS.PatchObject operation.
-type PrmObjectPatch struct {
-	// Authentication parameters.
-	PrmAuth
-
-	// Container of the patched object.
-	Container cid.ID
-
-	// Identifier of the patched object.
-	Object oid.ID
-
-	// Object patch payload encapsulated in io.Reader primitive.
-	Payload io.Reader
-
-	// Object range to patch.
-	Offset, Length uint64
-
-	// Size of original object payload.
-	ObjectSize uint64
-}
-
 var (
 	// ErrAccessDenied is returned from FrostFS in case of access violation.
 	ErrAccessDenied = errors.New("access denied")

@@ -245,10 +212,6 @@ type FrostFS interface {
 	// prevented the container from being created.
 	CreateContainer(context.Context, PrmContainerCreate) (*ContainerCreateResult, error)

-	// AddContainerPolicyChain create new policy chain for container.
-	// Can be invoked only by container owner.
-	AddContainerPolicyChain(context.Context, PrmAddContainerPolicyChain) error
-
 	// Container reads a container from FrostFS by ID.
 	//
 	// It returns exactly one non-nil value. It returns any error encountered which

@@ -298,15 +261,15 @@ type FrostFS interface {
 	// CreateObject creates and saves a parameterized object in the FrostFS container.
 	// It sets 'Timestamp' attribute to the current time.
-	// It returns the ID and creation epoch of the saved object.
+	// It returns the ID of the saved object.
 	//
 	// Creation time should be written into the object (UTC).
 	//
 	// It returns ErrAccessDenied on write access violation.
 	//
-	// It returns exactly one non-nil value. It returns any error encountered which
-	// prevented the object from being created.
-	CreateObject(context.Context, PrmObjectCreate) (*CreateObjectResult, error)
+	// It returns exactly one non-zero value. It returns any error encountered which
+	// prevented the container from being created.
+	CreateObject(context.Context, PrmObjectCreate) (oid.ID, error)

 	// DeleteObject marks the object to be removed from the FrostFS container by identifier.
 	// Successful return does not guarantee actual removal.

@@ -325,15 +288,6 @@ type FrostFS interface {
 	// prevented the objects from being selected.
 	SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)

-	// PatchObject performs object patch in the FrostFS container.
-	// It returns the ID of the patched object.
-	//
-	// It returns ErrAccessDenied on selection access violation.
-	//
-	// It returns exactly one non-nil value. It returns any error encountered which
-	// prevented the objects from being patched.
-	PatchObject(context.Context, PrmObjectPatch) (oid.ID, error)
-
 	// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
 	// Note:
 	// * future time must be after the now

@@ -341,7 +295,4 @@ type FrostFS interface {
 	//
 	// It returns any error encountered which prevented computing epochs.
 	TimeToEpoch(ctx context.Context, now time.Time, future time.Time) (uint64, uint64, error)
-
-	// NetworkInfo returns parameters of FrostFS network.
-	NetworkInfo(context.Context) (netmap.NetworkInfo, error)
 }

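The removed PrmObjectPatch/PatchObject pair splices a new byte range into an object's payload. The core of that operation, extracted from the mock below and stripped of FrostFS types:

```go
package main

import "fmt"

// splice replaces payload[offset:offset+length] with patch and returns the
// new payload; this mirrors what the mock's PatchObject does with the
// object's bytes.
func splice(payload, patch []byte, offset, length uint64) []byte {
	var out []byte
	out = append(out, payload[:offset]...)
	out = append(out, patch...)
	if offset+length < uint64(len(payload)) {
		out = append(out, payload[offset+length:]...)
	}
	return out
}

func main() {
	fmt.Println(string(splice([]byte("hello world"), []byte("WORLD"), 6, 5))) // hello WORLD
}
```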
View file

@@ -5,7 +5,6 @@ import (
 	"context"
 	"crypto/rand"
 	"crypto/sha256"
-	"errors"
 	"fmt"
 	"io"
 	"strings"

@@ -13,20 +12,16 @@ import (
 	v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
 	objectv2 "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
-	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/checksum"
 	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
 	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
-	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
-	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
 	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
-	"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

@@ -55,22 +50,21 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
 	k.md5Enabled = md5Enabled
 }

-func (k *FeatureSettingsMock) FormContainerZone(ns string) string {
+func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
 	if ns == "" {
-		return v2container.SysAttributeZoneDefault
+		return v2container.SysAttributeZoneDefault, true
 	}

-	return ns + ".ns"
+	return ns + ".ns", false
 }

-var _ frostfs.FrostFS = (*TestFrostFS)(nil)
-
 type TestFrostFS struct {
+	FrostFS
+
 	objects map[string]*object.Object
 	objectErrors map[string]error
 	objectPutErrors map[string]error
 	containers map[string]*container.Container
-	chains map[string][]chain.Chain
 	currentEpoch uint64
 	key *keys.PrivateKey
 }

@@ -81,7 +75,6 @@ func NewTestFrostFS(key *keys.PrivateKey) *TestFrostFS {
 		objectErrors: make(map[string]error),
 		objectPutErrors: make(map[string]error),
 		containers: make(map[string]*container.Container),
-		chains: make(map[string][]chain.Chain),
 		key: key,
 	}
 }

@@ -144,11 +137,12 @@ func (t *TestFrostFS) SetContainer(cnrID cid.ID, cnr *container.Container) {
 	t.containers[cnrID.EncodeToString()] = cnr
 }

-func (t *TestFrostFS) CreateContainer(_ context.Context, prm frostfs.PrmContainerCreate) (*frostfs.ContainerCreateResult, error) {
+func (t *TestFrostFS) CreateContainer(_ context.Context, prm PrmContainerCreate) (*ContainerCreateResult, error) {
 	var cnr container.Container
 	cnr.Init()
 	cnr.SetOwner(prm.Creator)
 	cnr.SetPlacementPolicy(prm.Policy)
+	cnr.SetBasicACL(prm.BasicACL)

 	creationTime := prm.CreationTime
 	if creationTime.IsZero() {

@@ -177,9 +171,8 @@ func (t *TestFrostFS) CreateContainer(_ context.Context, prm frostfs.PrmContaine
 	var id cid.ID
 	id.SetSHA256(sha256.Sum256(b))
 	t.containers[id.EncodeToString()] = &cnr
-	t.chains[id.EncodeToString()] = []chain.Chain{}

-	return &frostfs.ContainerCreateResult{ContainerID: id}, nil
+	return &ContainerCreateResult{ContainerID: id}, nil
 }

 func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *session.Container) error {

@@ -188,7 +181,7 @@ func (t *TestFrostFS) DeleteContainer(_ context.Context, cnrID cid.ID, _ *sessio
 	return nil
 }

-func (t *TestFrostFS) Container(_ context.Context, prm frostfs.PrmContainer) (*container.Container, error) {
+func (t *TestFrostFS) Container(_ context.Context, prm PrmContainer) (*container.Container, error) {
 	for k, v := range t.containers {
 		if k == prm.ContainerID.EncodeToString() {
 			return v, nil

@@ -198,7 +191,7 @@ func (t *TestFrostFS) Container(_ context.Context, prm frostfs.PrmContainer) (*c
 	return nil, fmt.Errorf("container not found %s", prm.ContainerID)
 }

-func (t *TestFrostFS) UserContainers(context.Context, frostfs.PrmUserContainers) ([]cid.ID, error) {
+func (t *TestFrostFS) UserContainers(context.Context, PrmUserContainers) ([]cid.ID, error) {
 	var res []cid.ID
 	for k := range t.containers {
 		var idCnr cid.ID

@@ -225,7 +218,7 @@ func (t *TestFrostFS) retrieveObject(ctx context.Context, cnrID cid.ID, objID oi
 	if obj, ok := t.objects[sAddr]; ok {
 		owner := getBearerOwner(ctx)
 		if !t.checkAccess(cnrID, owner) {
-			return nil, frostfs.ErrAccessDenied
+			return nil, ErrAccessDenied
 		}

 		return obj, nil

@@ -234,23 +227,23 @@ func (t *TestFrostFS) retrieveObject(ctx context.Context, cnrID cid.ID, objID oi
 	return nil, fmt.Errorf("%w: %s", &apistatus.ObjectNotFound{}, addr)
 }

-func (t *TestFrostFS) HeadObject(ctx context.Context, prm frostfs.PrmObjectHead) (*object.Object, error) {
+func (t *TestFrostFS) HeadObject(ctx context.Context, prm PrmObjectHead) (*object.Object, error) {
 	return t.retrieveObject(ctx, prm.Container, prm.Object)
 }

-func (t *TestFrostFS) GetObject(ctx context.Context, prm frostfs.PrmObjectGet) (*frostfs.Object, error) {
+func (t *TestFrostFS) GetObject(ctx context.Context, prm PrmObjectGet) (*Object, error) {
 	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
 	if err != nil {
 		return nil, err
 	}

-	return &frostfs.Object{
+	return &Object{
 		Header: *obj,
 		Payload: io.NopCloser(bytes.NewReader(obj.Payload())),
 	}, nil
 }

-func (t *TestFrostFS) RangeObject(ctx context.Context, prm frostfs.PrmObjectRange) (io.ReadCloser, error) {
+func (t *TestFrostFS) RangeObject(ctx context.Context, prm PrmObjectRange) (io.ReadCloser, error) {
 	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
 	if err != nil {
 		return nil, err

@@ -262,10 +255,10 @@ func (t *TestFrostFS) RangeObject(ctx context.Context, prm frostfs.PrmObjectRang
 	return io.NopCloser(bytes.NewReader(payload)), nil
 }

-func (t *TestFrostFS) CreateObject(_ context.Context, prm frostfs.PrmObjectCreate) (*frostfs.CreateObjectResult, error) {
+func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.ID, error) {
 	b := make([]byte, 32)
 	if _, err := io.ReadFull(rand.Reader, b); err != nil {
-		return nil, err
+		return oid.ID{}, err
 	}
 	var id oid.ID
 	id.SetSHA256(sha256.Sum256(b))

@@ -273,7 +266,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm frostfs.PrmObjectCreat
 	attrs := make([]object.Attribute, 0)

 	if err := t.objectPutErrors[prm.Filepath]; err != nil {
-		return nil, err
+		return oid.ID{}, err
 	}

 	if prm.Filepath != "" {

@@ -318,7 +311,7 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm frostfs.PrmObjectCreat
 	if prm.Payload != nil {
 		all, err := io.ReadAll(prm.Payload)
 		if err != nil {
-			return nil, err
+			return oid.ID{}, err
 		}
 		obj.SetPayload(all)
 		obj.SetPayloadSize(uint64(len(all)))

@@ -332,13 +325,10 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm frostfs.PrmObjectCreat
 	addr := newAddress(cnrID, objID)
 	t.objects[addr.EncodeToString()] = obj
-	return &frostfs.CreateObjectResult{
-		ObjectID: objID,
-		CreationEpoch: t.currentEpoch - 1,
-	}, nil
+	return objID, nil
 }

-func (t *TestFrostFS) DeleteObject(ctx context.Context, prm frostfs.PrmObjectDelete) error {
+func (t *TestFrostFS) DeleteObject(ctx context.Context, prm PrmObjectDelete) error {
 	var addr oid.Address
 	addr.SetContainer(prm.Container)
 	addr.SetObject(prm.Object)

@@ -350,7 +340,7 @@ func (t *TestFrostFS) DeleteObject(ctx context.Context, prm frostfs.PrmObjectDel
 	if _, ok := t.objects[addr.EncodeToString()]; ok {
 		owner := getBearerOwner(ctx)
 		if !t.checkAccess(prm.Container, owner) {
-			return frostfs.ErrAccessDenied
+			return ErrAccessDenied
 		}

 		delete(t.objects, addr.EncodeToString())

@@ -377,7 +367,7 @@ func (t *TestFrostFS) AllObjects(cnrID cid.ID) []oid.ID {
 	return result
 }

-func (t *TestFrostFS) SearchObjects(_ context.Context, prm frostfs.PrmObjectSearch) ([]oid.ID, error) {
+func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) ([]oid.ID, error) {
 	filters := object.NewSearchFilters()
 	filters.AddRootFilter()

@@ -414,62 +404,6 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm frostfs.PrmObjectSear
 	return res, nil
 }

-func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
-	ni := netmap.NetworkInfo{}
-	ni.SetCurrentEpoch(t.currentEpoch)
-	ni.SetEpochDuration(60)
-	ni.SetMsPerBlock(1000)
-
-	return ni, nil
-}
-
-func (t *TestFrostFS) PatchObject(ctx context.Context, prm frostfs.PrmObjectPatch) (oid.ID, error) {
-	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
-	if err != nil {
-		return oid.ID{}, err
-	}
-
-	newObj := *obj
-
-	patchBytes, err := io.ReadAll(prm.Payload)
-	if err != nil {
-		return oid.ID{}, err
-	}
-
-	var newPayload []byte
-	if prm.Offset > 0 {
-		newPayload = append(newPayload, obj.Payload()[:prm.Offset]...)
-	}
-	newPayload = append(newPayload, patchBytes...)
-	if prm.Offset+prm.Length < obj.PayloadSize() {
-		newPayload = append(newPayload, obj.Payload()[prm.Offset+prm.Length:]...)
-	}
-	newObj.SetPayload(newPayload)
-	newObj.SetPayloadSize(uint64(len(newPayload)))
-
-	var hash checksum.Checksum
-	checksum.Calculate(&hash, checksum.SHA256, newPayload)
-	newObj.SetPayloadChecksum(hash)
-
-	newID := oidtest.ID()
-	newObj.SetID(newID)
-
-	t.objects[newAddress(prm.Container, newID).EncodeToString()] = &newObj
-
-	return newID, nil
-}
-
-func (t *TestFrostFS) AddContainerPolicyChain(_ context.Context, prm frostfs.PrmAddContainerPolicyChain) error {
-	list, ok := t.chains[prm.ContainerID.EncodeToString()]
-	if !ok {
-		return errors.New("container not found")
-	}
-
-	t.chains[prm.ContainerID.EncodeToString()] = append(list, prm.Chain)
-
-	return nil
-}
-
 func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID) bool {
 	cnr, ok := t.containers[cnrID.EncodeToString()]
 	if !ok {

View file

@ -6,7 +6,7 @@ import (
"crypto/rand" "crypto/rand"
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"errors" stderrors "errors"
"fmt" "fmt"
"io" "io"
"net/url" "net/url"
@ -17,10 +17,9 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
@ -36,28 +35,27 @@ import (
type ( type (
BucketResolver interface { BucketResolver interface {
Resolve(ctx context.Context, zone, name string) (cid.ID, error) Resolve(ctx context.Context, name string) (cid.ID, error)
} }
FeatureSettings interface { FeatureSettings interface {
ClientCut() bool ClientCut() bool
BufferMaxSizeForPut() uint64 BufferMaxSizeForPut() uint64
MD5Enabled() bool MD5Enabled() bool
FormContainerZone(ns string) string FormContainerZone(ns string) (zone string, isDefault bool)
} }
Layer struct { Layer struct {
frostFS frostfs.FrostFS frostFS FrostFS
gateOwner user.ID gateOwner user.ID
log *zap.Logger log *zap.Logger
anonKey AnonymousKey anonKey AnonymousKey
resolver BucketResolver resolver BucketResolver
cache *Cache cache *Cache
treeService tree.Service treeService TreeService
features FeatureSettings features FeatureSettings
gateKey *keys.PrivateKey gateKey *keys.PrivateKey
corsCnrInfo *data.BucketInfo corsCnrInfo *data.BucketInfo
lifecycleCnrInfo *data.BucketInfo
} }
Config struct { Config struct {
@ -66,11 +64,10 @@ type (
Cache *Cache Cache *Cache
AnonKey AnonymousKey AnonKey AnonymousKey
Resolver BucketResolver Resolver BucketResolver
TreeService tree.Service TreeService TreeService
Features FeatureSettings Features FeatureSettings
GateKey *keys.PrivateKey GateKey *keys.PrivateKey
CORSCnrInfo *data.BucketInfo CORSCnrInfo *data.BucketInfo
LifecycleCnrInfo *data.BucketInfo
} }
// AnonymousKey contains data for anonymous requests. // AnonymousKey contains data for anonymous requests.
@ -104,14 +101,14 @@ type (
PutObjectParams struct { PutObjectParams struct {
BktInfo *data.BucketInfo BktInfo *data.BucketInfo
Object string Object string
Size *uint64 Size uint64
Reader io.Reader Reader io.Reader
Header map[string]string Header map[string]string
Lock *data.ObjectLock Lock *data.ObjectLock
Encryption encryption.Params Encryption encryption.Params
CopiesNumbers []uint32 CopiesNumbers []uint32
CompleteMD5Hash string CompleteMD5Hash string
ContentMD5 *string ContentMD5 string
ContentSHA256Hash string ContentSHA256Hash string
} }
@ -128,7 +125,6 @@ type (
BktInfo *data.BucketInfo BktInfo *data.BucketInfo
Objects []*VersionedObject Objects []*VersionedObject
Settings *data.BucketSettings Settings *data.BucketSettings
NetworkInfo netmap.NetworkInfo
IsMultiple bool IsMultiple bool
} }
@ -161,7 +157,6 @@ type (
DstEncryption encryption.Params DstEncryption encryption.Params
CopiesNumbers []uint32 CopiesNumbers []uint32
} }
// CreateBucketParams stores bucket create request parameters. // CreateBucketParams stores bucket create request parameters.
CreateBucketParams struct { CreateBucketParams struct {
Name string Name string
@ -236,7 +231,7 @@ func (p HeadObjectParams) Versioned() bool {
// NewLayer creates an instance of a Layer. It checks credentials // NewLayer creates an instance of a Layer. It checks credentials
// and establishes gRPC connection with the node. // and establishes gRPC connection with the node.
func NewLayer(log *zap.Logger, frostFS frostfs.FrostFS, config *Config) *Layer { func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) *Layer {
return &Layer{ return &Layer{
frostFS: frostFS, frostFS: frostFS,
log: log, log: log,
@ -248,7 +243,6 @@ func NewLayer(log *zap.Logger, frostFS frostfs.FrostFS, config *Config) *Layer {
features: config.Features, features: config.Features,
gateKey: config.GateKey, gateKey: config.GateKey,
corsCnrInfo: config.CORSCnrInfo, corsCnrInfo: config.CORSCnrInfo,
lifecycleCnrInfo: config.LifecycleCnrInfo,
} }
} }
@ -300,7 +294,7 @@ func (n *Layer) reqLogger(ctx context.Context) *zap.Logger {
return n.log return n.log
} }
func (n *Layer) prepareAuthParameters(ctx context.Context, prm *frostfs.PrmAuth, bktOwner user.ID) { func (n *Layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
if prm.BearerToken != nil || prm.PrivateKey != nil { if prm.BearerToken != nil || prm.PrivateKey != nil {
return return
} }
@ -323,21 +317,21 @@ func (n *Layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
} }
reqInfo := middleware.GetReqInfo(ctx) reqInfo := middleware.GetReqInfo(ctx)
zone := n.features.FormContainerZone(reqInfo.Namespace) zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil { if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
return bktInfo, nil return bktInfo, nil
} }
containerID, err := n.ResolveBucket(ctx, zone, name) containerID, err := n.ResolveBucket(ctx, name)
if err != nil { if err != nil {
if strings.Contains(err.Error(), "not found") { if strings.Contains(err.Error(), "not found") {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchBucket), err.Error()) return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
} }
return nil, err return nil, err
} }
prm := frostfs.PrmContainer{ prm := PrmContainer{
ContainerID: containerID, ContainerID: containerID,
SessionToken: n.SessionTokenForRead(ctx), SessionToken: n.SessionTokenForRead(ctx),
} }
@ -353,13 +347,13 @@ func (n *Layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {
} }
reqInfo := middleware.GetReqInfo(ctx) reqInfo := middleware.GetReqInfo(ctx)
zone := n.features.FormContainerZone(reqInfo.Namespace) zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil { if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
return bktInfo.CID, nil return bktInfo.CID, nil
} }
return n.ResolveBucket(ctx, zone, name) return n.ResolveBucket(ctx, name)
} }
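
FormContainerZone now derives the NNS zone from the request namespace before resolution; one plausible shape of that helper (an assumption; the real implementation lives in the gateway settings):

    func formContainerZone(ns string) string {
        if ns == "" { // root namespace keeps the default zone
            return "container"
        }
        return ns + ".ns"
    }
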
// ListBuckets returns all user containers. The name of the bucket is a container // ListBuckets returns all user containers. The name of the bucket is a container
@ -398,9 +392,9 @@ func (n *Layer) GetObject(ctx context.Context, p *GetObjectParams) (*ObjectPaylo
if err != nil { if err != nil {
if client.IsErrObjectNotFound(err) { if client.IsErrObjectNotFound(err) {
if p.Versioned { if p.Versioned {
err = fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error()) err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchVersion), err.Error())
} else { } else {
err = fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) err = fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchKey), err.Error())
} }
} }
@ -529,7 +523,7 @@ func (n *Layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
return n.PutObject(ctx, &PutObjectParams{ return n.PutObject(ctx, &PutObjectParams{
BktInfo: p.DstBktInfo, BktInfo: p.DstBktInfo,
Object: p.DstObject, Object: p.DstObject,
Size: &p.DstSize, Size: p.DstSize,
Reader: objPayload, Reader: objPayload,
Header: p.Header, Header: p.Header,
Encryption: p.DstEncryption, Encryption: p.DstEncryption,
@ -548,8 +542,7 @@ func getRandomOID() (oid.ID, error) {
return objID, nil return objID, nil
} }
func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject, func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject) *VersionedObject {
networkInfo netmap.NetworkInfo) *VersionedObject {
if len(obj.VersionID) != 0 || settings.Unversioned() { if len(obj.VersionID) != 0 || settings.Unversioned() {
var nodeVersions []*data.NodeVersion var nodeVersions []*data.NodeVersion
if nodeVersions, obj.Error = n.getNodeVersionsToDelete(ctx, bkt, obj); obj.Error != nil { if nodeVersions, obj.Error = n.getNodeVersionsToDelete(ctx, bkt, obj); obj.Error != nil {
@ -631,7 +624,6 @@ func (n *Layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings
Created: &now, Created: &now,
Owner: &n.gateOwner, Owner: &n.gateOwner,
IsDeleteMarker: true, IsDeleteMarker: true,
CreationEpoch: networkInfo.CurrentEpoch(),
}, },
IsUnversioned: settings.VersioningSuspended(), IsUnversioned: settings.VersioningSuspended(),
} }
@ -656,22 +648,22 @@ func (n *Layer) handleNotFoundError(bkt *data.BucketInfo, obj *VersionedObject)
} }
func isNotFoundError(err error) bool { func isNotFoundError(err error) bool {
return apierr.IsS3Error(err, apierr.ErrNoSuchKey) || return errors.IsS3Error(err, errors.ErrNoSuchKey) ||
apierr.IsS3Error(err, apierr.ErrNoSuchVersion) errors.IsS3Error(err, errors.ErrNoSuchVersion)
} }
func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) ([]*data.NodeVersion, error) { func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) ([]*data.NodeVersion, error) {
var versionsToDelete []*data.NodeVersion var versionsToDelete []*data.NodeVersion
versions, err := n.treeService.GetVersions(ctx, bkt, obj.Name) versions, err := n.treeService.GetVersions(ctx, bkt, obj.Name)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if stderrors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
} }
return nil, err return nil, err
} }
if len(versions) == 0 { if len(versions) == 0 {
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion)) return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
} }
sort.Slice(versions, func(i, j int) bool { sort.Slice(versions, func(i, j int) bool {
@ -713,7 +705,7 @@ func (n *Layer) getNodeVersionsToDelete(ctx context.Context, bkt *data.BucketInf
} }
if len(versionsToDelete) == 0 { if len(versionsToDelete) == 0 {
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion)) return nil, fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
} }
n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids)) n.reqLogger(ctx).Debug(logs.GetTreeNodeToDelete, zap.Stringer("cid", bkt.CID), zap.Strings("oids", oids))
@ -774,7 +766,7 @@ func (n *Layer) removeCombinedObject(ctx context.Context, bkt *data.BucketInfo,
// DeleteObjects from the storage. // DeleteObjects from the storage.
func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject { func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject {
for i, obj := range p.Objects { for i, obj := range p.Objects {
p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj, p.NetworkInfo) p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj)
if p.IsMultiple && p.Objects[i].Error != nil { if p.IsMultiple && p.Objects[i].Error != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error)) n.reqLogger(ctx).Error(logs.CouldntDeleteObject, zap.String("object", obj.String()), zap.Error(p.Objects[i].Error))
} }
@ -786,23 +778,23 @@ func (n *Layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*Ver
func (n *Layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) { func (n *Layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
bktInfo, err := n.GetBucketInfo(ctx, p.Name) bktInfo, err := n.GetBucketInfo(ctx, p.Name)
if err != nil { if err != nil {
if apierr.IsS3Error(err, apierr.ErrNoSuchBucket) { if errors.IsS3Error(err, errors.ErrNoSuchBucket) {
return n.createContainer(ctx, p) return n.createContainer(ctx, p)
} }
return nil, err return nil, err
} }
if p.SessionContainerCreation != nil && session.IssuedBy(*p.SessionContainerCreation, bktInfo.Owner) { if p.SessionContainerCreation != nil && session.IssuedBy(*p.SessionContainerCreation, bktInfo.Owner) {
return nil, apierr.GetAPIError(apierr.ErrBucketAlreadyOwnedByYou) return nil, errors.GetAPIError(errors.ErrBucketAlreadyOwnedByYou)
} }
return nil, apierr.GetAPIError(apierr.ErrBucketAlreadyExists) return nil, errors.GetAPIError(errors.ErrBucketAlreadyExists)
} }
func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, error) { func (n *Layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
var cnrID cid.ID var cnrID cid.ID
if err := cnrID.DecodeString(name); err != nil { if err := cnrID.DecodeString(name); err != nil {
if cnrID, err = n.resolver.Resolve(ctx, zone, name); err != nil { if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
return cid.ID{}, err return cid.ID{}, err
} }
@ -823,7 +815,7 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
return err return err
} }
if len(res) != 0 { if len(res) != 0 {
return apierr.GetAPIError(apierr.ErrBucketNotEmpty) return errors.GetAPIError(errors.ErrBucketNotEmpty)
} }
} }
@ -834,11 +826,6 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err)) n.reqLogger(ctx).Error(logs.GetBucketCors, zap.Error(err))
} }
lifecycleObj, treeErr := n.treeService.GetBucketLifecycleConfiguration(ctx, p.BktInfo)
if treeErr != nil {
n.reqLogger(ctx).Error(logs.GetBucketLifecycle, zap.Error(treeErr))
}
err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken) err = n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
if err != nil { if err != nil {
return fmt.Errorf("delete container: %w", err) return fmt.Errorf("delete container: %w", err)
@ -848,33 +835,5 @@ func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
n.deleteCORSObject(ctx, p.BktInfo, corsObj) n.deleteCORSObject(ctx, p.BktInfo, corsObj)
} }
if treeErr == nil && !lifecycleObj.Container().Equals(p.BktInfo.CID) {
n.deleteLifecycleObject(ctx, p.BktInfo, lifecycleObj)
}
return nil return nil
} }
func (n *Layer) DeleteContainer(ctx context.Context, p *DeleteBucketParams) error {
n.cache.DeleteBucket(p.BktInfo)
if err := n.frostFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken); err != nil {
return fmt.Errorf("delete container: %w", err)
}
return nil
}
func (n *Layer) GetNetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) {
cachedInfo := n.cache.GetNetworkInfo()
if cachedInfo != nil {
return *cachedInfo, nil
}
networkInfo, err := n.frostFS.NetworkInfo(ctx)
if err != nil {
return netmap.NetworkInfo{}, fmt.Errorf("get network info: %w", err)
}
n.cache.PutNetworkInfo(networkInfo)
return networkInfo, nil
}
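
A hypothetical call site tying this together: network info is fetched once (served from cache when fresh) and threaded through DeleteObjectParams so each delete marker records its creation epoch (bktInfo, objs, settings assumed in scope):

    networkInfo, err := n.GetNetworkInfo(ctx)
    if err != nil {
        return err
    }
    n.DeleteObjects(ctx, &DeleteObjectParams{
        BktInfo:     bktInfo,
        Objects:     objs,
        Settings:    settings,
        NetworkInfo: networkInfo,
    })
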

View file

@ -1,146 +0,0 @@
package layer
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"go.uber.org/zap"
)
type PutBucketLifecycleParams struct {
BktInfo *data.BucketInfo
LifecycleCfg *data.LifecycleConfiguration
CopiesNumbers []uint32
}
func (n *Layer) PutBucketLifecycleConfiguration(ctx context.Context, p *PutBucketLifecycleParams) error {
cfgBytes, err := xml.Marshal(p.LifecycleCfg)
if err != nil {
return fmt.Errorf("marshal lifecycle configuration: %w", err)
}
prm := frostfs.PrmObjectCreate{
Payload: bytes.NewReader(cfgBytes),
Filepath: p.BktInfo.LifecycleConfigurationObjectName(),
CreationTime: TimeNow(ctx),
}
var lifecycleBkt *data.BucketInfo
if n.lifecycleCnrInfo == nil {
lifecycleBkt = p.BktInfo
prm.CopiesNumber = p.CopiesNumbers
} else {
lifecycleBkt = n.lifecycleCnrInfo
prm.PrmAuth.PrivateKey = &n.gateKey.PrivateKey
}
prm.Container = lifecycleBkt.CID
createdObj, err := n.objectPutAndHash(ctx, prm, lifecycleBkt)
if err != nil {
return fmt.Errorf("put lifecycle object: %w", err)
}
objsToDelete, err := n.treeService.PutBucketLifecycleConfiguration(ctx, p.BktInfo, newAddress(lifecycleBkt.CID, createdObj.ID))
objsToDeleteNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
if err != nil && !objsToDeleteNotFound {
return err
}
if !objsToDeleteNotFound {
for _, addr := range objsToDelete {
n.deleteLifecycleObject(ctx, p.BktInfo, addr)
}
}
n.cache.PutLifecycleConfiguration(n.BearerOwner(ctx), p.BktInfo, p.LifecycleCfg)
return nil
}
// deleteLifecycleObject removes object and logs in case of error.
func (n *Layer) deleteLifecycleObject(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) {
var prmAuth frostfs.PrmAuth
lifecycleBkt := bktInfo
if !addr.Container().Equals(bktInfo.CID) {
lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
prmAuth.PrivateKey = &n.gateKey.PrivateKey
}
if err := n.objectDeleteWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteLifecycleObject, zap.Error(err),
zap.String("cid", lifecycleBkt.CID.EncodeToString()),
zap.String("oid", addr.Object().EncodeToString()))
}
}
func (n *Layer) GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.LifecycleConfiguration, error) {
owner := n.BearerOwner(ctx)
if cfg := n.cache.GetLifecycleConfiguration(owner, bktInfo); cfg != nil {
return cfg, nil
}
addr, err := n.treeService.GetBucketLifecycleConfiguration(ctx, bktInfo)
objNotFound := errors.Is(err, tree.ErrNodeNotFound)
if err != nil && !objNotFound {
return nil, err
}
if objNotFound {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), err.Error())
}
var prmAuth frostfs.PrmAuth
lifecycleBkt := bktInfo
if !addr.Container().Equals(bktInfo.CID) {
lifecycleBkt = &data.BucketInfo{CID: addr.Container()}
prmAuth.PrivateKey = &n.gateKey.PrivateKey
}
obj, err := n.objectGetWithAuth(ctx, lifecycleBkt, addr.Object(), prmAuth)
if err != nil {
return nil, fmt.Errorf("get lifecycle object: %w", err)
}
lifecycleCfg := &data.LifecycleConfiguration{}
if err = xml.NewDecoder(obj.Payload).Decode(&lifecycleCfg); err != nil {
return nil, fmt.Errorf("unmarshal lifecycle configuration: %w", err)
}
n.cache.PutLifecycleConfiguration(owner, bktInfo, lifecycleCfg)
for i := range lifecycleCfg.Rules {
if lifecycleCfg.Rules[i].Expiration != nil {
lifecycleCfg.Rules[i].Expiration.Epoch = nil
}
}
return lifecycleCfg, nil
}
func (n *Layer) DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) error {
objs, err := n.treeService.DeleteBucketLifecycleConfiguration(ctx, bktInfo)
objsNotFound := errors.Is(err, tree.ErrNoNodeToRemove)
if err != nil && !objsNotFound {
return err
}
if !objsNotFound {
for _, addr := range objs {
n.deleteLifecycleObject(ctx, bktInfo, addr)
}
}
n.cache.DeleteLifecycleConfiguration(bktInfo)
return nil
}

View file

@ -1,61 +0,0 @@
package layer
import (
"crypto/md5"
"encoding/xml"
"testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
frosterr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"github.com/stretchr/testify/require"
)
func TestBucketLifecycle(t *testing.T) {
tc := prepareContext(t)
lifecycle := &data.LifecycleConfiguration{
XMLName: xml.Name{
Space: `http://s3.amazonaws.com/doc/2006-03-01/`,
Local: "LifecycleConfiguration",
},
Rules: []data.LifecycleRule{
{
Status: data.LifecycleStatusEnabled,
Expiration: &data.LifecycleExpiration{
Days: ptr(21),
},
},
},
}
lifecycleBytes, err := xml.Marshal(lifecycle)
require.NoError(t, err)
hash := md5.New()
hash.Write(lifecycleBytes)
_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.Equal(t, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), frosterr.UnwrapErr(err))
err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
err = tc.layer.PutBucketLifecycleConfiguration(tc.ctx, &PutBucketLifecycleParams{
BktInfo: tc.bktInfo,
LifecycleCfg: lifecycle,
})
require.NoError(t, err)
cfg, err := tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
require.Equal(t, *lifecycle, *cfg)
err = tc.layer.DeleteBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.NoError(t, err)
_, err = tc.layer.GetBucketLifecycleConfiguration(tc.ctx, tc.bktInfo)
require.Equal(t, apierr.GetAPIError(apierr.ErrNoSuchLifecycleConfiguration), frosterr.UnwrapErr(err))
}
func ptr[T any](t T) *T {
return &t
}

View file

@ -11,7 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"github.com/panjf2000/ants/v2" "github.com/panjf2000/ants/v2"
@ -585,7 +585,7 @@ func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, e
return true return true
} }
if p.Bookmark != "" && p.Bookmark != p.Marker { if p.Bookmark != "" {
if _, ok := existed[continuationToken]; !ok { if _, ok := existed[continuationToken]; !ok {
if p.Bookmark != node.NodeVersion.OID.EncodeToString() { if p.Bookmark != node.NodeVersion.OID.EncodeToString() {
return true return true
@ -691,10 +691,10 @@ func filterVersionsByMarker(objects []*data.ExtendedNodeVersion, p *ListObjectVe
return objects[j+1:], nil return objects[j+1:], nil
} }
} }
return nil, apierr.GetAPIError(apierr.ErrInvalidVersion) return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
} else if obj.NodeVersion.FilePath > p.KeyMarker { } else if obj.NodeVersion.FilePath > p.KeyMarker {
if p.VersionIDMarker != "" { if p.VersionIDMarker != "" {
return nil, apierr.GetAPIError(apierr.ErrInvalidVersion) return nil, s3errors.GetAPIError(s3errors.ErrInvalidVersion)
} }
return objects[i:], nil return objects[i:], nil
} }
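
Roughly, the ErrInvalidVersion branches fire when a VersionIDMarker is supplied but never matched within the KeyMarker's run of versions (illustrative; part of the loop is elided in the hunk):

    // versions: [a#v3, a#v2, a#v1, b#v1], KeyMarker = "a"
    //   VersionIDMarker = "v2"      -> resume after the match: [a#v1, b#v1]
    //   VersionIDMarker = "missing" -> the scan passes "a" with the marker
    //                                  unmatched -> ErrInvalidVersion
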

View file

@ -17,10 +17,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
@ -152,11 +150,6 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
metaSize += len(p.Data.TagSet) metaSize += len(p.Data.TagSet)
} }
networkInfo, err := n.GetNetworkInfo(ctx)
if err != nil {
return err
}
info := &data.MultipartInfo{ info := &data.MultipartInfo{
Key: p.Info.Key, Key: p.Info.Key,
UploadID: p.Info.UploadID, UploadID: p.Info.UploadID,
@ -164,7 +157,6 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
Created: TimeNow(ctx), Created: TimeNow(ctx),
Meta: make(map[string]string, metaSize), Meta: make(map[string]string, metaSize),
CopiesNumbers: p.CopiesNumbers, CopiesNumbers: p.CopiesNumbers,
CreationEpoch: networkInfo.CurrentEpoch(),
} }
for key, val := range p.Header { for key, val := range p.Header {
@ -189,14 +181,14 @@ func (n *Layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar
func (n *Layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) { func (n *Layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID) multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return "", fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error()) return "", fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
} }
return "", err return "", err
} }
if p.Size > UploadMaxSize { if p.Size > UploadMaxSize {
return "", fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooLarge), p.Size, UploadMaxSize) return "", fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), p.Size, UploadMaxSize)
} }
objInfo, err := n.uploadPart(ctx, multipartInfo, p) objInfo, err := n.uploadPart(ctx, multipartInfo, p)
@ -211,11 +203,11 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
encInfo := FormEncryptionInfo(multipartInfo.Meta) encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil { if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err)) n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters) return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
} }
bktInfo := p.Info.Bkt bktInfo := p.Info.Bkt
prm := frostfs.PrmObjectCreate{ prm := PrmObjectCreate{
Container: bktInfo.CID, Container: bktInfo.CID,
Attributes: make([][2]string, 2), Attributes: make([][2]string, 2),
Payload: p.Reader, Payload: p.Reader,
@ -237,78 +229,76 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber) prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo) size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(p.ContentMD5) > 0 { if len(p.ContentMD5) > 0 {
hashBytes, err := base64.StdEncoding.DecodeString(p.ContentMD5) hashBytes, err := base64.StdEncoding.DecodeString(p.ContentMD5)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest) return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
} }
if hex.EncodeToString(hashBytes) != hex.EncodeToString(createdObj.MD5Sum) { if hex.EncodeToString(hashBytes) != hex.EncodeToString(md5Hash) {
prm := frostfs.PrmObjectDelete{ prm := PrmObjectDelete{
Object: createdObj.ID, Object: id,
Container: bktInfo.CID, Container: bktInfo.CID,
} }
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner) n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
err = n.frostFS.DeleteObject(ctx, prm) err = n.frostFS.DeleteObject(ctx, prm)
if err != nil { if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
} }
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest) return nil, s3errors.GetAPIError(s3errors.ErrInvalidDigest)
} }
} }
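
For reference, the header this branch validates is the standard S3 Content-MD5: the base64 encoding of the raw (not hex) MD5 digest of the part body; a client-side sketch:

    sum := md5.Sum(body) // body is the exact bytes of the uploaded part
    contentMD5 := base64.StdEncoding.EncodeToString(sum[:])
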
if p.Info.Encryption.Enabled() { if p.Info.Encryption.Enabled() {
createdObj.Size = decSize size = decSize
} }
if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) { if !p.Info.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash) contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
} }
if !bytes.Equal(contentHashBytes, createdObj.HashSum) { if !bytes.Equal(contentHashBytes, hash) {
err = n.objectDelete(ctx, bktInfo, createdObj.ID) err = n.objectDelete(ctx, bktInfo, id)
if err != nil { if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
} }
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) return nil, s3errors.GetAPIError(s3errors.ErrContentSHA256Mismatch)
} }
} }
n.reqLogger(ctx).Debug(logs.UploadPart, n.reqLogger(ctx).Debug(logs.UploadPart,
zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber), zap.String("multipart upload", p.Info.UploadID), zap.Int("part number", p.PartNumber),
zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", createdObj.ID)) zap.Stringer("cid", bktInfo.CID), zap.Stringer("oid", id))
partInfo := &data.PartInfo{ partInfo := &data.PartInfo{
Key: p.Info.Key, Key: p.Info.Key,
UploadID: p.Info.UploadID, UploadID: p.Info.UploadID,
Number: p.PartNumber, Number: p.PartNumber,
OID: createdObj.ID, OID: id,
Size: createdObj.Size, Size: size,
ETag: hex.EncodeToString(createdObj.HashSum), ETag: hex.EncodeToString(hash),
Created: prm.CreationTime, Created: prm.CreationTime,
MD5: hex.EncodeToString(createdObj.MD5Sum), MD5: hex.EncodeToString(md5Hash),
} }
oldPartIDs, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo) oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
oldPartIDNotFound := errors.Is(err, tree.ErrNoNodeToRemove) oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
if err != nil && !oldPartIDNotFound { if err != nil && !oldPartIDNotFound {
return nil, err return nil, err
} }
if !oldPartIDNotFound { if !oldPartIDNotFound {
for _, oldPartID := range oldPartIDs {
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil { if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err), n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
zap.String("cid", bktInfo.CID.EncodeToString()), zap.String("cid", bktInfo.CID.EncodeToString()),
zap.String("oid", oldPartID.EncodeToString())) zap.String("oid", oldPartID.EncodeToString()))
} }
} }
}
objInfo := &data.ObjectInfo{ objInfo := &data.ObjectInfo{
ID: createdObj.ID, ID: id,
CID: bktInfo.CID, CID: bktInfo.CID,
Owner: bktInfo.Owner, Owner: bktInfo.Owner,
@ -325,8 +315,8 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) { func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID) multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error()) return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
} }
return nil, err return nil, err
} }
@ -342,11 +332,11 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
if p.Range != nil { if p.Range != nil {
size = p.Range.End - p.Range.Start + 1 size = p.Range.End - p.Range.Start + 1
if p.Range.End > srcObjectSize { if p.Range.End > srcObjectSize {
return nil, fmt.Errorf("%w: %d-%d/%d", apierr.GetAPIError(apierr.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, srcObjectSize) return nil, fmt.Errorf("%w: %d-%d/%d", s3errors.GetAPIError(s3errors.ErrInvalidCopyPartRangeSource), p.Range.Start, p.Range.End, srcObjectSize)
} }
} }
if size > UploadMaxSize { if size > UploadMaxSize {
return nil, fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooLarge), size, UploadMaxSize) return nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooLarge), size, UploadMaxSize)
} }
objPayload, err := n.GetObject(ctx, &GetObjectParams{ objPayload, err := n.GetObject(ctx, &GetObjectParams{
@ -373,7 +363,7 @@ func (n *Layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.
func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) { func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
for i := 1; i < len(p.Parts); i++ { for i := 1; i < len(p.Parts); i++ {
if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber { if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
return nil, nil, apierr.GetAPIError(apierr.ErrInvalidPartOrder) return nil, nil, s3errors.GetAPIError(s3errors.ErrInvalidPartOrder)
} }
} }
@ -384,24 +374,25 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
encInfo := FormEncryptionInfo(multipartInfo.Meta) encInfo := FormEncryptionInfo(multipartInfo.Meta)
if len(partsInfo) < len(p.Parts) { if len(partsInfo) < len(p.Parts) {
return nil, nil, fmt.Errorf("%w: found %d parts, need %d", apierr.GetAPIError(apierr.ErrInvalidPart), len(partsInfo), len(p.Parts)) return nil, nil, fmt.Errorf("%w: found %d parts, need %d", s3errors.GetAPIError(s3errors.ErrInvalidPart), len(partsInfo), len(p.Parts))
} }
var multipartObjetSize uint64 var multipartObjetSize uint64
var encMultipartObjectSize uint64 var encMultipartObjectSize uint64
parts := make([]*data.PartInfoExtended, 0, len(p.Parts)) parts := make([]*data.PartInfo, 0, len(p.Parts))
var completedPartsHeader strings.Builder var completedPartsHeader strings.Builder
md5Hash := md5.New() md5Hash := md5.New()
for i, part := range p.Parts { for i, part := range p.Parts {
partInfo := partsInfo.Extract(part.PartNumber, data.UnQuote(part.ETag), n.features.MD5Enabled()) partInfo := partsInfo[part.PartNumber]
if partInfo == nil { if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", apierr.GetAPIError(apierr.ErrInvalidPart), part.PartNumber) return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
} }
delete(partsInfo, part.PartNumber)
// for the last part we have no minimum size limit // for the last part we have no minimum size limit
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize { if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
return nil, nil, fmt.Errorf("%w: %d/%d", apierr.GetAPIError(apierr.ErrEntityTooSmall), partInfo.Size, UploadMinSize) return nil, nil, fmt.Errorf("%w: %d/%d", s3errors.GetAPIError(s3errors.ErrEntityTooSmall), partInfo.Size, UploadMinSize)
} }
parts = append(parts, partInfo) parts = append(parts, partInfo)
multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted) multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)
@ -462,7 +453,7 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
Object: p.Info.Key, Object: p.Info.Key,
Reader: bytes.NewReader(partsData), Reader: bytes.NewReader(partsData),
Header: initMetadata, Header: initMetadata,
Size: &multipartObjetSize, Size: multipartObjetSize,
Encryption: p.Info.Encryption, Encryption: p.Info.Encryption,
CopiesNumbers: multipartInfo.CopiesNumbers, CopiesNumbers: multipartInfo.CopiesNumbers,
CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)), CompleteMD5Hash: hex.EncodeToString(md5Hash.Sum(nil)) + "-" + strconv.Itoa(len(p.Parts)),
@ -473,13 +464,12 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
zap.String("uploadKey", p.Info.Key), zap.String("uploadKey", p.Info.Key),
zap.Error(err)) zap.Error(err))
return nil, nil, apierr.GetAPIError(apierr.ErrInternalError) return nil, nil, s3errors.GetAPIError(s3errors.ErrInternalError)
} }
var addr oid.Address var addr oid.Address
addr.SetContainer(p.Info.Bkt.CID) addr.SetContainer(p.Info.Bkt.CID)
for _, prts := range partsInfo { for _, partInfo := range partsInfo {
for _, partInfo := range prts {
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil { if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart, n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID), zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
@ -488,7 +478,6 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
addr.SetObject(partInfo.OID) addr.SetObject(partInfo.OID)
n.cache.DeleteObject(addr) n.cache.DeleteObject(addr)
} }
}
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo) return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
} }
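
The CompleteMD5Hash passed to PutObject above follows the common S3 multipart ETag convention: MD5 over the concatenated raw per-part digests, suffixed with the part count. A sketch under that assumption:

    h := md5.New()
    for _, part := range parts {
        raw, _ := hex.DecodeString(part.MD5) // per-part digest stored hex-encoded
        h.Write(raw)
    }
    etag := hex.EncodeToString(h.Sum(nil)) + "-" + strconv.Itoa(len(parts))
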
@ -559,14 +548,12 @@ func (n *Layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
return err return err
} }
for _, infos := range parts { for _, info := range parts {
for _, info := range infos {
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil { if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()), n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err)) zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
} }
} }
}
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo) return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo)
} }
@ -581,19 +568,14 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
encInfo := FormEncryptionInfo(multipartInfo.Meta) encInfo := FormEncryptionInfo(multipartInfo.Meta)
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil { if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err)) n.reqLogger(ctx).Warn(logs.MismatchedObjEncryptionInfo, zap.Error(err))
return nil, apierr.GetAPIError(apierr.ErrInvalidEncryptionParameters) return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
} }
res.Owner = multipartInfo.Owner res.Owner = multipartInfo.Owner
parts := make([]*Part, 0, len(partsInfo)) parts := make([]*Part, 0, len(partsInfo))
for _, infos := range partsInfo { for _, partInfo := range partsInfo {
sort.Slice(infos, func(i, j int) bool {
return infos[i].Timestamp < infos[j].Timestamp
})
partInfo := infos[len(infos)-1]
parts = append(parts, &Part{ parts = append(parts, &Part{
ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())), ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
LastModified: partInfo.Created.UTC().Format(time.RFC3339), LastModified: partInfo.Created.UTC().Format(time.RFC3339),
@ -630,26 +612,11 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
return &res, nil return &res, nil
} }
type PartsInfo map[int][]*data.PartInfoExtended func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
func (p PartsInfo) Extract(part int, etag string, md5Enabled bool) *data.PartInfoExtended {
parts := p[part]
for i, info := range parts {
if info.GetETag(md5Enabled) == etag {
p[part] = append(parts[:i], parts[i+1:]...)
return info
}
}
return nil
}
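
Re-uploading the same part number now keeps every candidate instead of overwriting, and Extract pops the one whose ETag matches; illustrative use (partA and partB are hypothetical *data.PartInfoExtended values):

    parts := PartsInfo{1: {partA, partB}} // part 1 uploaded twice
    info := parts.Extract(1, etagOfB, md5Enabled)
    // info == partB, and parts[1] now holds only partA;
    // unmatched leftovers are deleted once the multipart upload completes.
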
func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, PartsInfo, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID) multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return nil, nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchUpload), err.Error()) return nil, nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchUpload), err.Error())
} }
return nil, nil, err return nil, nil, err
} }
@ -659,11 +626,11 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
return nil, nil, err return nil, nil, err
} }
res := make(map[int][]*data.PartInfoExtended, len(parts)) res := make(map[int]*data.PartInfo, len(parts))
partsNumbers := make([]int, len(parts)) partsNumbers := make([]int, len(parts))
oids := make([]string, len(parts)) oids := make([]string, len(parts))
for i, part := range parts { for i, part := range parts {
res[part.Number] = append(res[part.Number], part) res[part.Number] = part
partsNumbers[i] = part.Number partsNumbers[i] = part.Number
oids[i] = part.OID.EncodeToString() oids[i] = part.OID.EncodeToString()
} }

View file

@ -12,7 +12,6 @@ import (
"fmt" "fmt"
"io" "io"
"mime" "mime"
"net/http"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
@ -20,11 +19,8 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/detector"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -51,7 +47,7 @@ type (
} }
DeleteMarkerError struct { DeleteMarkerError struct {
ErrorCode apierr.ErrorCode ErrorCode apiErrors.ErrorCode
} }
) )
@ -72,7 +68,7 @@ func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
// objectHead returns all object's headers. // objectHead returns all object's headers.
func (n *Layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) { func (n *Layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
prm := frostfs.PrmObjectHead{ prm := PrmObjectHead{
Container: bktInfo.CID, Container: bktInfo.CID,
Object: idObj, Object: idObj,
} }
@ -130,11 +126,11 @@ func (n *Layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Re
// initializes payload reader of the FrostFS object. // initializes payload reader of the FrostFS object.
// Zero range corresponds to full payload (panics if only offset is set). // Zero range corresponds to full payload (panics if only offset is set).
func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) { func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFSParams) (io.Reader, error) {
var prmAuth frostfs.PrmAuth var prmAuth PrmAuth
n.prepareAuthParameters(ctx, &prmAuth, p.bktInfo.Owner) n.prepareAuthParameters(ctx, &prmAuth, p.bktInfo.Owner)
if p.off+p.ln != 0 { if p.off+p.ln != 0 {
prm := frostfs.PrmObjectRange{ prm := PrmObjectRange{
PrmAuth: prmAuth, PrmAuth: prmAuth,
Container: p.bktInfo.CID, Container: p.bktInfo.CID,
Object: p.oid, Object: p.oid,
@ -144,7 +140,7 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
return n.frostFS.RangeObject(ctx, prm) return n.frostFS.RangeObject(ctx, prm)
} }
prm := frostfs.PrmObjectGet{ prm := PrmObjectGet{
PrmAuth: prmAuth, PrmAuth: prmAuth,
Container: p.bktInfo.CID, Container: p.bktInfo.CID,
Object: p.oid, Object: p.oid,
@ -159,17 +155,17 @@ func (n *Layer) initFrostFSObjectPayloadReader(ctx context.Context, p getFrostFS
} }
// objectGet returns an object with payload in the object. // objectGet returns an object with payload in the object.
func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*frostfs.Object, error) { func (n *Layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*Object, error) {
return n.objectGetBase(ctx, bktInfo, objID, frostfs.PrmAuth{}) return n.objectGetBase(ctx, bktInfo, objID, PrmAuth{})
} }
// objectGetWithAuth returns an object with payload in the object. Uses provided PrmAuth. // objectGetWithAuth returns an object with payload in the object. Uses provided PrmAuth.
func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth frostfs.PrmAuth) (*frostfs.Object, error) { func (n *Layer) objectGetWithAuth(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
return n.objectGetBase(ctx, bktInfo, objID, auth) return n.objectGetBase(ctx, bktInfo, objID, auth)
} }
func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth frostfs.PrmAuth) (*frostfs.Object, error) { func (n *Layer) objectGetBase(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, auth PrmAuth) (*Object, error) {
prm := frostfs.PrmObjectGet{ prm := PrmObjectGet{
PrmAuth: auth, PrmAuth: auth,
Container: bktInfo.CID, Container: bktInfo.CID,
Object: objID, Object: objID,
@ -234,46 +230,40 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
r := p.Reader r := p.Reader
if p.Encryption.Enabled() { if p.Encryption.Enabled() {
var size uint64 p.Header[AttributeDecryptedSize] = strconv.FormatUint(p.Size, 10)
if p.Size != nil {
size = *p.Size
}
p.Header[AttributeDecryptedSize] = strconv.FormatUint(size, 10)
if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil { if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
return nil, fmt.Errorf("add encryption header: %w", err) return nil, fmt.Errorf("add encryption header: %w", err)
} }
var encSize uint64 var encSize uint64
if r, encSize, err = encryptionReader(p.Reader, size, p.Encryption.Key()); err != nil { if r, encSize, err = encryptionReader(p.Reader, p.Size, p.Encryption.Key()); err != nil {
return nil, fmt.Errorf("create encrypter: %w", err) return nil, fmt.Errorf("create encrypter: %w", err)
} }
p.Size = &encSize p.Size = encSize
} }
if r != nil { if r != nil {
if len(p.Header[api.ContentType]) == 0 { if len(p.Header[api.ContentType]) == 0 {
if contentType := MimeByFilePath(p.Object); len(contentType) == 0 { if contentType := MimeByFilePath(p.Object); len(contentType) == 0 {
d := detector.NewDetector(r, http.DetectContentType) d := newDetector(r)
if contentType, err := d.Detect(); err == nil { if contentType, err := d.Detect(); err == nil {
p.Header[api.ContentType] = contentType p.Header[api.ContentType] = contentType
} }
r = d.RestoredReader() r = d.MultiReader()
} else { } else {
p.Header[api.ContentType] = contentType p.Header[api.ContentType] = contentType
} }
} }
} }
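
The detector referenced above presumably peeks a bounded prefix for http.DetectContentType and then restores it; a minimal sketch of that pattern (assumed shape, not the gateway's actual detector package):

    buf := make([]byte, 512) // http.DetectContentType considers at most 512 bytes
    read, _ := io.ReadFull(rdr, buf)
    contentType := http.DetectContentType(buf[:read])
    rdr = io.MultiReader(bytes.NewReader(buf[:read]), rdr) // stitch the consumed prefix back
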
prm := frostfs.PrmObjectCreate{ prm := PrmObjectCreate{
Container: p.BktInfo.CID, Container: p.BktInfo.CID,
PayloadSize: p.Size,
Filepath: p.Object, Filepath: p.Object,
Payload: r, Payload: r,
CreationTime: TimeNow(ctx), CreationTime: TimeNow(ctx),
CopiesNumber: p.CopiesNumbers, CopiesNumber: p.CopiesNumbers,
} }
if p.Size != nil {
prm.PayloadSize = *p.Size
}
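
Size is now a pointer, so an unknown payload length (e.g. a chunked upload) can be expressed as nil rather than 0; presumably the actual stored size then flows back via the create result, as the version bookkeeping below suggests. Illustrative:

    params := &PutObjectParams{
        BktInfo: bktInfo,
        Object:  "docs/readme.txt", // hypothetical key
        Reader:  body,
        Size:    nil, // length unknown up front; was a plain uint64 before
    }
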
prm.Attributes = make([][2]string, 0, len(p.Header)) prm.Attributes = make([][2]string, 0, len(p.Header))
@ -281,67 +271,56 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
prm.Attributes = append(prm.Attributes, [2]string{k, v}) prm.Attributes = append(prm.Attributes, [2]string{k, v})
} }
createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo) size, id, hash, md5Hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(p.ContentMD5) > 0 {
if !p.Encryption.Enabled() && p.ContentMD5 != nil { headerMd5Hash, err := base64.StdEncoding.DecodeString(p.ContentMD5)
if len(*p.ContentMD5) == 0 {
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest)
}
headerMd5Hash, err := base64.StdEncoding.DecodeString(*p.ContentMD5)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrInvalidDigest) return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
} }
if !bytes.Equal(headerMd5Hash, createdObj.MD5Sum) { if !bytes.Equal(headerMd5Hash, md5Hash) {
err = n.objectDelete(ctx, p.BktInfo, createdObj.ID) err = n.objectDelete(ctx, p.BktInfo, id)
if err != nil { if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
} }
return nil, apierr.GetAPIError(apierr.ErrBadDigest) return nil, apiErrors.GetAPIError(apiErrors.ErrInvalidDigest)
} }
} }
if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) { if !p.Encryption.Enabled() && len(p.ContentSHA256Hash) > 0 && !auth.IsStandardContentSHA256(p.ContentSHA256Hash) {
contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash) contentHashBytes, err := hex.DecodeString(p.ContentSHA256Hash)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
} }
if !bytes.Equal(contentHashBytes, createdObj.HashSum) { if !bytes.Equal(contentHashBytes, hash) {
err = n.objectDelete(ctx, p.BktInfo, createdObj.ID) err = n.objectDelete(ctx, p.BktInfo, id)
if err != nil { if err != nil {
n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) n.reqLogger(ctx).Debug(logs.FailedToDeleteObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
} }
return nil, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch) return nil, apiErrors.GetAPIError(apiErrors.ErrContentSHA256Mismatch)
} }
} }
n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", createdObj.ID)) n.reqLogger(ctx).Debug(logs.PutObject, zap.Stringer("cid", p.BktInfo.CID), zap.Stringer("oid", id))
now := TimeNow(ctx) now := TimeNow(ctx)
newVersion := &data.NodeVersion{ newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{ BaseNodeVersion: data.BaseNodeVersion{
OID: createdObj.ID, OID: id,
ETag: hex.EncodeToString(createdObj.HashSum), ETag: hex.EncodeToString(hash),
FilePath: p.Object, FilePath: p.Object,
Size: p.Size,
Created: &now, Created: &now,
Owner: &n.gateOwner, Owner: &n.gateOwner,
CreationEpoch: createdObj.CreationEpoch,
}, },
IsUnversioned: !bktSettings.VersioningEnabled(), IsUnversioned: !bktSettings.VersioningEnabled(),
IsCombined: p.Header[MultipartObjectSize] != "", IsCombined: p.Header[MultipartObjectSize] != "",
} }
if len(p.CompleteMD5Hash) > 0 { if len(p.CompleteMD5Hash) > 0 {
newVersion.MD5 = p.CompleteMD5Hash newVersion.MD5 = p.CompleteMD5Hash
} else { } else {
newVersion.MD5 = hex.EncodeToString(createdObj.MD5Sum) newVersion.MD5 = hex.EncodeToString(md5Hash)
}
if p.Size != nil {
newVersion.Size = *p.Size
} else {
newVersion.Size = createdObj.Size
} }
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil { if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
@ -353,7 +332,7 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
ObjVersion: &data.ObjectVersion{ ObjVersion: &data.ObjectVersion{
BktInfo: p.BktInfo, BktInfo: p.BktInfo,
ObjectName: p.Object, ObjectName: p.Object,
VersionID: createdObj.ID.EncodeToString(), VersionID: id.EncodeToString(),
}, },
NewLock: p.Lock, NewLock: p.Lock,
CopiesNumbers: p.CopiesNumbers, CopiesNumbers: p.CopiesNumbers,
@ -368,13 +347,13 @@ func (n *Layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID) n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)
objInfo := &data.ObjectInfo{ objInfo := &data.ObjectInfo{
ID: createdObj.ID, ID: id,
CID: p.BktInfo.CID, CID: p.BktInfo.CID,
Owner: n.gateOwner, Owner: n.gateOwner,
Bucket: p.BktInfo.Name, Bucket: p.BktInfo.Name,
Name: p.Object, Name: p.Object,
Size: createdObj.Size, Size: size,
Created: prm.CreationTime, Created: prm.CreationTime,
Headers: p.Header, Headers: p.Header,
ContentType: p.Header[api.ContentType], ContentType: p.Header[api.ContentType],
@ -400,20 +379,20 @@ func (n *Layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.Bucke
node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName) node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error())
} }
return nil, err return nil, err
} }
if node.IsDeleteMarker { if node.IsDeleteMarker {
return nil, DeleteMarkerError{ErrorCode: apierr.ErrNoSuchKey} return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrNoSuchKey}
} }
meta, err := n.objectHead(ctx, bkt, node.OID) meta, err := n.objectHead(ctx, bkt, node.OID)
if err != nil { if err != nil {
if client.IsErrObjectNotFound(err) { if client.IsErrObjectNotFound(err) {
return nil, fmt.Errorf("%w: %s; %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error(), node.OID.EncodeToString()) return nil, fmt.Errorf("%w: %s; %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchKey), err.Error(), node.OID.EncodeToString())
} }
return nil, err return nil, err
} }
@ -436,8 +415,8 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
if p.VersionID == data.UnversionedObjectVersionID { if p.VersionID == data.UnversionedObjectVersionID {
foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object) foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error()) return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
} }
return nil, err return nil, err
} }
@ -454,7 +433,7 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
} }
} }
if foundVersion == nil { if foundVersion == nil {
return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion)) return nil, fmt.Errorf("%w: there isn't tree node with requested version id", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion))
} }
} }
@ -464,13 +443,13 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
} }
if foundVersion.IsDeleteMarker { if foundVersion.IsDeleteMarker {
return nil, DeleteMarkerError{ErrorCode: apierr.ErrMethodNotAllowed} return nil, DeleteMarkerError{ErrorCode: apiErrors.ErrMethodNotAllowed}
} }
meta, err := n.objectHead(ctx, bkt, foundVersion.OID) meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
if err != nil { if err != nil {
if client.IsErrObjectNotFound(err) { if client.IsErrObjectNotFound(err) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchVersion), err.Error()) return nil, fmt.Errorf("%w: %s", apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion), err.Error())
} }
return nil, err return nil, err
} }
@ -489,16 +468,16 @@ func (n *Layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadOb
// objectDelete puts tombstone object into frostfs. // objectDelete puts tombstone object into frostfs.
func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error { func (n *Layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
return n.objectDeleteBase(ctx, bktInfo, idObj, frostfs.PrmAuth{}) return n.objectDeleteBase(ctx, bktInfo, idObj, PrmAuth{})
} }
// objectDeleteWithAuth puts tombstone object into frostfs. Uses provided PrmAuth. // objectDeleteWithAuth puts tombstone object into frostfs. Uses provided PrmAuth.
func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth frostfs.PrmAuth) error { func (n *Layer) objectDeleteWithAuth(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
return n.objectDeleteBase(ctx, bktInfo, idObj, auth) return n.objectDeleteBase(ctx, bktInfo, idObj, auth)
} }
func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth frostfs.PrmAuth) error { func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID, auth PrmAuth) error {
prm := frostfs.PrmObjectDelete{ prm := PrmObjectDelete{
PrmAuth: auth, PrmAuth: auth,
Container: bktInfo.CID, Container: bktInfo.CID,
Object: idObj, Object: idObj,
@ -512,7 +491,8 @@ func (n *Layer) objectDeleteBase(ctx context.Context, bktInfo *data.BucketInfo,
} }
// objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject. // objectPutAndHash prepare auth parameters and invoke frostfs.CreateObject.
func (n *Layer) objectPutAndHash(ctx context.Context, prm frostfs.PrmObjectCreate, bktInfo *data.BucketInfo) (*data.CreatedObjectInfo, error) { // Returns object ID and payload sha256 hash.
func (n *Layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, []byte, error) {
n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner) n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
prm.ClientCut = n.features.ClientCut() prm.ClientCut = n.features.ClientCut()
prm.BufferMaxSize = n.features.BufferMaxSizeForPut() prm.BufferMaxSize = n.features.BufferMaxSizeForPut()
@ -525,21 +505,15 @@ func (n *Layer) objectPutAndHash(ctx context.Context, prm frostfs.PrmObjectCreat
hash.Write(buf) hash.Write(buf)
md5Hash.Write(buf) md5Hash.Write(buf)
}) })
res, err := n.frostFS.CreateObject(ctx, prm) id, err := n.frostFS.CreateObject(ctx, prm)
if err != nil { if err != nil {
if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil { if _, errDiscard := io.Copy(io.Discard, prm.Payload); errDiscard != nil {
n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard)) n.reqLogger(ctx).Warn(logs.FailedToDiscardPutPayloadProbablyGoroutineLeaks, zap.Error(errDiscard))
} }
return nil, err return 0, oid.ID{}, nil, nil, err
} }
return &data.CreatedObjectInfo{ return size, id, hash.Sum(nil), md5Hash.Sum(nil), nil
ID: res.ObjectID,
Size: size,
HashSum: hash.Sum(nil),
MD5Sum: md5Hash.Sum(nil),
CreationEpoch: res.CreationEpoch,
}, nil
} }
type logWrapper struct { type logWrapper struct {
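The hunk above folds objectPutAndHash's old four-value return (size, id, sha256, md5) into a single created-object descriptor, while keeping the single-pass checksum trick: the payload is wrapped so both digests are fed as the bytes stream out. A minimal, self-contained sketch of that pattern, with a trimmed stand-in for data.CreatedObjectInfo and a plain io.Writer standing in for FrostFS:

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// createdObjectInfo is a trimmed stand-in for the gateway's
// data.CreatedObjectInfo (ID and CreationEpoch omitted).
type createdObjectInfo struct {
	Size    uint64
	HashSum []byte
	MD5Sum  []byte
}

// putAndHash streams the payload exactly once; both checksums are computed
// as a side effect of the copy, the way the gateway wraps prm.Payload.
func putAndHash(payload io.Reader, store io.Writer) (*createdObjectInfo, error) {
	sha := sha256.New()
	md := md5.New()

	tee := io.TeeReader(payload, io.MultiWriter(sha, md))
	n, err := io.Copy(store, tee)
	if err != nil {
		return nil, err
	}

	return &createdObjectInfo{
		Size:    uint64(n),
		HashSum: sha.Sum(nil),
		MD5Sum:  md.Sum(nil),
	}, nil
}

func main() {
	var sink strings.Builder
	info, err := putAndHash(strings.NewReader("hello"), &sink)
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d sha256=%x md5=%x\n", info.Size, info.HashSum, info.MD5Sum)
}
```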

View file

@ -8,7 +8,6 @@ import (
"io" "io"
"testing" "testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -38,14 +37,14 @@ func TestGoroutinesDontLeakInPutAndHash(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
payload := bytes.NewReader(content) payload := bytes.NewReader(content)
prm := frostfs.PrmObjectCreate{ prm := PrmObjectCreate{
Filepath: tc.obj, Filepath: tc.obj,
Payload: payload, Payload: payload,
} }
expErr := errors.New("some error") expErr := errors.New("some error")
tc.testFrostFS.SetObjectPutError(tc.obj, expErr) tc.testFrostFS.SetObjectPutError(tc.obj, expErr)
_, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo) _, _, _, _, err = tc.layer.objectPutAndHash(tc.ctx, prm, tc.bktInfo)
require.ErrorIs(t, err, expErr) require.ErrorIs(t, err, expErr)
require.Empty(t, payload.Len(), "body must be read out otherwise goroutines can leak in wrapReader") require.Empty(t, payload.Len(), "body must be read out otherwise goroutines can leak in wrapReader")
} }
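The test above pins down a subtle contract behind that error path: even when CreateObject fails, the request payload must still be read to the end, because an internal wrapper goroutine feeds it through a pipe and blocks until the bytes are consumed. A sketch of the failure mode, with a plain io.Pipe standing in for the internal wrapReader:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	pr, pw := io.Pipe()

	// Producer goroutine standing in for wrapReader: Write blocks until
	// the consumer side reads the bytes or the pipe is closed.
	go func() {
		defer pw.Close()
		_, _ = pw.Write(make([]byte, 1<<20))
	}()

	// Simulate CreateObject failing before consuming the payload.
	err := errors.New("create object failed")

	// Without this drain the producer goroutine stays blocked forever,
	// which is exactly the leak the test guards against.
	if _, errDiscard := io.Copy(io.Discard, pr); errDiscard != nil {
		fmt.Println("discard:", errDiscard)
	}
	fmt.Println("request finished with:", err)
}
```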

View file

@ -1,265 +0,0 @@
package layer
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
)
type PatchObjectParams struct {
Object *data.ExtendedObjectInfo
BktInfo *data.BucketInfo
NewBytes io.Reader
Range *RangeParams
VersioningEnabled bool
CopiesNumbers []uint32
}
func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
if p.Object.ObjectInfo.Headers[AttributeDecryptedSize] != "" {
return nil, fmt.Errorf("patch encrypted object")
}
if p.Object.ObjectInfo.Headers[MultipartObjectSize] != "" {
return n.patchMultipartObject(ctx, p)
}
prmPatch := frostfs.PrmObjectPatch{
Container: p.BktInfo.CID,
Object: p.Object.ObjectInfo.ID,
Payload: p.NewBytes,
Offset: p.Range.Start,
Length: p.Range.End - p.Range.Start + 1,
ObjectSize: p.Object.ObjectInfo.Size,
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
createdObj, err := n.patchObject(ctx, prmPatch)
if err != nil {
return nil, fmt.Errorf("patch object: %w", err)
}
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: createdObj.ID,
ETag: hex.EncodeToString(createdObj.HashSum),
FilePath: p.Object.ObjectInfo.Name,
Size: createdObj.Size,
Created: &p.Object.ObjectInfo.Created,
Owner: &n.gateOwner,
CreationEpoch: p.Object.NodeVersion.CreationEpoch,
},
IsUnversioned: !p.VersioningEnabled,
IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
}
p.Object.ObjectInfo.ID = createdObj.ID
p.Object.ObjectInfo.Size = createdObj.Size
p.Object.ObjectInfo.MD5Sum = ""
p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
p.Object.NodeVersion = newVersion
return p.Object, nil
}
func (n *Layer) patchObject(ctx context.Context, p frostfs.PrmObjectPatch) (*data.CreatedObjectInfo, error) {
objID, err := n.frostFS.PatchObject(ctx, p)
if err != nil {
return nil, fmt.Errorf("patch object: %w", err)
}
prmHead := frostfs.PrmObjectHead{
PrmAuth: p.PrmAuth,
Container: p.Container,
Object: objID,
}
obj, err := n.frostFS.HeadObject(ctx, prmHead)
if err != nil {
return nil, fmt.Errorf("head object: %w", err)
}
payloadChecksum, _ := obj.PayloadChecksum()
return &data.CreatedObjectInfo{
ID: objID,
Size: obj.PayloadSize(),
HashSum: payloadChecksum.Value(),
}, nil
}
func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
combinedObj, err := n.objectGet(ctx, p.BktInfo, p.Object.ObjectInfo.ID)
if err != nil {
return nil, fmt.Errorf("get combined object '%s': %w", p.Object.ObjectInfo.ID.EncodeToString(), err)
}
var parts []*data.PartInfo
if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
}
prmPatch := frostfs.PrmObjectPatch{
Container: p.BktInfo.CID,
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
off, ln := p.Range.Start, p.Range.End-p.Range.Start+1
var multipartObjectSize uint64
for i, part := range parts {
if off > part.Size || (off == part.Size && i != len(parts)-1) || ln == 0 {
multipartObjectSize += part.Size
if ln != 0 {
off -= part.Size
}
continue
}
var createdObj *data.CreatedObjectInfo
createdObj, off, ln, err = n.patchPart(ctx, part, p, &prmPatch, off, ln, i == len(parts)-1)
if err != nil {
return nil, fmt.Errorf("patch part: %w", err)
}
parts[i].OID = createdObj.ID
parts[i].Size = createdObj.Size
parts[i].MD5 = ""
parts[i].ETag = hex.EncodeToString(createdObj.HashSum)
multipartObjectSize += createdObj.Size
}
return n.updateCombinedObject(ctx, parts, multipartObjectSize, p)
}
// Returns patched part info, updated offset and length.
func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObjectParams, prmPatch *frostfs.PrmObjectPatch, off, ln uint64, lastPart bool) (*data.CreatedObjectInfo, uint64, uint64, error) {
if off == 0 && ln >= part.Size {
curLen := part.Size
if lastPart {
curLen = ln
}
prm := frostfs.PrmObjectCreate{
Container: p.BktInfo.CID,
Payload: io.LimitReader(p.NewBytes, int64(curLen)),
CreationTime: part.Created,
CopiesNumber: p.CopiesNumbers,
}
createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
if err != nil {
return nil, 0, 0, fmt.Errorf("put new part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
return createdObj, off, ln, err
}
curLen := ln
if off+curLen > part.Size && !lastPart {
curLen = part.Size - off
}
prmPatch.Object = part.OID
prmPatch.ObjectSize = part.Size
prmPatch.Offset = off
prmPatch.Length = curLen
prmPatch.Payload = io.LimitReader(p.NewBytes, int64(prmPatch.Length))
createdObj, err := n.patchObject(ctx, *prmPatch)
if err != nil {
return nil, 0, 0, fmt.Errorf("patch part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
off = 0
return createdObj, off, ln, nil
}
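The offset arithmetic in patchMultipartObject and patchPart is easiest to see on concrete numbers: parts strictly before the range are skipped (consuming off), parts fully inside it are re-uploaded whole, and boundary parts are patched in place. A runnable walkthrough of that classification, simplified from the code above (the real code also lets a fully covered last part grow to consume the whole remaining range):

```go
package main

import "fmt"

// classify walks multipart part sizes the way patchMultipartObject does:
// parts before the range are kept, fully covered parts are replaced,
// partially covered parts are patched in place.
func classify(sizes []uint64, start, end uint64) {
	off, ln := start, end-start+1 // the range is inclusive of both ends
	for i, size := range sizes {
		last := i == len(sizes)-1
		switch {
		case ln == 0 || off > size || (off == size && !last):
			fmt.Printf("part %d: untouched\n", i)
			if ln != 0 {
				off -= size
			}
		case off == 0 && ln >= size:
			fmt.Printf("part %d: fully replaced\n", i)
			ln -= size
		default:
			cur := ln
			if off+cur > size && !last {
				cur = size - off
			}
			fmt.Printf("part %d: patched at offset %d, length %d\n", i, off, cur)
			ln -= cur
			off = 0
		}
	}
}

func main() {
	// Three 8-byte parts, patching bytes 6..17 (inclusive): part 0 is
	// patched at its tail, part 1 is fully replaced, part 2 at its head.
	classify([]uint64{8, 8, 8}, 6, 17)
}
```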
func (n *Layer) updateCombinedObject(ctx context.Context, parts []*data.PartInfo, fullObjSize uint64, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
newParts, err := json.Marshal(parts)
if err != nil {
return nil, fmt.Errorf("marshal parts for combined object: %w", err)
}
var headerParts strings.Builder
for i, part := range parts {
headerPart := part.ToHeaderString()
if i != len(parts)-1 {
headerPart += ","
}
headerParts.WriteString(headerPart)
}
prm := frostfs.PrmObjectCreate{
Container: p.BktInfo.CID,
PayloadSize: fullObjSize,
Filepath: p.Object.ObjectInfo.Name,
Payload: bytes.NewReader(newParts),
CreationTime: p.Object.ObjectInfo.Created,
CopiesNumber: p.CopiesNumbers,
}
prm.Attributes = make([][2]string, 0, len(p.Object.ObjectInfo.Headers)+1)
for k, v := range p.Object.ObjectInfo.Headers {
switch k {
case MultipartObjectSize:
prm.Attributes = append(prm.Attributes, [2]string{MultipartObjectSize, strconv.FormatUint(fullObjSize, 10)})
case UploadCompletedParts:
prm.Attributes = append(prm.Attributes, [2]string{UploadCompletedParts, headerParts.String()})
case api.ContentType:
default:
prm.Attributes = append(prm.Attributes, [2]string{k, v})
}
}
prm.Attributes = append(prm.Attributes, [2]string{api.ContentType, p.Object.ObjectInfo.ContentType})
createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
if err != nil {
return nil, fmt.Errorf("put new combined object: %w", err)
}
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: createdObj.ID,
ETag: hex.EncodeToString(createdObj.HashSum),
MD5: hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts)),
FilePath: p.Object.ObjectInfo.Name,
Size: fullObjSize,
Created: &p.Object.ObjectInfo.Created,
Owner: &n.gateOwner,
CreationEpoch: p.Object.NodeVersion.CreationEpoch,
},
IsUnversioned: !p.VersioningEnabled,
IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
}
p.Object.ObjectInfo.ID = createdObj.ID
p.Object.ObjectInfo.Size = createdObj.Size
p.Object.ObjectInfo.MD5Sum = hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts))
p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
p.Object.ObjectInfo.Headers[MultipartObjectSize] = strconv.FormatUint(fullObjSize, 10)
p.Object.ObjectInfo.Headers[UploadCompletedParts] = headerParts.String()
p.Object.NodeVersion = newVersion
return p.Object, nil
}
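updateCombinedObject exposes the on-disk shape of a multipart object: the stored payload is just the JSON-encoded part list, while the logical size and the completed-parts summary travel as object attributes. A small sketch of that representation; the struct fields and the attribute key here are illustrative stand-ins, not the gateway's exact types and constants:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// partDescriptor is a stand-in for data.PartInfo with just enough fields
// to show how the combined object's payload is assembled.
type partDescriptor struct {
	Number int
	OID    string
	Size   uint64
	ETag   string
}

func main() {
	parts := []partDescriptor{
		{Number: 1, OID: "part-oid-1", Size: 8, ETag: "etag-1"},
		{Number: 2, OID: "part-oid-2", Size: 8, ETag: "etag-2"},
	}

	// The combined object's payload is the JSON-encoded part list...
	payload, err := json.Marshal(parts)
	if err != nil {
		panic(err)
	}

	// ...while the full logical size is carried as an attribute (the real
	// key is the gateway's MultipartObjectSize constant).
	var full uint64
	for _, p := range parts {
		full += p.Size
	}
	attrs := [][2]string{{"MultipartObjectSize", strconv.FormatUint(full, 10)}}

	fmt.Println(string(payload))
	fmt.Println(attrs)
}
```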

View file

@ -3,7 +3,7 @@ package layer
import ( import (
"context" "context"
"encoding/xml" "encoding/xml"
"errors" errorsStd "errors"
"fmt" "fmt"
"math" "math"
"strconv" "strconv"
@ -11,9 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
) )
@ -42,7 +40,7 @@ func (n *Layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
} }
lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID) lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID)
if err != nil && !errors.Is(err, tree.ErrNodeNotFound) { if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
return err return err
} }
@ -115,7 +113,7 @@ func (n *Layer) getNodeVersionFromCacheOrFrostfs(ctx context.Context, objVersion
} }
func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) { func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber []uint32) (oid.ID, error) {
prm := frostfs.PrmObjectCreate{ prm := PrmObjectCreate{
Container: bktInfo.CID, Container: bktInfo.CID,
Locks: []oid.ID{objID}, Locks: []oid.ID{objID},
CreationTime: TimeNow(ctx), CreationTime: TimeNow(ctx),
@ -128,12 +126,8 @@ func (n *Layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
return oid.ID{}, err return oid.ID{}, err
} }
createdObj, err := n.objectPutAndHash(ctx, prm, bktInfo) _, id, _, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
if err != nil { return id, err
return oid.ID{}, err
}
return createdObj.ID, nil
} }
func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion) (*data.LockInfo, error) { func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion) (*data.LockInfo, error) {
@ -148,7 +142,7 @@ func (n *Layer) GetLockInfo(ctx context.Context, objVersion *data.ObjectVersion)
} }
lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID) lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID)
if err != nil && !errors.Is(err, tree.ErrNodeNotFound) { if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
return nil, err return nil, err
} }
if lockInfo == nil { if lockInfo == nil {
@ -167,16 +161,16 @@ func (n *Layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
} }
addr, err := n.treeService.GetBucketCORS(ctx, bkt) addr, err := n.treeService.GetBucketCORS(ctx, bkt)
objNotFound := errors.Is(err, tree.ErrNodeNotFound) objNotFound := errorsStd.Is(err, ErrNodeNotFound)
if err != nil && !objNotFound { if err != nil && !objNotFound {
return nil, err return nil, err
} }
if objNotFound { if objNotFound {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchCORSConfiguration), err.Error()) return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchCORSConfiguration), err.Error())
} }
var prmAuth frostfs.PrmAuth var prmAuth PrmAuth
corsBkt := bkt corsBkt := bkt
if !addr.Container().Equals(bkt.CID) && !addr.Container().Equals(cid.ID{}) { if !addr.Container().Equals(bkt.CID) && !addr.Container().Equals(cid.ID{}) {
corsBkt = &data.BucketInfo{CID: addr.Container()} corsBkt = &data.BucketInfo{CID: addr.Container()}
@ -211,7 +205,7 @@ func (n *Layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo)
settings, err := n.treeService.GetSettingsNode(ctx, bktInfo) settings, err := n.treeService.GetSettingsNode(ctx, bktInfo)
if err != nil { if err != nil {
if !errors.Is(err, tree.ErrNodeNotFound) { if !errorsStd.Is(err, ErrNodeNotFound) {
return nil, err return nil, err
} }
settings = &data.BucketSettings{Versioning: data.VersioningUnversioned} settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
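The GetBucketSettings hunk encodes a deliberate fallback: a missing settings node is not an error, it is simply an unconfigured bucket, which defaults to unversioned. A compact, runnable sketch of the pattern with stand-ins for the tree service:

```go
package main

import (
	"errors"
	"fmt"
)

var errNodeNotFound = errors.New("node not found") // stand-in for tree.ErrNodeNotFound

type bucketSettings struct{ Versioning string }

// fetch is a stand-in for treeService.GetSettingsNode.
func fetch(configured bool) (*bucketSettings, error) {
	if !configured {
		return nil, errNodeNotFound
	}
	return &bucketSettings{Versioning: "Enabled"}, nil
}

// getSettings mirrors the fallback: only "not found" is tolerated,
// every other error still fails the call.
func getSettings(configured bool) (*bucketSettings, error) {
	s, err := fetch(configured)
	if err != nil {
		if !errors.Is(err, errNodeNotFound) {
			return nil, err
		}
		s = &bucketSettings{Versioning: "Unversioned"} // default for fresh buckets
	}
	return s, nil
}

func main() {
	s, _ := getSettings(false)
	fmt.Println(s.Versioning) // Unversioned
}
```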

View file

@ -6,8 +6,7 @@ import (
"fmt" "fmt"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -40,8 +39,8 @@ func (n *Layer) GetObjectTagging(ctx context.Context, p *data.GetObjectTaggingPa
tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion) tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return "", nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return "", nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
} }
return "", nil, err return "", nil, err
} }
@ -63,8 +62,8 @@ func (n *Layer) PutObjectTagging(ctx context.Context, p *data.PutObjectTaggingPa
err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet) err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
} }
return err return err
} }
@ -82,8 +81,8 @@ func (n *Layer) DeleteObjectTagging(ctx context.Context, p *data.ObjectVersion)
err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version) err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
if err != nil { if err != nil {
if errors.Is(err, tree.ErrNodeNotFound) { if errors.Is(err, ErrNodeNotFound) {
return fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
} }
return err return err
} }
@ -103,7 +102,7 @@ func (n *Layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo)
} }
tags, err := n.treeService.GetBucketTagging(ctx, bktInfo) tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
if err != nil && !errors.Is(err, tree.ErrNodeNotFound) { if err != nil && !errors.Is(err, ErrNodeNotFound) {
return nil, err return nil, err
} }
@ -156,14 +155,14 @@ func (n *Layer) getNodeVersion(ctx context.Context, objVersion *data.ObjectVersi
} }
} }
if version == nil { if version == nil {
err = fmt.Errorf("%w: there isn't tree node with requested version id", apierr.GetAPIError(apierr.ErrNoSuchVersion)) err = fmt.Errorf("%w: there isn't tree node with requested version id", s3errors.GetAPIError(s3errors.ErrNoSuchVersion))
} }
} }
if err == nil && version.IsDeleteMarker && !objVersion.NoErrorOnDeleteMarker { if err == nil && version.IsDeleteMarker && !objVersion.NoErrorOnDeleteMarker {
return nil, fmt.Errorf("%w: found version is delete marker", apierr.GetAPIError(apierr.ErrNoSuchKey)) return nil, fmt.Errorf("%w: found version is delete marker", s3errors.GetAPIError(s3errors.ErrNoSuchKey))
} else if errors.Is(err, tree.ErrNodeNotFound) { } else if errors.Is(err, ErrNodeNotFound) {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrNoSuchKey), err.Error()) return nil, fmt.Errorf("%w: %s", s3errors.GetAPIError(s3errors.ErrNoSuchKey), err.Error())
} }
if err == nil && version != nil && !version.IsDeleteMarker { if err == nil && version != nil && !version.IsDeleteMarker {

View file

@ -6,10 +6,8 @@ import (
"io" "io"
"sort" "sort"
"strings" "strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/tree"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
) )
@ -35,7 +33,7 @@ type TreeServiceMock struct {
locks map[string]map[uint64]*data.LockInfo locks map[string]map[uint64]*data.LockInfo
tags map[string]map[uint64]map[string]string tags map[string]map[uint64]map[string]string
multiparts map[string]map[string][]*data.MultipartInfo multiparts map[string]map[string][]*data.MultipartInfo
parts map[string]map[int]*data.PartInfoExtended parts map[string]map[int]*data.PartInfo
} }
func (t *TreeServiceMock) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) { func (t *TreeServiceMock) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
@ -94,7 +92,7 @@ func NewTreeService() *TreeServiceMock {
locks: make(map[string]map[uint64]*data.LockInfo), locks: make(map[string]map[uint64]*data.LockInfo),
tags: make(map[string]map[uint64]map[string]string), tags: make(map[string]map[uint64]map[string]string),
multiparts: make(map[string]map[string][]*data.MultipartInfo), multiparts: make(map[string]map[string][]*data.MultipartInfo),
parts: make(map[string]map[int]*data.PartInfoExtended), parts: make(map[string]map[int]*data.PartInfo),
} }
} }
@ -106,7 +104,7 @@ func (t *TreeServiceMock) PutSettingsNode(_ context.Context, bktInfo *data.Bucke
func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) { func (t *TreeServiceMock) GetSettingsNode(_ context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
settings, ok := t.settings[bktInfo.CID.EncodeToString()] settings, ok := t.settings[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
return settings, nil return settings, nil
@ -141,7 +139,7 @@ func (t *TreeServiceMock) PutBucketCORS(_ context.Context, bktInfo *data.BucketI
t.system[bktInfo.CID.EncodeToString()] = systemMap t.system[bktInfo.CID.EncodeToString()] = systemMap
return nil, tree.ErrNoNodeToRemove return nil, ErrNoNodeToRemove
} }
func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([]oid.Address, error) { func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([]oid.Address, error) {
@ -151,12 +149,12 @@ func (t *TreeServiceMock) DeleteBucketCORS(context.Context, *data.BucketInfo) ([
func (t *TreeServiceMock) GetVersions(_ context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error) { func (t *TreeServiceMock) GetVersions(_ context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error) {
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()] cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
versions, ok := cnrVersionsMap[objectName] versions, ok := cnrVersionsMap[objectName]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
return versions, nil return versions, nil
@ -165,12 +163,12 @@ func (t *TreeServiceMock) GetVersions(_ context.Context, bktInfo *data.BucketInf
func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) { func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()] cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
versions, ok := cnrVersionsMap[objectName] versions, ok := cnrVersionsMap[objectName]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
sort.Slice(versions, func(i, j int) bool { sort.Slice(versions, func(i, j int) bool {
@ -181,13 +179,13 @@ func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.Buck
return versions[len(versions)-1], nil return versions[len(versions)-1], nil
} }
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) { func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()] cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
var result []*data.NodeVersion var result []*data.NodeVersion
@ -219,12 +217,12 @@ func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo
func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) { func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error) {
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()] cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
versions, ok := cnrVersionsMap[objectName] versions, ok := cnrVersionsMap[objectName]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
for _, version := range versions { for _, version := range versions {
@ -233,7 +231,7 @@ func (t *TreeServiceMock) GetUnversioned(_ context.Context, bktInfo *data.Bucket
} }
} }
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
func (t *TreeServiceMock) AddVersion(_ context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error) { func (t *TreeServiceMock) AddVersion(_ context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error) {
@ -279,7 +277,7 @@ func (t *TreeServiceMock) AddVersion(_ context.Context, bktInfo *data.BucketInfo
func (t *TreeServiceMock) RemoveVersion(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) error { func (t *TreeServiceMock) RemoveVersion(_ context.Context, bktInfo *data.BucketInfo, nodeID uint64) error {
cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()] cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return tree.ErrNodeNotFound return ErrNodeNotFound
} }
for key, versions := range cnrVersionsMap { for key, versions := range cnrVersionsMap {
@ -291,7 +289,7 @@ func (t *TreeServiceMock) RemoveVersion(_ context.Context, bktInfo *data.BucketI
} }
} }
return tree.ErrNodeNotFound return ErrNodeNotFound
} }
func (t *TreeServiceMock) GetAllVersionsByPrefix(_ context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) { func (t *TreeServiceMock) GetAllVersionsByPrefix(_ context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
@ -335,7 +333,7 @@ func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.Buc
func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) { func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) {
cnrMultipartsMap, ok := t.multiparts[bktInfo.CID.EncodeToString()] cnrMultipartsMap, ok := t.multiparts[bktInfo.CID.EncodeToString()]
if !ok { if !ok {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
multiparts := cnrMultipartsMap[objectName] multiparts := cnrMultipartsMap[objectName]
@ -345,34 +343,31 @@ func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.Bu
} }
} }
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) { func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
multipartInfo, err := t.GetMultipartUpload(ctx, bktInfo, info.Key, info.UploadID) multipartInfo, err := t.GetMultipartUpload(ctx, bktInfo, info.Key, info.UploadID)
if err != nil { if err != nil {
return nil, err return oid.ID{}, err
} }
if multipartInfo.ID != multipartNodeID { if multipartInfo.ID != multipartNodeID {
return nil, fmt.Errorf("invalid multipart info id") return oid.ID{}, fmt.Errorf("invalid multipart info id")
} }
partsMap, ok := t.parts[info.UploadID] partsMap, ok := t.parts[info.UploadID]
if !ok { if !ok {
partsMap = make(map[int]*data.PartInfoExtended) partsMap = make(map[int]*data.PartInfo)
} }
partsMap[info.Number] = &data.PartInfoExtended{ partsMap[info.Number] = info
PartInfo: *info,
Timestamp: uint64(time.Now().UnixMicro()),
}
t.parts[info.UploadID] = partsMap t.parts[info.UploadID] = partsMap
return nil, nil return oid.ID{}, nil
} }
func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) { func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()] cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
var foundMultipart *data.MultipartInfo var foundMultipart *data.MultipartInfo
@ -388,11 +383,11 @@ LOOP:
} }
if foundMultipart == nil { if foundMultipart == nil {
return nil, tree.ErrNodeNotFound return nil, ErrNodeNotFound
} }
partsMap := t.parts[foundMultipart.UploadID] partsMap := t.parts[foundMultipart.UploadID]
result := make([]*data.PartInfoExtended, 0, len(partsMap)) result := make([]*data.PartInfo, 0, len(partsMap))
for _, part := range partsMap { for _, part := range partsMap {
result = append(result, part) result = append(result, part)
} }
@ -400,51 +395,6 @@ LOOP:
return result, nil return result, nil
} }
func (t *TreeServiceMock) PutBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
systemMap = make(map[string]*data.BaseNodeVersion)
}
systemMap["lifecycle"] = &data.BaseNodeVersion{
OID: addr.Object(),
}
t.system[bktInfo.CID.EncodeToString()] = systemMap
return nil, tree.ErrNoNodeToRemove
}
func (t *TreeServiceMock) GetBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) (oid.Address, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
return oid.Address{}, tree.ErrNodeNotFound
}
node, ok := systemMap["lifecycle"]
if !ok {
return oid.Address{}, tree.ErrNodeNotFound
}
return newAddress(bktInfo.CID, node.OID), nil
}
func (t *TreeServiceMock) DeleteBucketLifecycleConfiguration(_ context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error) {
systemMap, ok := t.system[bktInfo.CID.EncodeToString()]
if !ok {
return nil, tree.ErrNoNodeToRemove
}
node, ok := systemMap["lifecycle"]
if !ok {
return nil, tree.ErrNoNodeToRemove
}
delete(systemMap, "lifecycle")
return []oid.Address{newAddress(bktInfo.CID, node.OID)}, nil
}
func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error { func (t *TreeServiceMock) DeleteMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()] cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
@ -462,7 +412,7 @@ LOOP:
} }
if uploadID == "" { if uploadID == "" {
return tree.ErrNodeNotFound return ErrNodeNotFound
} }
delete(t.parts, uploadID) delete(t.parts, uploadID)

View file

@ -1,4 +1,4 @@
package tree package layer
import ( import (
"context" "context"
@ -8,8 +8,8 @@ import (
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id" oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
) )
// Service provides an interface to interact with the tree service using s3 data models. // TreeService provides an interface to interact with the tree service using s3 data models.
type Service interface { type TreeService interface {
// PutSettingsNode update or create new settings node in tree service. // PutSettingsNode update or create new settings node in tree service.
PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error
@ -57,15 +57,11 @@ type Service interface {
GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
// AddPart puts a node to a system tree as a child of appropriate multipart upload // AddPart puts a node to a system tree as a child of appropriate multipart upload
// and returns objectIDs of the previous part(s) which must be deleted in FrostFS. // and returns the objectID of a previous part which must be deleted in FrostFS.
// //
// If there are no object ids to remove, returns ErrNoNodeToRemove error. // If the object id to remove is not found, returns ErrNoNodeToRemove error.
AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error)
PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error)
GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)
DeleteBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) ([]oid.Address, error)
// Compound methods for optimizations // Compound methods for optimizations
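The AddPart contract documented above is worth seeing from the caller's side: re-adding an existing part number returns the superseded object IDs so they can be deleted from FrostFS, while a first upload surfaces ErrNoNodeToRemove, which callers must treat as success with nothing to clean up. A self-contained sketch with a map standing in for the tree:

```go
package main

import (
	"errors"
	"fmt"
)

var errNoNodeToRemove = errors.New("no node to remove") // stand-in for tree.ErrNoNodeToRemove

// addPart is a stand-in for Service.AddPart: re-uploading an existing part
// number returns the object IDs it replaces; a fresh number returns the
// sentinel error instead.
func addPart(existing map[int][]string, number int, newID string) ([]string, error) {
	old, ok := existing[number]
	existing[number] = []string{newID}
	if !ok {
		return nil, errNoNodeToRemove
	}
	return old, nil
}

func main() {
	parts := map[int][]string{}

	for _, upload := range []string{"obj-a", "obj-b"} {
		old, err := addPart(parts, 1, upload)
		switch {
		case errors.Is(err, errNoNodeToRemove):
			fmt.Println("first upload of part 1, nothing to delete")
		case err != nil:
			fmt.Println("real failure:", err)
		default:
			fmt.Println("deleting replaced objects:", old) // e.g. [obj-a]
		}
	}
}
```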

View file

@ -7,7 +7,6 @@ import (
"testing" "testing"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/frostfs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test" bearertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer/test"
@ -24,7 +23,7 @@ func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{ extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
BktInfo: tc.bktInfo, BktInfo: tc.bktInfo,
Object: tc.obj, Object: tc.obj,
Size: ptr(uint64(len(content))), Size: uint64(len(content)),
Reader: bytes.NewReader(content), Reader: bytes.NewReader(content),
Header: make(map[string]string), Header: make(map[string]string),
}) })
@ -155,7 +154,7 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
tp := NewTestFrostFS(key) tp := NewTestFrostFS(key)
bktName := "testbucket1" bktName := "testbucket1"
res, err := tp.CreateContainer(ctx, frostfs.PrmContainerCreate{ res, err := tp.CreateContainer(ctx, PrmContainerCreate{
Name: bktName, Name: bktName,
}) })
require.NoError(t, err) require.NoError(t, err)

View file

@ -3,18 +3,14 @@ package middleware
import ( import (
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap" "go.uber.org/zap"
) )
const ( const wildcardPlaceholder = "<wildcard>"
wildcardPlaceholder = "<wildcard>"
enabledVHS = "enabled"
disabledVHS = "disabled"
)
type VHSSettings interface { type VHSSettings interface {
Domains() []string Domains() []string
@ -30,9 +26,9 @@ func PrepareAddressStyle(settings VHSSettings, log *zap.Logger) Func {
ctx := r.Context() ctx := r.Context()
reqInfo := GetReqInfo(ctx) reqInfo := GetReqInfo(ctx)
reqLogger := reqLogOrDefault(ctx, log) reqLogger := reqLogOrDefault(ctx, log)
statusVHS := r.Header.Get(settings.VHSHeader()) headerVHSEnabled := r.Header.Get(settings.VHSHeader())
if isVHSAddress(statusVHS, settings.GlobalVHS(), settings.VHSNamespacesEnabled(), reqInfo.Namespace) { if isVHSAddress(headerVHSEnabled, settings.GlobalVHS(), settings.VHSNamespacesEnabled(), reqInfo.Namespace) {
prepareVHSAddress(reqInfo, r, settings) prepareVHSAddress(reqInfo, r, settings)
} else { } else {
preparePathStyleAddress(reqInfo, r, reqLogger) preparePathStyleAddress(reqInfo, r, reqLogger)
@ -43,13 +39,11 @@ func PrepareAddressStyle(settings VHSSettings, log *zap.Logger) Func {
} }
} }
func isVHSAddress(statusVHS string, enabledFlag bool, vhsNamespaces map[string]bool, namespace string) bool { func isVHSAddress(headerVHSEnabled string, enabledFlag bool, vhsNamespaces map[string]bool, namespace string) bool {
switch statusVHS { if result, err := strconv.ParseBool(headerVHSEnabled); err == nil {
case enabledVHS: return result
return true }
case disabledVHS:
return false
default:
result := enabledFlag result := enabledFlag
if v, ok := vhsNamespaces[namespace]; ok { if v, ok := vhsNamespaces[namespace]; ok {
result = v result = v
@ -57,7 +51,6 @@ func isVHSAddress(statusVHS string, enabledFlag bool, vhsNamespaces map[string]b
return result return result
} }
}
func prepareVHSAddress(reqInfo *ReqInfo, r *http.Request, settings VHSSettings) { func prepareVHSAddress(reqInfo *ReqInfo, r *http.Request, settings VHSSettings) {
reqInfo.RequestVHSEnabled = true reqInfo.RequestVHSEnabled = true
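Both sides of the isVHSAddress diff implement the same precedence and differ only in how the header is parsed (explicit enabled/disabled values on one side, strconv.ParseBool on the other): an explicit request header wins, then a per-namespace override, then the global flag. A runnable sketch of that precedence:

```go
package main

import "fmt"

// isVHS mirrors the precedence in isVHSAddress: an explicit header value
// wins, then a per-namespace override, then the global flag.
func isVHS(header string, global bool, perNamespace map[string]bool, namespace string) bool {
	switch header {
	case "enabled":
		return true
	case "disabled":
		return false
	default:
		result := global
		if v, ok := perNamespace[namespace]; ok {
			result = v
		}
		return result
	}
}

func main() {
	ns := map[string]bool{"kapusta": true}
	fmt.Println(isVHS("", false, ns, "kapusta"))         // true: namespace override
	fmt.Println(isVHS("disabled", false, ns, "kapusta")) // false: header wins
	fmt.Println(isVHS("", false, ns, "other"))           // false: global default
}
```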

View file

@ -42,7 +42,7 @@ func (v *VHSSettingsMock) VHSNamespacesEnabled() map[string]bool {
func TestIsVHSAddress(t *testing.T) { func TestIsVHSAddress(t *testing.T) {
for _, tc := range []struct { for _, tc := range []struct {
name string name string
headerStatusVHS string headerVHSEnabled string
vhsEnabledFlag bool vhsEnabledFlag bool
vhsNamespaced map[string]bool vhsNamespaced map[string]bool
namespace string namespace string
@ -76,7 +76,7 @@ func TestIsVHSAddress(t *testing.T) {
}, },
{ {
name: "vhs enabled (header)", name: "vhs enabled (header)",
headerStatusVHS: enabledVHS, headerVHSEnabled: "true",
vhsEnabledFlag: false, vhsEnabledFlag: false,
vhsNamespaced: map[string]bool{ vhsNamespaced: map[string]bool{
"kapusta": false, "kapusta": false,
@ -86,7 +86,7 @@ func TestIsVHSAddress(t *testing.T) {
}, },
{ {
name: "vhs disabled (header)", name: "vhs disabled (header)",
headerStatusVHS: disabledVHS, headerVHSEnabled: "false",
vhsEnabledFlag: true, vhsEnabledFlag: true,
vhsNamespaced: map[string]bool{ vhsNamespaced: map[string]bool{
"kapusta": true, "kapusta": true,
@ -96,7 +96,7 @@ func TestIsVHSAddress(t *testing.T) {
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
actual := isVHSAddress(tc.headerStatusVHS, tc.vhsEnabledFlag, tc.vhsNamespaced, tc.namespace) actual := isVHSAddress(tc.headerVHSEnabled, tc.vhsEnabledFlag, tc.vhsNamespaced, tc.namespace)
require.Equal(t, tc.expected, actual) require.Equal(t, tc.expected, actual)
}) })
} }

View file

@ -8,8 +8,9 @@ import (
"time" "time"
"git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl" "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/acl"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" apiErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/bearer"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
@ -56,9 +57,9 @@ func Auth(center Center, log *zap.Logger) Func {
reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err)) reqLogOrDefault(ctx, log).Debug(logs.CouldntReceiveAccessBoxForGateKeyRandomKeyWillBeUsed, zap.Error(err))
} else { } else {
reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err)) reqLogOrDefault(ctx, log).Error(logs.FailedToPassAuthentication, zap.Error(err))
err = apierr.TransformToS3Error(err) err = frostfsErrors.UnwrapErr(err)
if err.(apierr.Error).ErrCode == apierr.ErrInternalError { if _, ok := err.(apiErrors.Error); !ok {
err = apierr.GetAPIError(apierr.ErrAccessDenied) err = apiErrors.GetAPIError(apiErrors.ErrAccessDenied)
} }
if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil { if _, wrErr := WriteErrorResponse(w, GetReqInfo(r.Context()), err); wrErr != nil {
reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr)) reqLogOrDefault(ctx, log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))

View file

@ -74,7 +74,6 @@ const (
AbortMultipartUploadOperation = "AbortMultipartUpload" AbortMultipartUploadOperation = "AbortMultipartUpload"
DeleteObjectTaggingOperation = "DeleteObjectTagging" DeleteObjectTaggingOperation = "DeleteObjectTagging"
DeleteObjectOperation = "DeleteObject" DeleteObjectOperation = "DeleteObject"
PatchObjectOperation = "PatchObject"
) )
const ( const (
@ -107,9 +106,3 @@ const (
PartNumberQuery = "partNumber" PartNumberQuery = "partNumber"
LegalHoldQuery = "legal-hold" LegalHoldQuery = "legal-hold"
) )
const (
StdoutPath = "stdout"
StderrPath = "stderr"
SinkName = "lumberjack"
)

View file

@ -1,237 +0,0 @@
//go:build loghttp
package middleware
import (
"bytes"
"io"
"net/http"
"os"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/detector"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/pkg/xmlutils"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
type (
LogHTTPSettings interface {
LogHTTPConfig() LogHTTPConfig
}
LogHTTPConfig struct {
Enabled bool
MaxBody int64
MaxLogSize int
OutputPath string
UseGzip bool
log *httpLogger
}
httpLogger struct {
*zap.Logger
logRoller *lumberjack.Logger
}
// responseReadWriter helps read http response body.
responseReadWriter struct {
http.ResponseWriter
response *bytes.Buffer
statusCode int
}
)
const (
payloadLabel = "payload"
responseLabel = "response"
)
func (lc *LogHTTPConfig) InitHTTPLogger(log *zap.Logger) {
if err := lc.initHTTPLogger(); err != nil {
log.Error(logs.FailedToInitializeHTTPLogger, zap.Error(err))
}
}
// initHTTPLogger registers a zap sink and returns a new httpLogger.
func (lc *LogHTTPConfig) initHTTPLogger() (err error) {
lc.log = &httpLogger{
Logger: zap.NewNop(),
logRoller: &lumberjack.Logger{},
}
c := newLoggerConfig()
lc.log.Logger, err = c.Build()
if err != nil {
return err
}
lc.setLogOutput()
return nil
}
// newLoggerConfig creates new zap.Config with disabled base fields.
func newLoggerConfig() zap.Config {
c := zap.NewProductionConfig()
c.DisableCaller = true
c.DisableStacktrace = true
c.EncoderConfig = newEncoderConfig()
c.Sampling = nil
return c
}
func (lc *LogHTTPConfig) setLogOutput() {
var output zapcore.WriteSyncer
switch lc.OutputPath {
case "", StdoutPath:
output = zapcore.AddSync(os.Stdout)
case StderrPath:
output = zapcore.AddSync(os.Stderr)
default:
output = zapcore.AddSync(&lumberjack.Logger{
Filename: lc.OutputPath,
MaxSize: lc.MaxLogSize,
Compress: lc.UseGzip,
})
}
// rebuild the logger core so it writes to the selected output
lc.log.Logger = lc.log.Logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
return zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), output, zapcore.InfoLevel)
}))
}
func newEncoderConfig() zapcore.EncoderConfig {
c := zap.NewProductionEncoderConfig()
c.MessageKey = zapcore.OmitKey
c.LevelKey = zapcore.OmitKey
c.TimeKey = zapcore.OmitKey
c.FunctionKey = zapcore.OmitKey
return c
}
func (ww *responseReadWriter) Write(data []byte) (int, error) {
ww.response.Write(data)
return ww.ResponseWriter.Write(data)
}
func (ww *responseReadWriter) WriteHeader(code int) {
ww.statusCode = code
ww.ResponseWriter.WriteHeader(code)
}
func (ww *responseReadWriter) Flush() {
if f, ok := ww.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
// LogHTTP logs http parameters from the s3 request.
func LogHTTP(l *zap.Logger, settings LogHTTPSettings) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
config := settings.LogHTTPConfig()
if !config.Enabled || config.log == nil {
h.ServeHTTP(w, r)
return
}
httplog := config.log.getHTTPLogger(r).
withFieldIfExist("query", r.URL.Query()).
withFieldIfExist("headers", r.Header)
payload := getBody(r.Body, l)
r.Body = io.NopCloser(bytes.NewReader(payload))
payloadReader := io.LimitReader(bytes.NewReader(payload), config.MaxBody)
httplog = httplog.withProcessedBody(payloadLabel, payloadReader, l)
wr := newResponseReadWriter(w)
h.ServeHTTP(wr, r)
respReader := io.LimitReader(wr.response, config.MaxBody)
httplog = httplog.withProcessedBody(responseLabel, respReader, l)
httplog = httplog.with(zap.Int("status", wr.statusCode))
httplog.Info(logs.LogHTTP)
})
}
}
// withFieldIfExist checks whether data is not empty and attaches it to the log output.
func (lg *httpLogger) withFieldIfExist(label string, data map[string][]string) *httpLogger {
if len(data) != 0 {
return lg.with(zap.Any(label, data))
}
return lg
}
func (lg *httpLogger) with(fields ...zap.Field) *httpLogger {
return &httpLogger{
Logger: lg.Logger.With(fields...),
logRoller: lg.logRoller,
}
}
func (lg *httpLogger) getHTTPLogger(r *http.Request) *httpLogger {
return lg.with(
zap.String("from", r.RemoteAddr),
zap.String("URI", r.RequestURI),
zap.String("method", r.Method),
zap.String("protocol", r.Proto),
)
}
func (lg *httpLogger) withProcessedBody(label string, bodyReader io.Reader, l *zap.Logger) *httpLogger {
resp, err := processBody(bodyReader)
if err != nil {
l.Error(logs.FailedToProcessHTTPBody,
zap.Error(err),
zap.String("body type", payloadLabel))
return lg
}
return lg.with(zap.ByteString(label, resp))
}
func newResponseReadWriter(w http.ResponseWriter) *responseReadWriter {
return &responseReadWriter{
ResponseWriter: w,
response: &bytes.Buffer{},
}
}
func getBody(httpBody io.ReadCloser, l *zap.Logger) []byte {
defer func(httpBody io.ReadCloser) {
if err := httpBody.Close(); err != nil {
l.Error(logs.FailedToCloseHTTPBody, zap.Error(err))
}
}(httpBody)
body, err := io.ReadAll(httpBody)
if err != nil {
l.Error(logs.FailedToReadHTTPBody,
zap.Error(err),
zap.String("body type", payloadLabel))
return nil
}
return body
}
// processBody reads the body and base64-encodes it if it is not XML.
func processBody(bodyReader io.Reader) ([]byte, error) {
resultBody := &bytes.Buffer{}
detect := detector.NewDetector(bodyReader, xmlutils.DetectXML)
dataType, err := detect.Detect()
if err != nil {
return nil, err
}
writer := xmlutils.ChooseWriter(dataType, resultBody)
if _, err = io.Copy(writer, detect.RestoredReader()); err != nil {
return nil, err
}
if err = writer.Close(); err != nil {
return nil, err
}
return resultBody.Bytes(), nil
}
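responseReadWriter above is the standard capture-wrapper trick: writes are mirrored into a buffer while still reaching the real ResponseWriter, so the middleware can attach the response body and status to the log entry after the handler returns. A self-contained sketch of the same wrapper exercised with httptest:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// recorder mirrors responseReadWriter: it forwards writes to the real
// ResponseWriter while keeping a copy of the body and the status code.
type recorder struct {
	http.ResponseWriter
	body   bytes.Buffer
	status int
}

func (r *recorder) Write(p []byte) (int, error) {
	r.body.Write(p)
	return r.ResponseWriter.Write(p)
}

func (r *recorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusTeapot)
		fmt.Fprint(w, "<Response/>")
	})

	rw := httptest.NewRecorder()
	rec := &recorder{ResponseWriter: rw, status: http.StatusOK}
	h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/bucket/key", nil))

	// The handler's output is now available for logging as well.
	fmt.Println(rec.status, rec.body.String()) // 418 <Response/>
}
```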

View file

@ -1,36 +0,0 @@
//go:build !loghttp
package middleware
import (
"net/http"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
)
type (
LogHTTPSettings interface {
LogHTTPConfig() LogHTTPConfig
}
LogHTTPConfig struct {
Enabled bool
MaxBody int64
MaxLogSize int
OutputPath string
UseGzip bool
}
)
func LogHTTP(l *zap.Logger, _ LogHTTPSettings) Func {
l.Warn(logs.LogHTTPDisabledInThisBuild)
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
}
}
func (*LogHTTPConfig) InitHTTPLogger(*zap.Logger) {
// ignore
}
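The two LogHTTP variants are selected at compile time: the full implementation above is guarded by //go:build loghttp, and this no-op fallback by //go:build !loghttp, so the feature costs nothing unless it is compiled in. A sketch of the mechanism (the constraint must precede the package clause, separated by a blank line):

```go
//go:build loghttp

// This file participates only in builds started with the tag:
//
//	go build -tags loghttp ./...
//
// Without the tag, the !loghttp variant compiles instead and LogHTTP
// degrades to a pass-through handler that logs a single warning.
package middleware
```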

View file

@ -11,7 +11,8 @@ import (
"strings" "strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" apiErr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
frostfsErrors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/frostfs/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/chain"
"git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine" "git.frostfs.info/TrueCloudLab/policy-engine/pkg/engine"
@ -28,11 +29,7 @@ const (
QueryPrefix = "prefix" QueryPrefix = "prefix"
QueryDelimiter = "delimiter" QueryDelimiter = "delimiter"
QueryMaxKeys = "max-keys" QueryMaxKeys = "max-keys"
QueryMarker = "marker"
QueryEncodingType = "encoding-type"
amzTagging = "x-amz-tagging" amzTagging = "x-amz-tagging"
unmatchedBucketOperation = "UnmatchedBucketOperation"
) )
// In these operations we don't check resource tags because // In these operations we don't check resource tags because
@ -88,7 +85,7 @@ func PolicyCheck(cfg PolicyConfig) Func {
ctx := r.Context() ctx := r.Context()
if err := policyCheck(r, cfg); err != nil { if err := policyCheck(r, cfg); err != nil {
reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err)) reqLogOrDefault(ctx, cfg.Log).Error(logs.PolicyValidationFailed, zap.Error(err))
err = apierr.TransformToS3Error(err) err = frostfsErrors.UnwrapErr(err)
if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil { if _, wrErr := WriteErrorResponse(w, GetReqInfo(ctx), err); wrErr != nil {
reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr)) reqLogOrDefault(ctx, cfg.Log).Error(logs.FailedToWriteResponse, zap.Error(wrErr))
} }
@ -148,11 +145,11 @@ func policyCheck(r *http.Request, cfg PolicyConfig) error {
case st == chain.Allow: case st == chain.Allow:
return nil return nil
case st != chain.NoRuleFound: case st != chain.NoRuleFound:
return apierr.GetAPIErrorWithError(apierr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String())) return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
} }
if cfg.Settings.PolicyDenyByDefault() { if cfg.Settings.PolicyDenyByDefault() {
return apierr.GetAPIErrorWithError(apierr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String())) return apiErr.GetAPIErrorWithError(apiErr.ErrAccessDenied, fmt.Errorf("policy check: %s", st.String()))
} }
return nil return nil
@ -272,17 +269,8 @@ func determineBucketOperation(r *http.Request) string {
return ListObjectsV2MOperation return ListObjectsV2MOperation
case query.Get(ListTypeQuery) == "2": case query.Get(ListTypeQuery) == "2":
return ListObjectsV2Operation return ListObjectsV2Operation
case len(query) == 0 || func() bool {
for key := range query {
if key != QueryDelimiter && key != QueryMaxKeys && key != QueryPrefix && key != QueryMarker && key != QueryEncodingType {
return false
}
}
return true
}():
return ListObjectsV1Operation
default: default:
return unmatchedBucketOperation return ListObjectsV1Operation
} }
case http.MethodPut: case http.MethodPut:
switch { switch {
@ -304,10 +292,8 @@ func determineBucketOperation(r *http.Request) string {
return PutBucketVersioningOperation return PutBucketVersioningOperation
case query.Has(NotificationQuery): case query.Has(NotificationQuery):
return PutBucketNotificationOperation return PutBucketNotificationOperation
case len(query) == 0:
return CreateBucketOperation
default: default:
return unmatchedBucketOperation return CreateBucketOperation
} }
case http.MethodPost: case http.MethodPost:
switch { switch {
@ -330,14 +316,12 @@ func determineBucketOperation(r *http.Request) string {
return DeleteBucketLifecycleOperation return DeleteBucketLifecycleOperation
case query.Has(EncryptionQuery): case query.Has(EncryptionQuery):
return DeleteBucketEncryptionOperation return DeleteBucketEncryptionOperation
case len(query) == 0:
return DeleteBucketOperation
default: default:
return unmatchedBucketOperation return DeleteBucketOperation
} }
} }
return unmatchedBucketOperation return "UnmatchedBucketOperation"
} }
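The tightened GET classification above means a bare bucket GET maps to ListObjectsV1 only when every query key is a known list parameter; an unknown key now falls through to UnmatchedBucketOperation instead of being misread as a list request (known subresources such as tagging or cors are still matched by the earlier cases). A runnable distillation of just that default branch:

```go
package main

import (
	"fmt"
	"net/url"
)

var listV1Keys = map[string]bool{
	"prefix": true, "delimiter": true, "max-keys": true,
	"marker": true, "encoding-type": true,
}

// classifyBucketGet mirrors the tightened rule: a plain bucket GET is
// ListObjectsV1 only when every query key is a known list parameter.
func classifyBucketGet(rawQuery string) string {
	query, err := url.ParseQuery(rawQuery)
	if err != nil {
		return "UnmatchedBucketOperation"
	}
	for key := range query {
		if !listV1Keys[key] {
			return "UnmatchedBucketOperation"
		}
	}
	return "ListObjectsV1"
}

func main() {
	fmt.Println(classifyBucketGet("prefix=photos&max-keys=100")) // ListObjectsV1
	fmt.Println(classifyBucketGet("foo=bar"))                    // UnmatchedBucketOperation
}
```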
func determineObjectOperation(r *http.Request) string { func determineObjectOperation(r *http.Request) string {
@ -345,8 +329,6 @@ func determineObjectOperation(r *http.Request) string {
switch r.Method { switch r.Method {
case http.MethodOptions: case http.MethodOptions:
return OptionsObjectOperation return OptionsObjectOperation
case http.MethodPatch:
return PatchObjectOperation
case http.MethodHead: case http.MethodHead:
return HeadObjectOperation return HeadObjectOperation
case http.MethodGet: case http.MethodGet:
@ -477,7 +459,7 @@ func determineRequestTags(r *http.Request, decoder XMLDecoder, op string) (map[s
if strings.HasSuffix(op, PutObjectTaggingOperation) || strings.HasSuffix(op, PutBucketTaggingOperation) { if strings.HasSuffix(op, PutObjectTaggingOperation) || strings.HasSuffix(op, PutBucketTaggingOperation) {
tagging := new(data.Tagging) tagging := new(data.Tagging)
if err := decoder.NewXMLDecoder(r.Body).Decode(tagging); err != nil { if err := decoder.NewXMLDecoder(r.Body).Decode(tagging); err != nil {
return nil, fmt.Errorf("%w: %s", apierr.GetAPIError(apierr.ErrMalformedXML), err.Error()) return nil, fmt.Errorf("%w: %s", apiErr.GetAPIError(apiErr.ErrMalformedXML), err.Error())
} }
GetReqInfo(r.Context()).Tagging = tagging GetReqInfo(r.Context()).Tagging = tagging
@ -489,7 +471,7 @@ func determineRequestTags(r *http.Request, decoder XMLDecoder, op string) (map[s
if tagging := r.Header.Get(amzTagging); len(tagging) > 0 { if tagging := r.Header.Get(amzTagging); len(tagging) > 0 {
queries, err := url.ParseQuery(tagging) queries, err := url.ParseQuery(tagging)
if err != nil { if err != nil {
return nil, apierr.GetAPIError(apierr.ErrInvalidArgument) return nil, apiErr.GetAPIError(apiErr.ErrInvalidArgument)
} }
for key := range queries { for key := range queries {
tags[fmt.Sprintf(s3.PropertyKeyFormatRequestTag, key)] = queries.Get(key) tags[fmt.Sprintf(s3.PropertyKeyFormatRequestTag, key)] = queries.Get(key)

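determineRequestTags above parses the X-Amz-Tagging header with url.ParseQuery and prefixes each key with the policy engine's request-tag property format. A small sketch of that parsing; the "s3:RequestTag/%s" format string is an illustrative assumption, not necessarily the real value of s3.PropertyKeyFormatRequestTag:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Value of an X-Amz-Tagging header: URL-encoded key=value pairs.
	const amzTagging = "Environment=prod&Team=storage"

	queries, err := url.ParseQuery(amzTagging)
	if err != nil {
		panic(err) // the gateway maps this to ErrInvalidArgument
	}

	tags := make(map[string]string, len(queries))
	for key := range queries {
		// "s3:RequestTag/%s" stands in for s3.PropertyKeyFormatRequestTag.
		tags[fmt.Sprintf("s3:RequestTag/%s", key)] = queries.Get(key)
	}
	fmt.Println(tags)
}
```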

@ -152,12 +152,6 @@ func TestDetermineBucketOperation(t *testing.T) {
method: http.MethodGet, method: http.MethodGet,
expected: ListObjectsV1Operation, expected: ListObjectsV1Operation,
}, },
{
name: "UnmatchedBucketOperation GET",
method: http.MethodGet,
queryParam: map[string]string{"query": ""},
expected: unmatchedBucketOperation,
},
{ {
name: "PutBucketCorsOperation", name: "PutBucketCorsOperation",
method: http.MethodPut, method: http.MethodPut,
@ -217,12 +211,6 @@ func TestDetermineBucketOperation(t *testing.T) {
method: http.MethodPut, method: http.MethodPut,
expected: CreateBucketOperation, expected: CreateBucketOperation,
}, },
{
name: "UnmatchedBucketOperation PUT",
method: http.MethodPut,
queryParam: map[string]string{"query": ""},
expected: unmatchedBucketOperation,
},
{ {
name: "DeleteMultipleObjectsOperation", name: "DeleteMultipleObjectsOperation",
method: http.MethodPost, method: http.MethodPost,
@ -275,16 +263,10 @@ func TestDetermineBucketOperation(t *testing.T) {
method: http.MethodDelete, method: http.MethodDelete,
expected: DeleteBucketOperation, expected: DeleteBucketOperation,
}, },
{
name: "UnmatchedBucketOperation DELETE",
method: http.MethodDelete,
queryParam: map[string]string{"query": ""},
expected: unmatchedBucketOperation,
},
{ {
name: "UnmatchedBucketOperation", name: "UnmatchedBucketOperation",
method: "invalid-method", method: "invalid-method",
expected: unmatchedBucketOperation, expected: "UnmatchedBucketOperation",
}, },
} { } {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {


@ -187,21 +187,14 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
r = r.WithContext(treepool.SetRequestID(r.Context(), reqInfo.RequestID)) r = r.WithContext(treepool.SetRequestID(r.Context(), reqInfo.RequestID))
fields := []zap.Field{zap.String("request_id", reqInfo.RequestID)} reqLogger := log.With(zap.String("request_id", reqInfo.RequestID))
ctx, span := StartHTTPServerSpan(r, "REQUEST S3") r = r.WithContext(SetReqLogger(r.Context(), reqLogger))
if traceID := span.SpanContext().TraceID(); traceID.IsValid() {
fields = append(fields, zap.String("trace_id", traceID.String()))
}
lw := &traceResponseWriter{ResponseWriter: w, ctx: ctx, span: span}
reqLogger := log.With(fields...)
r = r.WithContext(SetReqLogger(ctx, reqLogger))
reqLogger.Info(logs.RequestStart, zap.String("host", r.Host), reqLogger.Info(logs.RequestStart, zap.String("host", r.Host),
zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace)) zap.String("remote_host", reqInfo.RemoteHost), zap.String("namespace", reqInfo.Namespace))
// continue execution // continue execution
h.ServeHTTP(lw, r) h.ServeHTTP(w, r)
}) })
} }
} }

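The reshaped Request middleware builds a request-scoped logger and stashes it in the context for later handlers to retrieve. A simplified sketch of that pattern, with stand-in helpers in place of the gateway's SetReqLogger/GetReqLogger:

```go
package main

import (
	"context"

	"go.uber.org/zap"
)

type loggerKey struct{}

// setReqLogger and getReqLogger are simplified stand-ins for the gateway's
// SetReqLogger/GetReqLogger context helpers.
func setReqLogger(ctx context.Context, l *zap.Logger) context.Context {
	return context.WithValue(ctx, loggerKey{}, l)
}

func getReqLogger(ctx context.Context) *zap.Logger {
	if l, ok := ctx.Value(loggerKey{}).(*zap.Logger); ok {
		return l
	}
	return zap.NewNop() // fall back to a no-op logger outside a request
}

func main() {
	log, _ := zap.NewProduction()
	defer log.Sync()

	ctx := setReqLogger(context.Background(), log.With(zap.String("request_id", "req-123")))
	getReqLogger(ctx).Info("request start")
}
```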

@ -11,6 +11,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/version"
"go.opentelemetry.io/otel/trace"
"go.uber.org/zap" "go.uber.org/zap"
) )
@ -61,7 +62,7 @@ const (
hdrSSE = "X-Amz-Server-Side-Encryption" hdrSSE = "X-Amz-Server-Side-Encryption"
// hdrSSECustomerKey is the HTTP header key referencing the // hdrSSECustomerKey is the HTTP header key referencing the
// SSE-C client-provided key. // SSE-C client-provided key..
hdrSSECustomerKey = hdrSSE + "-Customer-Key" hdrSSECustomerKey = hdrSSE + "-Customer-Key"
// hdrSSECopyKey is the HTTP header key referencing the SSE-C // hdrSSECopyKey is the HTTP header key referencing the SSE-C
@ -73,7 +74,7 @@ var (
xmlHeader = []byte(xml.Header) xmlHeader = []byte(xml.Header)
) )
// Non-exhaustive list of AWS S3 standard error responses - // Non exhaustive list of AWS S3 standard error responses -
// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html // http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var s3ErrorResponseMap = map[string]string{ var s3ErrorResponseMap = map[string]string{
"AccessDenied": "Access Denied.", "AccessDenied": "Access Denied.",
@ -331,6 +332,10 @@ func LogSuccessResponse(l *zap.Logger) Func {
fields = append(fields, zap.String("user", reqInfo.User)) fields = append(fields, zap.String("user", reqInfo.User))
} }
if traceID, err := trace.TraceIDFromHex(reqInfo.TraceID); err == nil && traceID.IsValid() {
fields = append(fields, zap.String("trace_id", reqInfo.TraceID))
}
reqLogger.Info(logs.RequestEnd, fields...) reqLogger.Info(logs.RequestEnd, fields...)
}) })
} }

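LogSuccessResponse above only attaches trace_id when the stored hex string parses into a valid OpenTelemetry trace ID. The same validation in isolation, with an arbitrary hex value:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	for _, s := range []string{
		"4bf92f3577b34da6a3ce929d0e0e4736", // well-formed 16-byte hex ID
		"",                                 // unset: skipped
	} {
		// TraceIDFromHex rejects malformed input; IsValid rejects all-zero IDs.
		if traceID, err := trace.TraceIDFromHex(s); err == nil && traceID.IsValid() {
			fmt.Println("log trace_id:", s)
		} else {
			fmt.Println("skip trace_id")
		}
	}
}
```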

@ -11,6 +11,20 @@ import (
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )
// Tracing adds tracing support for requests.
// Must be placed after prepareRequest middleware.
func Tracing() Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
appCtx, span := StartHTTPServerSpan(r, "REQUEST S3")
reqInfo := GetReqInfo(r.Context())
reqInfo.TraceID = span.SpanContext().TraceID().String()
lw := &traceResponseWriter{ResponseWriter: w, ctx: appCtx, span: span}
h.ServeHTTP(lw, r.WithContext(appCtx))
})
}
}
type traceResponseWriter struct { type traceResponseWriter struct {
sync.Once sync.Once
http.ResponseWriter http.ResponseWriter

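Per its doc comment, the new Tracing middleware must run after the middleware that prepares reqInfo, so the span's trace ID has somewhere to land. A sketch of one possible chaining order, with no-op stand-ins for the real middlewares:

```go
package main

import "net/http"

type Func func(http.Handler) http.Handler

// No-op stand-ins for the gateway's real middlewares.
func prepareRequest(h http.Handler) http.Handler { return h }
func tracing(h http.Handler) http.Handler        { return h }
func request(h http.Handler) http.Handler        { return h }

// chain wraps h so the first middleware listed runs first on each request.
func chain(h http.Handler, mws ...Func) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	// prepareRequest populates reqInfo, tracing then records the trace ID
	// into it, and request logging sees both.
	handler := chain(final, prepareRequest, tracing, request)
	http.Handle("/", handler)
}
```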

@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"sync" "sync"
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container" "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id" cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns" "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@ -29,14 +29,20 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error) SystemDNS(context.Context) (string, error)
} }
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct { type Config struct {
FrostFS FrostFS FrostFS FrostFS
RPCAddress string RPCAddress string
Settings Settings
} }
type BucketResolver struct { type BucketResolver struct {
rpcAddress string rpcAddress string
frostfs FrostFS frostfs FrostFS
settings Settings
mu sync.RWMutex mu sync.RWMutex
resolvers []*Resolver resolvers []*Resolver
@ -44,15 +50,15 @@ type BucketResolver struct {
type Resolver struct { type Resolver struct {
Name string Name string
resolve func(context.Context, string, string) (cid.ID, error) resolve func(context.Context, string) (cid.ID, error)
} }
func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (cid.ID, error)) { func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (cid.ID, error)) {
r.resolve = fn r.resolve = fn
} }
func (r *Resolver) Resolve(ctx context.Context, zone, name string) (cid.ID, error) { func (r *Resolver) Resolve(ctx context.Context, name string) (cid.ID, error) {
return r.resolve(ctx, zone, name) return r.resolve(ctx, name)
} }
func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, error) { func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, error) {
@ -81,12 +87,12 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil return resolvers, nil
} }
func (r *BucketResolver) Resolve(ctx context.Context, zone, bktName string) (cnrID cid.ID, err error) { func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid.ID, err error) {
r.mu.RLock() r.mu.RLock()
defer r.mu.RUnlock() defer r.mu.RUnlock()
for _, resolver := range r.resolvers { for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, zone, bktName) cnrID, resolverErr := resolver.Resolve(ctx, bktName)
if resolverErr != nil { if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr) resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil { if err == nil {
@ -117,6 +123,7 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
cfg := &Config{ cfg := &Config{
FrostFS: r.frostfs, FrostFS: r.frostfs,
RPCAddress: r.rpcAddress, RPCAddress: r.rpcAddress,
Settings: r.settings,
} }
resolvers, err := createResolvers(resolverNames, cfg) resolvers, err := createResolvers(resolverNames, cfg)
@ -145,25 +152,30 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) { func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name { switch name {
case DNSResolver: case DNSResolver:
return NewDNSResolver(cfg.FrostFS) return NewDNSResolver(cfg.FrostFS, cfg.Settings)
case NNSResolver: case NNSResolver:
return NewNNSResolver(cfg.RPCAddress) return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
default: default:
return nil, fmt.Errorf("unknown resolver: %s", name) return nil, fmt.Errorf("unknown resolver: %s", name)
} }
} }
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) { func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
if frostFS == nil { if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver") return nil, fmt.Errorf("pool must not be nil for DNS resolver")
} }
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS var dns ns.DNS
resolveFunc := func(ctx context.Context, zone, name string) (cid.ID, error) { resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
var err error var err error
reqInfo := middleware.GetReqInfo(ctx)
if zone == v2container.SysAttributeZoneDefault { zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
if isDefault {
zone, err = frostFS.SystemDNS(ctx) zone, err = frostFS.SystemDNS(ctx)
if err != nil { if err != nil {
return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err) return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@ -184,10 +196,13 @@ func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
}, nil }, nil
} }
func NewNNSResolver(address string) (*Resolver, error) { func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
if address == "" { if address == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver") return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
} }
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS var nns ns.NNS
@ -195,9 +210,12 @@ func NewNNSResolver(address string) (*Resolver, error) {
return nil, fmt.Errorf("dial %s: %w", address, err) return nil, fmt.Errorf("dial %s: %w", address, err)
} }
resolveFunc := func(_ context.Context, zone, name string) (cid.ID, error) { resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
var d container.Domain var d container.Domain
d.SetName(name) d.SetName(name)
reqInfo := middleware.GetReqInfo(ctx)
zone, _ := settings.FormContainerZone(reqInfo.Namespace)
d.SetZone(zone) d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d) cnrID, err := nns.ResolveContainerDomain(d)

Some files were not shown because too many files have changed in this diff.