Compare commits

...

16 commits

Author SHA1 Message Date
d919e6cce2 [#482] Fix containers resolving
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-05 12:33:14 +03:00
056f168d77 [#448] multipart: Support removing duplicated parts
Previously, after a tree split we could end up with duplicated parts
(several objects and tree nodes referring to the same part number).
Some of them couldn't be deleted after an abort or complete action.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-09-03 13:20:38 +00:00
9bdfe2a016 [#479] Update APE to support s3:PatchObject action
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
d6b506f6d9 [#466] Implement PATCH for multipart objects
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
a2e0b92575 [#473] Add PATCH to extensions doc
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
b08f476ea7 [#462] Implement PATCH for regular objects
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
2024-09-03 11:57:59 +00:00
f4275d837a [#413] Add SECURITY.md
Signed-off-by: Pavel Pogodaev <p.pogodaev@yadro.com>
2024-09-03 11:45:05 +00:00
664f83b2b7 [#480] Add fuzzing tests
Signed-off-by: Roman Ognev <r.ognev@yadro.com>
2024-09-02 15:59:07 +03:00
136b5521fe [#475] Support graceful_close_on_switch_timeout param
This allows in-flight requests to finish during a rebalance.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-29 13:22:08 +00:00
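As a minimal illustration of the new parameter (the key name comes from the changelog entry `frostfs.graceful_close_on_switch_timeout`; the placement and the value below are assumptions, not part of this change):
```yaml
# Hypothetical snippet of the gateway YAML config: let in-flight requests
# finish for up to 10s before connections are closed on a rebalance switch.
frostfs:
  graceful_close_on_switch_timeout: 10s
```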
a5f670d904 [#329] Reduce using mutex when update app settings
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-29 12:03:26 +00:00
d76c4fe2a2 [#472] tree: Don't use sorted GetSubTree for nodes without FileName
Sorted GetSubTree doesn't return nodes without the FileName attribute
if there are more than 1000 nodes in the subtree.

Signed-off-by: Denis Kirillov <d.kirillov@yadro.com>
2024-08-27 09:43:29 +00:00
0637133c61 [#470] Update Go version to 1.22
Signed-off-by: Nikita Zinkevich <n.zinkevich@yadro.com>
2024-08-23 12:18:42 +03:00
bf00fa6aa9 [#449] Add support headers for vhs and servername
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
ff690ce996 [#446] Add tests for address style middleware
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
534ae7f0f1 [#446] Add support virtual-hosted-style
Signed-off-by: Roman Loginov <r.loginov@yadro.com>
2024-08-23 08:35:05 +00:00
77673797f9 [#474] lint: Update golangci-lint to v1.60 and fix issues
Signed-off-by: Ekaterina Lebedeva <ekaterina.lebedeva@yadro.com>
2024-08-22 19:13:52 +03:00
59 changed files with 3950 additions and 520 deletions


@ -1,4 +1,4 @@
FROM golang:1.21 AS builder
FROM golang:1.22 AS builder
ARG BUILD=now
ARG REPO=git.frostfs.info/TrueCloudLab/frostfs-s3-gw


@ -6,7 +6,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.21', '1.22' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3


@ -12,7 +12,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.23'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3


@ -10,7 +10,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.23'
cache: true
- name: Install linters
@ -24,7 +24,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
go_versions: [ '1.21', '1.22' ]
go_versions: [ '1.22', '1.23' ]
fail-fast: false
steps:
- uses: actions/checkout@v3


@ -12,7 +12,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.22'
go-version: '1.23'
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest


@ -12,7 +12,8 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
format: tab
formats:
- format: tab
# all available settings of specific linters
linters-settings:


@ -4,6 +4,14 @@ This document outlines major changes between releases.
## [Unreleased]
### Added
- Add support for virtual hosted style addressing (#446, #449)
- Support new param `frostfs.graceful_close_on_switch_timeout` (#475)
- Support patch object method (#479)
### Changed
- Update Go version to 1.22 (#470)
## [0.30.0] - Kangshung - 2024-07-19
### Fixed


@ -4,8 +4,8 @@
REPO ?= $(shell go list -m)
VERSION ?= $(shell git describe --tags --dirty --match "v*" --always --abbrev=8 2>/dev/null || cat VERSION 2>/dev/null || echo "develop")
GO_VERSION ?= 1.22
LINT_VERSION ?= 1.56.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.5
LINT_VERSION ?= 1.60.1
TRUECLOUDLAB_LINT_VERSION ?= 0.0.6
BINDIR = bin
METRICS_DUMP_OUT ?= ./metrics-dump.json
@ -23,6 +23,12 @@ OUTPUT_LINT_DIR ?= $(shell pwd)/bin
LINT_DIR = $(OUTPUT_LINT_DIR)/golangci-lint-$(LINT_VERSION)-v$(TRUECLOUDLAB_LINT_VERSION)
TMP_DIR := .cache
# Variables for fuzzing
FUZZ_NGFUZZ_DIR ?= ""
FUZZ_TIMEOUT ?= 30
FUZZ_FUNCTIONS ?= "all"
FUZZ_AUX ?= ""
.PHONY: all $(BINS) $(BINDIR) dep docker/ test cover format image image-push dirty-image lint docker/lint pre-commit unpre-commit version clean protoc
# .deb package versioning
@ -76,6 +82,34 @@ cover:
@go test -v -race ./... -coverprofile=coverage.txt -covermode=atomic
@go tool cover -html=coverage.txt -o coverage.html
# Run fuzzing
CLANG := $(shell which clang-17 2>/dev/null)
.PHONY: check-clang all
check-clang:
ifeq ($(CLANG),)
@echo "clang-17 is not installed. Please install it before proceeding - https://apt.llvm.org/llvm.sh "
@exit 1
endif
.PHONY: check-ngfuzz all
check-ngfuzz:
@if [ -z "$(FUZZ_NGFUZZ_DIR)" ]; then \
echo "Please set a variable FUZZ_NGFUZZ_DIR to specify path to the ngfuzz"; \
exit 1; \
fi
.PHONY: install-fuzzing-deps
install-fuzzing-deps: check-clang check-ngfuzz
.PHONY: fuzz
fuzz: install-fuzzing-deps
@START_PATH=$$(pwd); \
ROOT_PATH=$$(realpath --relative-to=$(FUZZ_NGFUZZ_DIR) $$START_PATH) ; \
cd $(FUZZ_NGFUZZ_DIR) && \
./ngfuzz -clean && \
./ngfuzz -fuzz $(FUZZ_FUNCTIONS) -rootdir $$ROOT_PATH -timeout $(FUZZ_TIMEOUT) $(FUZZ_AUX) && \
./ngfuzz -report
# Reformat code
format:
@echo "⇒ Processing gofmt check"


@ -93,6 +93,24 @@ HTTP/1.1 200 OK
Also, you can configure domains using `.env` variables or a `yaml` file.
## Fuzzing
To run fuzzing tests, use the following command:
```shell
$ make fuzz
```
This command will install dependencies for the fuzzing process and run existing fuzzing tests.
You can also use the following arguments:
```
FUZZ_TIMEOUT - time to run each fuzzing test (default 30)
FUZZ_FUNCTIONS - fuzzing tests that will be started (default "all")
FUZZ_AUX - additional parameters for the fuzzer (for example, "-debug")
FUZZ_NGFUZZ_DIR - path to ngfuzz tool
```
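For instance, a run that overrides the defaults might look like this (the ngfuzz path and the values are illustrative):
```shell
$ make fuzz FUZZ_NGFUZZ_DIR=/path/to/ngfuzz FUZZ_TIMEOUT=60 FUZZ_FUNCTIONS="FuzzAuthenticate" FUZZ_AUX="-debug"
```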
## Documentation
- [Configuration](./docs/configuration.md)

SECURITY.md (new file, 26 lines)

@ -0,0 +1,26 @@
# Security Policy
## How To Report a Vulnerability
If you think you have found a vulnerability in this repository, please report it to us through coordinated disclosure.
**Please do not report security vulnerabilities through public issues, discussions, or change requests.**
Instead, you can report it using one of the following ways:
* Contact the [TrueCloudLab Security Team](mailto:security@frostfs.info) via email
Please include as much of the information listed below as you can to help us better understand and resolve the issue:
* The type of issue (e.g., buffer overflow, or cross-site scripting)
* Affected version(s)
* Impact of the issue, including how an attacker might exploit the issue
* Step-by-step instructions to reproduce the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Full paths of source file(s) related to the manifestation of the issue
* Any special configuration required to reproduce the issue
* Any log files that are related to this issue (if possible)
* Proof-of-concept or exploit code (if possible)
This information will help us triage your report more quickly.


@ -0,0 +1,88 @@
//go:build gofuzz
// +build gofuzz
package auth
import (
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/aws/aws-sdk-go/aws/credentials"
utils "github.com/trailofbits/go-fuzz-utils"
)
const (
fuzzSuccessExitCode = 0
fuzzFailExitCode = -1
)
func InitFuzzAuthenticate() {
}
func DoFuzzAuthenticate(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var accessKeyAddr oid.Address
err = tp.Fill(accessKeyAddr)
if err != nil {
return fuzzFailExitCode
}
accessKeyID := strings.ReplaceAll(accessKeyAddr.String(), "/", "0")
secretKey, err := tp.GetString()
awsCreds := credentials.NewStaticCredentials(accessKeyID, secretKey, "")
reqData := RequestData{
Method: "GET",
Endpoint: "http://localhost:8084",
Bucket: "my-bucket",
Object: "@obj/name",
}
presignData := PresignData{
Service: "s3",
Region: "spb",
Lifetime: 10 * time.Minute,
SignTime: time.Now().UTC(),
}
req, err := PresignRequest(awsCreds, reqData, presignData)
if req == nil {
return fuzzFailExitCode
}
expBox := &accessbox.Box{
Gate: &accessbox.GateData{
SecretKey: secretKey,
},
}
mock := newTokensFrostfsMock()
mock.addBox(accessKeyAddr, expBox)
c := &Center{
cli: mock,
reg: NewRegexpMatcher(authorizationFieldRegexp),
postReg: NewRegexpMatcher(postPolicyCredentialRegexp),
}
_, _ = c.Authenticate(req)
return fuzzSuccessExitCode
}
func FuzzAuthenticate(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzAuthenticate(data)
})
}


@ -126,6 +126,14 @@ type PartInfo struct {
Created time.Time `json:"created"`
}
type PartInfoExtended struct {
PartInfo
// Timestamp is used to find the latest version of part info in case of tree split
// when there are multiple nodes for the same part.
Timestamp uint64
}
// ToHeaderString forms a short part representation to use in the S3-Completed-Parts header.
func (p *PartInfo) ToHeaderString() string {
// ETag value contains SHA256 checksum which is used while getting object parts attributes.


@ -187,6 +187,9 @@ const (
ErrInvalidRequestLargeCopy
ErrInvalidStorageClass
VersionIDMarkerWithoutKeyMarker
ErrInvalidRangeLength
ErrRangeOutOfBounds
ErrMissingContentRange
ErrMalformedJSON
ErrInsecureClientRequest
@ -1739,6 +1742,24 @@ var errorCodes = errorCodeMap{
Description: "Part number must be an integer between 1 and 10000, inclusive",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInvalidRangeLength: {
ErrCode: ErrInvalidRangeLength,
Code: "InvalidRange",
Description: "Provided range length must be equal to content length",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrRangeOutOfBounds: {
ErrCode: ErrRangeOutOfBounds,
Code: "InvalidRange",
Description: "Provided range is outside of object bounds",
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
},
ErrMissingContentRange: {
ErrCode: ErrMissingContentRange,
Code: "MissingContentRange",
Description: "Content-Range header is mandatory for this type of request",
HTTPStatusCode: http.StatusBadRequest,
},
// Add your error structure here.
}


@ -41,7 +41,6 @@ type (
RetryMaxAttempts() int
RetryMaxBackoff() time.Duration
RetryStrategy() RetryStrategy
Domains() []string
}
FrostFSID interface {


@ -288,6 +288,21 @@ func completeMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID
return w
}
func abortMultipartUpload(hc *handlerContext, bktName, objName, uploadID string) {
w := abortMultipartUploadBase(hc, bktName, objName, uploadID)
assertStatus(hc.t, w, http.StatusNoContent)
}
func abortMultipartUploadBase(hc *handlerContext, bktName, objName, uploadID string) *httptest.ResponseRecorder {
query := make(url.Values)
query.Set(uploadIDQuery, uploadID)
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
hc.Handler().AbortMultipartUploadHandler(w, r)
return w
}
func uploadPartEncrypted(hc *handlerContext, bktName, objName, uploadID string, num, size int) (string, []byte) {
return uploadPartBase(hc, bktName, objName, true, uploadID, num, size)
}


@ -228,6 +228,14 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
return content
}
func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte {
w := getObjectBaseResponse(tc, bktName, objName, version)
assertStatus(tc.t, w, http.StatusOK)
content, err := io.ReadAll(w.Result().Body)
require.NoError(tc.t, err)
return content
}
func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
w := getObjectBaseResponse(hc, bktName, objName, version)
assertS3Error(hc.t, w, errors.GetAPIError(code))


@ -0,0 +1,998 @@
//go:build gofuzz
// +build gofuzz
package handler
import (
"bytes"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"mime/multipart"
"net/http"
"net/http/httptest"
"testing"
tt "testing" // read https://github.com/AdamKorcz/go-118-fuzz-build?tab=readme-ov-file#workflow
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
engineiam "git.frostfs.info/TrueCloudLab/policy-engine/iam"
utils "github.com/trailofbits/go-fuzz-utils"
"go.uber.org/zap/zaptest"
)
var (
fuzzBktName string
fuzzBox *accessbox.Box
fuzzHc *handlerContextBase
fuzzt *tt.T
)
const (
fuzzSuccessExitCode = 0
fuzzFailExitCode = -1
)
func createTestBucketAndInitContext() {
fuzzt = new(tt.T)
log := zaptest.NewLogger(fuzzt)
var err error
fuzzHc, err = prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
if err != nil {
panic(err)
}
fuzzBktName = "bucket"
fuzzBox, _ = createAccessBox(fuzzt)
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, nil)
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
fuzzHc.Handler().CreateBucketHandler(w, r)
}
func prepareStrings(tp *utils.TypeProvider, count int) ([]string, error) {
array := make([]string, count)
var err error
for i := 0; i < count; i++ {
err = tp.Reset()
if err != nil {
return nil, err
}
array[i], err = tp.GetString()
if err != nil {
return nil, err
}
}
return array, nil
}
func addMD5Header(tp *utils.TypeProvider, r *http.Request, rawBody []byte) error {
if len(rawBody) == 0 {
return nil
}
rand, err := tp.GetBool()
if err != nil {
return err
}
if rand == true {
var dst []byte
base64.StdEncoding.Encode(dst, rawBody)
hash := md5.Sum(dst)
r.Header.Set("Content-Md5", hex.EncodeToString(hash[:]))
}
return nil
}
func generateParams(tp *utils.TypeProvider, input string, params []string) (string, error) {
input += "?"
count, err := tp.GetInt()
if err != nil {
return "", err
}
count = count % len(params)
if count < 0 {
count += len(params)
}
for i := 0; i < count; i++ {
position, err := tp.GetInt()
if err != nil {
return "", err
}
position = position % len(params)
if position < 0 {
position += len(params)
}
v, err := tp.GetString()
if err != nil {
return "", err
}
input += params[position] + "=" + v + "&"
}
return input, nil
}
func generateHeaders(tp *utils.TypeProvider, r *http.Request, params []string) error {
count, err := tp.GetInt()
if err != nil {
return err
}
count = count % len(params)
if count < 0 {
count += len(params)
}
for i := 0; i < count; i++ {
position, err := tp.GetInt()
if err != nil {
return err
}
position = position % len(params)
if position < 0 {
position += len(params)
}
v, err := tp.GetString()
if err != nil {
return err
}
r.Header.Set(params[position], v)
}
return nil
}
func InitFuzzCreateBucketHandler() {
fuzzt = new(tt.T)
log := zaptest.NewLogger(fuzzt)
var err error
fuzzHc, err = prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
if err != nil {
panic(err)
}
fuzzBox, _ = createAccessBox(fuzzt)
}
func DoFuzzCreateBucketHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
strings, err := prepareStrings(tp, 4)
if err != nil {
return fuzzFailExitCode
}
bktName := strings[0]
body := strings[1]
bodyXml, err := xml.Marshal(body)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL, bytes.NewReader(bodyXml))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: bktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-acl", "x-amz-bucket-object-lock-enabled", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write", "x-amz-grant-write-acp", "x-amz-object-ownership"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().CreateBucketHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzCreateBucketHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzCreateBucketHandler(data)
})
}
func InitFuzzPutBucketCorsHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutBucketCorsHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var cors data.CORSConfiguration
err = tp.Fill(&cors)
if err != nil {
return fuzzFailExitCode
}
bodyXml, err := xml.Marshal(cors)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+"?cors", bytes.NewReader(bodyXml))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutBucketCorsHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutBucketCorsHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutBucketCorsHandler(data)
})
}
func InitFuzzPutBucketPolicyHandler() {
createTestBucketAndInitContext()
}
func FuzzPutBucketPolicyHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutBucketPolicyHandler(data)
})
}
func DoFuzzPutBucketPolicyHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var policy engineiam.Policy
err = tp.Fill(&policy)
if err != nil {
return fuzzFailExitCode
}
bodyXml, err := xml.Marshal(policy)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+"?policy", bytes.NewReader(bodyXml))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-confirm-remove-self-bucket-access"})
if err != nil {
return fuzzFailExitCode
}
err = addMD5Header(tp, r, bodyXml)
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutBucketPolicyHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzDeleteMultipleObjectsHandler() {
createTestBucketAndInitContext()
}
func FuzzDeleteMultipleObjectsHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDeleteMultipleObjectsHandler(data)
})
}
func DoFuzzDeleteMultipleObjectsHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var body DeleteObjectsRequest
err = tp.Fill(&body)
if err != nil {
return fuzzFailExitCode
}
bodyXml, err := xml.Marshal(body)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPost, defaultURL+"?delete", bytes.NewReader(bodyXml))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention", "x-amz-mfa"})
if err != nil {
return fuzzFailExitCode
}
err = addMD5Header(tp, r, bodyXml)
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().DeleteMultipleObjectsHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzPostObject() {
createTestBucketAndInitContext()
}
func FuzzPostObject(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPostObject(data)
})
}
func postObject(tp *utils.TypeProvider) ([]byte, string, error) {
strings, err := prepareStrings(tp, 2)
if err != nil {
return nil, "", err
}
bodyXml, err := xml.Marshal(strings[0])
if err != nil {
return nil, "", err
}
objName := strings[1]
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPost, defaultURL, bytes.NewReader(bodyXml))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"X-Amz-Grant-Read", "X-Amz-Grant-Full-Control", "X-Amz-Grant-Write", "X-Amz-Acl", "x-amz-expected-bucket-owner"})
if err != nil {
return nil, "", err
}
var file multipart.Form
err = tp.Fill(&file)
if err != nil {
return nil, "", err
}
r.MultipartForm = &file
fuzzHc.Handler().PostObject(w, r)
return bodyXml, objName, nil
}
func DoFuzzPostObject(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, _, err = postObject(tp)
if err != nil {
return fuzzFailExitCode
}
return fuzzSuccessExitCode
}
func InitFuzzDeleteBucketHandler() {
createTestBucketAndInitContext()
}
func FuzzDeleteBucketHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDeleteBucketHandler(data)
})
}
func DoFuzzDeleteBucketHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodDelete, defaultURL, nil)
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().DeleteBucketHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzDeleteBucketCorsHandler() {
createTestBucketAndInitContext()
}
func FuzzDeleteBucketCorsHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDeleteBucketCorsHandler(data)
})
}
func DoFuzzDeleteBucketCorsHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodDelete, defaultURL+"?cors", nil)
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().DeleteBucketCorsHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzDeleteBucketPolicyHandler() {
createTestBucketAndInitContext()
}
func FuzzDeleteBucketPolicyHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDeleteBucketPolicyHandler(data)
})
}
func DoFuzzDeleteBucketPolicyHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodDelete, defaultURL+"?policy", nil)
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().DeleteBucketPolicyHandler(w, r)
return fuzzFailExitCode
}
func InitFuzzCopyObjectHandler() {
createTestBucketAndInitContext()
}
func FuzzCopyObjectHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzCopyObjectHandler(data)
})
}
func DoFuzzCopyObjectHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
var r *http.Request
key, err := tp.GetString()
if err != nil {
return fuzzFailExitCode
}
params, err := generateParams(tp, key, []string{"versionId"})
if err != nil {
return fuzzFailExitCode
}
r = httptest.NewRequest(http.MethodPut, defaultURL+params, nil)
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-acl", "x-amz-checksum-algorithm", "x-amz-copy-source", "x-amz-copy-source-if-match", "x-amz-copy-source-if-match", "x-amz-copy-source-if-unmodified-since", "x-amz-copy-source-if-modified-since", "x-amz-copy-source-if-none-match", "x-amz-copy-source-if-modified-since", "x-amz-copy-source-if-none-match", "x-amz-copy-source-if-none-match", "x-amz-copy-source-if-modified-since", "x-amz-copy-source-if-unmodified-since", "x-amz-copy-source-if-match", "x-amz-copy-source-if-unmodified-since", "x-amz-copy-source-server-side-encryption-customer-algorithm", "x-amz-copy-source-server-side-encryption-customer-key", "x-amz-copy-source-server-side-encryption-customer-key-MD5", "x-amz-expected-bucket-owner", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write-acp", "x-amz-metadata-directive", "x-amz-website-redirect-location", "x-amz-object-lock-legal-hold", "x-amz-object-lock-mode", "x-amz-object-lock-retain-until-date", "x-amz-request-payer", "x-amz-server-side-encryption", "x-amz-server-side-encryption-aws-kms-key-id", "x-amz-server-side-encryption-bucket-key-enabled", "x-amz-server-side-encryption-context", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "x-amz-source-expected-bucket-owner", "x-amz-storage-class", "x-amz-tagging", "x-amz-tagging-directive", "x-amz-website-redirect-location"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().CopyObjectHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzDeleteObjectHandler() {
createTestBucketAndInitContext()
}
func FuzzDeleteObjectHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzDeleteObjectHandler(data)
})
}
func DoFuzzDeleteObjectHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, objName, err := postObject(tp)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
var r *http.Request
params, err := generateParams(tp, objName, []string{"versionId"})
if err != nil {
return fuzzFailExitCode
}
r = httptest.NewRequest(http.MethodDelete, defaultURL+params, nil)
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention", "x-amz-mfa"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().DeleteObjectHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzGetObjectHandler() {
createTestBucketAndInitContext()
}
func FuzzGetObjectHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzGetObjectHandler(data)
})
}
func DoFuzzGetObjectHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, objName, err := postObject(tp)
if err != nil {
return fuzzFailExitCode
}
params, err := generateParams(tp, objName, []string{"versionId", "partNumber", "Range", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding"})
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, defaultURL+params, nil)
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "If-Match", "If-None-Match", "If-Modified-Since", "If-Unmodified-Since", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "Range"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().GetObjectHandler(w, r)
return fuzzSuccessExitCode
}
func InitFuzzPutObjectHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutObjectHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
objName, err := tp.GetString()
if err != nil {
return fuzzFailExitCode
}
body, err := tp.GetBytes()
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+objName, bytes.NewReader(body))
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "X-Amz-Grant-Read", "X-Amz-Grant-Full-Control", "X-Amz-Grant-Write", "X-Amz-Acl", "X-Amz-Tagging", "Content-Type", "Cache-Control", "Expires", "Content-Language", "Content-Encoding", "x-amz-server-side-encryption-customer-algorithm", "x-amz-server-side-encryption-customer-key", "x-amz-server-side-encryption-customer-key-MD5", "X-Amz-Content-Sha256", "X-Amz-Object-Lock-Legal-Hold", "X-Amz-Object-Lock-Mode", "X-Amz-Object-Lock-Retain-Until-Date", "X-Amz-Bypass-Governance-Retention", "X-Amz-Meta-*"})
if err != nil {
return fuzzFailExitCode
}
err = addMD5Header(tp, r, body)
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutObjectHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutObjectHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutObjectHandler(data)
})
}
func InitFuzzPutObjectLegalHoldHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutObjectLegalHoldHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, objName, err := postObject(tp)
if err != nil {
return fuzzFailExitCode
}
var hold data.LegalHold
err = tp.Fill(&hold)
if err != nil {
return fuzzFailExitCode
}
rawBody, err := xml.Marshal(hold)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+objName+"?legal-hold", bytes.NewReader(rawBody))
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = addMD5Header(tp, r, rawBody)
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutObjectLegalHoldHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutObjectLegalHoldHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutObjectLegalHoldHandler(data)
})
}
func InitFuzzPutBucketObjectLockConfigHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutBucketObjectLockConfigHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var hold data.ObjectLockConfiguration
err = tp.Fill(&hold)
if err != nil {
return fuzzFailExitCode
}
rawBody, err := xml.Marshal(&hold)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+"?object-lock", bytes.NewReader(rawBody))
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = addMD5Header(tp, r, rawBody)
if err != nil {
return fuzzFailExitCode
}
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bucket-object-lock-token"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutBucketObjectLockConfigHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutBucketObjectLockConfigHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutBucketObjectLockConfigHandler(data)
})
}
func InitFuzzPutObjectRetentionHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutObjectRetentionHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
_, objName, err := postObject(tp)
if err != nil {
return fuzzFailExitCode
}
var retention data.Retention
err = tp.Fill(&retention)
if err != nil {
return fuzzFailExitCode
}
rawBody, err := xml.Marshal(retention)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+objName+"?retention", bytes.NewReader(rawBody))
if r != nil {
return fuzzFailExitCode
}
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: objName}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = addMD5Header(tp, r, rawBody)
if err != nil {
return fuzzFailExitCode
}
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-bypass-governance-retention"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutObjectRetentionHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutObjectRetentionHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutObjectRetentionHandler(data)
})
}
func InitFuzzPutBucketAclHandler() {
createTestBucketAndInitContext()
}
func DoFuzzPutBucketAclHandler(input []byte) int {
// FUZZER INIT
if len(input) < 100 {
return fuzzFailExitCode
}
tp, err := utils.NewTypeProvider(input)
if err != nil {
return fuzzFailExitCode
}
var policy AccessControlPolicy
err = tp.Fill(&policy)
if err != nil {
return fuzzFailExitCode
}
rawBody, err := xml.Marshal(policy)
if err != nil {
return fuzzFailExitCode
}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodPut, defaultURL+"?acl", bytes.NewReader(rawBody))
reqInfo := middleware.NewReqInfo(w, r, middleware.ObjectRequest{Bucket: fuzzBktName, Object: ""}, "")
r = r.WithContext(middleware.SetReqInfo(fuzzHc.Context(), reqInfo))
r = r.WithContext(middleware.SetBox(r.Context(), &middleware.Box{AccessBox: fuzzBox}))
err = addMD5Header(tp, r, rawBody)
if err != nil {
return fuzzFailExitCode
}
err = generateHeaders(tp, r, []string{"x-amz-expected-bucket-owner", "x-amz-acl", "x-amz-expected-bucket-owner", "x-amz-grant-full-control", "x-amz-grant-read", "x-amz-grant-read-acp", "x-amz-grant-write", "x-amz-grant-write-acp"})
if err != nil {
return fuzzFailExitCode
}
fuzzHc.Handler().PutBucketACLHandler(w, r)
return fuzzSuccessExitCode
}
func FuzzPutBucketAclHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, data []byte) {
DoFuzzPutBucketAclHandler(data)
})
}


@ -33,13 +33,16 @@ import (
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"go.uber.org/zap/zaptest"
"golang.org/x/exp/slices"
)
type handlerContext struct {
*handlerContextBase
t *testing.T
}
type handlerContextBase struct {
owner user.ID
t *testing.T
h *handler
tp *layer.TestFrostFS
tree *tree.Tree
@ -51,19 +54,19 @@ type handlerContext struct {
cache *layer.Cache
}
func (hc *handlerContext) Handler() *handler {
func (hc *handlerContextBase) Handler() *handler {
return hc.h
}
func (hc *handlerContext) MockedPool() *layer.TestFrostFS {
func (hc *handlerContextBase) MockedPool() *layer.TestFrostFS {
return hc.tp
}
func (hc *handlerContext) Layer() *layer.Layer {
func (hc *handlerContextBase) Layer() *layer.Layer {
return hc.h.obj
}
func (hc *handlerContext) Context() context.Context {
func (hc *handlerContextBase) Context() context.Context {
return hc.context
}
@ -73,7 +76,6 @@ type configMock struct {
defaultCopiesNumbers []uint32
bypassContentEncodingInChunks bool
md5Enabled bool
domains []string
}
func (c *configMock) DefaultPlacementPolicy(_ string) netmap.PlacementPolicy {
@ -137,28 +139,35 @@ func (c *configMock) RetryStrategy() RetryStrategy {
return RetryStrategyConstant
}
func (c *configMock) Domains() []string {
return c.domains
}
func prepareHandlerContext(t *testing.T) *handlerContext {
log := zaptest.NewLogger(t)
return prepareHandlerContextBase(t, layer.DefaultCachesConfigs(log), log)
hc, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(zap.NewExample()))
require.NoError(t, err)
return &handlerContext{
handlerContextBase: hc,
t: t,
}
}
func prepareHandlerContextWithMinCache(t *testing.T) *handlerContext {
log := zaptest.NewLogger(t)
return prepareHandlerContextBase(t, getMinCacheConfig(log), log)
hc, err := prepareHandlerContextBase(getMinCacheConfig(zap.NewExample()))
require.NoError(t, err)
return &handlerContext{
handlerContextBase: hc,
t: t,
}
}
func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig, log *zap.Logger) *handlerContext {
func prepareHandlerContextBase(cacheCfg *layer.CachesConfig) (*handlerContextBase, error) {
key, err := keys.NewPrivateKey()
require.NoError(t, err)
if err != nil {
return nil, err
}
log := zap.NewExample()
tp := layer.NewTestFrostFS(key)
testResolver := &resolver.Resolver{Name: "test_resolver"}
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
testResolver.SetResolveFunc(func(_ context.Context, _, name string) (cid.ID, error) {
return tp.ContainerID(name)
})
@ -166,9 +175,11 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig, log *
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
memCli, err := tree.NewTreeServiceClientMemory()
require.NoError(t, err)
if err != nil {
return nil, err
}
treeMock := tree.NewTree(memCli, log)
treeMock := tree.NewTree(memCli, zap.NewExample())
features := &layer.FeatureSettingsMock{}
@ -183,7 +194,9 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig, log *
var pp netmap.PlacementPolicy
err = pp.DecodeString("REP 1")
require.NoError(t, err)
if err != nil {
return nil, err
}
cfg := &configMock{
defaultPolicy: pp,
@ -196,19 +209,23 @@ func prepareHandlerContextBase(t *testing.T, cacheCfg *layer.CachesConfig, log *
frostfsid: newFrostfsIDMock(),
}
return &handlerContext{
accessBox, err := newTestAccessBox(key)
if err != nil {
return nil, err
}
return &handlerContextBase{
owner: owner,
t: t,
h: h,
tp: tp,
tree: treeMock,
context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: newTestAccessBox(t, key)}),
context: middleware.SetBox(context.Background(), &middleware.Box{AccessBox: accessBox}),
config: cfg,
layerFeatures: features,
treeMock: memCli,
cache: layerCfg.Cache,
}
}, nil
}
func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {


@ -119,21 +119,25 @@ func TestIsAvailableToResolve(t *testing.T) {
}
}
func newTestAccessBox(t *testing.T, key *keys.PrivateKey) *accessbox.Box {
func newTestAccessBox(key *keys.PrivateKey) (*accessbox.Box, error) {
var err error
if key == nil {
key, err = keys.NewPrivateKey()
require.NoError(t, err)
if err != nil {
return nil, err
}
}
var btoken bearer.Token
btoken.SetImpersonate(true)
err = btoken.Sign(key.PrivateKey)
require.NoError(t, err)
if err != nil {
return nil, err
}
return &accessbox.Box{
Gate: &accessbox.GateData{
BearerToken: &btoken,
},
}
}, nil
}


@ -7,7 +7,6 @@ import (
"net/url"
"path"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
@ -429,7 +428,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
Bucket: objInfo.Bucket,
Key: objInfo.Name,
ETag: data.Quote(objInfo.ETag(h.cfg.MD5Enabled())),
Location: getObjectLocation(r, h.cfg.Domains(), reqInfo.BucketName, reqInfo.ObjectName),
Location: getObjectLocation(r, reqInfo.BucketName, reqInfo.ObjectName, reqInfo.RequestVHSEnabled),
}
if settings.VersioningEnabled() {
@ -450,7 +449,7 @@ func getURLScheme(r *http.Request) string {
}
// getObjectLocation gets the fully qualified URL of an object.
func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
func getObjectLocation(r *http.Request, bucket, object string, vhsEnabled bool) string {
proto := middleware.GetSourceScheme(r)
if proto == "" {
proto = getURLScheme(r)
@ -460,13 +459,12 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
Path: path.Join("/", bucket, object),
Scheme: proto,
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.HasPrefix(r.Host, bucket+"."+domain) {
u.Path = path.Join("/", object)
break
}
// If vhs enabled then we need to use bucket DNS style.
if vhsEnabled {
u.Path = path.Join("/", object)
}
return u.String()
}


@ -17,6 +17,10 @@ import (
s3Errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/stretchr/testify/require"
)
@ -122,6 +126,108 @@ func TestMultipartReUploadPart(t *testing.T) {
equalDataSlices(t, append(data1, data2...), data)
}
func TestMultipartRemovePartsSplit(t *testing.T) {
bktName, objName := "bucket-to-upload-part", "object-multipart"
partSize := 8
t.Run("reupload part", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
require.Len(t, list.Parts, 1)
require.Equal(t, `"etag"`, list.Parts[0].ETag)
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
require.Len(t, list.Parts, 1)
require.Equal(t, etag1, list.Parts[0].ETag)
require.Len(t, hc.tp.Objects(), 1)
})
t.Run("abort multipart", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
require.Empty(t, hc.tp.Objects())
})
t.Run("complete multipart", func(t *testing.T) {
hc := prepareHandlerContext(t)
bktInfo := createTestBucket(hc, bktName)
uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
require.NoError(t, err)
objID := oidtest.ID()
_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
"Number": "1",
"OID": objID.EncodeToString(),
"Owner": usertest.ID().EncodeToString(),
"ETag": "etag",
})
require.NoError(t, err)
hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
require.Len(t, hc.tp.Objects(), 2)
completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
})
}
func containsOID(objects []*object.Object, objID oid.ID) bool {
for _, o := range objects {
oID, _ := o.ID()
if oID.Equals(objID) {
return true
}
}
return false
}
func TestListMultipartUploads(t *testing.T) {
hc := prepareHandlerContext(t)
@ -443,11 +549,11 @@ func TestUploadPartCheckContentSHA256(t *testing.T) {
func TestMultipartObjectLocation(t *testing.T) {
for _, tc := range []struct {
req *http.Request
bucket string
object string
domains []string
expected string
req *http.Request
bucket string
object string
vhsEnabled bool
expected string
}{
{
req: &http.Request{
@ -492,24 +598,24 @@ func TestMultipartObjectLocation(t *testing.T) {
req: &http.Request{
Host: "mybucket.s3dev.frostfs.devenv",
},
domains: []string{"s3dev.frostfs.devenv"},
bucket: "mybucket",
object: "test/1.txt",
expected: "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
bucket: "mybucket",
object: "test/1.txt",
vhsEnabled: true,
expected: "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
},
{
req: &http.Request{
Host: "mybucket.s3dev.frostfs.devenv",
Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
},
domains: []string{"s3dev.frostfs.devenv"},
bucket: "mybucket",
object: "test/1.txt",
expected: "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
bucket: "mybucket",
object: "test/1.txt",
vhsEnabled: true,
expected: "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
},
} {
t.Run("", func(t *testing.T) {
location := getObjectLocation(tc.req, tc.domains, tc.bucket, tc.object)
location := getObjectLocation(tc.req, tc.bucket, tc.object, tc.vhsEnabled)
require.Equal(t, tc.expected, location)
})
}


@ -100,7 +100,13 @@ func TestListObjectsWithOldTreeNodes(t *testing.T) {
func TestListObjectsVersionsSkipLogTaggingNodesError(t *testing.T) {
loggerCore, observedLog := observer.New(zap.DebugLevel)
log := zap.New(loggerCore)
hc := prepareHandlerContextBase(t, layer.DefaultCachesConfigs(log), log)
hcBase, err := prepareHandlerContextBase(layer.DefaultCachesConfigs(log))
require.NoError(t, err)
hc := &handlerContext{
handlerContextBase: hcBase,
t: t,
}
bktName, objName := "bucket-versioning-enabled", "versions/object"
bktInfo := createTestBucket(hc, bktName)
@ -119,7 +125,7 @@ func TestListObjectsVersionsSkipLogTaggingNodesError(t *testing.T) {
}
func makeAllTreeObjectsOld(hc *handlerContext, bktInfo *data.BucketInfo) {
nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0)
nodes, err := hc.treeMock.GetSubTree(hc.Context(), bktInfo, "version", []uint64{0}, 0, true)
require.NoError(hc.t, err)
for _, node := range nodes {
@ -168,7 +174,12 @@ func TestListObjectsContextCanceled(t *testing.T) {
layerCfg.SessionList.Lifetime = time.Hour
layerCfg.SessionList.Size = 1
hc := prepareHandlerContextBase(t, layerCfg, log)
hcBase, err := prepareHandlerContextBase(layerCfg)
require.NoError(t, err)
hc := &handlerContext{
handlerContextBase: hcBase,
t: t,
}
bktName := "bucket-versioning-enabled"
bktInfo := createTestBucket(hc, bktName)

api/handler/patch.go (new file, 195 lines)

@ -0,0 +1,195 @@
package handler
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
"go.uber.org/zap"
)
const maxPatchSize = 5 * 1024 * 1024 * 1024 // 5GB
func (h *handler) PatchObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := middleware.GetReqInfo(ctx)
if _, ok := r.Header[api.ContentRange]; !ok {
h.logAndSendError(w, "missing Content-Range", reqInfo, errors.GetAPIError(errors.ErrMissingContentRange))
return
}
if _, ok := r.Header[api.ContentLength]; !ok {
h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
return
}
conditional, err := parsePatchConditionalHeaders(r.Header)
if err != nil {
h.logAndSendError(w, "could not parse conditional headers", reqInfo, err)
return
}
bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
if err != nil {
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
return
}
settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
if err != nil {
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
return
}
srcObjPrm := &layer.HeadObjectParams{
Object: reqInfo.ObjectName,
BktInfo: bktInfo,
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
}
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
if err != nil {
h.logAndSendError(w, "could not find object", reqInfo, err)
return
}
srcObjInfo := extendedSrcObjInfo.ObjectInfo
if err = checkPreconditions(srcObjInfo, conditional, h.cfg.MD5Enabled()); err != nil {
h.logAndSendError(w, "precondition failed", reqInfo, err)
return
}
srcSize, err := layer.GetObjectSize(srcObjInfo)
if err != nil {
h.logAndSendError(w, "failed to get source object size", reqInfo, err)
return
}
byteRange, err := parsePatchByteRange(r.Header.Get(api.ContentRange), srcSize)
if err != nil {
h.logAndSendError(w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if maxPatchSize < byteRange.End-byteRange.Start+1 {
h.logAndSendError(w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
return
}
if uint64(r.ContentLength) != (byteRange.End - byteRange.Start + 1) {
h.logAndSendError(w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength))
return
}
if byteRange.Start > srcSize {
h.logAndSendError(w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
return
}
params := &layer.PatchObjectParams{
Object: extendedSrcObjInfo,
BktInfo: bktInfo,
NewBytes: r.Body,
Range: byteRange,
VersioningEnabled: settings.VersioningEnabled(),
}
params.CopiesNumbers, err = h.pickCopiesNumbers(nil, reqInfo.Namespace, bktInfo.LocationConstraint)
if err != nil {
h.logAndSendError(w, "invalid copies number", reqInfo, err)
return
}
extendedObjInfo, err := h.obj.PatchObject(ctx, params)
if err != nil {
if isErrObjectLocked(err) {
h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
} else {
h.logAndSendError(w, "could not patch object", reqInfo, err)
}
return
}
if settings.VersioningEnabled() {
w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
}
w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))
resp := PatchObjectResult{
Object: PatchObject{
LastModified: extendedObjInfo.ObjectInfo.Created.UTC().Format(time.RFC3339),
ETag: data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())),
},
}
if err = middleware.EncodeToResponse(w, resp); err != nil {
h.logAndSendError(w, "could not encode PatchObjectResult to response", reqInfo, err)
return
}
}
func parsePatchConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
var err error
args := &conditionalArgs{
IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
}
if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
return nil, err
}
return args, nil
}
func parsePatchByteRange(rangeStr string, objSize uint64) (*layer.RangeParams, error) {
const prefix = "bytes "
if rangeStr == "" {
return nil, fmt.Errorf("empty range")
}
if !strings.HasPrefix(rangeStr, prefix) {
return nil, fmt.Errorf("unknown unit in range header")
}
rangeStr, _, found := strings.Cut(strings.TrimPrefix(rangeStr, prefix), "/") // value after / is ignored
if !found {
return nil, fmt.Errorf("invalid range: %s", rangeStr)
}
startStr, endStr, found := strings.Cut(rangeStr, "-")
if !found {
return nil, fmt.Errorf("invalid range: %s", rangeStr)
}
start, err := strconv.ParseUint(startStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid start byte: %s", startStr)
}
end := objSize - 1
if len(endStr) > 0 {
end, err = strconv.ParseUint(endStr, 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid end byte: %s", endStr)
}
}
if start > end {
return nil, fmt.Errorf("start byte is greater than end byte")
}
return &layer.RangeParams{
Start: start,
End: end,
}, nil
}
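Taken together, the handler above expects the patch body to be described by a Content-Range header of the form "bytes start-end/*" (the total after the slash is ignored) and a Content-Length equal to the range length. Below is a minimal client-side sketch of such a request; the endpoint, bucket and object names are placeholders, and request signing (which the gateway typically requires) is omitted.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("new") // overwrite bytes 0..2 of the stored object

	// Placeholder path-style endpoint; a real request must also carry auth headers.
	req, err := http.NewRequest(http.MethodPatch,
		"http://localhost:8080/bucket-for-patch/object-for-patch", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// The part after '/' is ignored by parsePatchByteRange, so '*' is enough.
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", 0, len(payload)-1))
	// Content-Length is derived automatically from the bytes.Reader by net/http.

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK with a PatchObjectResult XML body on success
}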

api/handler/patch_test.go (new file, 524 lines)

@ -0,0 +1,524 @@
package handler
import (
"bytes"
"crypto/md5"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"strings"
"testing"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
"github.com/stretchr/testify/require"
)
func TestPatch(t *testing.T) {
tc := prepareHandlerContext(t)
tc.config.md5Enabled = true
bktName, objName := "bucket-for-patch", "object-for-patch"
createTestBucket(tc, bktName)
content := []byte("old object content")
md5Hash := md5.New()
md5Hash.Write(content)
etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))
w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
created := time.Now()
tc.Handler().PutObjectHandler(w, r)
require.Equal(t, etag, w.Header().Get(api.ETag))
patchPayload := []byte("new")
sha256Hash := sha256.New()
sha256Hash.Write(patchPayload)
sha256Hash.Write(content[len(patchPayload):])
hash := hex.EncodeToString(sha256Hash.Sum(nil))
for _, tt := range []struct {
name string
rng string
headers map[string]string
code s3errors.ErrorCode
}{
{
name: "success",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfUnmodifiedSince: created.Format(http.TimeFormat),
api.IfMatch: etag,
},
},
{
name: "invalid range syntax",
rng: "bytes 0-2",
code: s3errors.ErrInvalidRange,
},
{
name: "invalid range length",
rng: "bytes 0-5/*",
code: s3errors.ErrInvalidRangeLength,
},
{
name: "invalid range start",
rng: "bytes 20-22/*",
code: s3errors.ErrRangeOutOfBounds,
},
{
name: "range is too long",
rng: "bytes 0-5368709120/*",
code: s3errors.ErrInvalidRange,
},
{
name: "If-Unmodified-Since precondition are not satisfied",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
},
code: s3errors.ErrPreconditionFailed,
},
{
name: "If-Match precondition are not satisfied",
rng: "bytes 0-2/*",
headers: map[string]string{
api.IfMatch: "etag",
},
code: s3errors.ErrPreconditionFailed,
},
} {
t.Run(tt.name, func(t *testing.T) {
if tt.code == 0 {
res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
require.Equal(t, data.Quote(hash), res.Object.ETag)
} else {
patchObjectErr(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
}
})
}
}
func TestPatchMultipartObject(t *testing.T) {
tc := prepareHandlerContextWithMinCache(t)
tc.config.md5Enabled = true
bktName, objName, partSize := "bucket-for-multipart-patch", "object-for-multipart-patch", 5*1024*1024
createTestBucket(tc, bktName)
t.Run("patch beginning of the first part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(patchSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{patchBody, data1[patchSize:], data2, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch middle of the first part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/4)+"-"+strconv.Itoa(partSize*3/4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/4], patchBody, data1[partSize*3/4:], data2, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch first and second parts", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize / 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3/4)+"-"+strconv.Itoa(partSize*5/4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize*3/4], patchBody, data2[partSize/4:], data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch all parts", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize * 2
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2-1)+"-"+strconv.Itoa(partSize/2+patchSize-2)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2-1], patchBody, data3[partSize/2-1:]}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch all parts and append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchSize := partSize * 3
patchBody := make([]byte, patchSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize/2)+"-"+strconv.Itoa(partSize/2+patchSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1[:partSize/2], patchBody}, []byte("")), object)
require.Equal(t, partSize*7/2, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch second part", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize)+"-"+strconv.Itoa(partSize*2-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, patchBody, data3}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part, equal size", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
require.Equal(t, partSize*3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part, increase size", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize+1)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2)+"-"+strconv.Itoa(partSize*3)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, patchBody}, []byte("")), object)
require.Equal(t, partSize*3+1, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch last part with offset and append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*2+3)+"-"+strconv.Itoa(partSize*3+2)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3[:3], patchBody}, []byte("")), object)
require.Equal(t, partSize*3+3, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("append bytes", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag1, data1 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, partSize)
etag2, data2 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 2, partSize)
etag3, data3 := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 3, partSize)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag1, etag2, etag3})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes "+strconv.Itoa(partSize*3)+"-"+strconv.Itoa(partSize*4-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, bytes.Join([][]byte{data1, data2, data3, patchBody}, []byte("")), object)
require.Equal(t, partSize*4, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-3"))
})
t.Run("patch empty multipart", func(t *testing.T) {
multipartInfo := createMultipartUpload(tc, bktName, objName, map[string]string{})
etag, _ := uploadPart(tc, bktName, objName, multipartInfo.UploadID, 1, 0)
completeMultipartUpload(tc, bktName, objName, multipartInfo.UploadID, []string{etag})
patchBody := make([]byte, partSize)
_, err := rand.Read(patchBody)
require.NoError(t, err)
patchObject(t, tc, bktName, objName, "bytes 0-"+strconv.Itoa(partSize-1)+"/*", patchBody, nil)
object, header := getObject(tc, bktName, objName)
contentLen, err := strconv.Atoi(header.Get(api.ContentLength))
require.NoError(t, err)
equalDataSlices(t, patchBody, object)
require.Equal(t, partSize, contentLen)
require.True(t, strings.HasSuffix(data.UnQuote(header.Get(api.ETag)), "-1"))
})
}
func TestPatchWithVersion(t *testing.T) {
hc := prepareHandlerContextWithMinCache(t)
bktName, objName := "bucket", "obj"
createVersionedBucket(hc, bktName)
objHeader := putObjectContent(hc, bktName, objName, "content")
putObjectContent(hc, bktName, objName, "some content")
patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))
res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
require.False(t, res.IsTruncated)
require.Len(t, res.Version, 3)
for _, version := range res.Version {
content := getObjectVersion(hc, bktName, objName, version.VersionID)
if version.IsLatest {
require.Equal(t, []byte("content updated"), content)
continue
}
if version.VersionID == objHeader.Get(api.AmzVersionID) {
require.Equal(t, []byte("content"), content)
continue
}
require.Equal(t, []byte("some content"), content)
}
}
func TestPatchEncryptedObject(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
createTestBucket(tc, bktName)
w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
setEncryptHeaders(r)
tc.Handler().PutObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
patchObjectErr(t, tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, s3errors.ErrInternalError)
}
func TestPatchMissingHeaders(t *testing.T) {
tc := prepareHandlerContext(t)
bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
createTestBucket(tc, bktName)
w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
setEncryptHeaders(r)
tc.Handler().PutObjectHandler(w, r)
assertStatus(t, w, http.StatusOK)
w = httptest.NewRecorder()
r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
tc.Handler().PatchObjectHandler(w, r)
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentRange))
w = httptest.NewRecorder()
r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
r.Header.Set(api.ContentRange, "bytes 0-2/*")
tc.Handler().PatchObjectHandler(w, r)
assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
}
func TestParsePatchByteRange(t *testing.T) {
for _, tt := range []struct {
rng string
size uint64
expected *layer.RangeParams
err bool
}{
{
rng: "bytes 2-7/*",
expected: &layer.RangeParams{
Start: 2,
End: 7,
},
},
{
rng: "bytes 2-7/3",
expected: &layer.RangeParams{
Start: 2,
End: 7,
},
},
{
rng: "bytes 2-/*",
size: 9,
expected: &layer.RangeParams{
Start: 2,
End: 8,
},
},
{
rng: "bytes 2-/3",
size: 9,
expected: &layer.RangeParams{
Start: 2,
End: 8,
},
},
{
rng: "",
err: true,
},
{
rng: "2-7/*",
err: true,
},
{
rng: "bytes 7-2/*",
err: true,
},
{
rng: "bytes 2-7",
err: true,
},
{
rng: "bytes 2/*",
err: true,
},
{
rng: "bytes a-7/*",
err: true,
},
{
rng: "bytes 2-a/*",
err: true,
},
} {
t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
rng, err := parsePatchByteRange(tt.rng, tt.size)
if tt.err {
require.Error(t, err)
} else {
require.NoError(t, err)
require.Equal(t, tt.expected.Start, rng.Start)
require.Equal(t, tt.expected.End, rng.End)
}
})
}
}
func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
assertStatus(t, w, http.StatusOK)
result := &PatchObjectResult{}
err := xml.NewDecoder(w.Result().Body).Decode(result)
require.NoError(t, err)
return result
}
func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
assertStatus(t, w, http.StatusOK)
result := &PatchObjectResult{}
err := xml.NewDecoder(w.Result().Body).Decode(result)
require.NoError(t, err)
return result
}
func patchObjectErr(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code s3errors.ErrorCode) {
w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
assertS3Error(t, w, s3errors.GetAPIError(code))
}
func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
query := make(url.Values)
if len(version) > 0 {
query.Add(api.QueryVersionID, version)
}
w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
r.Header.Set(api.ContentRange, rng)
r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
for k, v := range headers {
r.Header.Set(k, v)
}
tc.Handler().PatchObjectHandler(w, r)
return w
}


@ -195,6 +195,15 @@ type PostResponse struct {
ETag string `xml:"Etag"`
}
type PatchObjectResult struct {
Object PatchObject `xml:"Object"`
}
type PatchObject struct {
LastModified string `xml:"LastModified"`
ETag string `xml:"ETag"`
}
// MarshalXML -- StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
tokens := []xml.Token{start}


@ -64,7 +64,7 @@ func (n *Layer) containerInfo(ctx context.Context, prm PrmContainer) (*data.Buck
}
}
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
zone := n.features.FormContainerZone(reqInfo.Namespace)
if zone != info.Zone {
return nil, fmt.Errorf("ns '%s' and zone '%s' are mismatched for container '%s'", zone, info.Zone, prm.ContainerID)
}
@ -111,7 +111,7 @@ func (n *Layer) createContainer(ctx context.Context, p *CreateBucketParams) (*da
p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
}
zone, _ := n.features.FormContainerZone(p.Namespace)
zone := n.features.FormContainerZone(p.Namespace)
bktInfo := &data.BucketInfo{
Name: p.Name,


@ -200,6 +200,27 @@ type PrmObjectSearch struct {
FilePrefix string
}
// PrmObjectPatch groups parameters of FrostFS.PatchObject operation.
type PrmObjectPatch struct {
// Authentication parameters.
PrmAuth
// Container of the patched object.
Container cid.ID
// Identifier of the patched object.
Object oid.ID
// Object patch payload encapsulated in io.Reader primitive.
Payload io.Reader
// Object range to patch.
Offset, Length uint64
// Size of original object payload.
ObjectSize uint64
}
var (
// ErrAccessDenied is returned from FrostFS in case of access violation.
ErrAccessDenied = errors.New("access denied")
@ -294,6 +315,15 @@ type FrostFS interface {
// prevented the objects from being selected.
SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)
// PatchObject performs object patch in the FrostFS container.
// It returns the ID of the patched object.
//
// It returns ErrAccessDenied on access violation.
//
// It returns exactly one non-nil value. It returns any error encountered which
// prevented the object from being patched.
PatchObject(context.Context, PrmObjectPatch) (oid.ID, error)
// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
// Note:
// * future time must be after the now


@ -21,6 +21,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -51,12 +52,12 @@ func (k *FeatureSettingsMock) SetMD5Enabled(md5Enabled bool) {
k.md5Enabled = md5Enabled
}
func (k *FeatureSettingsMock) FormContainerZone(ns string) (zone string, isDefault bool) {
func (k *FeatureSettingsMock) FormContainerZone(ns string) string {
if ns == "" {
return v2container.SysAttributeZoneDefault, true
return v2container.SysAttributeZoneDefault
}
return ns + ".ns", false
return ns + ".ns"
}
type TestFrostFS struct {
@ -415,6 +416,42 @@ func (t *TestFrostFS) NetworkInfo(context.Context) (netmap.NetworkInfo, error) {
return ni, nil
}
func (t *TestFrostFS) PatchObject(ctx context.Context, prm PrmObjectPatch) (oid.ID, error) {
obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
if err != nil {
return oid.ID{}, err
}
newObj := *obj
patchBytes, err := io.ReadAll(prm.Payload)
if err != nil {
return oid.ID{}, err
}
var newPayload []byte
if prm.Offset > 0 {
newPayload = append(newPayload, obj.Payload()[:prm.Offset]...)
}
newPayload = append(newPayload, patchBytes...)
if prm.Offset+prm.Length < obj.PayloadSize() {
newPayload = append(newPayload, obj.Payload()[prm.Offset+prm.Length:]...)
}
newObj.SetPayload(newPayload)
newObj.SetPayloadSize(uint64(len(newPayload)))
var hash checksum.Checksum
checksum.Calculate(&hash, checksum.SHA256, newPayload)
newObj.SetPayloadChecksum(hash)
newID := oidtest.ID()
newObj.SetID(newID)
t.objects[newAddress(prm.Container, newID).EncodeToString()] = &newObj
return newID, nil
}
func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID) bool {
cnr, ok := t.containers[cnrID.EncodeToString()]
if !ok {


@ -35,14 +35,14 @@ import (
type (
BucketResolver interface {
Resolve(ctx context.Context, name string) (cid.ID, error)
Resolve(ctx context.Context, zone, name string) (cid.ID, error)
}
FeatureSettings interface {
ClientCut() bool
BufferMaxSizeForPut() uint64
MD5Enabled() bool
FormContainerZone(ns string) (zone string, isDefault bool)
FormContainerZone(ns string) string
}
Layer struct {
@ -160,6 +160,7 @@ type (
DstEncryption encryption.Params
CopiesNumbers []uint32
}
// CreateBucketParams stores bucket create request parameters.
CreateBucketParams struct {
Name string
@ -321,13 +322,13 @@ func (n *Layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInf
}
reqInfo := middleware.GetReqInfo(ctx)
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
zone := n.features.FormContainerZone(reqInfo.Namespace)
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
return bktInfo, nil
}
containerID, err := n.ResolveBucket(ctx, name)
containerID, err := n.ResolveBucket(ctx, zone, name)
if err != nil {
if strings.Contains(err.Error(), "not found") {
return nil, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrNoSuchBucket), err.Error())
@ -351,13 +352,13 @@ func (n *Layer) ResolveCID(ctx context.Context, name string) (cid.ID, error) {
}
reqInfo := middleware.GetReqInfo(ctx)
zone, _ := n.features.FormContainerZone(reqInfo.Namespace)
zone := n.features.FormContainerZone(reqInfo.Namespace)
if bktInfo := n.cache.GetBucket(zone, name); bktInfo != nil {
return bktInfo.CID, nil
}
return n.ResolveBucket(ctx, name)
return n.ResolveBucket(ctx, zone, name)
}
// ListBuckets returns all user containers. The name of the bucket is a container
@ -797,10 +798,10 @@ func (n *Layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.
return nil, errors.GetAPIError(errors.ErrBucketAlreadyExists)
}
func (n *Layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, error) {
var cnrID cid.ID
if err := cnrID.DecodeString(name); err != nil {
if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
if cnrID, err = n.resolver.Resolve(ctx, zone, name); err != nil {
return cid.ID{}, err
}


@ -290,16 +290,18 @@ func (n *Layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
MD5: hex.EncodeToString(createdObj.MD5Sum),
}
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
oldPartIDs, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
if err != nil && !oldPartIDNotFound {
return nil, err
}
if !oldPartIDNotFound {
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
zap.String("cid", bktInfo.CID.EncodeToString()),
zap.String("oid", oldPartID.EncodeToString()))
for _, oldPartID := range oldPartIDs {
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
n.reqLogger(ctx).Error(logs.CouldntDeleteOldPartObject, zap.Error(err),
zap.String("cid", bktInfo.CID.EncodeToString()),
zap.String("oid", oldPartID.EncodeToString()))
}
}
}
@ -385,16 +387,15 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
var multipartObjetSize uint64
var encMultipartObjectSize uint64
parts := make([]*data.PartInfo, 0, len(p.Parts))
parts := make([]*data.PartInfoExtended, 0, len(p.Parts))
var completedPartsHeader strings.Builder
md5Hash := md5.New()
for i, part := range p.Parts {
partInfo := partsInfo[part.PartNumber]
if partInfo == nil || data.UnQuote(part.ETag) != partInfo.GetETag(n.features.MD5Enabled()) {
partInfo := partsInfo.Extract(part.PartNumber, data.UnQuote(part.ETag), n.features.MD5Enabled())
if partInfo == nil {
return nil, nil, fmt.Errorf("%w: unknown part %d or etag mismatched", s3errors.GetAPIError(s3errors.ErrInvalidPart), part.PartNumber)
}
delete(partsInfo, part.PartNumber)
// for the last part we have no minimum size limit
if i != len(p.Parts)-1 && partInfo.Size < UploadMinSize {
@ -475,14 +476,16 @@ func (n *Layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
var addr oid.Address
addr.SetContainer(p.Info.Bkt.CID)
for _, partInfo := range partsInfo {
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
zap.Error(err))
for _, prts := range partsInfo {
for _, partInfo := range prts {
if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldNotDeleteUploadPart,
zap.Stringer("cid", p.Info.Bkt.CID), zap.Stringer("oid", &partInfo.OID),
zap.Error(err))
}
addr.SetObject(partInfo.OID)
n.cache.DeleteObject(addr)
}
addr.SetObject(partInfo.OID)
n.cache.DeleteObject(addr)
}
return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo)
@ -554,10 +557,12 @@ func (n *Layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) e
return err
}
for _, info := range parts {
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
for _, infos := range parts {
for _, info := range infos {
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
n.reqLogger(ctx).Warn(logs.CouldntDeletePart, zap.String("cid", p.Bkt.CID.EncodeToString()),
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
}
}
}
@ -581,7 +586,12 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
parts := make([]*Part, 0, len(partsInfo))
for _, partInfo := range partsInfo {
for _, infos := range partsInfo {
sort.Slice(infos, func(i, j int) bool {
return infos[i].Timestamp < infos[j].Timestamp
})
partInfo := infos[len(infos)-1]
parts = append(parts, &Part{
ETag: data.Quote(partInfo.GetETag(n.features.MD5Enabled())),
LastModified: partInfo.Created.UTC().Format(time.RFC3339),
@ -618,7 +628,22 @@ func (n *Layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsIn
return &res, nil
}
func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
type PartsInfo map[int][]*data.PartInfoExtended
func (p PartsInfo) Extract(part int, etag string, md5Enabled bool) *data.PartInfoExtended {
parts := p[part]
for i, info := range parts {
if info.GetETag(md5Enabled) == etag {
p[part] = append(parts[:i], parts[i+1:]...)
return info
}
}
return nil
}
func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, PartsInfo, error) {
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
if err != nil {
if errors.Is(err, ErrNodeNotFound) {
@ -632,11 +657,11 @@ func (n *Layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
return nil, nil, err
}
res := make(map[int]*data.PartInfo, len(parts))
res := make(map[int][]*data.PartInfoExtended, len(parts))
partsNumbers := make([]int, len(parts))
oids := make([]string, len(parts))
for i, part := range parts {
res[part.Number] = part
res[part.Number] = append(res[part.Number], part)
partsNumbers[i] = part.Number
oids[i] = part.OID.EncodeToString()
}
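The PartsInfo type above is what lets several uploaded objects share one part number after a tree split: Extract picks the object whose ETag matches the one sent in CompleteMultipartUpload and removes it from the map, so everything left over can be deleted as garbage. A simplified, self-contained sketch of those semantics with toy types (not the real data.PartInfoExtended):

package main

import "fmt"

type part struct{ etag string }

type partsInfo map[int][]*part

// extract mirrors PartsInfo.Extract: return the matching entry and drop it from the map.
func (p partsInfo) extract(number int, etag string) *part {
	parts := p[number]
	for i, info := range parts {
		if info.etag == etag {
			p[number] = append(parts[:i], parts[i+1:]...)
			return info
		}
	}
	return nil
}

func main() {
	// Part 1 exists twice (duplicated tree nodes); the client completes with ETag "bbb".
	pi := partsInfo{1: {{etag: "aaa"}, {etag: "bbb"}}}

	chosen := pi.extract(1, "bbb")
	fmt.Println(chosen.etag) // bbb - becomes part of the final object
	fmt.Println(len(pi[1]))  // 1  - the leftover "aaa" object is deleted afterwards
}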

api/layer/patch.go (new file, 264 lines)

@ -0,0 +1,264 @@
package layer
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
)
type PatchObjectParams struct {
Object *data.ExtendedObjectInfo
BktInfo *data.BucketInfo
NewBytes io.Reader
Range *RangeParams
VersioningEnabled bool
CopiesNumbers []uint32
}
func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
if p.Object.ObjectInfo.Headers[AttributeDecryptedSize] != "" {
return nil, fmt.Errorf("patch encrypted object")
}
if p.Object.ObjectInfo.Headers[MultipartObjectSize] != "" {
return n.patchMultipartObject(ctx, p)
}
prmPatch := PrmObjectPatch{
Container: p.BktInfo.CID,
Object: p.Object.ObjectInfo.ID,
Payload: p.NewBytes,
Offset: p.Range.Start,
Length: p.Range.End - p.Range.Start + 1,
ObjectSize: p.Object.ObjectInfo.Size,
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
createdObj, err := n.patchObject(ctx, prmPatch)
if err != nil {
return nil, fmt.Errorf("patch object: %w", err)
}
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: createdObj.ID,
ETag: hex.EncodeToString(createdObj.HashSum),
FilePath: p.Object.ObjectInfo.Name,
Size: createdObj.Size,
Created: &p.Object.ObjectInfo.Created,
Owner: &n.gateOwner,
CreationEpoch: p.Object.NodeVersion.CreationEpoch,
},
IsUnversioned: !p.VersioningEnabled,
IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
}
p.Object.ObjectInfo.ID = createdObj.ID
p.Object.ObjectInfo.Size = createdObj.Size
p.Object.ObjectInfo.MD5Sum = ""
p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
p.Object.NodeVersion = newVersion
return p.Object, nil
}
func (n *Layer) patchObject(ctx context.Context, p PrmObjectPatch) (*data.CreatedObjectInfo, error) {
objID, err := n.frostFS.PatchObject(ctx, p)
if err != nil {
return nil, fmt.Errorf("patch object: %w", err)
}
prmHead := PrmObjectHead{
PrmAuth: p.PrmAuth,
Container: p.Container,
Object: objID,
}
obj, err := n.frostFS.HeadObject(ctx, prmHead)
if err != nil {
return nil, fmt.Errorf("head object: %w", err)
}
payloadChecksum, _ := obj.PayloadChecksum()
return &data.CreatedObjectInfo{
ID: objID,
Size: obj.PayloadSize(),
HashSum: payloadChecksum.Value(),
}, nil
}
func (n *Layer) patchMultipartObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
combinedObj, err := n.objectGet(ctx, p.BktInfo, p.Object.ObjectInfo.ID)
if err != nil {
return nil, fmt.Errorf("get combined object '%s': %w", p.Object.ObjectInfo.ID.EncodeToString(), err)
}
var parts []*data.PartInfo
if err = json.NewDecoder(combinedObj.Payload).Decode(&parts); err != nil {
return nil, fmt.Errorf("unmarshal combined object parts: %w", err)
}
prmPatch := PrmObjectPatch{
Container: p.BktInfo.CID,
}
n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)
off, ln := p.Range.Start, p.Range.End-p.Range.Start+1
var multipartObjectSize uint64
for i, part := range parts {
if off > part.Size || (off == part.Size && i != len(parts)-1) || ln == 0 {
multipartObjectSize += part.Size
if ln != 0 {
off -= part.Size
}
continue
}
var createdObj *data.CreatedObjectInfo
createdObj, off, ln, err = n.patchPart(ctx, part, p, &prmPatch, off, ln, i == len(parts)-1)
if err != nil {
return nil, fmt.Errorf("patch part: %w", err)
}
parts[i].OID = createdObj.ID
parts[i].Size = createdObj.Size
parts[i].MD5 = ""
parts[i].ETag = hex.EncodeToString(createdObj.HashSum)
multipartObjectSize += createdObj.Size
}
return n.updateCombinedObject(ctx, parts, multipartObjectSize, p)
}
// Returns patched part info, updated offset and length.
func (n *Layer) patchPart(ctx context.Context, part *data.PartInfo, p *PatchObjectParams, prmPatch *PrmObjectPatch, off, ln uint64, lastPart bool) (*data.CreatedObjectInfo, uint64, uint64, error) {
if off == 0 && ln >= part.Size {
curLen := part.Size
if lastPart {
curLen = ln
}
prm := PrmObjectCreate{
Container: p.BktInfo.CID,
Payload: io.LimitReader(p.NewBytes, int64(curLen)),
CreationTime: part.Created,
CopiesNumber: p.CopiesNumbers,
}
createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
if err != nil {
return nil, 0, 0, fmt.Errorf("put new part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
return createdObj, off, ln, err
}
curLen := ln
if off+curLen > part.Size && !lastPart {
curLen = part.Size - off
}
prmPatch.Object = part.OID
prmPatch.ObjectSize = part.Size
prmPatch.Offset = off
prmPatch.Length = curLen
prmPatch.Payload = io.LimitReader(p.NewBytes, int64(prmPatch.Length))
createdObj, err := n.patchObject(ctx, *prmPatch)
if err != nil {
return nil, 0, 0, fmt.Errorf("patch part object '%s': %w", part.OID.EncodeToString(), err)
}
ln -= curLen
off = 0
return createdObj, off, ln, nil
}
func (n *Layer) updateCombinedObject(ctx context.Context, parts []*data.PartInfo, fullObjSize uint64, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
newParts, err := json.Marshal(parts)
if err != nil {
return nil, fmt.Errorf("marshal parts for combined object: %w", err)
}
var headerParts strings.Builder
for i, part := range parts {
headerPart := part.ToHeaderString()
if i != len(parts)-1 {
headerPart += ","
}
headerParts.WriteString(headerPart)
}
prm := PrmObjectCreate{
Container: p.BktInfo.CID,
PayloadSize: fullObjSize,
Filepath: p.Object.ObjectInfo.Name,
Payload: bytes.NewReader(newParts),
CreationTime: p.Object.ObjectInfo.Created,
CopiesNumber: p.CopiesNumbers,
}
prm.Attributes = make([][2]string, 0, len(p.Object.ObjectInfo.Headers)+1)
for k, v := range p.Object.ObjectInfo.Headers {
switch k {
case MultipartObjectSize:
prm.Attributes = append(prm.Attributes, [2]string{MultipartObjectSize, strconv.FormatUint(fullObjSize, 10)})
case UploadCompletedParts:
prm.Attributes = append(prm.Attributes, [2]string{UploadCompletedParts, headerParts.String()})
case api.ContentType:
default:
prm.Attributes = append(prm.Attributes, [2]string{k, v})
}
}
prm.Attributes = append(prm.Attributes, [2]string{api.ContentType, p.Object.ObjectInfo.ContentType})
createdObj, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
if err != nil {
return nil, fmt.Errorf("put new combined object: %w", err)
}
newVersion := &data.NodeVersion{
BaseNodeVersion: data.BaseNodeVersion{
OID: createdObj.ID,
ETag: hex.EncodeToString(createdObj.HashSum),
MD5: hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts)),
FilePath: p.Object.ObjectInfo.Name,
Size: fullObjSize,
Created: &p.Object.ObjectInfo.Created,
Owner: &n.gateOwner,
CreationEpoch: p.Object.NodeVersion.CreationEpoch,
},
IsUnversioned: !p.VersioningEnabled,
IsCombined: p.Object.ObjectInfo.Headers[MultipartObjectSize] != "",
}
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
}
p.Object.ObjectInfo.ID = createdObj.ID
p.Object.ObjectInfo.Size = createdObj.Size
p.Object.ObjectInfo.MD5Sum = hex.EncodeToString(createdObj.MD5Sum) + "-" + strconv.Itoa(len(parts))
p.Object.ObjectInfo.HashSum = hex.EncodeToString(createdObj.HashSum)
p.Object.ObjectInfo.Headers[MultipartObjectSize] = strconv.FormatUint(fullObjSize, 10)
p.Object.ObjectInfo.Headers[UploadCompletedParts] = headerParts.String()
p.Object.NodeVersion = newVersion
return p.Object, nil
}
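To make the offset/length bookkeeping in patchMultipartObject and patchPart above easier to follow, here is a small self-contained sketch of just that arithmetic. The part sizes and range are made up, and the full-replacement and append branches of the real code are not modeled; it only shows how one byte range is split into per-part patches.

package main

import "fmt"

func main() {
	partSizes := []uint64{8, 8, 8}  // three parts of a multipart object
	off, ln := uint64(6), uint64(4) // patch bytes 6..9 of the whole object

	for i, size := range partSizes {
		last := i == len(partSizes)-1
		if off > size || (off == size && !last) || ln == 0 {
			if ln != 0 {
				off -= size // the range starts in a later part
			}
			fmt.Printf("part %d: untouched\n", i)
			continue
		}
		cur := ln
		if off+cur > size && !last {
			cur = size - off // clamp to the tail of this part
		}
		fmt.Printf("part %d: patch at offset %d, length %d\n", i, off, cur)
		ln -= cur
		off = 0 // every following part is patched from its beginning
	}
	// Output:
	// part 0: patch at offset 6, length 2
	// part 1: patch at offset 0, length 2
	// part 2: untouched
}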


@ -6,6 +6,7 @@ import (
"io"
"sort"
"strings"
"time"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
@ -33,7 +34,7 @@ type TreeServiceMock struct {
locks map[string]map[uint64]*data.LockInfo
tags map[string]map[uint64]map[string]string
multiparts map[string]map[string][]*data.MultipartInfo
parts map[string]map[int]*data.PartInfo
parts map[string]map[int]*data.PartInfoExtended
}
func (t *TreeServiceMock) GetObjectTaggingAndLock(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
@ -92,7 +93,7 @@ func NewTreeService() *TreeServiceMock {
locks: make(map[string]map[uint64]*data.LockInfo),
tags: make(map[string]map[uint64]map[string]string),
multiparts: make(map[string]map[string][]*data.MultipartInfo),
parts: make(map[string]map[int]*data.PartInfo),
parts: make(map[string]map[int]*data.PartInfoExtended),
}
}
@ -346,28 +347,31 @@ func (t *TreeServiceMock) GetMultipartUpload(_ context.Context, bktInfo *data.Bu
return nil, ErrNodeNotFound
}
func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
func (t *TreeServiceMock) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) {
multipartInfo, err := t.GetMultipartUpload(ctx, bktInfo, info.Key, info.UploadID)
if err != nil {
return oid.ID{}, err
return nil, err
}
if multipartInfo.ID != multipartNodeID {
return oid.ID{}, fmt.Errorf("invalid multipart info id")
return nil, fmt.Errorf("invalid multipart info id")
}
partsMap, ok := t.parts[info.UploadID]
if !ok {
partsMap = make(map[int]*data.PartInfo)
partsMap = make(map[int]*data.PartInfoExtended)
}
partsMap[info.Number] = info
partsMap[info.Number] = &data.PartInfoExtended{
PartInfo: *info,
Timestamp: uint64(time.Now().UnixMicro()),
}
t.parts[info.UploadID] = partsMap
return oid.ID{}, nil
return nil, nil
}
func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
func (t *TreeServiceMock) GetParts(_ context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) {
cnrMultipartsMap := t.multiparts[bktInfo.CID.EncodeToString()]
var foundMultipart *data.MultipartInfo
@ -387,7 +391,7 @@ LOOP:
}
partsMap := t.parts[foundMultipart.UploadID]
result := make([]*data.PartInfo, 0, len(partsMap))
result := make([]*data.PartInfoExtended, 0, len(partsMap))
for _, part := range partsMap {
result = append(result, part)
}


@ -57,11 +57,11 @@ type TreeService interface {
GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
// AddPart puts a node to a system tree as a child of appropriate multipart upload
// and returns objectID of a previous part which must be deleted in FrostFS.
// and returns objectIDs of previous parts which must be deleted in FrostFS.
//
// If object id to remove is not found returns ErrNoNodeToRemove error.
AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error)
GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error)
// If object IDs to remove are not found, it returns ErrNoNodeToRemove error.
AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error)
GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error)
PutBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo, addr oid.Address) ([]oid.Address, error)
GetBucketLifecycleConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (oid.Address, error)


@ -0,0 +1,148 @@
package middleware
import (
"net/http"
"net/url"
"strconv"
"strings"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
"go.uber.org/zap"
)
const wildcardPlaceholder = "<wildcard>"
type VHSSettings interface {
Domains() []string
GlobalVHS() bool
VHSHeader() string
ServernameHeader() string
VHSNamespacesEnabled() map[string]bool
}
func PrepareAddressStyle(settings VHSSettings, log *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := GetReqInfo(ctx)
reqLogger := reqLogOrDefault(ctx, log)
headerVHSEnabled := r.Header.Get(settings.VHSHeader())
if isVHSAddress(headerVHSEnabled, settings.GlobalVHS(), settings.VHSNamespacesEnabled(), reqInfo.Namespace) {
prepareVHSAddress(reqInfo, r, settings)
} else {
preparePathStyleAddress(reqInfo, r, reqLogger)
}
h.ServeHTTP(w, r)
})
}
}
func isVHSAddress(headerVHSEnabled string, enabledFlag bool, vhsNamespaces map[string]bool, namespace string) bool {
if result, err := strconv.ParseBool(headerVHSEnabled); err == nil {
return result
}
result := enabledFlag
if v, ok := vhsNamespaces[namespace]; ok {
result = v
}
return result
}
func prepareVHSAddress(reqInfo *ReqInfo, r *http.Request, settings VHSSettings) {
reqInfo.RequestVHSEnabled = true
bktName, match := checkDomain(r.Host, getDomains(r, settings))
if match {
if bktName == "" {
reqInfo.RequestType = noneType
} else {
if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
reqInfo.RequestType = objectType
reqInfo.ObjectName = objName
reqInfo.BucketName = bktName
} else {
reqInfo.RequestType = bucketType
reqInfo.BucketName = bktName
}
}
} else {
parts := strings.Split(r.Host, ".")
reqInfo.BucketName = parts[0]
if objName := strings.TrimPrefix(r.URL.Path, "/"); objName != "" {
reqInfo.RequestType = objectType
reqInfo.ObjectName = objName
} else {
reqInfo.RequestType = bucketType
}
}
}
func getDomains(r *http.Request, settings VHSSettings) []string {
if headerServername := r.Header.Get(settings.ServernameHeader()); headerServername != "" {
return []string{headerServername}
}
return settings.Domains()
}
func preparePathStyleAddress(reqInfo *ReqInfo, r *http.Request, reqLogger *zap.Logger) {
bktObj := strings.TrimPrefix(r.URL.Path, "/")
if bktObj == "" {
reqInfo.RequestType = noneType
} else if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
reqInfo.RequestType = objectType
reqInfo.BucketName = bktObj[:ind]
reqInfo.ObjectName = bktObj[ind+1:]
if r.URL.RawPath != "" {
// we have to do this because of
// https://github.com/go-chi/chi/issues/641
// https://github.com/go-chi/chi/issues/642
if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
} else {
reqInfo.ObjectName = obj
}
}
} else {
reqInfo.RequestType = bucketType
reqInfo.BucketName = strings.TrimSuffix(bktObj, "/")
}
}
func checkDomain(host string, domains []string) (bktName string, match bool) {
partsHost := strings.Split(host, ".")
for _, pattern := range domains {
partsPattern := strings.Split(pattern, ".")
bktName, match = compareMatch(partsHost, partsPattern)
if match {
break
}
}
return
}
func compareMatch(host, pattern []string) (bktName string, match bool) {
if len(host) < len(pattern) {
return "", false
}
i, j := len(host)-1, len(pattern)-1
for j >= 0 && (pattern[j] == wildcardPlaceholder || host[i] == pattern[j]) {
i--
j--
}
switch {
case i == -1:
return "", true
case i == 0 && (j != 0 || host[i] == pattern[j]):
return host[0], true
default:
return "", false
}
}
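compareMatch walks host and pattern labels from the right: "<wildcard>" consumes exactly one label, and at most one extra leading host label may remain, which is then taken as the bucket name. A test-style sketch of that walk, plus the header/namespace/flag precedence of isVHSAddress, is shown below; it assumes it sits in the middleware package next to the code above.

package middleware

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAddressStyleSketch(t *testing.T) {
	pattern := []string{"s3.<wildcard>.domain.com"}

	// host labels [bktA s3 kapusta domain com] vs pattern [s3 <wildcard> domain com]:
	// com/domain are equal, <wildcard> eats "kapusta", s3 matches s3,
	// and the single remaining host label becomes the bucket name.
	bkt, ok := checkDomain("bktA.s3.kapusta.domain.com", pattern)
	require.True(t, ok)
	require.Equal(t, "bktA", bkt)

	// No extra leading label: the host addresses the gateway itself, so no bucket.
	bkt, ok = checkDomain("s3.kapusta.domain.com", pattern)
	require.True(t, ok)
	require.Equal(t, "", bkt)

	// Two extra leading labels cannot form a single bucket name, so no match.
	_, ok = checkDomain("bktA.bktB.s3.kapusta.domain.com", pattern)
	require.False(t, ok)

	// isVHSAddress precedence: request header > per-namespace setting > global flag.
	ns := map[string]bool{"tenant": false}
	require.True(t, isVHSAddress("true", false, ns, "tenant"))
	require.False(t, isVHSAddress("", true, ns, "tenant"))
	require.True(t, isVHSAddress("", true, ns, "other"))
}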


@ -0,0 +1,443 @@
package middleware
import (
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
const (
FrostfsVHSHeader = "X-Frostfs-S3-VHS"
FrostfsServernameHeader = "X-Frostfs-Servername"
)
type VHSSettingsMock struct {
domains []string
}
func (v *VHSSettingsMock) Domains() []string {
return v.domains
}
func (v *VHSSettingsMock) GlobalVHS() bool {
return false
}
func (v *VHSSettingsMock) VHSHeader() string {
return FrostfsVHSHeader
}
func (v *VHSSettingsMock) ServernameHeader() string {
return FrostfsServernameHeader
}
func (v *VHSSettingsMock) VHSNamespacesEnabled() map[string]bool {
return make(map[string]bool)
}
func TestIsVHSAddress(t *testing.T) {
for _, tc := range []struct {
name string
headerVHSEnabled string
vhsEnabledFlag bool
vhsNamespaced map[string]bool
namespace string
expected bool
}{
{
name: "vhs disabled",
expected: false,
},
{
name: "vhs disabled for namespace",
vhsEnabledFlag: true,
vhsNamespaced: map[string]bool{
"kapusta": false,
},
namespace: "kapusta",
expected: false,
},
{
name: "vhs enabled (global vhs flag)",
vhsEnabledFlag: true,
expected: true,
},
{
name: "vhs enabled for namespace",
vhsNamespaced: map[string]bool{
"kapusta": true,
},
namespace: "kapusta",
expected: true,
},
{
name: "vhs enabled (header)",
headerVHSEnabled: "true",
vhsEnabledFlag: false,
vhsNamespaced: map[string]bool{
"kapusta": false,
},
namespace: "kapusta",
expected: true,
},
{
name: "vhs disabled (header)",
headerVHSEnabled: "false",
vhsEnabledFlag: true,
vhsNamespaced: map[string]bool{
"kapusta": true,
},
namespace: "kapusta",
expected: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
actual := isVHSAddress(tc.headerVHSEnabled, tc.vhsEnabledFlag, tc.vhsNamespaced, tc.namespace)
require.Equal(t, tc.expected, actual)
})
}
}
func TestPreparePathStyleAddress(t *testing.T) {
bkt, obj := "test-bucket", "test-object"
for _, tc := range []struct {
name string
urlParams string
expectedReqType ReqType
expectedBktName string
expectedObjName string
}{
{
name: "bucket request",
urlParams: "/" + bkt,
expectedReqType: bucketType,
expectedBktName: bkt,
},
{
name: "bucket request with slash",
urlParams: "/" + bkt + "/",
expectedReqType: bucketType,
expectedBktName: bkt,
},
{
name: "object request",
urlParams: "/" + bkt + "/" + obj,
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj,
},
{
name: "object request with slash",
urlParams: "/" + bkt + "/" + obj + "/",
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj + "/",
},
{
name: "none type request",
urlParams: "/",
expectedReqType: noneType,
},
} {
t.Run(tc.name, func(t *testing.T) {
reqInfo := &ReqInfo{}
r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)
preparePathStyleAddress(reqInfo, r, reqLogOrDefault(r.Context(), zaptest.NewLogger(t)))
require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
})
}
}
func TestPrepareVHSAddress(t *testing.T) {
bkt, obj, domain := "test-bucket", "test-object", "domain.com"
for _, tc := range []struct {
name string
domains []string
host string
urlParams string
expectedReqType ReqType
expectedBktName string
expectedObjName string
}{
{
name: "bucket request, the domain matched",
domains: []string{domain},
host: bkt + "." + domain,
urlParams: "/",
expectedReqType: bucketType,
expectedBktName: bkt,
},
{
name: "object request, the domain matched",
domains: []string{domain},
host: bkt + "." + domain,
urlParams: "/" + obj,
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj,
},
{
name: "object request with slash, the domain matched",
domains: []string{domain},
host: bkt + "." + domain,
urlParams: "/" + obj + "/",
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj + "/",
},
{
name: "list-buckets request, the domain matched",
domains: []string{domain},
host: domain,
urlParams: "/",
expectedReqType: noneType,
},
{
name: "bucket request, the domain don't match",
host: bkt + "." + domain,
urlParams: "/",
expectedReqType: bucketType,
expectedBktName: bkt,
},
{
name: "object request, the domain don't match",
host: bkt + "." + domain,
urlParams: "/" + obj,
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj,
},
{
name: "object request with slash, the domain don't match",
host: bkt + "." + domain,
urlParams: "/" + obj + "/",
expectedReqType: objectType,
expectedBktName: bkt,
expectedObjName: obj + "/",
},
{
name: "list-buckets request, the domain don't match (list-buckets isn't supported if the domains don't match)",
host: domain,
urlParams: "/",
expectedReqType: bucketType,
expectedBktName: strings.Split(domain, ".")[0],
},
} {
t.Run(tc.name, func(t *testing.T) {
reqInfo := &ReqInfo{}
vhsSettings := &VHSSettingsMock{domains: tc.domains}
r := httptest.NewRequest(http.MethodGet, tc.urlParams, nil)
r.Host = tc.host
prepareVHSAddress(reqInfo, r, vhsSettings)
require.Equal(t, tc.expectedReqType, reqInfo.RequestType)
require.Equal(t, tc.expectedBktName, reqInfo.BucketName)
require.Equal(t, tc.expectedObjName, reqInfo.ObjectName)
})
}
}
func TestCheckDomains(t *testing.T) {
for _, tc := range []struct {
name string
domains []string
requestURL string
expectedBktName string
expectedMatch bool
}{
{
name: "valid url with bktName and namespace (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "bktA.s3.kapusta.domain.com",
expectedBktName: "bktA",
expectedMatch: true,
},
{
name: "valid url without bktName and namespace (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "s3.kapusta.domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "invalid url with invalid bktName (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "bktA.bktB.s3.kapusta.domain.com",
expectedMatch: false,
},
{
name: "invalid url without namespace (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "bktA.s3.domain.com",
expectedMatch: false,
},
{
name: "invalid url with invalid infix (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "bktA.s4.kapusta.domain.com",
expectedMatch: false,
},
{
name: "invalid url with invalid postfix (wildcard after protocol infix)",
domains: []string{"s3.<wildcard>.domain.com"},
requestURL: "bktA.s3.kapusta.dom.su",
expectedMatch: false,
},
{
name: "valid url with bktName and namespace (wildcard at the beginning of the domain)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "bktA.kapusta.domain.com",
expectedBktName: "bktA",
expectedMatch: true,
},
{
name: "valid url without bktName and namespace (wildcard at the beginning of the domain)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "kapusta.domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "invalid url with invalid bktName (wildcard at the beginning of the domain)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "bktA.bktB.kapusta.domain.com",
expectedMatch: false,
},
{
name: "collision test - true, because we cannot clearly distinguish a namespace from a bucket (wildcard at the beginning of the domain)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "bktA.domain.com",
expectedMatch: true,
},
{
name: "invalid url (fewer hosts)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "domain.com",
expectedMatch: false,
},
{
name: "invalid url with invalid postfix (wildcard at the beginning of the domain)",
domains: []string{"<wildcard>.domain.com"},
requestURL: "bktA.kapusta.dom.su",
expectedMatch: false,
},
{
name: "valid url with bktName and without wildcard (root namaspace)",
domains: []string{"domain.com"},
requestURL: "bktA.domain.com",
expectedBktName: "bktA",
expectedMatch: true,
},
{
name: "valid url without bktName and without wildcard (root namaspace)",
domains: []string{"domain.com"},
requestURL: "domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "invalid url with bktName without wildcard (root namaspace)",
domains: []string{"domain.com"},
requestURL: "bktA.dom.su",
expectedMatch: false,
},
{
name: "invalid url without wildcard (root namaspace)",
domains: []string{"domain.com"},
requestURL: "dom.su",
expectedMatch: false,
},
{
name: "valid url, with a sorted list of domains",
domains: []string{"s3.<wildcard>.domain.com", "<wildcard>.domain.com", "domain.com"},
requestURL: "s3.kapusta.domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "valid url with bktName, multiple wildcards (wildcards at the beginning of the domain)",
domains: []string{"<wildcard>.<wildcard>.domain.com"},
requestURL: "bktA.s3.kapusta.domain.com",
expectedBktName: "bktA",
expectedMatch: true,
},
{
name: "valid url without bktName, multiple wildcards (wildcards at the beginning of the domain)",
domains: []string{"<wildcard>.<wildcard>.domain.com"},
requestURL: "s3.kapusta.domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "valid url with bktName, multiply wildcards",
domains: []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
requestURL: "bktA.s3.kapusta.subdomain.domain.com",
expectedBktName: "bktA",
expectedMatch: true,
},
{
name: "valid url without bktName, multiply wildcards",
domains: []string{"s3.<wildcard>.subdomain.<wildcard>.com"},
requestURL: "s3.kapusta.subdomain.domain.com",
expectedBktName: "",
expectedMatch: true,
},
{
name: "invalid url without one wildcard",
domains: []string{"<wildcard>.<wildcard>.domain.com"},
requestURL: "kapusta.domain.com",
expectedMatch: false,
},
{
name: "invalid url, multiply wildcards",
domains: []string{"<wildcard>.<wildcard>.domain.com"},
requestURL: "s3.kapusta.dom.com",
expectedMatch: false,
},
{
name: "invalid url with invalid bktName, multiply wildcards",
domains: []string{"<wildcard>.<wildcard>.domain.com"},
requestURL: "bktA.bktB.s3.kapusta.domain.com",
expectedMatch: false,
},
} {
t.Run(tc.name, func(t *testing.T) {
bktName, match := checkDomain(tc.requestURL, tc.domains)
require.Equal(t, tc.expectedBktName, bktName)
require.Equal(t, tc.expectedMatch, match)
})
}
}
func TestGetDomains(t *testing.T) {
req := httptest.NewRequest(http.MethodGet, "/", nil)
settings := &VHSSettingsMock{
domains: []string{
"s3.domain.com",
"s3.<wildcard>.domain.com",
"domain.com",
},
}
t.Run("the request does not contain the X-Frostfs-Servername header", func(t *testing.T) {
actualDomains := getDomains(req, settings)
require.Equal(t, settings.domains, actualDomains)
})
serverName := "domain.com"
req.Header.Set(settings.ServernameHeader(), serverName)
t.Run("the request contains the X-Frostfs-Servername header", func(t *testing.T) {
actualDomains := getDomains(req, settings)
require.Equal(t, []string{serverName}, actualDomains)
})
}

View file

@ -74,6 +74,7 @@ const (
AbortMultipartUploadOperation = "AbortMultipartUpload"
DeleteObjectTaggingOperation = "DeleteObjectTagging"
DeleteObjectOperation = "DeleteObject"
PatchObjectOperation = "PatchObject"
)
const (

View file

@ -73,7 +73,6 @@ type PolicyConfig struct {
Storage engine.ChainRouter
FrostfsID FrostFSIDInformer
Settings PolicySettings
Domains []string
Log *zap.Logger
BucketResolver BucketResolveFunc
Decoder XMLDecoder
@ -99,21 +98,21 @@ func PolicyCheck(cfg PolicyConfig) Func {
}
func policyCheck(r *http.Request, cfg PolicyConfig) error {
reqType, bktName, objName := getBucketObject(r, cfg.Domains)
req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqType, bktName, objName)
reqInfo := GetReqInfo(r.Context())
req, userKey, userGroups, err := getPolicyRequest(r, cfg, reqInfo.RequestType, reqInfo.BucketName, reqInfo.ObjectName)
if err != nil {
return err
}
var bktInfo *data.BucketInfo
if reqType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
bktInfo, err = cfg.BucketResolver(r.Context(), bktName)
if reqInfo.RequestType != noneType && !strings.HasSuffix(req.Operation(), CreateBucketOperation) {
bktInfo, err = cfg.BucketResolver(r.Context(), reqInfo.BucketName)
if err != nil {
return err
}
}
reqInfo := GetReqInfo(r.Context())
target := engine.NewRequestTargetWithNamespace(reqInfo.Namespace)
if bktInfo != nil {
cnrTarget := engine.ContainerTarget(bktInfo.CID.EncodeToString())
@ -208,33 +207,6 @@ const (
objectType
)
func getBucketObject(r *http.Request, domains []string) (reqType ReqType, bktName string, objName string) {
for _, domain := range domains {
ind := strings.Index(r.Host, "."+domain)
if ind == -1 {
continue
}
bkt := r.Host[:ind]
if obj := strings.TrimPrefix(r.URL.Path, "/"); obj != "" {
return objectType, bkt, obj
}
return bucketType, bkt, ""
}
bktObj := strings.TrimPrefix(r.URL.Path, "/")
if bktObj == "" {
return noneType, "", ""
}
if ind := strings.IndexByte(bktObj, '/'); ind != -1 && bktObj[ind+1:] != "" {
return objectType, bktObj[:ind], bktObj[ind+1:]
}
return bucketType, strings.TrimSuffix(bktObj, "/"), ""
}
func determineOperation(r *http.Request, reqType ReqType) (operation string) {
switch reqType {
case objectType:
@ -357,6 +329,8 @@ func determineObjectOperation(r *http.Request) string {
switch r.Method {
case http.MethodOptions:
return OptionsObjectOperation
case http.MethodPatch:
return PatchObjectOperation
case http.MethodHead:
return HeadObjectOperation
case http.MethodGet:

View file

@ -8,79 +8,6 @@ import (
"github.com/stretchr/testify/require"
)
func TestReqTypeDetermination(t *testing.T) {
bkt, obj, domain := "test-bucket", "test-object", "domain"
for _, tc := range []struct {
name string
target string
host string
domains []string
expectedType ReqType
expectedBktName string
expectedObjName string
}{
{
name: "bucket request, path-style",
target: "/" + bkt,
expectedType: bucketType,
expectedBktName: bkt,
},
{
name: "bucket request with slash, path-style",
target: "/" + bkt + "/",
expectedType: bucketType,
expectedBktName: bkt,
},
{
name: "object request, path-style",
target: "/" + bkt + "/" + obj,
expectedType: objectType,
expectedBktName: bkt,
expectedObjName: obj,
},
{
name: "object request with slash, path-style",
target: "/" + bkt + "/" + obj + "/",
expectedType: objectType,
expectedBktName: bkt,
expectedObjName: obj + "/",
},
{
name: "none type request",
target: "/",
expectedType: noneType,
},
{
name: "bucket request, virtual-hosted style",
target: "/",
host: bkt + "." + domain,
domains: []string{"some-domain", domain},
expectedType: bucketType,
expectedBktName: bkt,
},
{
name: "object request, virtual-hosted style",
target: "/" + obj,
host: bkt + "." + domain,
domains: []string{"some-domain", domain},
expectedType: objectType,
expectedBktName: bkt,
expectedObjName: obj,
},
} {
t.Run(tc.name, func(t *testing.T) {
r := httptest.NewRequest(http.MethodPut, tc.target, nil)
r.Host = tc.host
reqType, bktName, objName := getBucketObject(r, tc.domains)
require.Equal(t, tc.expectedType, reqType)
require.Equal(t, tc.expectedBktName, bktName)
require.Equal(t, tc.expectedObjName, objName)
})
}
}
func TestDetermineBucketOperation(t *testing.T) {
const defaultValue = "value"

View file

@ -12,7 +12,6 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
treepool "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/pool/tree"
"github.com/go-chi/chi/v5"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc/metadata"
@ -28,19 +27,21 @@ type (
// ReqInfo stores the request info.
ReqInfo struct {
sync.RWMutex
RemoteHost string // Client Host/IP
Host string // Node Host/IP
UserAgent string // User Agent
DeploymentID string // random generated s3-deployment-id
RequestID string // x-amz-request-id
API string // API name -- GetObject PutObject NewMultipartUpload etc.
BucketName string // Bucket name
ObjectName string // Object name
TraceID string // Trace ID
URL *url.URL // Request url
Namespace string
User string // User owner id
Tagging *data.Tagging
RemoteHost string // Client Host/IP
Host string // Node Host/IP
UserAgent string // User Agent
DeploymentID string // random generated s3-deployment-id
RequestID string // x-amz-request-id
API string // API name -- GetObject PutObject NewMultipartUpload etc.
BucketName string // Bucket name
ObjectName string // Object name
TraceID string // Trace ID
URL *url.URL // Request url
Namespace string
User string // User owner id
Tagging *data.Tagging
RequestVHSEnabled bool
RequestType ReqType
}
// ObjectRequest represents object request data.
@ -61,10 +62,6 @@ const (
const HdrAmzRequestID = "x-amz-request-id"
const (
BucketURLPrm = "bucket"
)
var deploymentID = uuid.Must(uuid.NewRandom())
var (
@ -202,57 +199,6 @@ func Request(log *zap.Logger, settings RequestSettings) Func {
}
}
// AddBucketName adds bucket name to ReqInfo from context.
func AddBucketName(l *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := GetReqInfo(ctx)
reqInfo.BucketName = chi.URLParam(r, BucketURLPrm)
if reqInfo.BucketName != "" {
reqLogger := reqLogOrDefault(ctx, l)
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("bucket", reqInfo.BucketName))))
}
h.ServeHTTP(w, r)
})
}
}
// AddObjectName adds objects name to ReqInfo from context.
func AddObjectName(l *zap.Logger) Func {
return func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
reqInfo := GetReqInfo(ctx)
reqLogger := reqLogOrDefault(ctx, l)
rctx := chi.RouteContext(ctx)
// trim leading slash (always present)
reqInfo.ObjectName = rctx.RoutePath[1:]
if r.URL.RawPath != "" {
// we have to do this because of
// https://github.com/go-chi/chi/issues/641
// https://github.com/go-chi/chi/issues/642
if obj, err := url.PathUnescape(reqInfo.ObjectName); err != nil {
reqLogger.Warn(logs.FailedToUnescapeObjectName, zap.Error(err))
} else {
reqInfo.ObjectName = obj
}
}
if reqInfo.ObjectName != "" {
r = r.WithContext(SetReqLogger(ctx, reqLogger.With(zap.String("object", reqInfo.ObjectName))))
}
h.ServeHTTP(w, r)
})
}
}
// getSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order), falls back to r.RemoteAddr when everything
// else fails.

View file

@ -6,7 +6,7 @@ import (
"fmt"
"sync"
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
v2container "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/container"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container"
cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/ns"
@ -29,20 +29,14 @@ type FrostFS interface {
SystemDNS(context.Context) (string, error)
}
type Settings interface {
FormContainerZone(ns string) (zone string, isDefault bool)
}
type Config struct {
FrostFS FrostFS
RPCAddress string
Settings Settings
}
type BucketResolver struct {
rpcAddress string
frostfs FrostFS
settings Settings
mu sync.RWMutex
resolvers []*Resolver
@ -50,15 +44,15 @@ type BucketResolver struct {
type Resolver struct {
Name string
resolve func(context.Context, string) (cid.ID, error)
resolve func(context.Context, string, string) (cid.ID, error)
}
func (r *Resolver) SetResolveFunc(fn func(context.Context, string) (cid.ID, error)) {
func (r *Resolver) SetResolveFunc(fn func(context.Context, string, string) (cid.ID, error)) {
r.resolve = fn
}
func (r *Resolver) Resolve(ctx context.Context, name string) (cid.ID, error) {
return r.resolve(ctx, name)
func (r *Resolver) Resolve(ctx context.Context, zone, name string) (cid.ID, error) {
return r.resolve(ctx, zone, name)
}
func NewBucketResolver(resolverNames []string, cfg *Config) (*BucketResolver, error) {
@ -87,12 +81,12 @@ func createResolvers(resolverNames []string, cfg *Config) ([]*Resolver, error) {
return resolvers, nil
}
func (r *BucketResolver) Resolve(ctx context.Context, bktName string) (cnrID cid.ID, err error) {
func (r *BucketResolver) Resolve(ctx context.Context, zone, bktName string) (cnrID cid.ID, err error) {
r.mu.RLock()
defer r.mu.RUnlock()
for _, resolver := range r.resolvers {
cnrID, resolverErr := resolver.Resolve(ctx, bktName)
cnrID, resolverErr := resolver.Resolve(ctx, zone, bktName)
if resolverErr != nil {
resolverErr = fmt.Errorf("%s: %w", resolver.Name, resolverErr)
if err == nil {
@ -123,7 +117,6 @@ func (r *BucketResolver) UpdateResolvers(resolverNames []string) error {
cfg := &Config{
FrostFS: r.frostfs,
RPCAddress: r.rpcAddress,
Settings: r.settings,
}
resolvers, err := createResolvers(resolverNames, cfg)
@ -152,30 +145,25 @@ func (r *BucketResolver) equals(resolverNames []string) bool {
func newResolver(name string, cfg *Config) (*Resolver, error) {
switch name {
case DNSResolver:
return NewDNSResolver(cfg.FrostFS, cfg.Settings)
return NewDNSResolver(cfg.FrostFS)
case NNSResolver:
return NewNNSResolver(cfg.RPCAddress, cfg.Settings)
return NewNNSResolver(cfg.RPCAddress)
default:
return nil, fmt.Errorf("unknown resolver: %s", name)
}
}
func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
func NewDNSResolver(frostFS FrostFS) (*Resolver, error) {
if frostFS == nil {
return nil, fmt.Errorf("pool must not be nil for DNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for DNS resolver")
}
var dns ns.DNS
resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
resolveFunc := func(ctx context.Context, zone, name string) (cid.ID, error) {
var err error
reqInfo := middleware.GetReqInfo(ctx)
zone, isDefault := settings.FormContainerZone(reqInfo.Namespace)
if isDefault {
if zone == v2container.SysAttributeZoneDefault {
zone, err = frostFS.SystemDNS(ctx)
if err != nil {
return cid.ID{}, fmt.Errorf("read system DNS parameter of the FrostFS: %w", err)
@ -196,13 +184,10 @@ func NewDNSResolver(frostFS FrostFS, settings Settings) (*Resolver, error) {
}, nil
}
func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
func NewNNSResolver(address string) (*Resolver, error) {
if address == "" {
return nil, fmt.Errorf("rpc address must not be empty for NNS resolver")
}
if settings == nil {
return nil, fmt.Errorf("resolver settings must not be nil for NNS resolver")
}
var nns ns.NNS
@ -210,12 +195,9 @@ func NewNNSResolver(address string, settings Settings) (*Resolver, error) {
return nil, fmt.Errorf("dial %s: %w", address, err)
}
resolveFunc := func(ctx context.Context, name string) (cid.ID, error) {
resolveFunc := func(_ context.Context, zone, name string) (cid.ID, error) {
var d container.Domain
d.SetName(name)
reqInfo := middleware.GetReqInfo(ctx)
zone, _ := settings.FormContainerZone(reqInfo.Namespace)
d.SetZone(zone)
cnrID, err := nns.ResolveContainerDomain(d)

View file

@ -87,6 +87,7 @@ type (
AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
ListPartsHandler(w http.ResponseWriter, r *http.Request)
ListMultipartUploadsHandler(http.ResponseWriter, *http.Request)
PatchObjectHandler(http.ResponseWriter, *http.Request)
ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
ResolveCID(ctx context.Context, bucket string) (cid.ID, error)
@ -97,6 +98,7 @@ type Settings interface {
s3middleware.RequestSettings
s3middleware.PolicySettings
s3middleware.MetricsSettings
s3middleware.VHSSettings
}
type FrostFSID interface {
@ -113,9 +115,6 @@ type Config struct {
MiddlewareSettings Settings
// Domains optional. If empty no virtual hosted domains will be attached.
Domains []string
FrostfsID FrostFSID
FrostFSIDValidation bool
@ -142,11 +141,11 @@ func NewRouter(cfg Config) *chi.Mux {
api.Use(s3middleware.FrostfsIDValidation(cfg.FrostfsID, cfg.Log))
}
api.Use(s3middleware.PrepareAddressStyle(cfg.MiddlewareSettings, cfg.Log))
api.Use(s3middleware.PolicyCheck(s3middleware.PolicyConfig{
Storage: cfg.PolicyChecker,
FrostfsID: cfg.FrostfsID,
Settings: cfg.MiddlewareSettings,
Domains: cfg.Domains,
Log: cfg.Log,
BucketResolver: cfg.Handler.ResolveBucket,
Decoder: cfg.XMLDecoder,
@ -154,22 +153,41 @@ func NewRouter(cfg Config) *chi.Mux {
}))
defaultRouter := chi.NewRouter()
defaultRouter.Mount(fmt.Sprintf("/{%s}", s3middleware.BucketURLPrm), bucketRouter(cfg.Handler, cfg.Log))
defaultRouter.Get("/", named("ListBuckets", cfg.Handler.ListBucketsHandler))
defaultRouter.Mount("/{bucket}", bucketRouter(cfg.Handler))
defaultRouter.Get("/", named(s3middleware.ListBucketsOperation, cfg.Handler.ListBucketsHandler))
attachErrorHandler(defaultRouter)
hr := NewHostBucketRouter("bucket")
hr.Default(defaultRouter)
for _, domain := range cfg.Domains {
hr.Map(domain, bucketRouter(cfg.Handler, cfg.Log))
}
api.Mount("/", hr)
vhsRouter := bucketRouter(cfg.Handler)
router := newGlobalRouter(defaultRouter, vhsRouter)
api.Mount("/", router)
attachErrorHandler(api)
return api
}
type globalRouter struct {
pathStyleRouter chi.Router
vhsRouter chi.Router
}
func newGlobalRouter(pathStyleRouter, vhsRouter chi.Router) *globalRouter {
return &globalRouter{
pathStyleRouter: pathStyleRouter,
vhsRouter: vhsRouter,
}
}
func (g *globalRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
router := g.pathStyleRouter
if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.RequestVHSEnabled {
router = g.vhsRouter
}
router.ServeHTTP(w, r)
}
func named(name string, handlerFunc http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
reqInfo := s3middleware.GetReqInfo(r.Context())
@ -214,14 +232,13 @@ func attachErrorHandler(api *chi.Mux) {
api.MethodNotAllowed(named("MethodNotAllowed", errorHandler))
}
func bucketRouter(h Handler, log *zap.Logger) chi.Router {
func bucketRouter(h Handler) chi.Router {
bktRouter := chi.NewRouter()
bktRouter.Use(
s3middleware.AddBucketName(log),
s3middleware.WrapHandler(h.AppendCORSHeaders),
)
bktRouter.Mount("/", objectRouter(h, log))
bktRouter.Mount("/", objectRouter(h))
bktRouter.Options("/", named(s3middleware.OptionsBucketOperation, h.Preflight))
@ -293,7 +310,7 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
Add(NewFilter().
Queries(s3middleware.VersionsQuery).
Handler(named(s3middleware.ListBucketObjectVersionsOperation, h.ListBucketObjectVersionsHandler))).
DefaultHandler(named(s3middleware.ListObjectsV1Operation, h.ListObjectsV1Handler)))
DefaultHandler(listWrapper(h)))
})
// PUT method handlers
@ -368,14 +385,27 @@ func bucketRouter(h Handler, log *zap.Logger) chi.Router {
return bktRouter
}
func objectRouter(h Handler, l *zap.Logger) chi.Router {
func listWrapper(h Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if reqInfo := s3middleware.GetReqInfo(r.Context()); reqInfo.BucketName == "" {
reqInfo.API = s3middleware.ListBucketsOperation
h.ListBucketsHandler(w, r)
} else {
reqInfo.API = s3middleware.ListObjectsV1Operation
h.ListObjectsV1Handler(w, r)
}
}
}
func objectRouter(h Handler) chi.Router {
objRouter := chi.NewRouter()
objRouter.Use(s3middleware.AddObjectName(l))
objRouter.Options("/*", named(s3middleware.OptionsObjectOperation, h.Preflight))
objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))
objRouter.Patch("/*", named(s3middleware.PatchObjectOperation, h.PatchObjectHandler))
// GET method handlers
objRouter.Group(func(r chi.Router) {
r.Method(http.MethodGet, "/*", NewHandlerFilter().

View file

@ -23,7 +23,11 @@ import (
"github.com/stretchr/testify/require"
)
const FrostfsNamespaceHeader = "X-Frostfs-Namespace"
const (
FrostfsNamespaceHeader = "X-Frostfs-Namespace"
FrostfsVHSHeader = "X-Frostfs-S3-VHS"
FrostfsServernameHeader = "X-Frostfs-Servername"
)
type poolStatisticMock struct {
}
@ -71,8 +75,11 @@ func (c *centerMock) Authenticate(*http.Request) (*middleware.Box, error) {
}
type middlewareSettingsMock struct {
denyByDefault bool
sourceIPHeader string
denyByDefault bool
sourceIPHeader string
domains []string
vhsEnabled bool
vhsNamespacesEnabled map[string]bool
}
func (r *middlewareSettingsMock) SourceIPHeader() string {
@ -91,6 +98,26 @@ func (r *middlewareSettingsMock) PolicyDenyByDefault() bool {
return r.denyByDefault
}
func (r *middlewareSettingsMock) Domains() []string {
return r.domains
}
func (r *middlewareSettingsMock) GlobalVHS() bool {
return r.vhsEnabled
}
func (r *middlewareSettingsMock) VHSHeader() string {
return FrostfsVHSHeader
}
func (r *middlewareSettingsMock) ServernameHeader() string {
return FrostfsServernameHeader
}
func (r *middlewareSettingsMock) VHSNamespacesEnabled() map[string]bool {
return r.vhsNamespacesEnabled
}
type frostFSIDMock struct {
tags map[string]string
validateError bool
@ -534,6 +561,10 @@ func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
h.writeResponse(w, res)
}
func (h *handlerMock) PatchObjectHandler(http.ResponseWriter, *http.Request) {
panic("implement me")
}
func (h *handlerMock) ResolveBucket(ctx context.Context, name string) (*data.BucketInfo, error) {
reqInfo := middleware.GetReqInfo(ctx)
bktInfo, ok := h.buckets[reqInfo.Namespace+name]

View file

@ -78,7 +78,6 @@ func prepareRouter(t *testing.T, opts ...option) *routerMock {
Metrics: metrics.NewAppMetrics(metricsConfig),
MiddlewareSettings: middlewareSettings,
PolicyChecker: policyChecker,
Domains: []string{"domain1", "domain2"},
FrostfsID: &frostFSIDMock{},
XMLDecoder: &xmlMock{},
Tagging: &resourceTaggingMock{},
@ -847,6 +846,31 @@ func TestFrostFSIDValidation(t *testing.T) {
createBucketErr(chiRouter, "", "bkt-3", nil, apiErrors.ErrInternalError)
}
func TestRouterListObjectsV2Domains(t *testing.T) {
chiRouter := prepareRouter(t, enableVHSDomains("domain.com"))
chiRouter.handler.buckets["bucket"] = &data.BucketInfo{}
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "/", nil)
r.Host = "bucket.domain.com"
query := make(url.Values)
query.Set(s3middleware.ListTypeQuery, "2")
r.URL.RawQuery = query.Encode()
chiRouter.ServeHTTP(w, r)
resp := readResponse(t, w)
require.Equal(t, s3middleware.ListObjectsV2Operation, resp.Method)
}
func enableVHSDomains(domains ...string) option {
return func(cfg *Config) {
setting := cfg.MiddlewareSettings.(*middlewareSettingsMock)
setting.vhsEnabled = true
setting.domains = domains
}
}
func readResponse(t *testing.T, w *httptest.ResponseRecorder) handlerResult {
var res handlerResult

View file

@ -11,6 +11,7 @@ import (
"os"
"os/signal"
"runtime/debug"
"strings"
"sync"
"syscall"
"time"
@ -105,9 +106,13 @@ type (
policyDenyByDefault bool
sourceIPHeader string
retryMaxAttempts int
domains []string
vhsEnabled bool
vhsHeader string
servernameHeader string
vhsNamespacesEnabled map[string]bool
retryMaxBackoff time.Duration
retryStrategy handler.RetryStrategy
domains []string
}
maxClientsConfig struct {
@ -230,39 +235,85 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
}
func (s *appSettings) update(v *viper.Viper, log *zap.Logger) {
s.updateNamespacesSettings(v, log)
s.useDefaultXMLNamespace(v.GetBool(cfgKludgeUseDefaultXMLNS))
s.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
s.setClientCut(v.GetBool(cfgClientCut))
s.setBufferMaxSizeForPut(v.GetUint64(cfgBufferMaxSizeForPut))
s.setMD5Enabled(v.GetBool(cfgMD5Enabled))
s.setPolicyDenyByDefault(v.GetBool(cfgPolicyDenyByDefault))
s.setSourceIPHeader(v.GetString(cfgSourceIPHeader))
s.setRetryMaxAttempts(fetchRetryMaxAttempts(v))
s.setRetryMaxBackoff(fetchRetryMaxBackoff(v))
s.setRetryStrategy(fetchRetryStrategy(v))
s.setVHSSettings(v, log)
}
func (s *appSettings) updateNamespacesSettings(v *viper.Viper, log *zap.Logger) {
nsHeader := v.GetString(cfgResolveNamespaceHeader)
namespaceHeader := v.GetString(cfgResolveNamespaceHeader)
nsConfig, defaultNamespaces := fetchNamespacesConfig(log, v)
defaultXMLNS := v.GetBool(cfgKludgeUseDefaultXMLNS)
bypassContentEncodingInChunks := v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks)
clientCut := v.GetBool(cfgClientCut)
maxBufferSizeForPut := v.GetUint64(cfgBufferMaxSizeForPut)
md5Enabled := v.GetBool(cfgMD5Enabled)
policyDenyByDefault := v.GetBool(cfgPolicyDenyByDefault)
sourceIPHeader := v.GetString(cfgSourceIPHeader)
retryMaxAttempts := fetchRetryMaxAttempts(v)
retryMaxBackoff := fetchRetryMaxBackoff(v)
retryStrategy := fetchRetryStrategy(v)
domains := fetchDomains(v, log)
vhsEnabled := v.GetBool(cfgVHSEnabled)
vhsHeader := v.GetString(cfgVHSHeader)
servernameHeader := v.GetString(cfgServernameHeader)
vhsNamespacesEnabled := s.prepareVHSNamespaces(v, log)
s.mu.Lock()
defer s.mu.Unlock()
s.namespaceHeader = nsHeader
s.namespaceHeader = namespaceHeader
s.defaultNamespaces = defaultNamespaces
s.namespaces = nsConfig.Namespaces
s.defaultXMLNS = defaultXMLNS
s.bypassContentEncodingInChunks = bypassContentEncodingInChunks
s.clientCut = clientCut
s.maxBufferSizeForPut = maxBufferSizeForPut
s.md5Enabled = md5Enabled
s.policyDenyByDefault = policyDenyByDefault
s.sourceIPHeader = sourceIPHeader
s.retryMaxAttempts = retryMaxAttempts
s.retryMaxBackoff = retryMaxBackoff
s.retryStrategy = retryStrategy
s.domains = domains
s.vhsEnabled = vhsEnabled
s.vhsHeader = vhsHeader
s.servernameHeader = servernameHeader
s.vhsNamespacesEnabled = vhsNamespacesEnabled
}
func (s *appSettings) setVHSSettings(v *viper.Viper, _ *zap.Logger) {
domains := v.GetStringSlice(cfgListenDomains)
func (s *appSettings) prepareVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
nsMap := fetchVHSNamespaces(v, log)
vhsNamespaces := make(map[string]bool, len(nsMap))
for ns, flag := range nsMap {
vhsNamespaces[s.ResolveNamespaceAlias(ns)] = flag
}
s.mu.Lock()
defer s.mu.Unlock()
return vhsNamespaces
}
s.domains = domains
func (s *appSettings) Domains() []string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.domains
}
func (s *appSettings) GlobalVHS() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.vhsEnabled
}
func (s *appSettings) VHSHeader() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.vhsHeader
}
func (s *appSettings) ServernameHeader() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.servernameHeader
}
func (s *appSettings) VHSNamespacesEnabled() map[string]bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.vhsNamespacesEnabled
}
func (s *appSettings) BypassContentEncodingInChunks() bool {
@ -271,36 +322,18 @@ func (s *appSettings) BypassContentEncodingInChunks() bool {
return s.bypassContentEncodingInChunks
}
func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
s.mu.Lock()
s.bypassContentEncodingInChunks = bypass
s.mu.Unlock()
}
func (s *appSettings) ClientCut() bool {
s.mu.RLock()
defer s.mu.RUnlock()
return s.clientCut
}
func (s *appSettings) setClientCut(clientCut bool) {
s.mu.Lock()
s.clientCut = clientCut
s.mu.Unlock()
}
func (s *appSettings) BufferMaxSizeForPut() uint64 {
s.mu.RLock()
defer s.mu.RUnlock()
return s.maxBufferSizeForPut
}
func (s *appSettings) setBufferMaxSizeForPut(size uint64) {
s.mu.Lock()
s.maxBufferSizeForPut = size
s.mu.Unlock()
}
func (s *appSettings) DefaultPlacementPolicy(namespace string) netmap.PlacementPolicy {
s.mu.RLock()
defer s.mu.RUnlock()
@ -348,12 +381,6 @@ func (s *appSettings) NewXMLDecoder(r io.Reader) *xml.Decoder {
return dec
}
func (s *appSettings) useDefaultXMLNamespace(useDefaultNamespace bool) {
s.mu.Lock()
s.defaultXMLNS = useDefaultNamespace
s.mu.Unlock()
}
func (s *appSettings) DefaultMaxAge() int {
return s.defaultMaxAge
}
@ -372,24 +399,18 @@ func (s *appSettings) MD5Enabled() bool {
return s.md5Enabled
}
func (s *appSettings) setMD5Enabled(md5Enabled bool) {
s.mu.Lock()
s.md5Enabled = md5Enabled
s.mu.Unlock()
}
func (s *appSettings) NamespaceHeader() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.namespaceHeader
}
func (s *appSettings) FormContainerZone(ns string) (zone string, isDefault bool) {
func (s *appSettings) FormContainerZone(ns string) string {
if len(ns) == 0 {
return v2container.SysAttributeZoneDefault, true
return v2container.SysAttributeZoneDefault
}
return ns + ".ns", false
return ns + ".ns"
}
func (s *appSettings) isDefaultNamespace(ns string) bool {
@ -413,66 +434,30 @@ func (s *appSettings) PolicyDenyByDefault() bool {
return s.policyDenyByDefault
}
func (s *appSettings) setPolicyDenyByDefault(policyDenyByDefault bool) {
s.mu.Lock()
s.policyDenyByDefault = policyDenyByDefault
s.mu.Unlock()
}
func (s *appSettings) setSourceIPHeader(header string) {
s.mu.Lock()
s.sourceIPHeader = header
s.mu.Unlock()
}
func (s *appSettings) SourceIPHeader() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.sourceIPHeader
}
func (s *appSettings) setRetryMaxAttempts(maxAttempts int) {
s.mu.Lock()
s.retryMaxAttempts = maxAttempts
s.mu.Unlock()
}
func (s *appSettings) RetryMaxAttempts() int {
s.mu.RLock()
defer s.mu.RUnlock()
return s.retryMaxAttempts
}
func (s *appSettings) setRetryMaxBackoff(maxBackoff time.Duration) {
s.mu.Lock()
s.retryMaxBackoff = maxBackoff
s.mu.Unlock()
}
func (s *appSettings) RetryMaxBackoff() time.Duration {
s.mu.RLock()
defer s.mu.RUnlock()
return s.retryMaxBackoff
}
func (s *appSettings) setRetryStrategy(strategy handler.RetryStrategy) {
s.mu.Lock()
s.retryStrategy = strategy
s.mu.Unlock()
}
func (s *appSettings) RetryStrategy() handler.RetryStrategy {
s.mu.RLock()
defer s.mu.RUnlock()
return s.retryStrategy
}
func (s *appSettings) Domains() []string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.domains
}
func (a *App) initAPI(ctx context.Context) {
a.initLayer(ctx)
a.initHandler()
@ -541,7 +526,6 @@ func (a *App) getResolverConfig() *resolver.Config {
return &resolver.Config{
FrostFS: frostfs.NewResolverFrostFS(a.pool),
RPCAddress: a.cfg.GetString(cfgRPCEndpoint),
Settings: a.settings,
}
}
@ -638,6 +622,9 @@ func getPools(ctx context.Context, logger *zap.Logger, cfg *viper.Viper) (*pool.
errorThreshold := fetchErrorThreshold(cfg)
prm.SetErrorThreshold(errorThreshold)
prm.SetGracefulCloseOnSwitchTimeout(fetchSetGracefulCloseOnSwitchTimeout(cfg))
prm.SetLogger(logger)
prmTree.SetLogger(logger)
@ -710,9 +697,6 @@ func (a *App) setHealthStatus() {
// Serve runs HTTP server to handle S3 API requests.
func (a *App) Serve(ctx context.Context) {
// Attach S3 API:
a.log.Info(logs.FetchDomainsPrepareToUseAPI, zap.Strings("domains", a.settings.Domains()))
cfg := api.Config{
Throttle: middleware.ThrottleOpts{
Limit: a.settings.maxClient.count,
@ -722,7 +706,6 @@ func (a *App) Serve(ctx context.Context) {
Center: a.ctr,
Log: a.log,
Metrics: a.metrics,
Domains: a.settings.Domains(),
MiddlewareSettings: a.settings,
PolicyChecker: a.policyStorage,
@ -1079,8 +1062,13 @@ func (a *App) fetchContainerInfo(ctx context.Context, cfgKey string) (info *data
var id cid.ID
if err = id.DecodeString(containerString); err != nil {
if id, err = a.bucketResolver.Resolve(ctx, containerString); err != nil {
return nil, fmt.Errorf("resolve container name %s: %w", containerString, err)
i := strings.Index(containerString, ".")
if i < 0 {
return nil, fmt.Errorf("invalid container address: %s", containerString)
}
if id, err = a.bucketResolver.Resolve(ctx, containerString[i+1:], containerString[:i]); err != nil {
return nil, fmt.Errorf("resolve container address %s: %w", containerString, err)
}
}

View file

@ -30,6 +30,8 @@ import (
const (
destinationStdout = "stdout"
destinationJournald = "journald"
wildcardPlaceholder = "<wildcard>"
)
const (
@ -39,6 +41,8 @@ const (
defaultStreamTimeout = 10 * time.Second
defaultShutdownTimeout = 15 * time.Second
defaultGracefulCloseOnSwitchTimeout = 10 * time.Second
defaultPoolErrorThreshold uint32 = 100
defaultPlacementPolicy = "REP 3"
@ -52,7 +56,9 @@ const (
defaultAccessBoxCacheRemovingCheckInterval = 5 * time.Minute
defaultNamespaceHeader = "X-Frostfs-Namespace"
defaultNamespaceHeader = "X-Frostfs-Namespace"
defaultVHSHeader = "X-Frostfs-S3-VHS"
defaultServernameHeader = "X-Frostfs-Servername"
defaultConstraintName = "default"
@ -144,6 +150,12 @@ const ( // Settings.
cfgListenDomains = "listen_domains"
// VHS.
cfgVHSEnabled = "vhs.enabled"
cfgVHSHeader = "vhs.vhs_header"
cfgServernameHeader = "vhs.servername_header"
cfgVHSNamespaces = "vhs.namespaces"
// Peers.
cfgPeers = "peers"
@ -200,6 +212,10 @@ const ( // Settings.
// Sets max attempt to make successful tree request.
cfgTreePoolMaxAttempts = "frostfs.tree_pool_max_attempts"
// Specifies how long an unhealthy client is kept open during rebalancing
// before it is closed, so that in-flight requests can finish.
cfgGracefulCloseOnSwitchTimeout = "frostfs.graceful_close_on_switch_timeout"
// List of allowed AccessKeyID prefixes.
cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
@ -286,6 +302,15 @@ func fetchRebalanceInterval(cfg *viper.Viper) time.Duration {
return rebalanceInterval
}
func fetchSetGracefulCloseOnSwitchTimeout(cfg *viper.Viper) time.Duration {
val := cfg.GetDuration(cfgGracefulCloseOnSwitchTimeout)
if val <= 0 {
val = defaultGracefulCloseOnSwitchTimeout
}
return val
}
func fetchErrorThreshold(cfg *viper.Viper) uint32 {
errorThreshold := cfg.GetUint32(cfgPoolErrorThreshold)
if errorThreshold <= 0 {
@ -668,6 +693,41 @@ func fetchServers(v *viper.Viper, log *zap.Logger) []ServerInfo {
return servers
}
func fetchDomains(v *viper.Viper, log *zap.Logger) []string {
domains := validateDomains(v.GetStringSlice(cfgListenDomains), log)
countParts := func(domain string) int {
return strings.Count(domain, ".")
}
sort.Slice(domains, func(i, j int) bool {
return countParts(domains[i]) > countParts(domains[j])
})
return domains
}
func fetchVHSNamespaces(v *viper.Viper, log *zap.Logger) map[string]bool {
vhsNamespacesEnabled := make(map[string]bool)
nsMap := v.GetStringMap(cfgVHSNamespaces)
for ns, val := range nsMap {
if _, ok := vhsNamespacesEnabled[ns]; ok {
log.Warn(logs.WarnDuplicateNamespaceVHS, zap.String("namespace", ns))
continue
}
enabledFlag, ok := val.(bool)
if !ok {
log.Warn(logs.WarnValueVHSEnabledFlagWrongType, zap.String("namespace", ns))
continue
}
vhsNamespacesEnabled[ns] = enabledFlag
}
return vhsNamespacesEnabled
}
func newSettings() *viper.Viper {
v := viper.New()
@ -754,6 +814,10 @@ func newSettings() *viper.Viper {
v.SetDefault(cfgRetryMaxAttempts, defaultRetryMaxAttempts)
v.SetDefault(cfgRetryMaxBackoff, defaultRetryMaxBackoff)
// vhs
v.SetDefault(cfgVHSHeader, defaultVHSHeader)
v.SetDefault(cfgServernameHeader, defaultServernameHeader)
// Bind flags
if err := bindFlags(v, flags); err != nil {
panic(fmt.Errorf("bind flags: %w", err))
@ -1029,3 +1093,19 @@ func getLogLevel(v *viper.Viper) (zapcore.Level, error) {
}
return lvl, nil
}
func validateDomains(domains []string, log *zap.Logger) []string {
validDomains := make([]string, 0, len(domains))
LOOP:
for _, domain := range domains {
domainParts := strings.Split(domain, ".")
for _, part := range domainParts {
if strings.ContainsAny(part, "<>") && part != wildcardPlaceholder {
log.Warn(logs.WarnDomainContainsInvalidPlaceholder, zap.String("domain", domain))
continue LOOP
}
}
validDomains = append(validDomains, domain)
}
return validDomains
}

View file

@ -0,0 +1,34 @@
package main
import (
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
)
func TestValidateDomains(t *testing.T) {
inputDomains := []string{
"s3dev.frostfs.devenv",
"s3dev.<invalid>.frostfs.devenv",
"s3dev.<wildcard>.frostfs.devenv",
"s3dev.<wildcard.frostfs.devenv",
"s3dev.wildcard>.frostfs.devenv",
"s3dev.<wild.card>.frostfs.devenv",
"<invalid>.frostfs.devenv",
"<wildcard>.frostfs.devenv>",
"<wildcard>.frostfs.devenv",
"s3dev.fro<stfs.devenv",
"<wildcard>.dev.<wildcard>.frostfs.devenv",
"<wildcard>.dev.<wildc>ard>.frostfs.devenv",
}
expectedDomains := []string{
"s3dev.frostfs.devenv",
"s3dev.<wildcard>.frostfs.devenv",
"<wildcard>.frostfs.devenv",
"<wildcard>.dev.<wildcard>.frostfs.devenv",
}
actualDomains := validateDomains(inputDomains, zaptest.NewLogger(t))
require.Equal(t, expectedDomains, actualDomains)
}

View file

@ -36,8 +36,15 @@ S3_GW_SERVER_1_TLS_KEY_FILE=/path/to/tls/key
# How often to reconnect to the servers
S3_GW_RECONNECT_INTERVAL: 1m
# Domains to be able to use virtual-hosted-style access to bucket.
S3_GW_LISTEN_DOMAINS=s3dev.frostfs.devenv
# Domains to be able to use virtual-hosted-style access to bucket
S3_GW_LISTEN_DOMAINS="domain.com <wildcard>.domain.com"
# VHS enabled flag
S3_GW_VHS_ENABLED=false
# Header for determining whether VHS is enabled for the request
S3_GW_VHS_VHS_HEADER=X-Frostfs-S3-VHS
# Header for determining servername
S3_GW_VHS_SERVERNAME_HEADER=X-Frostfs-Servername
# Config file
S3_GW_CONFIG=/path/to/config/yaml
@ -137,6 +144,8 @@ S3_GW_FROSTFS_BUFFER_MAX_SIZE_FOR_PUT=1048576
# max attempt to make successful tree request.
# default value is 0 that means the number of attempts equals to number of nodes in pool.
S3_GW_FROSTFS_TREE_POOL_MAX_ATTEMPTS=0
# Specifies how long an unhealthy client is kept open during rebalancing before it is closed, so that in-flight requests can finish.
S3_GW_FROSTFS_GRACEFUL_CLOSE_ON_SWITCH_TIMEOUT=10s
# List of allowed AccessKeyID prefixes
# If not set, S3 GW will accept all AccessKeyIDs

View file

@ -42,6 +42,15 @@ server:
# Domains to be able to use virtual-hosted-style access to bucket.
listen_domains:
- s3dev.frostfs.devenv
- s3dev.<wildcard>.frostfs.devenv
vhs:
enabled: false
vhs_header: X-Frostfs-S3-VHS
servername_header: X-Frostfs-Servername
namespaces:
"ns1": false
"ns2": true
logger:
level: debug
@ -162,6 +171,8 @@ frostfs:
client_cut: false
# Sets max buffer size for read payload in put operations.
buffer_max_size_for_put: 1048576
# Specifies how long an unhealthy client is kept open during rebalancing before it is closed, so that in-flight requests can finish.
graceful_close_on_switch_timeout: 10s
# List of allowed AccessKeyID prefixes
# If the parameter is omitted, S3 GW will accept all AccessKeyIDs

View file

@ -193,12 +193,14 @@ There are some custom types used for brevity:
| `namespaces` | [Namespaces configuration](#namespaces-section) |
| `retry` | [Retry configuration](#retry-section) |
| `containers` | [Containers configuration](#containers-section) |
| `vhs` | [VHS configuration](#vhs-section) |
### General section
```yaml
listen_domains:
- s3dev.frostfs.devenv
- s3dev.<wildcard>.frostfs.devenv
- s3dev2.frostfs.devenv
rpc_endpoint: http://morph-chain.frostfs.devenv:30333
@ -226,7 +228,7 @@ source_ip_header: "Source-Ip"
| Parameter | Type | SIGHUP reload | Default value | Description |
|----------------------------------|------------|---------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `listen_domains` | `[]string` | no | | Domains to be able to use virtual-hosted-style access to bucket. |
| `listen_domains` | `[]string` | yes | | Domains that enable virtual-hosted-style access to buckets. Placeholders of the `<wildcard>` type are supported. |
| `rpc_endpoint` | `string` | no | | The address of the RPC host to which the gateway connects to resolve bucket names and interact with frostfs contracts (required to use the `nns` resolver and `frostfsid` contract). |
| `resolve_order` | `[]string` | yes | `[dns]` | Order of bucket name resolvers to use. Available resolvers: `dns`, `nns`. |
| `connect_timeout` | `duration` | no | `10s` | Timeout to connect to a node. |
@ -523,14 +525,16 @@ frostfs:
client_cut: false
buffer_max_size_for_put: 1048576 # 1mb
tree_pool_max_attempts: 0
graceful_close_on_switch_timeout: 10s
```
| Parameter | Type | SIGHUP reload | Default value | Description |
|---------------------------|------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `set_copies_number` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------------------------|------------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `set_copies_number` | `[]uint32` | yes | `[0]` | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
| `client_cut` | `bool` | yes | `false` | This flag enables client side object preparing. |
| `buffer_max_size_for_put` | `uint64` | yes | `1048576` | Sets max buffer size for read payload in put operations. |
| `tree_pool_max_attempts` | `uint32` | no | `0` | Sets max attempt to make successful tree request. Value 0 means the number of attempts equals to number of nodes in pool. |
| `graceful_close_on_switch_timeout` | `duration` | no | `10s` | Specifies how long an unhealthy client is kept open during rebalancing before it is closed, so that in-flight requests can finish. |
# `resolve_bucket` section
@ -723,3 +727,24 @@ containers:
|-------------|----------|---------------|---------------|-------------------------------------------------------------------------------------------|
| `cors` | `string` | no | | Container name for CORS configurations. If not set, container of the bucket is used. |
| `lifecycle` | `string` | no | | Container name for lifecycle configurations. If not set, container of the bucket is used. |
# `vhs` section
Configuration of virtual-hosted-style addressing.
```yaml
vhs:
enabled: false
vhs_header: X-Frostfs-S3-VHS
servername_header: X-Frostfs-Servername
namespaces:
"ns1": false
"ns2": true
```
| Parameter | Type | SIGHUP reload | Default value | Description |
| ------------------- | ----------------- | ------------- | ---------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `bool` | yes | `false` | Enables the use of virtual host addressing for buckets at the application level. |
| `vhs_header` | `string` | yes | `X-Frostfs-S3-VHS` | Header for determining whether VHS is enabled for the request. |
| `servername_header` | `string` | yes | `X-Frostfs-Servername` | Header for determining servername. |
| `namespaces` | `map[string]bool` | yes | | A map whose keys are namespace names and whose values enable or disable VHS for that namespace. A per-namespace value overrides the global `enabled` setting, even when the global setting is disabled. |
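
Taken together, these parameters mean the VHS decision is made per request. The sketch below illustrates only one plausible resolution order (request header first, then the per-namespace flag, then the global `enabled` value); `VHSSettings` here is a stand-in for the gateway's settings interface, and the treatment of the header value is an assumption rather than the gateway's exact logic.

```go
package vhsexample

import (
	"net/http"
	"strings"
)

// VHSSettings is a stand-in for the gateway settings described above;
// the real interface may differ.
type VHSSettings interface {
	GlobalVHS() bool
	VHSHeader() string
	VHSNamespacesEnabled() map[string]bool
}

// vhsEnabledForRequest shows one plausible way to combine the settings for
// a single request: an explicit header wins, then the per-namespace flag,
// then the global `enabled` value. Illustrative only.
func vhsEnabledForRequest(r *http.Request, s VHSSettings, namespace string) bool {
	if v := r.Header.Get(s.VHSHeader()); v != "" {
		return strings.EqualFold(v, "true") // assumed header convention
	}
	if enabled, ok := s.VHSNamespacesEnabled()[namespace]; ok {
		return enabled
	}
	return s.GlobalVHS()
}
```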

View file

@ -50,3 +50,172 @@ HTTP/1.1 204 No Content
Connection: close
Server: AmazonS3
```
## Object operations management
### Action to patch object (PatchObject)
Allows partially changing an existing object and appending data to it.
> **Note**: patch is not supported for objects that were uploaded using SSE.
#### Path parameters
- **Bucket**
Bucket name.
_Required: Yes_
- **Key**
Object name.
_Required: Yes_
#### Query parameters
- **versionId**
Version of the original object to patch.
_Required: No_
#### Request headers
- **Content-Range**
The byte range of the object (or its version) to patch.
The value is formed as follows: `bytes {start byte}-{end byte}/*`.
Range boundaries are inclusive.
The maximum range length is 5GB.
To write additional data to the object, the end byte must be greater than the object size.
The start byte cannot be greater than the object size.
The range length must be equal to the value of the **_Content-Length_** header.
For example, for a 100-byte object, `bytes 90-119/*` with `Content-Length: 30` overwrites the last 10 bytes and appends 20 new ones.
The format corresponds to the [RFC 9110](https://www.rfc-editor.org/rfc/rfc9110#name-content-range) specification with the following
exceptions:
- **_complete-length_** parameter is ignored.
- **_last-pos_** parameter is optional (if not specified, the value is assumed to be equal to the end byte of the object).
_Required: Yes_
- **Content-Length**
Number of bytes sent in the request body.
_Required: Yes_
- **If-Match**
Patch is performed if the ETag of the object (or its version) is equal to the one specified in the header.
_Required: No_
- **If-Unmodified-Since**
Patch is performed if the object (or its version) has not changed since the time specified in the header.
_Required: No_
- **x-amz-expected-bucket-owner**
ID of the intended owner of the bucket.
_Required: No_
#### Request body
Contains new data for the passed byte range of the object.
#### Response
The request returns the following data in XML format.
- **PatchObjectResult**
Root level tag for parameters.
- **Object**
Parent tag for patch results.
- **LastModified**
Time when the object was last modified. Applying a patch does not change this value.
- **ETag**
ETag of the patched object. For regular objects it is always a SHA-256 hash.
If the bucket is versioned, the **_x-amz-version-id_** header is returned with the version of the created object.
#### Errors
- **MissingContentRange**
The required **_Content-Range_** header was not sent.
HTTP Status Code: 400
- **NoSuchBucket**
The specified bucket does not exist.
HTTP Status Code: 404
- **NoSuchKey**
The specified object does not exist.
HTTP Status Code: 404
- **MissingContentLength**
The required **_Content-Length_** header was not sent.
HTTP Status Code: 411
- **PreconditionFailed**
At least one of the preconditions is not satisfied.
HTTP Status Code: 412
- **InvalidRange**
Incorrect value in **_Content-Range_** header.
HTTP Status Code: 416
#### Example
Sample Request
```text
PATCH /example-bucket/example-key HTTP/1.1
Host: data.s3.<Region>.frostfs-s3-gw.com
Content-Range: bytes 0-3/*
Content-Length: 4
&AUTHPARAMS
Body
```
Sample Response
```xml
<PatchObjectResult>
<Object>
<LastModified>2024-07-24T14:54:54Z</LastModified>
<ETag>"e8b53b75afaf3ce898f048c663b11cf4c71f5f13456673dd5b422a247c9e627f"</ETag>
</Object>
</PatchObjectResult>
```
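
For readers working from Go, a minimal sketch of issuing the same request with the standard library is shown below. The endpoint, bucket, and key are placeholders, and AWS SigV4 signing (the `&AUTHPARAMS` part of the sample) is omitted, so the snippet only illustrates how the headers from this section fit together.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload := []byte("Body") // new data for bytes 0-3 of the object

	// Placeholder endpoint; a real request must also be signed with SigV4.
	url := "https://data.s3.example.frostfs-s3-gw.com/example-bucket/example-key"

	req, err := http.NewRequest(http.MethodPatch, url, bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// Inclusive byte range to patch; its length must match Content-Length,
	// which net/http derives automatically from the bytes.Reader body.
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/*", 0, len(payload)-1))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status) // a successful patch returns a PatchObjectResult XML body
	fmt.Println(string(body))
}
```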

19
go.mod
View file

@ -1,13 +1,13 @@
module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
go 1.21
go 1.22
require (
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240802100114-e83d6b7c6a1a
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240821072038-a1386f6d259a
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b
git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
github.com/aws/aws-sdk-go v1.44.6
github.com/aws/aws-sdk-go-v2 v1.18.1
@ -25,6 +25,7 @@ require (
github.com/spf13/viper v1.15.0
github.com/ssgreg/journald v1.0.0
github.com/stretchr/testify v1.9.0
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4
github.com/urfave/cli/v2 v2.3.0
go.opentelemetry.io/otel v1.16.0
go.opentelemetry.io/otel/trace v1.16.0
@ -33,7 +34,7 @@ require (
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
golang.org/x/net v0.23.0
golang.org/x/text v0.14.0
google.golang.org/grpc v1.62.0
google.golang.org/grpc v1.63.2
google.golang.org/protobuf v1.33.0
)
@ -42,6 +43,7 @@ require (
git.frostfs.info/TrueCloudLab/hrw v1.2.1 // indirect
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 // indirect
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 // indirect
github.com/VictoriaMetrics/easyproto v0.1.4 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@ -53,7 +55,6 @@ require (
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
@ -92,9 +93,9 @@ require (
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.18.0 // indirect
golang.org/x/term v0.18.0 // indirect
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

34
go.sum
View file

@ -36,20 +36,20 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e h1:gEWT+70E/RvGkxtSv+PlyUN2vtJVymhQa1mypvrXukM=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240726072425-3dfa2f4fd65e/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326 h1:TkH+NSsY4C/Z8MocIJyMcqLm5vEhZcSowOldJyilKKA=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240819074700-a43110e36326/go.mod h1:zZnHiRv9m5+ESYLhBXY9Jds9A/YIDEUGiuyPUS09HwM=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240802100114-e83d6b7c6a1a h1:/Vv8nOySzl8iuKeo2ynjPqnQHZRVGPQKf8Q9l4fOWa8=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240802100114-e83d6b7c6a1a/go.mod h1:DlJmgV4/qkFkx2ab+YWznlMijiF2yZHnrJswJOB7XGs=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1 h1:+Z55WxE1ad/LBzRX1dqgaWlXAQ/NDjUsBlwEIZ4rn6k=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240822080251-28f140bf06c1/go.mod h1:Pl77loECndbgIC0Kljj1MFmGJKQ9gotaFINyveW1T8I=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240821072038-a1386f6d259a h1:uuNs7xOVgFOqO6hUyyZT+/eZ9glXQ85J4GDVe+qKMCI=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240821072038-a1386f6d259a/go.mod h1:SgioiGhQNWqiV5qpFAXRDJF81SEFRBhtwGEiU0FViyA=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b h1:M50kdfrf/h8c3cz0bJ2AEUcbXvAlPFVC1Wp1WkfZ/8E=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240822104152-a3bc3099bd5b/go.mod h1:GZTk55RI4dKzsK6BCn5h2xxE28UHNfgoq/NJxW/LQ6A=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0 h1:M2KR3iBj7WpY3hP10IevfIB9MURr4O9mwVfJ+SjT3HA=
git.frostfs.info/TrueCloudLab/rfc6979 v0.4.0/go.mod h1:okpbKfVYf/BpejtfFTfhZqFP+sZ8rsHrP8Rr/jYPNRc=
git.frostfs.info/TrueCloudLab/tzhash v1.8.0 h1:UFMnUIk0Zh17m8rjGHJMqku2hCgaXDqjqZzS4gsb4UA=
@ -59,6 +59,8 @@ git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02/go.
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/easyproto v0.1.4 h1:r8cNvo8o6sR4QShBXQd1bKw/VVLSQma/V2KhTBPf+Sc=
github.com/VictoriaMetrics/easyproto v0.1.4/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
@ -160,8 +162,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -321,6 +321,8 @@ github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954 h1:xQdMZ1WLrgkkvOZ/LDQxjVxMLdby7osSh4ZEVa5sIjs=
github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4 h1:GpfJ7OdNjS7BFTVwNCUI9L4aCJOFRbr5fdHqjdhoYE8=
github.com/trailofbits/go-fuzz-utils v0.0.0-20230413173806-58c38daa3cb4/go.mod h1:f3jBhpWvuZmue0HZK52GzRHJOYHYSILs/c8+K2S/J+o=
github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
@ -654,12 +656,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -680,8 +682,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View file

@ -403,6 +403,38 @@ func (x *FrostFS) NetworkInfo(ctx context.Context) (netmap.NetworkInfo, error) {
return ni, nil
}
func (x *FrostFS) PatchObject(ctx context.Context, prm layer.PrmObjectPatch) (oid.ID, error) {
var addr oid.Address
addr.SetContainer(prm.Container)
addr.SetObject(prm.Object)
var prmPatch pool.PrmObjectPatch
prmPatch.SetAddress(addr)
var rng object.Range
rng.SetOffset(prm.Offset)
rng.SetLength(prm.Length)
if prm.Length+prm.Offset > prm.ObjectSize {
rng.SetLength(prm.ObjectSize - prm.Offset)
}
prmPatch.SetRange(&rng)
prmPatch.SetPayloadReader(prm.Payload)
if prm.BearerToken != nil {
prmPatch.UseBearer(*prm.BearerToken)
} else {
prmPatch.UseKey(prm.PrivateKey)
}
res, err := x.pool.PatchObject(ctx, prmPatch)
if err != nil {
return oid.ID{}, handleObjectError("patch object via connection pool", err)
}
return res.ObjectID, nil
}
// ResolverFrostFS represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type ResolverFrostFS struct {

View file

@ -189,7 +189,7 @@ func (m *multiTX) wrapCall(method string, args []any) {
if err == nil {
return
}
if !errors.Is(commonclient.ErrTransactionTooLarge, err) {
if !errors.Is(err, commonclient.ErrTransactionTooLarge) {
m.err = err
return
}

View file

@ -102,14 +102,18 @@ func (w *PoolWrapper) GetNodes(ctx context.Context, prm *tree.GetNodesParams) ([
return res, nil
}
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) ([]tree.NodeResponse, error) {
func (w *PoolWrapper) GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]tree.NodeResponse, error) {
order := treepool.NoneOrder
if sort {
order = treepool.AscendingOrder
}
poolPrm := treepool.GetSubTreeParams{
CID: bktInfo.CID,
TreeID: treeID,
RootID: rootID,
Depth: depth,
BearerToken: getBearer(ctx, bktInfo),
Order: treepool.AscendingOrder,
Order: order,
}
if len(rootID) == 1 && rootID[0] == 0 {
// storage node interprets 'nil' value as []uint64{0}


@ -20,7 +20,6 @@ const (
UsingCredentials = "using credentials" // Info in ../../cmd/s3-gw/app.go
ApplicationStarted = "application started" // Info in ../../cmd/s3-gw/app.go
ApplicationFinished = "application finished" // Info in ../../cmd/s3-gw/app.go
FetchDomainsPrepareToUseAPI = "fetch domains, prepare to use API" // Info in ../../cmd/s3-gw/app.go
StartingServer = "starting server" // Info in ../../cmd/s3-gw/app.go
StoppingServer = "stopping server" // Info in ../../cmd/s3-gw/app.go
SIGHUPConfigReloadStarted = "SIGHUP config reload started" // Info in ../../cmd/s3-gw/app.go
@ -159,4 +158,8 @@ const (
CouldNotFetchLifecycleContainerInfo = "couldn't fetch lifecycle container info"
BucketLifecycleNodeHasMultipleIDs = "bucket lifecycle node has multiple ids"
GetBucketLifecycle = "get bucket lifecycle"
WarnDuplicateNamespaceVHS = "duplicate namespace with enabled VHS, config value skipped"
WarnValueVHSEnabledFlagWrongType = "the value of the VHS enable flag for the namespace is of the wrong type, config value skipped"
WarnDomainContainsInvalidPlaceholder = "the domain contains an invalid placeholder, domain skipped"
FailedToRemoveOldPartNode = "failed to remove old part node"
)


@ -33,7 +33,7 @@ type (
// Each method must return ErrNodeNotFound or ErrNodeAccessDenied if relevant.
ServiceClient interface {
GetNodes(ctx context.Context, p *GetNodesParams) ([]NodeResponse, error)
GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) ([]NodeResponse, error)
GetSubTree(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error)
GetSubTreeStream(ctx context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) (SubTreeStream, error)
AddNode(ctx context.Context, bktInfo *data.BucketInfo, treeID string, parent uint64, meta map[string]string) (uint64, error)
AddNodeByPath(ctx context.Context, bktInfo *data.BucketInfo, treeID string, path []string, meta map[string]string) (uint64, error)
@ -156,6 +156,10 @@ type NodeResponse interface {
}
func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
if err := validateNodeResponse(nodeInfo); err != nil {
return nil, err
}
tNode := &treeNode{
ID: nodeInfo.GetNodeID(),
ParentID: nodeInfo.GetParentID(),
@ -163,14 +167,6 @@ func newTreeNode(nodeInfo NodeResponse) (*treeNode, error) {
Meta: make(map[string]string, len(nodeInfo.GetMeta())),
}
if len(tNode.ID) == 0 || len(tNode.ParentID) == 0 || len(tNode.TimeStamp) == 0 {
return nil, errors.New("invalid tree node: missing id")
}
if len(tNode.ID) != len(tNode.ParentID) || len(tNode.ID) != len(tNode.TimeStamp) {
return nil, errors.New("invalid tree node: length multiple ids mismatch")
}
for _, kv := range nodeInfo.GetMeta() {
switch kv.GetKey() {
case oidKV:
@ -377,6 +373,10 @@ func newMultipartInfoFromTreeNode(log *zap.Logger, filePath string, treeNode *tr
}
func newMultipartInfo(log *zap.Logger, node NodeResponse) (*data.MultipartInfo, error) {
if err := validateNodeResponse(node); err != nil {
return nil, err
}
if len(node.GetNodeID()) != 1 {
return nil, errors.New("invalid multipart node: this is split node")
}
@ -426,10 +426,36 @@ func newMultipartInfo(log *zap.Logger, node NodeResponse) (*data.MultipartInfo,
return multipartInfo, nil
}
func newPartInfo(node NodeResponse) (*data.PartInfo, error) {
var err error
partInfo := &data.PartInfo{}
func validateNodeResponse(node NodeResponse) error {
ids := node.GetNodeID()
parentIDs := node.GetParentID()
timestamps := node.GetTimestamp()
if len(ids) == 0 || len(parentIDs) == 0 || len(timestamps) == 0 {
return errors.New("invalid node response: missing ids")
}
if len(ids) != len(parentIDs) || len(parentIDs) != len(timestamps) {
return errors.New("invalid node response: multiple ids length mismatch")
}
return nil
}
func newPartInfo(node NodeResponse) (*data.PartInfoExtended, error) {
if err := validateNodeResponse(node); err != nil {
return nil, err
}
if len(node.GetNodeID()) != 1 {
return nil, errors.New("invalid part node: this is split node")
}
partInfo := &data.PartInfoExtended{
Timestamp: node.GetTimestamp()[0],
}
var err error
for _, kv := range node.GetMeta() {
value := string(kv.GetValue())
switch kv.GetKey() {
@ -751,7 +777,7 @@ func (c *Tree) getTreeNode(ctx context.Context, bktInfo *data.BucketInfo, nodeID
}
func (c *Tree) getTreeNodes(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, keys ...string) (map[string]*treeNode, error) {
subtree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, []uint64{nodeID}, 2)
subtree, err := c.service.GetSubTree(ctx, bktInfo, versionTree, []uint64{nodeID}, 2, false)
if err != nil {
return nil, err
}
@ -1147,7 +1173,7 @@ func (c *Tree) getSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo,
return nil, "", err
}
subTree, err := c.service.GetSubTree(ctx, bktInfo, treeID, rootID, 2)
subTree, err := c.service.GetSubTree(ctx, bktInfo, treeID, rootID, 2, false)
if err != nil {
if errors.Is(err, layer.ErrNodeNotFound) {
return nil, "", nil
@ -1305,7 +1331,11 @@ func (c *Tree) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.Bu
}
func (c *Tree) getSubTreeMultipartUploads(ctx context.Context, bktInfo *data.BucketInfo, nodeID []uint64, parentFilePath string) ([]*data.MultipartInfo, error) {
subTree, err := c.service.GetSubTree(ctx, bktInfo, systemTree, nodeID, maxGetSubTreeDepth)
// Sorted getSubTree skips nodes that don't have a FileName attribute,
// so when we are interested only in multipart nodes we can set this flag
// (even though multiparts are sorted again in the layer above anyway)
// to drop their children (parts), which don't have FileName.
subTree, err := c.service.GetSubTree(ctx, bktInfo, systemTree, nodeID, maxGetSubTreeDepth, true)
if err != nil {
return nil, err
}
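The comment above is the reason the sort flag exists: an ordered GetSubTree can omit nodes that carry no FileName attribute, so ordering is requested only when the nodes of interest (multipart upload nodes) are known to have one. A toy model of that effect, making no assumptions about the real storage-node implementation beyond what the comment states:

package main

import (
	"fmt"
	"sort"
)

type fakeNode struct{ meta map[string]string }

// subTree imitates the behaviour described in the comment above:
// an ordered listing keeps only nodes that have a FileName attribute,
// while an unordered listing returns every node.
func subTree(nodes []fakeNode, ordered bool) []fakeNode {
	if !ordered {
		return nodes
	}
	out := make([]fakeNode, 0, len(nodes))
	for _, n := range nodes {
		if _, ok := n.meta["FileName"]; ok {
			out = append(out, n)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].meta["FileName"] < out[j].meta["FileName"] })
	return out
}

func main() {
	nodes := []fakeNode{
		{meta: map[string]string{"FileName": "upload"}}, // multipart upload node
		{meta: map[string]string{"Number": "1"}},        // part node, no FileName
	}
	fmt.Println(len(subTree(nodes, false)), len(subTree(nodes, true))) // 2 1
}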
@ -1393,10 +1423,10 @@ func (c *Tree) GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo,
return nil, layer.ErrNodeNotFound
}
func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2)
func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDsToDelete []oid.ID, err error) {
parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2, false)
if err != nil {
return oid.ID{}, err
return nil, err
}
meta := map[string]string{
@ -1408,48 +1438,76 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
md5KV: info.MD5,
}
objToDelete := make([]oid.ID, 0, 1)
partsToDelete := make([]uint64, 0, 1)
var (
latestPartID uint64
maxTimestamp uint64
)
multiNodeID := MultiID{multipartNodeID}
for _, part := range parts {
if len(part.GetNodeID()) != 1 {
// multipart parts nodeID shouldn't have multiple values
c.reqLogger(ctx).Warn(logs.UnexpectedMultiNodeIDsInSubTreeMultiParts,
zap.String("key", info.Key),
zap.String("upload id", info.UploadID),
zap.Uint64("multipart node id ", multipartNodeID),
zap.Uint64s("node ids", part.GetNodeID()))
continue
}
nodeID := part.GetNodeID()[0]
if nodeID == multipartNodeID {
if multiNodeID.Equal(part.GetNodeID()) {
continue
}
partInfo, err := newPartInfo(part)
if err != nil {
c.reqLogger(ctx).Warn(logs.FailedToParsePartInfo,
zap.String("key", info.Key),
zap.String("upload id", info.UploadID),
zap.Uint64("multipart node id ", multipartNodeID),
zap.Uint64s("id", part.GetNodeID()),
zap.Error(err))
continue
}
if partInfo.Number == info.Number {
return partInfo.OID, c.service.MoveNode(ctx, bktInfo, systemTree, nodeID, multipartNodeID, meta)
nodeID := part.GetNodeID()[0]
objToDelete = append(objToDelete, partInfo.OID)
partsToDelete = append(partsToDelete, nodeID)
timestamp := partInfo.Timestamp
if timestamp > maxTimestamp {
maxTimestamp = timestamp
latestPartID = nodeID
}
}
}
if _, err = c.service.AddNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
return oid.ID{}, err
if len(objToDelete) != 0 {
if err = c.service.MoveNode(ctx, bktInfo, systemTree, latestPartID, multipartNodeID, meta); err != nil {
return nil, fmt.Errorf("move part node: %w", err)
}
for _, nodeID := range partsToDelete {
if nodeID == latestPartID {
continue
}
if err = c.service.RemoveNode(ctx, bktInfo, systemTree, nodeID); err != nil {
c.reqLogger(ctx).Warn(logs.FailedToRemoveOldPartNode,
zap.String("key", info.Key),
zap.String("upload id", info.UploadID),
zap.Uint64("id", nodeID))
}
}
return objToDelete, nil
}
return oid.ID{}, layer.ErrNoNodeToRemove
if _, err = c.service.AddNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
return nil, err
}
return nil, layer.ErrNoNodeToRemove
}
func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfo, error) {
parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2)
func (c *Tree) GetParts(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) ([]*data.PartInfoExtended, error) {
parts, err := c.service.GetSubTree(ctx, bktInfo, systemTree, []uint64{multipartNodeID}, 2, false)
if err != nil {
return nil, err
}
result := make([]*data.PartInfo, 0, len(parts))
result := make([]*data.PartInfoExtended, 0, len(parts))
for _, part := range parts {
if len(part.GetNodeID()) != 1 {
// multipart parts nodeID shouldn't have multiple values


@ -234,7 +234,7 @@ func (c *ServiceClientMemory) GetNodes(_ context.Context, p *GetNodesParams) ([]
return res2, nil
}
func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32) ([]NodeResponse, error) {
func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.BucketInfo, treeID string, rootID []uint64, depth uint32, sort bool) ([]NodeResponse, error) {
cnr, ok := c.containers[bktInfo.CID.EncodeToString()]
if !ok {
return nil, nil
@ -254,6 +254,10 @@ func (c *ServiceClientMemory) GetSubTree(_ context.Context, bktInfo *data.Bucket
return nil, ErrNodeNotFound
}
if sort {
sortNode(tr.treeData)
}
// depth is uint32 and 0 is the marker for "get the whole subtree", so depth-1 deliberately overflows and becomes large enough to walk all tree levels
return node.listNodes(nil, depth-1), nil
}
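The depth comment above relies on deliberate unsigned wrap-around: with depth declared as uint32, the value 0 (the whole-subtree marker) makes depth-1 wrap to the maximum uint32, which is deep enough to cover any tree. A one-line check of that arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	var depth uint32 = 0
	// 0 - 1 wraps around for unsigned integers, so the "whole subtree"
	// marker turns into the deepest possible traversal.
	fmt.Println(depth-1 == math.MaxUint32) // true
}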


@ -7,6 +7,7 @@ import (
"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
cidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id/test"
oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
@ -304,3 +305,57 @@ func TestGetLatestNode(t *testing.T) {
})
}
}
func TestSplitTreeMultiparts(t *testing.T) {
ctx := context.Background()
memCli, err := NewTreeServiceClientMemory()
require.NoError(t, err)
treeService := NewTree(memCli, zaptest.NewLogger(t))
bktInfo := &data.BucketInfo{
CID: cidtest.ID(),
}
multipartInfo := &data.MultipartInfo{
Key: "multipart",
UploadID: "id",
Meta: map[string]string{},
Owner: usertest.ID(),
}
err = treeService.CreateMultipartUpload(ctx, bktInfo, multipartInfo)
require.NoError(t, err)
multipartInfo, err = treeService.GetMultipartUpload(ctx, bktInfo, multipartInfo.Key, multipartInfo.UploadID)
require.NoError(t, err)
var objIDs []oid.ID
for i := 0; i < 2; i++ {
objID := oidtest.ID()
_, err = memCli.AddNode(ctx, bktInfo, systemTree, multipartInfo.ID, map[string]string{
partNumberKV: "1",
oidKV: objID.EncodeToString(),
ownerKV: usertest.ID().EncodeToString(),
})
require.NoError(t, err)
objIDs = append(objIDs, objID)
}
parts, err := treeService.GetParts(ctx, bktInfo, multipartInfo.ID)
require.NoError(t, err)
require.Len(t, parts, 2)
objToDeletes, err := treeService.AddPart(ctx, bktInfo, multipartInfo.ID, &data.PartInfo{
Key: multipartInfo.Key,
UploadID: multipartInfo.UploadID,
Number: 1,
OID: oidtest.ID(),
})
require.NoError(t, err)
require.EqualValues(t, objIDs, objToDeletes, "oids to delete mismatched")
parts, err = treeService.GetParts(ctx, bktInfo, multipartInfo.ID)
require.NoError(t, err)
require.Len(t, parts, 1)
}